_concurrent_queue_internal.h

skipping to change at line 635
        size_t i = k/concurrent_queue_rep<T>::n_queue & (my_queue.my_rep->items_per_page-1);
        item = &micro_queue<T>::get_ref(*p,i);
        return (p->mask & uintptr_t(1)<<i)!=0;
    }
}
//! Constness-independent portion of concurrent_queue_iterator.
/** @ingroup containers */
template<typename Value>
class concurrent_queue_iterator_base_v3 : no_assign {
-   //! Concurrentconcurrent_queue over which we are iterating.
+   //! Represents concurrent_queue over which we are iterating.
    /** NULL if one past last element in queue. */
    concurrent_queue_iterator_rep<Value>* my_rep;
    template<typename C, typename T, typename U>
    friend bool operator==( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );
    template<typename C, typename T, typename U>
    friend bool operator!=( const concurrent_queue_iterator<C,T>& i, const concurrent_queue_iterator<C,U>& j );
protected:
    //! Pointer to current item
skipping to change at line 657
    //! Default constructor
    concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {
#if __GNUC__==4&&__GNUC_MINOR__==3
        // to get around a possible gcc 4.3 bug
        __asm__ __volatile__("": : :"memory");
#endif
    }
    //! Copy constructor
-   concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) : my_rep(NULL), my_item(NULL) {
+   concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i )
+   : no_assign(), my_rep(NULL), my_item(NULL) {
        assign(i);
    }
    //! Construct iterator pointing to head of queue.
    concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3<Value>& queue ) ;
    //! Assignment
    void assign( const concurrent_queue_iterator_base_v3<Value>& other ) ;
    //! Advance iterator one step towards tail of queue.
skipping to change at line 831 / 832
    //! Capacity of the queue
    ptrdiff_t my_capacity;
    //! Always a power of 2
    size_t items_per_page;
    //! Size of an item
    size_t item_size;
-#if __GNUC__==3&&__GNUC_MINOR__==3
+#if __TBB_GCC_3_3_PROTECTED_BROKEN
public:
-#endif /* __GNUC__==3&&__GNUC_MINOR__==3 */
+#endif
    template<typename T>
    struct padded_page: page {
        //! Not defined anywhere - exists to quiet warnings.
        padded_page();
        //! Not defined anywhere - exists to quiet warnings.
        void operator=( const padded_page& );
        //! Must be last field.
        T last;
    };
End of changes. 4 change blocks. 5 lines changed or deleted, 6 lines changed or added.
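For reference, a minimal usage sketch of the public interface these iterator classes back: in this version tbb::concurrent_queue exposes unsafe_begin()/unsafe_end() debug iterators built on concurrent_queue_iterator_base_v3. The sketch is not part of the diff and assumes the usual TBB installation; the iterators are only valid while no other thread is modifying the queue.

    #include <cstdio>
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_queue<int> q;
        for( int i = 0; i < 4; ++i )
            q.push(i);
        // Debug-only traversal: must not run concurrently with push/try_pop.
        for( tbb::concurrent_queue<int>::iterator it = q.unsafe_begin(); it != q.unsafe_end(); ++it )
            std::printf( "%d\n", *it );
        return 0;
    }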


_concurrent_unordered_internal.h

skipping to change at line 29
    As a special exception, you may use this file as part of a free software
    library without restriction. Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
+/* Container implementations in this header are based on PPL implementations
+   provided by Microsoft. */
#ifndef __TBB_concurrent_unordered_internal_H
#define __TBB_concurrent_unordered_internal_H
#include "tbb_stddef.h"
#if !TBB_USE_EXCEPTIONS && _MSC_VER
    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
    #pragma warning (push)
    #pragma warning (disable: 4530)
#endif
skipping to change at line 442 / 445
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }
    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
-           it++;
+           ++it;
        return iterator(it.get_node_ptr(), this);
    }
    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
-           it++;
+           ++it;
        return const_iterator(it.get_node_ptr(), this);
    }
    // Erase an element using the allocator
    void destroy_node(nodeptr_t pnode) {
        my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }
skipping to change at line 500 / 503
    }
    // Insert a new dummy element, starting search at a parent dummy element
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;
        __TBB_ASSERT(where != last, "Invalid head node");
-       where++;
+       ++where;
        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);
        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");
            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.

skipping to change at line 532 / 535
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as dummy node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
-                   where++;
+                   ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }
            // Move the iterator forward
            it = where;
-           where++;
+           ++where;
        }
    }
    // This erase function can handle both real and dummy nodes
    void erase_node(raw_iterator previous, raw_const_iterator& where)
    {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;
        destroy_node(pnode);
    }
    // Erase the element (previous node needs to be passed because this is a forward only list)
    iterator erase_node(raw_iterator previous, const_iterator where)
    {
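The dummy-node logic above is the split-ordering scheme: bucket boundaries are marked by dummy nodes, and the list is kept sorted by bit-reversed hash values, with the low bit distinguishing real elements (set) from dummies (clear), so each bucket's dummy sorts immediately before that bucket's real elements. A standalone sketch of that key arithmetic follows; reverse_bits, regular_key and dummy_key are illustrative stand-ins, not the header's internal helpers.

    #include <cstddef>
    #include <cstdio>

    typedef std::size_t sokey_t;

    // Stand-in for the library's bit-reversal helper.
    static sokey_t reverse_bits( sokey_t x ) {
        sokey_t r = 0;
        for( unsigned i = 0; i < 8*sizeof(sokey_t); ++i, x >>= 1 )
            r = (r << 1) | (x & 1);
        return r;
    }

    // Real elements keep the low bit set after reversal, dummy bucket markers keep it
    // clear, so a dummy orders just before every element that hashes into its bucket.
    static sokey_t regular_key( sokey_t hashcode ) { return reverse_bits(hashcode) | sokey_t(1); }
    static sokey_t dummy_key( sokey_t bucket )     { return reverse_bits(bucket) & ~sokey_t(1); }

    int main() {
        std::printf( "dummy(bucket 1) = %lx\n", (unsigned long)dummy_key(1) );
        std::printf( "regular(hash 5) = %lx\n", (unsigned long)regular_key(5) );
        return 0;
    }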
skipping to change at line 603 / 606
        }
        check_range();
    }
private:
    // Check the list for order violations
    void check_range()
    {
#if TBB_USE_ASSERT
-       for (raw_iterator it = raw_begin(); it != raw_end(); it++)
+       for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
        {
            raw_iterator next_iterator = it;
-           next_iterator++;
+           ++next_iterator;
            __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
        }
#endif
    }
    typename allocator_type::template rebind<node>::other my_node_allocator;  // allocator object for nodes
    size_type my_element_count;   // Total item count, not counting dummy nodes
    nodeptr_t my_head;            // pointer to head node
};
skipping to change at line 859 / 862
        return internal_insert(value);
    }
    iterator insert(const_iterator, const value_type& value) {
        // Ignore hint
        return insert(value).first;
    }
    template<class Iterator>
    void insert(Iterator first, Iterator last) {
-       for (Iterator it = first; it != last; it++)
+       for (Iterator it = first; it != last; ++it)
            insert(*it);
    }
    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }
    iterator unsafe_erase(const_iterator first, const_iterator last) {
        while (first != last)
            unsafe_erase(first++);
skipping to change at line 935 / 938
    }
    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }
    size_type unsafe_bucket_size(size_type bucket) {
        size_type item_count = 0;
        if (is_initialized(bucket)) {
            raw_iterator it = get_bucket(bucket);
-           it++;
-           for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); it++)
-               item_count++;
+           ++it;
+           for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
+               ++item_count;
        }
        return item_count;
    }
    size_type unsafe_bucket(const key_type& key) const {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        return bucket;
    }
skipping to change at line 977 / 980
    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    local_iterator unsafe_end(size_type bucket)
    {
        if (!is_initialized(bucket))
            return end();
        raw_iterator it = get_bucket(bucket);
        // Find the end of the bucket, denoted by the dummy element
-       do it++;
+       do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }
    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    const_local_iterator unsafe_end(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();
        raw_const_iterator it = get_bucket(bucket);
        // Find the end of the bucket, denoted by the dummy element
-       do it++;
+       do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }
    const_local_iterator unsafe_cbegin(size_type bucket) const {
        return ((const self_type *) this)->begin();
    }
skipping to change at line 1050 / 1053
    void internal_init() {
        // Allocate an array of segment pointers
        memset(my_buckets, 0, pointers_per_table * sizeof(void *));
        // Insert the first element in the split-ordered list
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }
    void internal_clear() {
-       for (size_type index = 0; index < pointers_per_table; index++) {
+       for (size_type index = 0; index < pointers_per_table; ++index) {
            if (my_buckets[index] != NULL) {
                size_type sz = segment_size(index);
-               for (size_type index2 = 0; index2 < sz; index2++)
+               for (size_type index2 = 0; index2 < sz; ++index2)
                    my_allocator.destroy(&my_buckets[index][index2]);
                my_allocator.deallocate(my_buckets[index], sz);
                my_buckets[index] = 0;
            }
        }
    }
    void internal_copy(const self_type& right) {
        clear();
skipping to change at line 1079 / 1082
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }
    void internal_swap_buckets(concurrent_unordered_base& right)
    {
        // Swap all node segments
-       for (size_type index = 0; index < pointers_per_table; index++)
+       for (size_type index = 0; index < pointers_per_table; ++index)
        {
            raw_iterator * iterator_pointer = my_buckets[index];
            my_buckets[index] = right.my_buckets[index];
            right.my_buckets[index] = iterator_pointer;
        }
    }
    // Hash APIs
    size_type internal_distance(const_iterator first, const_iterator last) const
    {
        size_type num = 0;
-       for (const_iterator it = first; it != last; it++)
-           num++;
+       for (const_iterator it = first; it != last; ++it)
+           ++num;
        return num;
    }
    // Insert an element in the hash given its value
    std::pair<iterator, bool> internal_insert(const value_type& value)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
        size_type bucket = order_key % my_number_of_buckets;
skipping to change at line 1117 / 1120
        size_type new_count;
        order_key = split_order_key_regular(order_key);
        raw_iterator it = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = it;
        __TBB_ASSERT(where != last, "Invalid head node");
        // First node is a dummy node
-       where++;
+       ++where;
        for (;;)
        {
            if (where == last || solist_t::get_order_key(where) > order_key)
            {
                // Try to insert it in the right place
                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);
                if (result.second)
                {
skipping to change at line 1140 / 1143
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
-                   where++;
+                   ++where;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
            {
                // Element already in the list, return it
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }
            // Move the iterator forward
            it = where;
-           where++;
+           ++where;
        }
    }
    // Find the element in the split-ordered list
    iterator internal_find(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);
        order_key = split_order_key_regular(order_key);
        raw_iterator last = my_solist.raw_end();
-       for (raw_iterator it = get_bucket(bucket); it != last; it++)
+       for (raw_iterator it = get_bucket(bucket); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // If the order key is smaller than the current order key, the element
                // is not in the hash.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
skipping to change at line 1210 / 1213
        order_key = split_order_key_regular(order_key);
        raw_iterator previous = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = previous;
        __TBB_ASSERT(where != last, "Invalid head node");
        // First node is a dummy node
-       where++;
+       ++where;
        for (;;) {
            if (where == last)
                return end();
            else if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);
            // Move the iterator forward
            previous = where;
-           where++;
+           ++where;
        }
    }
    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);
        order_key = split_order_key_regular(order_key);
        raw_iterator end_it = my_solist.raw_end();
-       for (raw_iterator it = get_bucket(bucket); it != end_it; it++)
+       for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;
-               for (;last != end() && !my_hash_compare(get_key(*last), key); last++);
+               while( last != end() && !my_hash_compare(get_key(*last), key) )
+                   ++last;
                return pairii_t(first, last);
            }
        }
        return pairii_t(end(), end());
    }
    // Return the [begin, end) pair of const iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    paircc_t internal_equal_range(const key_type& key) const
skipping to change at line 1273 / 1276
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            return paircc_t(end(), end());
        order_key = split_order_key_regular(order_key);
        raw_const_iterator end_it = my_solist.raw_end();
-       for (raw_const_iterator it = get_bucket(bucket); it != end_it; it++)
+       for (raw_const_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return paircc_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                const_iterator first = my_solist.get_iterator(it);
                const_iterator last = first;
-               for (; last != end() && !my_hash_compare(get_key(*last), key); last++);
+               while( last != end() && !my_hash_compare(get_key(*last), key) )
+                   ++last;
                return paircc_t(first, last);
            }
        }
        return paircc_t(end(), end());
    }
    // Bucket APIs
    void init_bucket(size_type bucket)
    {
skipping to change at line 1423 / 1426
    return static_cast<size_t>( t ) * internal::hash_multiplier;
}
template<typename P>
inline size_t tbb_hasher( P* ptr ) {
    size_t const h = reinterpret_cast<size_t>( ptr );
    return (h >> 3) ^ h;
}
template<typename E, typename S, typename A>
inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
    size_t h = 0;
-   for( const E* c = s.c_str(); *c; c++ )
+   for( const E* c = s.c_str(); *c; ++c )
        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
    return h;
}
template<typename F, typename S>
inline size_t tbb_hasher( const std::pair<F,S>& p ) {
    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
}
} // namespace interface5
using interface5::tbb_hasher;
} // namespace tbb
End of changes. 28 change blocks. 35 lines changed or deleted, 39 lines changed or added.
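For reference, a minimal sketch of the tbb_hasher overloads touched in the last hunk (string, pointer, arithmetic and std::pair). It is not part of the diff and assumes tbb_hasher is reachable through the public concurrent_unordered_map.h header, which includes this internal one.

    #include <cstdio>
    #include <string>
    #include <utility>
    #include "tbb/concurrent_unordered_map.h"

    int main() {
        std::pair<std::string,int> key( "alpha", 42 );
        unsigned long h1 = (unsigned long)tbb::tbb_hasher( key.first ); // string overload walks c_str()
        unsigned long h2 = (unsigned long)tbb::tbb_hasher( key );       // pair overload xors the two sub-hashes
        std::printf( "%lx %lx\n", h1, h2 );
        return 0;
    }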


combinable.h

skipping to change at line 32
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
#ifndef __TBB_combinable_H
#define __TBB_combinable_H
-#include "tbb/enumerable_thread_specific.h"
-#include "tbb/cache_aligned_allocator.h"
+#include "enumerable_thread_specific.h"
+#include "cache_aligned_allocator.h"
namespace tbb {
/** \name combinable
 **/
//@{
//! Thread-local storage with optional reduction
/** @ingroup containers */
template <typename T>
class combinable {
private:
End of changes. 1 change blocks. 2 lines changed or deleted, 2 lines changed or added.
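The include change above only affects how combinable.h locates its dependencies; the public interface is unchanged. A minimal usage sketch (not from the diff), reducing per-thread partial sums with combine():

    #include <cstdio>
    #include <functional>
    #include "tbb/combinable.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    static long zero() { return 0; }            // initializer for each thread-local copy

    struct accumulate_squares {
        tbb::combinable<long>& sums;
        accumulate_squares( tbb::combinable<long>& s ) : sums(s) {}
        void operator()( const tbb::blocked_range<int>& r ) const {
            long& local = sums.local();         // per-thread slot, created lazily
            for( int i = r.begin(); i != r.end(); ++i )
                local += long(i) * i;
        }
    };

    int main() {
        tbb::combinable<long> sums( zero );
        tbb::parallel_for( tbb::blocked_range<int>(0, 1000), accumulate_squares(sums) );
        std::printf( "%ld\n", sums.combine( std::plus<long>() ) );   // serial reduction
        return 0;
    }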


concurrent_hash_map.h

skipping to change at line 278
            return check_rehashing_collision( h, m_old, m = m_now );
        return false;
    }
    //! Process mask race, check for rehashing collision
    bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
        __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
        if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
            // condition above proves that 'h' has some other bits set beside 'm_old'
            // find next applicable mask after m_old    //TODO: look at bsl instruction
-           for( ++m_old; !(h & m_old); m_old <<= 1 ); // at maximum few rounds depending on the first block size
+           for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
+               ;
            m_old = (m_old<<1) - 1; // get full mask from a bit
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
            // check whether it is rehashing/ed
#if TBB_USE_THREADING_TOOLS
            if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req )
#else
            if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req )
#endif
                return true;
        }
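The only change in the hunk above moves the empty for-loop body onto its own line to silence a compiler warning; the loop itself locates the segment mask that now covers the hash. A standalone worked example of that computation follows; next_mask is an illustrative name, not a TBB function.

    #include <cstdio>

    typedef unsigned long hashcode_t;

    // Given hash h and an old mask m_old that no longer covers it, scan upward for the
    // first set bit of h above m_old, then widen that bit into a full low mask -- the
    // same steps as the loop in check_rehashing_collision.
    static hashcode_t next_mask( hashcode_t h, hashcode_t m_old ) {
        for( ++m_old; !(h & m_old); m_old <<= 1 )
            ;                           // walk single-bit candidates upward
        return (m_old << 1) - 1;        // expand the found bit into a full mask
    }

    int main() {
        // h = 0b101100 with old mask 0b0111: the next set bit of h is 0b1000,
        // so the bucket now lives under mask 0b1111.
        std::printf( "%lx\n", next_mask( 0x2C, 0x7 ) );   // prints "f"
        return 0;
    }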
skipping to change at line 760 / 761
    }
    //! Return pointer to associated value in hash table.
    pointer operator->() const {
        return &operator*();
    }
};
//! Construct empty table.
concurrent_hash_map(const allocator_type &a = allocator_type())
-   : my_allocator(a)
+   : internal::hash_map_base(), my_allocator(a)
{}
//! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
    : my_allocator(a)
{
    reserve( n );
}
//! Copy constructor
concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
-   : my_allocator(a)
+   : internal::hash_map_base(), my_allocator(a)
{
    internal_copy(table);
}
//! Construction with copying iteration range and given allocator instance
template<typename I>
concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
    : my_allocator(a)
{
    reserve( std::distance(first, last) ); // TODO: load_factor?
End of changes. 3 change blocks. 4 lines changed or deleted, 5 lines changed or added.
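The added base-class initializers above are the kind of change that quiets GCC's -Wextra warning about a base class not being explicitly initialized; behaviour is unchanged. For reference, a minimal usage sketch of these constructors with the accessor interface (not from the diff; uses the default hash compare for int keys):

    #include <cstdio>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<int,int> table_t;

    int main() {
        table_t table( 16 );                   // preallocated buckets / initial concurrency level
        {
            table_t::accessor a;               // write lock on the element
            table.insert( a, 42 );             // creates the entry if absent
            a->second = 1;
        }                                      // lock released when the accessor goes out of scope
        table_t::const_accessor ca;            // read lock
        if( table.find( ca, 42 ) )
            std::printf( "%d\n", ca->second );
        return 0;
    }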


concurrent_unordered_map.h

skipping to change at line 29
    As a special exception, you may use this file as part of a free software
    library without restriction. Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
+/* Container implementations in this header are based on PPL implementations
+   provided by Microsoft. */
#ifndef __TBB_concurrent_unordered_map_H
#define __TBB_concurrent_unordered_map_H
#include "_concurrent_unordered_internal.h"
namespace tbb
{
// Template class for hash compare
template<typename Key>
End of changes. 1 change blocks. 0 lines changed or deleted, 4 lines changed or added.
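For reference, a minimal usage sketch of the public container declared in this header (not from the diff). insert() and find() may be called concurrently; the unsafe_ operations may not.

    #include <cstdio>
    #include <string>
    #include <utility>
    #include "tbb/concurrent_unordered_map.h"

    typedef tbb::concurrent_unordered_map<std::string,int> map_t;

    int main() {
        map_t m;
        m.insert( std::make_pair( std::string("alpha"), 1 ) );
        m["beta"] = 2;                               // operator[] inserts a default value if absent
        map_t::iterator it = m.find( "alpha" );
        if( it != m.end() )
            std::printf( "%s -> %d\n", it->first.c_str(), it->second );
        std::printf( "%u buckets\n", (unsigned)m.unsafe_bucket_count() );
        return 0;
    }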


concurrent_vector.h

skipping to change at line 508
    //------------------------------------------------------------------------
    typedef generic_range_type<iterator> range_type;
    typedef generic_range_type<const_iterator> const_range_type;
    //------------------------------------------------------------------------
    // STL compatible constructors & destructors
    //------------------------------------------------------------------------
    //! Construct empty vector.
    explicit concurrent_vector(const allocator_type &a = allocator_type())
-       : internal::allocator_base<T, A>(a)
+       : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
    }
    //! Copying constructor
    concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() )
-       : internal::allocator_base<T, A>(a)
+       : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_copy(vector, sizeof(T), &copy_array);
        } __TBB_CATCH(...) {
            segment_t *table = my_segment;
            internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
            __TBB_RETHROW();
        }
    }
    //! Copying constructor for vector with different allocator type
    template<class M>
    concurrent_vector( const concurrent_vector<T, M>& vector, const allocator_type& a = allocator_type() )
-       : internal::allocator_base<T, A>(a)
+       : internal::allocator_base<T, A>(a), internal::concurrent_vector_base()
    {
        vector_allocator_ptr = &internal_allocator;
        __TBB_TRY {
            internal_copy(vector.internal_vector_base(), sizeof(T), &copy_array);
        } __TBB_CATCH(...) {
            segment_t *table = my_segment;
            internal_free_segments( reinterpret_cast<void**>(table), internal_clear(&destroy_array), my_first_block );
            __TBB_RETHROW();
        }
    }
End of changes. 3 change blocks. 3 lines changed or deleted, 3 lines changed or added.
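As with concurrent_hash_map above, the explicit base initializer appears only to quiet a GCC -Wextra warning. A minimal usage sketch of the constructors involved (not from the diff):

    #include <cstdio>
    #include "tbb/concurrent_vector.h"

    int main() {
        tbb::concurrent_vector<int> v;             // default constructor from the first change block
        for( int i = 0; i < 8; ++i )
            v.push_back( i );                      // safe to call from multiple threads
        tbb::concurrent_vector<int> copy( v );     // copying constructor from the second change block
        std::printf( "%u %d\n", (unsigned)copy.size(), copy[3] );
        return 0;
    }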


enumerable_thread_specific.h

skipping to change at line 34
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
#ifndef __TBB_enumerable_thread_specific_H
#define __TBB_enumerable_thread_specific_H
#include "concurrent_vector.h"
#include "tbb_thread.h"
-#include "concurrent_hash_map.h"
#include "cache_aligned_allocator.h"
#if __SUNPRO_CC
#include <string.h>  // for memcpy
#endif
#if _WIN32||_WIN64
#include <windows.h>
#else
#include <pthread.h>
#endif
namespace tbb {
//! enum for selecting between single key and key-per-instance versions
enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
+namespace interface5 {
    //! @cond
    namespace internal {
+       template<ets_key_usage_type ETS_key_type>
+       class ets_base: tbb::internal::no_copy {
+       protected:
+#if _WIN32||_WIN64
+           typedef DWORD key_type;
+#else
+           typedef pthread_t key_type;
+#endif
+#if __TBB_GCC_3_3_PROTECTED_BROKEN
+       public:
+#endif
+           struct slot;
+           struct array {
+               array* next;
+               size_t lg_size;
+               slot& at( size_t k ) {
+                   return ((slot*)(void*)(this+1))[k];
+               }
+               size_t size() const {return (size_t)1<<lg_size;}
+               size_t mask() const {return size()-1;}
+               size_t start( size_t h ) const {
+                   return h>>(8*sizeof(size_t)-lg_size);
+               }
+           };
+           struct slot {
+               key_type key;
+               void* ptr;
+               bool empty() const {return !key;}
+               bool match( key_type k ) const {return key==k;}
+               bool claim( key_type k ) {
+                   __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
+                   __TBB_ASSERT(sizeof(void*)==sizeof(tbb::atomic<key_type>*), NULL);
+                   union { void* space; tbb::atomic<key_type>* key_atomic; } helper;
+                   helper.space = &key;
+                   return helper.key_atomic->compare_and_swap(k,0)==0;
+               }
+           };
+#if __TBB_GCC_3_3_PROTECTED_BROKEN
+       protected:
+#endif
+           static key_type key_of_current_thread() {
+               tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
+               key_type k;
+               memcpy( &k, &id, sizeof(k) );
+               return k;
+           }
+           //! Root of linked list of arrays of decreasing size.
+           /** NULL if and only if my_count==0.
+               Each array in the list is half the size of its predecessor. */
+           atomic<array*> my_root;
+           atomic<size_t> my_count;
+           virtual void* create_local() = 0;
+           virtual void* create_array(size_t _size) = 0;  // _size in bytes
+           virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
+           array* allocate( size_t lg_size ) {
+               size_t n = 1<<lg_size;
+               array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
+               a->lg_size = lg_size;
+               std::memset( a+1, 0, n*sizeof(slot) );
+               return a;
+           }
+           void free(array* a) {
+               size_t n = 1<<(a->lg_size);
+               free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
+           }
+           static size_t hash( key_type k ) {
+               // Multiplicative hashing. Client should use *upper* bits.
+               // casts required for Mac gcc4.* compiler
+#if __TBB_WORDSIZE == 4
+               return uintptr_t(k)*0x9E3779B9;
+#else
+               return uintptr_t(k)*0x9E3779B97F4A7C15;
+#endif
+           }
+           ets_base() {my_root=NULL; my_count=0;}
+           virtual ~ets_base();  // g++ complains if this is not virtual...
+           void* table_lookup( bool& exists );
+           void table_clear();
+           slot& table_find( key_type k ) {
+               size_t h = hash(k);
+               array* r = my_root;
+               size_t mask = r->mask();
+               for(size_t i = r->start(h);;i=(i+1)&mask) {
+                   slot& s = r->at(i);
+                   if( s.empty() || s.match(k) )
+                       return s;
+               }
+           }
+           void table_reserve_for_copy( const ets_base& other ) {
+               __TBB_ASSERT(!my_root,NULL);
+               __TBB_ASSERT(!my_count,NULL);
+               if( other.my_root ) {
+                   array* a = allocate(other.my_root->lg_size);
+                   a->next = NULL;
+                   my_root = a;
+                   my_count = other.my_count;
+               }
+           }
+       };
+       template<ets_key_usage_type ETS_key_type>
+       ets_base<ETS_key_type>::~ets_base() {
+           __TBB_ASSERT(!my_root, NULL);
+       }
+       template<ets_key_usage_type ETS_key_type>
+       void ets_base<ETS_key_type>::table_clear() {
+           while( array* r = my_root ) {
+               my_root = r->next;
+               free(r);
+           }
+           my_count = 0;
+       }
+       template<ets_key_usage_type ETS_key_type>
+       void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
+           const key_type k = key_of_current_thread();
+           __TBB_ASSERT(k!=0,NULL);
+           void* found;
+           size_t h = hash(k);
+           for( array* r=my_root; r; r=r->next ) {
+               size_t mask=r->mask();
+               for(size_t i = r->start(h); ;i=(i+1)&mask) {
+                   slot& s = r->at(i);
+                   if( s.empty() ) break;
+                   if( s.match(k) ) {
+                       if( r==my_root ) {
+                           // Success at top level
+                           exists = true;
+                           return s.ptr;
+                       } else {
+                           // Success at some other level. Need to insert at top level.
+                           exists = true;
+                           found = s.ptr;
+                           goto insert;
+                       }
+                   }
+               }
+           }
+           // Key does not yet exist
+           exists = false;
+           found = create_local();
+           {
+               size_t c = ++my_count;
+               array* r = my_root;
+               if( !r || c>r->size()/2 ) {
+                   size_t s = r ? r->lg_size : 2;
+                   while( c>size_t(1)<<(s-1) ) ++s;
+                   array* a = allocate(s);
+                   for(;;) {
+                       a->next = my_root;
+                       array* new_r = my_root.compare_and_swap(a,r);
+                       if( new_r==r ) break;
+                       if( new_r->lg_size>=s ) {
+                           // Another thread inserted an equal or bigger array, so our array is superfluous.
+                           free(a);
+                           break;
+                       }
+                       r = new_r;
+                   }
+               }
+           }
+       insert:
+           // Guaranteed to be room for it, and it is not present, so search for empty slot and grab it.
+           array* ir = my_root;
+           size_t mask = ir->mask();
+           for(size_t i = ir->start(h);;i=(i+1)&mask) {
+               slot& s = ir->at(i);
+               if( s.empty() ) {
+                   if( s.claim(k) ) {
+                       s.ptr = found;
+                       return found;
+                   }
+               }
+           }
+       };
+       //! Specialization that exploits native TLS
+       template <>
+       class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
+           typedef ets_base<ets_no_key> super;
+#if _WIN32||_WIN64
+           typedef DWORD tls_key_t;
+           void create_key() { my_key = TlsAlloc(); }
+           void destroy_key() { TlsFree(my_key); }
+           void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
+           void* get_tls() { return (void *)TlsGetValue(my_key); }
+#else
+           typedef pthread_key_t tls_key_t;
+           void create_key() { pthread_key_create(&my_key, NULL); }
+           void destroy_key() { pthread_key_delete(my_key); }
+           void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
+           void* get_tls() const { return pthread_getspecific(my_key); }
+#endif
+           tls_key_t my_key;
+           virtual void* create_local() = 0;
+           virtual void* create_array(size_t _size) = 0;  // _size in bytes
+           virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes
+       public:
+           ets_base() {create_key();}
+           ~ets_base() {destroy_key();}
+           void* table_lookup( bool& exists ) {
+               void* found = get_tls();
+               if( found ) {
+                   exists=true;
+               } else {
+                   found = super::table_lookup(exists);
+                   set_tls(found);
+               }
+               return found;
+           }
+           void table_clear() {
+               destroy_key();
+               create_key();
+               super::table_clear();
+           }
+       };
        //! Random access iterator for traversing the thread local copies.
        template< typename Container, typename Value >
        class enumerable_thread_specific_iterator
#if defined(_WIN64) && defined(_MSC_VER)
            // Ensure that Microsoft's internal template function _Val_type works correctly.
            : public std::iterator<std::random_access_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
        {
            //! current position in the concurrent_vector
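The ets_base class added above replaces the old concurrent_hash_map-based thread lookup with a lock-free, open-addressed table keyed by thread id (multiplicative hashing, linear probing, arrays that only grow). A minimal usage sketch of the container that sits on top of it (not from the diff; uses the exemplar constructor so every thread-local copy starts at zero):

    #include <cstdio>
    #include <functional>
    #include "tbb/enumerable_thread_specific.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    typedef tbb::enumerable_thread_specific<long> counters_t;
    static counters_t counters( 0L );            // exemplar: each thread's copy is initialized to 0

    struct count_evens {
        void operator()( const tbb::blocked_range<int>& r ) const {
            long& local = counters.local();      // looks up (or creates) this thread's slot
            for( int i = r.begin(); i != r.end(); ++i )
                if( i % 2 == 0 )
                    ++local;
        }
    };

    int main() {
        tbb::parallel_for( tbb::blocked_range<int>(0, 1000), count_evens() );
        std::printf( "%ld\n", counters.combine( std::plus<long>() ) );  // serial reduction over all copies
        return 0;
    }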
skipping to change at line 357 / 581
                return i.inner_iter == j.inner_iter;
            }
            // !=
            template<typename SegmentedContainer, typename T, typename U>
            bool operator!=( const segmented_iterator<SegmentedContainer,T>& i,
                             const segmented_iterator<SegmentedContainer,U>& j ) {
                return !(i==j);
            }
-           // empty template for following specializations
-           template<ets_key_usage_type et>
-           struct tls_manager {};
-           //! Struct that doesn't use a key
-           template <>
-           struct tls_manager<ets_no_key> {
-               typedef size_t tls_key_t;
-               static inline void create_key( tls_key_t &) { }
-               static inline void destroy_key( tls_key_t & ) { }
-               static inline void set_tls( tls_key_t &, void * ) { }
-               static inline void * get_tls( tls_key_t & ) { return (size_t)0; }
-           };
-           //! Struct to use native TLS support directly
-           template <>
-           struct tls_manager <ets_key_per_instance> {
-#if _WIN32||_WIN64
-               typedef DWORD tls_key_t;
-               static inline void create_key( tls_key_t &k) { k = TlsAlloc(); }
-               static inline void destroy_key( tls_key_t &k) { TlsFree(k); }
-               static inline void set_tls( tls_key_t &k, void * value) { TlsSetValue(k, (LPVOID)value); }
-               static inline void * get_tls( tls_key_t &k ) { return (void *)TlsGetValue(k); }
-#else
-               typedef pthread_key_t tls_key_t;
-               static inline void create_key( tls_key_t &k) { pthread_key_create(&k, NULL); }
-               static inline void destroy_key( tls_key_t &k) { pthread_key_delete(k); }
-               static inline void set_tls( tls_key_t &k, void * value) { pthread_setspecific(k, value); }
-               static inline void * get_tls( tls_key_t &k ) { return pthread_getspecific(k); }
-#endif
-           };
-           class thread_hash_compare {
-           public:
-               // using hack suggested by Arch to get value for thread id for hashing...
-#if _WIN32||_WIN64
-               typedef DWORD thread_key;
-#else
-               typedef pthread_t thread_key;
-#endif
-               static thread_key my_thread_key(const tbb_thread::id j) {
-                   thread_key key_val;
-                   memcpy(&key_val, &j, sizeof(thread_key));
-                   return key_val;
-               }
-               bool equal( const thread_key j, const thread_key k) const {
-                   return j == k;
-               }
-               unsigned long hash(const thread_key k) const {
-                   return (unsigned long)k;
-               }
-           };
            // storage for initialization function pointer
            template<typename T>
            struct callback_base {
                virtual T apply( ) = 0;
                virtual void destroy( ) = 0;
                // need to be able to create copies of callback_base for copy constructor
                virtual callback_base* make_copy() = 0;
                // need virtual destructor to satisfy GCC compiler warning
                virtual ~callback_base() { }
            };
            template <typename T, typename Functor>
-           struct callback_leaf : public callback_base<T> {
+           struct callback_leaf : public callback_base<T>, public tbb::internal::no_copy {
                typedef Functor my_callback_type;
                typedef callback_leaf<T,Functor> my_type;
                typedef my_type* callback_pointer;
                typedef typename tbb::tbb_allocator<my_type> my_allocator_type;
                Functor f;
                callback_leaf( const Functor& f_) : f(f_) {
                }
                static callback_pointer new_callback(const Functor& f_ ) {
                    void* new_void = my_allocator_type().allocate(1);
skipping to change at line 450 / 620
                }
                /* override */ void destroy( ) {
                    callback_pointer my_ptr = this;
                    my_allocator_type().destroy(my_ptr);
                    my_allocator_type().deallocate(my_ptr,1);
                }
                /* override */ T apply() { return f(); }  // does copy construction of returned value.
            };
-           template<typename Key, typename T, typename HC, typename A>
-           class ets_concurrent_hash_map : public tbb::concurrent_hash_map<Key, T, HC, A> {
-           public:
-               typedef tbb::concurrent_hash_map<Key, T, HC, A> base_type;
-               typedef typename base_type::const_pointer const_pointer;
-               typedef typename base_type::key_type key_type;
-               const_pointer find( const key_type &k ) {
-                   return this->internal_fast_find( k );
-               }  // make public
-           };
            //! Template for adding padding in order to avoid false sharing
            /** ModularSize should be sizeof(U) modulo the cache line size.
                All maintenance of the space will be done explicitly on push_back,
                and all thread local copies must be destroyed before the concurrent
                vector is deleted.
            */
            template<typename U, size_t ModularSize>
            struct ets_element {
-               char value[sizeof(U) + NFS_MaxLineSize-ModularSize];
+               char value[sizeof(U) + tbb::internal::NFS_MaxLineSize-ModularSize];
                void unconstruct() {
                    // "reinterpret_cast<U*>(&value)->~U();" causes type-punning warning with gcc 4.4,
                    // "U* u = reinterpret_cast<U*>(&value); u->~U();" causes unused variable warning with VS2010.
                    // Thus another "casting via union" hack.
+                   __TBB_ASSERT(sizeof(void*)==sizeof(U*),NULL);
                    union { void* space; U* val; } helper;
                    helper.space = &value;
                    helper.val->~U();
                }
            };
            //! Partial specialization for case where no padding is needed.
            template<typename U>
            struct ets_element<U,0> {
                char value[sizeof(U)];
                void unconstruct() {  // Same implementation as in general case
+                   __TBB_ASSERT(sizeof(void*)==sizeof(U*),NULL);
                    union { void* space; U* val; } helper;
                    helper.space = &value;
                    helper.val->~U();
                }
            };
        }  // namespace internal
        //! @endcond
        //! The enumerable_thread_specific container
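ets_element pads each stored T out to a whole number of cache lines so that neighbouring thread-local copies held in the concurrent_vector cannot false-share; the partial specialization covers the case where sizeof(U) is already an exact multiple. A standalone sketch of the same layout arithmetic follows; cache_line is an assumed stand-in for tbb::internal::NFS_MaxLineSize, whose real value is implementation-defined.

    #include <cstddef>
    #include <cstdio>

    static const std::size_t cache_line = 128;   // assumed value, for illustration only

    // Same trick as ets_element<U, sizeof(U) % cache_line>: round the payload up to the
    // next cache-line boundary so adjacent elements never share a line.
    template<typename U, std::size_t ModularSize>
    struct padded {
        char value[ sizeof(U) + cache_line - ModularSize ];
    };
    template<typename U>
    struct padded<U,0> {                         // already a multiple: no extra padding
        char value[ sizeof(U) ];
    };

    int main() {
        std::printf( "%u\n", (unsigned)sizeof( padded<double, sizeof(double) % cache_line> ) );  // prints 128
        return 0;
    }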
skipping to change at line 517 skipping to change at line 678
@par combine and combine_each @par combine and combine_each
- Both methods are defined for enumerable_thread_specific. - Both methods are defined for enumerable_thread_specific.
- combine() requires the the type T have operator=() defined. - combine() requires the the type T have operator=() defined.
- neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.) - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.)
- Both are evaluated in serial context (the methods are assumed to be non-benign.) - Both are evaluated in serial context (the methods are assumed to be non-benign.)
@ingroup containers */ @ingroup containers */
template <typename T, template <typename T,
typename Allocator=cache_aligned_allocator<T>, typename Allocator=cache_aligned_allocator<T>,
ets_key_usage_type ETS_key_type=ets_no_key > ets_key_usage_type ETS_key_type=ets_no_key >
class enumerable_thread_specific { class enumerable_thread_specific: internal::ets_base<ETS_key_type> {
template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific; template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
typedef internal::tls_manager< ETS_key_type > my_tls_manager; typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;
typedef internal::ets_element<T,sizeof(T)%internal::NFS_MaxLineSize> padded_element;
//! A generic range, used to create range objects from the iterator s //! A generic range, used to create range objects from the iterator s
template<typename I> template<typename I>
class generic_range_type: public blocked_range<I> { class generic_range_type: public blocked_range<I> {
public: public:
typedef T value_type; typedef T value_type;
typedef T& reference; typedef T& reference;
typedef const T& const_reference; typedef const T& const_reference;
typedef I iterator; typedef I iterator;
typedef ptrdiff_t difference_type; typedef ptrdiff_t difference_type;
generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {} generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {}
template<typename U> template<typename U>
generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {}
generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {} generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
}; };
typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type; typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type; typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
typedef typename internal_collection_type::size_type hash_table_index_type; // storing array indices rather than iterators to simplify
// copying the hash table that correlates thread IDs with concurrent vector elements.
typedef typename Allocator::template rebind< std::pair< typename internal::thread_hash_compare::thread_key, hash_table_index_type > >::other hash_element_allocator;
typedef internal::ets_concurrent_hash_map< typename internal::thread_hash_compare::thread_key, hash_table_index_type, internal::thread_hash_compare, hash_element_allocator > thread_to_index_type;
typename my_tls_manager::tls_key_t my_key;
void reset_key() {
my_tls_manager::destroy_key(my_key);
my_tls_manager::create_key(my_key);
}
internal::callback_base<T> *my_finit_callback; internal::callback_base<T> *my_finit_callback;
// need to use a pointed-to exemplar because T may not be assignable. // need to use a pointed-to exemplar because T may not be assignable.
// using tbb_allocator instead of padded_element_allocator because we may be // using tbb_allocator instead of padded_element_allocator because we may be
// copying an exemplar from one instantiation of ETS to another with a different // copying an exemplar from one instantiation of ETS to another with a different
// allocator. // allocator.
typedef typename tbb::tbb_allocator<padded_element > exemplar_allocator_type; typedef typename tbb::tbb_allocator<padded_element > exemplar_allocator_type;
static padded_element * create_exemplar(const T& my_value) { static padded_element * create_exemplar(const T& my_value) {
padded_element *new_exemplar = reinterpret_cast<padded_element *>(exemplar_allocator_type().allocate(1)); padded_element *new_exemplar = reinterpret_cast<padded_element *>(exemplar_allocator_type().allocate(1));
skipping to change at line 583 skipping to change at line 730
static void free_exemplar(padded_element *my_ptr) { static void free_exemplar(padded_element *my_ptr) {
my_ptr->unconstruct(); my_ptr->unconstruct();
exemplar_allocator_type().destroy(my_ptr); exemplar_allocator_type().destroy(my_ptr);
exemplar_allocator_type().deallocate(my_ptr,1); exemplar_allocator_type().deallocate(my_ptr,1);
} }
padded_element* my_exemplar_ptr; padded_element* my_exemplar_ptr;
internal_collection_type my_locals; internal_collection_type my_locals;
thread_to_index_type my_hash_tbl;
/*override*/ void* create_local() {
#if TBB_DEPRECATED
void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
void* lref = &*my_locals.push_back(padded_element());
#endif
if(my_finit_callback) {
new(lref) T(my_finit_callback->apply());
} else if(my_exemplar_ptr) {
pointer t_exemp = reinterpret_cast<T *>(&(my_exemplar_ptr->value));
new(lref) T(*t_exemp);
} else {
new(lref) T();
}
return lref;
}
void unconstruct_locals() {
for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
cvi->unconstruct();
}
}
typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;
// _size is in bytes
/*override*/ void* create_array(size_t _size) {
size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
return array_allocator_type().allocate(nelements);
}
/*override*/ void free_array( void* _ptr, size_t _size) {
size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
}
public: public:
//! Basic types //! Basic types
typedef Allocator allocator_type; typedef Allocator allocator_type;
typedef T value_type; typedef T value_type;
typedef T& reference; typedef T& reference;
typedef const T& const_reference; typedef const T& const_reference;
typedef T* pointer; typedef T* pointer;
typedef const T* const_pointer; typedef const T* const_pointer;
skipping to change at line 608 skipping to change at line 790
typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator; typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator; typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;
// Parallel range types // Parallel range types
typedef generic_range_type< iterator > range_type; typedef generic_range_type< iterator > range_type;
typedef generic_range_type< const_iterator > const_range_type; typedef generic_range_type< const_iterator > const_range_type;
//! Default constructor, which leads to default construction of local copies //! Default constructor, which leads to default construction of local copies
enumerable_thread_specific() : my_finit_callback(0) { enumerable_thread_specific() : my_finit_callback(0) {
my_exemplar_ptr = 0; my_exemplar_ptr = 0;
my_tls_manager::create_key(my_key);
} }
//! construction with initializer method //! construction with initializer method
// Finit should be a function taking 0 parameters and returning a T // Finit should be a function taking 0 parameters and returning a T
template <typename Finit> template <typename Finit>
enumerable_thread_specific( Finit _finit ) enumerable_thread_specific( Finit _finit )
{ {
my_finit_callback = internal::callback_leaf<T,Finit>::new_callb ack( _finit ); my_finit_callback = internal::callback_leaf<T,Finit>::new_callb ack( _finit );
my_exemplar_ptr = 0; // don't need exemplar if function is prov ided my_exemplar_ptr = 0; // don't need exemplar if function is prov ided
my_tls_manager::create_key(my_key);
} }
//! Construction with exemplar, which leads to copy construction of local copies //! Construction with exemplar, which leads to copy construction of local copies
enumerable_thread_specific(const T &_exemplar) : my_finit_callback( 0) { enumerable_thread_specific(const T &_exemplar) : my_finit_callback( 0) {
my_exemplar_ptr = create_exemplar(_exemplar); my_exemplar_ptr = create_exemplar(_exemplar);
my_tls_manager::create_key(my_key);
} }
//! Destructor //! Destructor
~enumerable_thread_specific() { ~enumerable_thread_specific() {
unconstruct_locals();
my_tls_manager::destroy_key(my_key);
if(my_finit_callback) { if(my_finit_callback) {
my_finit_callback->destroy(); my_finit_callback->destroy();
} }
if(my_exemplar_ptr) if(my_exemplar_ptr) {
{
free_exemplar(my_exemplar_ptr); free_exemplar(my_exemplar_ptr);
} }
this->clear(); // deallocation before the derived class is finished destructing
// So free(array *) is still accessible
} }
//! Returns reference to calling thread's local copy, discarding the exists flag //! Returns reference to calling thread's local copy, discarding the exists flag
reference local() { reference local() {
bool exists; bool exists;
return local(exists); return local(exists);
} }
//! Returns reference to calling thread's local copy, creating one if necessary //! Returns reference to calling thread's local copy, creating one if necessary
reference local(bool& exists) { reference local(bool& exists) {
if ( pointer local_ptr = static_cast<pointer>(my_tls_manager::get_tls(my_key)) ) { __TBB_ASSERT(ETS_key_type==ets_no_key,"ets_key_per_instance not yet implemented");
exists = true; void* ptr = this->table_lookup(exists);
return *local_ptr; return *(T*)ptr;
} }
hash_table_index_type local_index;
typename internal::thread_hash_compare::thread_key my_t_key = internal::thread_hash_compare::my_thread_key(tbb::this_tbb_thread::get_id());
{
typename thread_to_index_type::const_pointer my_existing_entry;
my_existing_entry = my_hash_tbl.find(my_t_key);
if(my_existing_entry) {
exists = true;
local_index = my_existing_entry->second;
}
else {
// see if the table entry can be found by accessor
typename thread_to_index_type::accessor a;
if(!my_hash_tbl.insert(a, my_t_key)) {
exists = true;
local_index = a->second;
}
else {
// create new entry
exists = false;
#if TBB_DEPRECATED
local_index = my_locals.push_back(padded_element());
#else
local_index = my_locals.push_back(padded_element()) - my_locals.begin();
#endif
pointer lref = reinterpret_cast<T*>((my_locals[local_index].value));
if(my_finit_callback) {
new(lref) T(my_finit_callback->apply());
}
else if(my_exemplar_ptr) {
pointer t_exemp = reinterpret_cast<T *>(&(my_exemplar_ptr->value));
new(lref) T(*t_exemp);
}
else {
new(lref) T();
}
// insert into hash table
a->second = local_index;
}
}
}
pointer local_ref = reinterpret_cast<T*>((my_locals[local_index].value));
my_tls_manager::set_tls( my_key, static_cast<void *>(local_ref) );
return *local_ref;
} // local
//! Get the number of local copies //! Get the number of local copies
size_type size() const { return my_locals.size(); } size_type size() const { return my_locals.size(); }
//! true if there have been no local copies created //! true if there have been no local copies created
bool empty() const { return my_locals.empty(); } bool empty() const { return my_locals.empty(); }
//! begin iterator //! begin iterator
iterator begin() { return iterator( my_locals, 0 ); } iterator begin() { return iterator( my_locals, 0 ); }
//! end iterator //! end iterator
skipping to change at line 722 skipping to change at line 854
//! end const iterator //! end const iterator
const_iterator end() const { return const_iterator(my_locals, my_locals.size()); } const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }
//! Get range for parallel algorithms //! Get range for parallel algorithms
range_type range( size_t grainsize=1 ) { return range_type( begin() , end(), grainsize ); } range_type range( size_t grainsize=1 ) { return range_type( begin() , end(), grainsize ); }
//! Get const range for parallel algorithms //! Get const range for parallel algorithms
const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); } const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
void unconstruct_locals() {
for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
cvi->unconstruct();
}
}
//! Destroys local copies //! Destroys local copies
void clear() { void clear() {
unconstruct_locals(); unconstruct_locals();
my_locals.clear(); my_locals.clear();
my_hash_tbl.clear(); this->table_clear();
reset_key();
// callback is not destroyed // callback is not destroyed
// exemplar is not destroyed // exemplar is not destroyed
} }
// STL container methods
// copy constructor
private: private:
template<typename U, typename A2, ets_key_usage_type C2> template<typename U, typename A2, ets_key_usage_type C2>
void void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);
internal_copy_construct( const enumerable_thread_specific<U, A2, C2>& other) {
typedef typename tbb::enumerable_thread_specific<U, A2, C2> other_type;
for(typename other_type::const_iterator ci = other.begin(); ci != other.end(); ++ci) {
hash_table_index_type local_index;
#if TBB_DEPRECATED
local_index = my_locals.push_back(padded_element());
#else
local_index = my_locals.push_back(padded_element()) - my_locals.begin();
#endif
(void) new(my_locals[local_index].value) T(*ci);
}
if(other.my_finit_callback) {
my_finit_callback = other.my_finit_callback->make_copy();
}
else {
my_finit_callback = 0;
}
if(other.my_exemplar_ptr) {
pointer local_ref = reinterpret_cast<T*>(other.my_exemplar_ptr->value);
my_exemplar_ptr = create_exemplar(*local_ref);
}
else {
my_exemplar_ptr = 0;
}
my_tls_manager::create_key(my_key);
}
public: public:
template<typename U, typename Alloc, ets_key_usage_type Cachetype> template<typename U, typename Alloc, ets_key_usage_type Cachetype>
enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : my_hash_tbl(other.my_hash_tbl) enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
{ {
internal_copy_construct(other); internal_copy(other);
} }
enumerable_thread_specific( const enumerable_thread_specific& other ) : my_hash_tbl(other.my_hash_tbl) enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
{ {
internal_copy_construct(other); internal_copy(other);
} }
private: private:
template<typename U, typename A2, ets_key_usage_type C2> template<typename U, typename A2, ets_key_usage_type C2>
enumerable_thread_specific & enumerable_thread_specific &
internal_assign(const enumerable_thread_specific<U, A2, C2>& other) { internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
typedef typename tbb::enumerable_thread_specific<U, A2, C2> other_type;
if(static_cast<void *>( this ) != static_cast<const void *>( &other )) { if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
this->clear(); // resets TLS key this->clear();
my_hash_tbl = other.my_hash_tbl;
for(typename other_type::const_iterator ci = other.begin(); ci != other.end(); ++ci) {
hash_table_index_type local_index;
#if TBB_DEPRECATED
local_index = my_locals.push_back(padded_element());
#else
local_index = my_locals.push_back(padded_element()) - my_locals.begin();
#endif
(void) new(my_locals[local_index].value) T(*ci);
}
if(my_finit_callback) { if(my_finit_callback) {
my_finit_callback->destroy(); my_finit_callback->destroy();
my_finit_callback = 0; my_finit_callback = 0;
} }
if(my_exemplar_ptr) { if(my_exemplar_ptr) {
free_exemplar(my_exemplar_ptr); free_exemplar(my_exemplar_ptr);
my_exemplar_ptr = 0; my_exemplar_ptr = 0;
} }
if(other.my_finit_callback) { internal_copy( other );
my_finit_callback = other.my_finit_callback->make_copy();
}
if(other.my_exemplar_ptr) {
pointer local_ref = reinterpret_cast<T*>(other.my_exemplar_ptr->value);
my_exemplar_ptr = create_exemplar(*local_ref);
}
} }
return *this; return *this;
} }
public: public:
// assignment // assignment
enumerable_thread_specific& operator=(const enumerable_thread_specific& other) { enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
return internal_assign(other); return internal_assign(other);
} }
skipping to change at line 864 skipping to change at line 941
// combine_func_t has signature void(T) or void(const T&) // combine_func_t has signature void(T) or void(const T&)
template <typename combine_func_t> template <typename combine_func_t>
void combine_each(combine_func_t f_combine) { void combine_each(combine_func_t f_combine) {
for(const_iterator ci = begin(); ci != end(); ++ci) { for(const_iterator ci = begin(); ci != end(); ++ci) {
f_combine( *ci ); f_combine( *ci );
} }
} }
}; // enumerable_thread_specific }; // enumerable_thread_specific
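For reference, here is a minimal usage sketch of the container defined above (the names Counters, CountBody and count_in_parallel are illustrative, not part of the header): each thread accumulates into its own local() copy without locking, and the per-thread results are reduced afterwards with combine(); combine_each() could be used instead to visit each copy.

    #include "tbb/enumerable_thread_specific.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include <functional>                       // std::plus

    typedef tbb::enumerable_thread_specific<int> Counters;
    Counters counters;                          // each local copy is default-constructed (int() == 0)

    struct CountBody {                          // counts iterations into the calling thread's copy
        Counters& c;
        CountBody( Counters& c_ ) : c(c_) {}
        void operator()( const tbb::blocked_range<int>& r ) const {
            c.local() += int(r.size());         // thread-private copy, so no synchronization needed
        }
    };

    int count_in_parallel( int n ) {
        tbb::parallel_for( tbb::blocked_range<int>(0,n), CountBody(counters) );
        return counters.combine( std::plus<int>() );   // serial reduction over all local copies
    }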
template <typename T, typename Allocator, ets_key_usage_type ETS_key_type>
template<typename U, typename A2, ets_key_usage_type C2>
void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {
typedef internal::ets_base<ets_no_key> base;
__TBB_ASSERT(my_locals.size()==0,NULL);
this->table_reserve_for_copy( other );
for( base::array* r=other.my_root; r; r=r->next ) {
for( size_t i=0; i<r->size(); ++i ) {
base::slot& s1 = r->at(i);
if( !s1.empty() ) {
base::slot& s2 = this->table_find(s1.key);
if( s2.empty() ) {
#if TBB_DEPRECATED
void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
void* lref = &*my_locals.push_back(padded_element());
#endif
s2.ptr = new(lref) T(*(U*)s1.ptr);
s2.key = s1.key;
} else {
// Skip the duplicate
}
}
}
}
if(other.my_finit_callback) {
my_finit_callback = other.my_finit_callback->make_copy();
} else {
my_finit_callback = 0;
}
if(other.my_exemplar_ptr) {
pointer local_ref = reinterpret_cast<U*>(other.my_exemplar_ptr->value);
my_exemplar_ptr = create_exemplar(*local_ref);
} else {
my_exemplar_ptr = 0;
}
}
template< typename Container > template< typename Container >
class flattened2d { class flattened2d {
// This intermediate typedef is to address issues with VC7.1 compilers // This intermediate typedef is to address issues with VC7.1 compilers
typedef typename Container::value_type conval_type; typedef typename Container::value_type conval_type;
public: public:
//! Basic types //! Basic types
typedef typename conval_type::size_type size_type; typedef typename conval_type::size_type size_type;
skipping to change at line 922 skipping to change at line 1037
template <typename Container> template <typename Container>
flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) { flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
return flattened2d<Container>(c, b, e); return flattened2d<Container>(c, b, e);
} }
template <typename Container> template <typename Container>
flattened2d<Container> flatten2d(const Container &c) { flattened2d<Container> flatten2d(const Container &c) {
return flattened2d<Container>(c); return flattened2d<Container>(c);
} }
} // interface5
namespace internal {
using interface5::internal::segmented_iterator;
}
using interface5::enumerable_thread_specific;
using interface5::flattened2d;
using interface5::flatten2d;
} // namespace tbb } // namespace tbb
#endif #endif
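A small sketch of how flatten2d() above is typically combined with enumerable_thread_specific (the Buffers typedef and the serial loop are illustrative): each thread appends to its own local std::vector during the parallel phase, and flatten2d() then presents all per-thread vectors as one flat sequence.

    #include "tbb/enumerable_thread_specific.h"
    #include <vector>

    typedef tbb::enumerable_thread_specific< std::vector<int> > Buffers;
    Buffers buffers;
    // parallel phase (not shown): each thread calls buffers.local().push_back(...)

    long sum_all_buffers() {
        typedef tbb::flattened2d<Buffers> Flat;
        Flat flat = tbb::flatten2d( buffers );  // view over every element of every local vector
        long total = 0;
        for( Flat::iterator i = flat.begin(); i != flat.end(); ++i )
            total += *i;                        // serial walk; run after the parallel phase
        return total;
    }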
 End of changes. 33 change blocks. 
245 lines changed or deleted 364 lines changed or added


 parallel_do.h   parallel_do.h 
skipping to change at line 165 skipping to change at line 165
/** Implements new task adding procedure. /** Implements new task adding procedure.
@ingroup algorithms **/ @ingroup algorithms **/
template<class Body, typename Item> template<class Body, typename Item>
class parallel_do_feeder_impl : public parallel_do_feeder<Item> class parallel_do_feeder_impl : public parallel_do_feeder<Item>
{ {
/*override*/ /*override*/
void internal_add( const Item& item ) void internal_add( const Item& item )
{ {
typedef do_iteration_task<Body, Item> iteration_type; typedef do_iteration_task<Body, Item> iteration_type;
iteration_type& t = *new (task::self().allocate_additional_child_of(*my_barrier)) iteration_type(item, *this); iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this);
t.spawn( t ); t.spawn( t );
} }
public: public:
const Body* my_body; const Body* my_body;
empty_task* my_barrier; empty_task* my_barrier;
parallel_do_feeder_impl() parallel_do_feeder_impl()
{ {
my_barrier = new( task::allocate_root() ) empty_task(); my_barrier = new( task::allocate_root() ) empty_task();
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 parallel_scan.h   parallel_scan.h 
skipping to change at line 172 skipping to change at line 172
if( result.left ) if( result.left )
result.left_is_final = false; result.left_is_final = false;
if( right_zombie && sum ) if( right_zombie && sum )
((*sum)->body).reverse_join(result.left_sum->body); ((*sum)->body).reverse_join(result.left_sum->body);
__TBB_ASSERT( !return_slot, NULL ); __TBB_ASSERT( !return_slot, NULL );
if( right_zombie || result.right ) { if( right_zombie || result.right ) {
return_slot = &result; return_slot = &result;
} else { } else {
destroy( result ); destroy( result );
} }
if( right_zombie && !sum && !result.right ) { if( right_zombie && !sum && !result.right ) destroy(*right_zomb
destroy(*right_zombie); ie);
right_zombie = NULL;
}
return NULL; return NULL;
} }
finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) : finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) :
sum(sum_), sum(sum_),
return_slot(return_slot_), return_slot(return_slot_),
right_zombie(NULL), right_zombie(NULL),
result(result_) result(result_)
{ {
__TBB_ASSERT( !return_slot, NULL ); __TBB_ASSERT( !return_slot, NULL );
} }
~finish_scan(){
#if __TBB_TASK_GROUP_CONTEXT
if (is_cancelled()) {
if (result.ref_count() == 0) destroy(result);
if (right_zombie) destroy(*right_zombie);
}
#endif
}
}; };
//! Initial task to split the work //! Initial task to split the work
/** @ingroup algorithms */ /** @ingroup algorithms */
template<typename Range, typename Body, typename Partitioner=simple_par titioner> template<typename Range, typename Body, typename Partitioner=simple_par titioner>
class start_scan: public task { class start_scan: public task {
typedef sum_node<Range,Body> sum_node_type; typedef sum_node<Range,Body> sum_node_type;
typedef final_sum<Range,Body> final_sum_type; typedef final_sum<Range,Body> final_sum_type;
final_sum_type* body; final_sum_type* body;
/** Non-null if caller is requesting total. */ /** Non-null if caller is requesting total. */
final_sum_type** sum; final_sum_type** sum;
sum_node_type** return_slot; sum_node_type** return_slot;
/** Null if computing root. */ /** Null if computing root. */
sum_node_type* parent_sum; sum_node_type* parent_sum;
bool is_final; bool is_final;
bool is_right_child; bool is_right_child;
Range range; Range range;
typename Partitioner::partition_type partition; typename Partitioner::partition_type partition;
/*override*/ task* execute(); /*override*/ task* execute();
#if __TBB_TASK_GROUP_CONTEXT
tbb::task_group_context &my_context;
#endif
// The class is intended to destroy allocated tasks if an exception occurs
class task_cleaner: internal::no_copy {
typedef internal::start_scan<Range,Body,Partitioner> start_pass1_type;
internal::sum_node<Range,Body>* my_root;
final_sum_type* my_temp_body;
const Range& my_range;
Body& my_body;
start_pass1_type* my_pass1;
public:
bool do_clean; // Set to true if cleanup is required.
task_cleaner(internal::sum_node<Range,Body>* root, final_sum_type* temp_body, const Range& range, Body& body, start_pass1_type* pass1)
: my_root(root), my_temp_body(temp_body), my_range(range),
my_body(body), my_pass1(pass1), do_clean(true) {}
~task_cleaner(){
if (do_clean) {
my_body.assign(my_temp_body->body);
my_temp_body->finish_construction( my_range, NULL );
my_temp_body->destroy(*my_temp_body);
}
}
};
public: public:
start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ ) :
#if __TBB_TASK_GROUP_CONTEXT
, tbb::task_group_context &context_
#endif
) :
body(parent_.body), body(parent_.body),
sum(parent_.sum), sum(parent_.sum),
return_slot(&return_slot_), return_slot(&return_slot_),
parent_sum(parent_sum_), parent_sum(parent_sum_),
is_final(parent_.is_final), is_final(parent_.is_final),
is_right_child(false), is_right_child(false),
range(parent_.range,split()), range(parent_.range,split()),
partition(parent_.partition,split()) partition(parent_.partition,split())
#if __TBB_TASK_GROUP_CONTEXT
, my_context(context_)
#endif
{ {
__TBB_ASSERT( !*return_slot, NULL ); __TBB_ASSERT( !*return_slot, NULL );
} }
start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_ start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_) :
#if __TBB_TASK_GROUP_CONTEXT
, tbb::task_group_context &_context
#endif
) :
body(&body_), body(&body_),
sum(NULL), sum(NULL),
return_slot(&return_slot_), return_slot(&return_slot_),
parent_sum(NULL), parent_sum(NULL),
is_final(true), is_final(true),
is_right_child(false), is_right_child(false),
range(range_), range(range_),
partition(partitioner_) partition(partitioner_)
#if __TBB_TASK_GROUP_CONTEXT
, my_context (_context)
#endif
{ {
__TBB_ASSERT( !*return_slot, NULL ); __TBB_ASSERT( !*return_slot, NULL );
} }
static void run( const Range& range, Body& body, const Partitioner& partitioner static void run( const Range& range, Body& body, const Partitioner& partitioner ) {
#if __TBB_TASK_GROUP_CONTEXT
, task_group_context& context
#endif
) {
if( !range.empty() ) { if( !range.empty() ) {
typedef internal::start_scan<Range,Body,Partitioner> start_ pass1_type; typedef internal::start_scan<Range,Body,Partitioner> start_ pass1_type;
internal::sum_node<Range,Body>* root = NULL; internal::sum_node<Range,Body>* root = NULL;
typedef internal::final_sum<Range,Body> final_sum_type; typedef internal::final_sum<Range,Body> final_sum_type;
#if __TBB_TASK_GROUP_CONTEXT
final_sum_type* temp_body = new(task::allocate_root(context)) final_sum_type( body );
start_pass1_type& pass1 = *new(task::allocate_root(context)) start_pass1_type(
/*return_slot=*/root,
range,
*temp_body,
partitioner,
context
);
#else
final_sum_type* temp_body = new(task::allocate_root()) fina l_sum_type( body ); final_sum_type* temp_body = new(task::allocate_root()) fina l_sum_type( body );
start_pass1_type& pass1 = *new(task::allocate_root()) start _pass1_type( start_pass1_type& pass1 = *new(task::allocate_root()) start _pass1_type(
/*return_slot=*/root, /*return_slot=*/root,
range, range,
*temp_body, *temp_body,
partitioner ); partitioner );
#endif
task_cleaner my_cleaner(root, temp_body, range, body, &pass1);
task::spawn_root_and_wait( pass1 ); task::spawn_root_and_wait( pass1 );
if( root ) { if( root ) {
my_cleaner.do_clean = false;
root->body = temp_body; root->body = temp_body;
root->incoming = NULL; root->incoming = NULL;
root->stuff_last = &body; root->stuff_last = &body;
task::spawn_root_and_wait( *root ); task::spawn_root_and_wait( *root );
} else {
body.assign(temp_body->body);
temp_body->finish_construction( range, NULL );
temp_body->destroy(*temp_body);
} }
} }
} }
}; };
template<typename Range, typename Body, typename Partitioner> template<typename Range, typename Body, typename Partitioner>
task* start_scan<Range,Body,Partitioner>::execute() { task* start_scan<Range,Body,Partitioner>::execute() {
typedef internal::finish_scan<Range,Body> finish_pass1_type; typedef internal::finish_scan<Range,Body> finish_pass1_type;
finish_pass1_type* p = parent_sum ? static_cast<finish_pass1_type*> ( parent() ) : NULL; finish_pass1_type* p = parent_sum ? static_cast<finish_pass1_type*> ( parent() ) : NULL;
// Inspecting p->result.left_sum would ordinarily be a race conditi on. // Inspecting p->result.left_sum would ordinarily be a race conditi on.
// But we inspect it only if we are not a stolen task, in which cas e we // But we inspect it only if we are not a stolen task, in which cas e we
// know that task assigning to p->result.left_sum has completed. // know that task assigning to p->result.left_sum has completed.
bool treat_as_stolen = is_right_child && (is_stolen_task() || body! =p->result.left_sum); bool treat_as_stolen = is_right_child && (is_stolen_task() || body! =p->result.left_sum);
if( treat_as_stolen ) { if( treat_as_stolen ) {
// Invocation is for right child that has been really stolen or needs to be virtually stolen // Invocation is for right child that has been really stolen or needs to be virtually stolen
#if __TBB_TASK_GROUP_CONTEXT
p->right_zombie = body = new( allocate_root(my_context) ) final_sum_type(body->body);
#else
p->right_zombie = body = new( allocate_root() ) final_sum_type( body->body); p->right_zombie = body = new( allocate_root() ) final_sum_type( body->body);
#endif
is_final = false; is_final = false;
} }
task* next_task = NULL; task* next_task = NULL;
if( (is_right_child && !treat_as_stolen) || !range.is_divisible() | | partition.should_execute_range(*this) ) { if( (is_right_child && !treat_as_stolen) || !range.is_divisible() | | partition.should_execute_range(*this) ) {
if( is_final ) if( is_final )
(body->body)( range, final_scan_tag() ); (body->body)( range, final_scan_tag() );
else if( sum ) else if( sum )
(body->body)( range, pre_scan_tag() ); (body->body)( range, pre_scan_tag() );
if( sum ) if( sum )
*sum = body; *sum = body;
__TBB_ASSERT( !*return_slot, NULL ); __TBB_ASSERT( !*return_slot, NULL );
} else { } else {
sum_node_type* result; sum_node_type* result;
if( parent_sum ) if( parent_sum )
result = new(allocate_additional_child_of(*parent_sum)) sum _node_type(range,/*left_is_final=*/is_final); result = new(allocate_additional_child_of(*parent_sum)) sum _node_type(range,/*left_is_final=*/is_final);
else else
#if __TBB_TASK_GROUP_CONTEXT
result = new(task::allocate_root(my_context)) sum_node_type(range,/*left_is_final=*/is_final);
#else
result = new(task::allocate_root()) sum_node_type(range,/*l eft_is_final=*/is_final); result = new(task::allocate_root()) sum_node_type(range,/*l eft_is_final=*/is_final);
#endif
finish_pass1_type& c = *new( allocate_continuation()) finish_pa ss1_type(*return_slot,sum,*result); finish_pass1_type& c = *new( allocate_continuation()) finish_pa ss1_type(*return_slot,sum,*result);
// Split off right child // Split off right child
#if __TBB_TASK_GROUP_CONTEXT
start_scan& b = *new( c.allocate_child() ) start_scan( /*return_slot=*/result->right, *this, result, my_context );
#else
start_scan& b = *new( c.allocate_child() ) start_scan( /*return _slot=*/result->right, *this, result ); start_scan& b = *new( c.allocate_child() ) start_scan( /*return _slot=*/result->right, *this, result );
#endif
b.is_right_child = true; b.is_right_child = true;
// Left child is recycling of *this. Must recycle this before spawning b, // Left child is recycling of *this. Must recycle this before spawning b,
// otherwise b might complete and decrement c.ref_count() to ze ro, which // otherwise b might complete and decrement c.ref_count() to ze ro, which
// would cause c.execute() to run prematurely. // would cause c.execute() to run prematurely.
recycle_as_child_of(c); recycle_as_child_of(c);
c.set_ref_count(2); c.set_ref_count(2);
c.spawn(b); c.spawn(b);
sum = &result->left_sum; sum = &result->left_sum;
return_slot = &result->left; return_slot = &result->left;
is_right_child = false; is_right_child = false;
skipping to change at line 407 skipping to change at line 330
**/ **/
/** \name parallel_scan /** \name parallel_scan
See also requirements on \ref range_req "Range" and \ref parallel_scan_ body_req "parallel_scan Body". **/ See also requirements on \ref range_req "Range" and \ref parallel_scan_ body_req "parallel_scan Body". **/
//@{ //@{
//! Parallel prefix with default partitioner //! Parallel prefix with default partitioner
/** @ingroup algorithms **/ /** @ingroup algorithms **/
template<typename Range, typename Body> template<typename Range, typename Body>
void parallel_scan( const Range& range, Body& body ) { void parallel_scan( const Range& range, Body& body ) {
#if __TBB_TASK_GROUP_CONTEXT internal::start_scan<Range,Body,__TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());
task_group_context context;
#endif // __TBB_TASK_GROUP_CONTEXT
internal::start_scan<Range,Body,__TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER()
#if __TBB_TASK_GROUP_CONTEXT
, context
#endif
);
} }
//! Parallel prefix with simple_partitioner //! Parallel prefix with simple_partitioner
/** @ingroup algorithms **/ /** @ingroup algorithms **/
template<typename Range, typename Body> template<typename Range, typename Body>
void parallel_scan( const Range& range, Body& body, const simple_partitione r& partitioner ) { void parallel_scan( const Range& range, Body& body, const simple_partitione r& partitioner ) {
#if __TBB_TASK_GROUP_CONTEXT internal::start_scan<Range,Body,simple_partitioner>::run(range,body,partitioner);
task_group_context context;
#endif // __TBB_TASK_GROUP_CONTEXT
internal::start_scan<Range,Body,simple_partitioner>::run(range,body,partitioner
#if __TBB_TASK_GROUP_CONTEXT
, context
#endif
);
} }
//! Parallel prefix with auto_partitioner //! Parallel prefix with auto_partitioner
/** @ingroup algorithms **/ /** @ingroup algorithms **/
template<typename Range, typename Body> template<typename Range, typename Body>
void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) { void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) {
#if __TBB_TASK_GROUP_CONTEXT internal::start_scan<Range,Body,auto_partitioner>::run(range,body,partitioner);
task_group_context context;
#endif // __TBB_TASK_GROUP_CONTEXT
internal::start_scan<Range,Body,auto_partitioner>::run(range,body,partitioner
#if __TBB_TASK_GROUP_CONTEXT
, context
#endif
);
}
#if __TBB_TASK_GROUP_CONTEXT
//! Parallel prefix with simple_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner, tbb::task_group_context & context ) {
internal::start_scan<Range,Body,simple_partitioner>::run(range,body,partitioner,context);
}
//! Parallel prefix with auto_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner, tbb::task_group_context & context ) {
internal::start_scan<Range,Body,auto_partitioner>::run(range,body,partitioner,context);
}
//! Parallel prefix with default partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_scan( const Range& range, Body& body, tbb::task_group_context & context ) {
internal::start_scan<Range,Body,__TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER(),context);
} }
#endif
//@} //@}
} // namespace tbb } // namespace tbb
#endif /* __TBB_parallel_scan_H */ #endif /* __TBB_parallel_scan_H */
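For orientation, the classic running-sum Body from the TBB documentation (the array names x and y are illustrative) shows the protocol that the scan tasks above drive: the pre-scan pass only accumulates partial sums, reverse_join() folds a right-hand piece into its left neighbour, and the final pass writes the output.

    #include "tbb/parallel_scan.h"
    #include "tbb/blocked_range.h"

    class RunningSum {
        float sum;
        float* const y;
        const float* const x;
    public:
        RunningSum( float y_[], const float x_[] ) : sum(0), y(y_), x(x_) {}
        float get_sum() const { return sum; }
        template<typename Tag>
        void operator()( const tbb::blocked_range<int>& r, Tag ) {
            float temp = sum;
            for( int i=r.begin(); i<r.end(); ++i ) {
                temp += x[i];
                if( Tag::is_final_scan() )      // only the final pass writes results
                    y[i] = temp;
            }
            sum = temp;
        }
        RunningSum( RunningSum& b, tbb::split ) : sum(0), y(b.y), x(b.x) {}
        void reverse_join( RunningSum& a ) { sum = a.sum + sum; }
        void assign( RunningSum& b ) { sum = b.sum; }
    };

    float running_sum( float y[], const float x[], int n ) {
        RunningSum body(y,x);
        tbb::parallel_scan( tbb::blocked_range<int>(0,n), body );
        return body.get_sum();
    }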
 End of changes. 22 change blocks. 
154 lines changed or deleted 18 lines changed or added


 parallel_while.h   parallel_while.h 
skipping to change at line 188 skipping to change at line 188
my_barrier->spawn_and_wait_for_all(w); my_barrier->spawn_and_wait_for_all(w);
my_barrier->destroy(*my_barrier); my_barrier->destroy(*my_barrier);
my_barrier = NULL; my_barrier = NULL;
my_body = NULL; my_body = NULL;
} }
template<typename Body> template<typename Body>
void parallel_while<Body>::add( const value_type& item ) { void parallel_while<Body>::add( const value_type& item ) {
__TBB_ASSERT(my_barrier,"attempt to add to parallel_while that is not r unning"); __TBB_ASSERT(my_barrier,"attempt to add to parallel_while that is not r unning");
typedef internal::while_iteration_task<Body> iteration_type; typedef internal::while_iteration_task<Body> iteration_type;
iteration_type& i = *new( task::self().allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body); iteration_type& i = *new( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body);
task::self().spawn( i ); task::self().spawn( i );
} }
} // namespace } // namespace
#endif /* __TBB_parallel_while */ #endif /* __TBB_parallel_while */
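A minimal sketch of the add() mechanism above in use (IntStream, HalveBody and drain are illustrative names): run() drains the stream, and the body may feed follow-up items back through add() while the loop is still in flight.

    #include "tbb/parallel_while.h"
    #include "tbb/concurrent_queue.h"

    struct IntStream {                              // item source; run() calls pop_if_present repeatedly
        tbb::concurrent_queue<int> queue;
        bool pop_if_present( int& item ) { return queue.try_pop(item); }
    };

    class HalveBody {
        tbb::parallel_while<HalveBody>& my_while;
    public:
        typedef int argument_type;                  // required by parallel_while
        HalveBody( tbb::parallel_while<HalveBody>& w ) : my_while(w) {}
        void operator()( int item ) const {
            // ... process item ...
            if( item > 1 )
                my_while.add( item/2 );             // same path as add() shown above
        }
    };

    void drain( IntStream& stream ) {
        tbb::parallel_while<HalveBody> w;
        HalveBody body(w);
        w.run( stream, body );                      // returns once the stream and all added items are done
    }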
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 pipeline.h   pipeline.h 
skipping to change at line 55 skipping to change at line 55
#define __TBB_PIPELINE_VERSION(x) (unsigned char)(x-2)<<1 #define __TBB_PIPELINE_VERSION(x) (unsigned char)(x-2)<<1
typedef unsigned long Token; typedef unsigned long Token;
typedef long tokendiff_t; typedef long tokendiff_t;
class stage_task; class stage_task;
class input_buffer; class input_buffer;
class pipeline_root_task; class pipeline_root_task;
class pipeline_cleaner; class pipeline_cleaner;
} // namespace internal } // namespace internal
namespace interface5 {
template<typename T, typename U> class filter_t;
namespace internal {
class pipeline_proxy;
}
}
//! @endcond //! @endcond
//! A stage in a pipeline. //! A stage in a pipeline.
/** @ingroup algorithms */ /** @ingroup algorithms */
class filter: internal::no_copy { class filter: internal::no_copy {
private: private:
//! Value used to mark "not in pipeline" //! Value used to mark "not in pipeline"
static filter* not_in_pipeline() {return reinterpret_cast<filter*>(intp tr_t(-1));} static filter* not_in_pipeline() {return reinterpret_cast<filter*>(intp tr_t(-1));}
//! The lowest bit 0 is for parallel vs. serial //! The lowest bit 0 is for parallel vs. serial
static const unsigned char filter_is_serial = 0x1; static const unsigned char filter_is_serial = 0x1;
//! 4th bit distinguishes ordered vs unordered filters. //! 4th bit distinguishes ordered vs unordered filters.
/** The bit was not set for parallel filters in TBB 2.1 and earlier, /** The bit was not set for parallel filters in TBB 2.1 and earlier,
but the is_ordered() function always treats parallel filters as out of order. */ but the is_ordered() function always treats parallel filters as out of order. */
static const unsigned char filter_is_out_of_order = 0x1<<4; static const unsigned char filter_is_out_of_order = 0x1<<4;
//! 5th bit distinguishes thread-bound and regular filters. //! 5th bit distinguishes thread-bound and regular filters.
static const unsigned char filter_is_bound = 0x1<<5; static const unsigned char filter_is_bound = 0x1<<5;
//! 7th bit defines exception propagation mode expected by the application.
static const unsigned char exact_exception_propagation =
#if TBB_USE_CAPTURED_EXCEPTION
0x0;
#else
0x1<<7;
#endif /* TBB_USE_CAPTURED_EXCEPTION */
static const unsigned char current_version = __TBB_PIPELINE_VERSION(5); static const unsigned char current_version = __TBB_PIPELINE_VERSION(5);
static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for v ersion static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for v ersion
public: public:
enum mode { enum mode {
//! processes multiple items in parallel and in no particular order //! processes multiple items in parallel and in no particular order
parallel = current_version | filter_is_out_of_order, parallel = current_version | filter_is_out_of_order,
//! processes items one at a time; all such filters process items i n the same order //! processes items one at a time; all such filters process items i n the same order
serial_in_order = current_version | filter_is_serial, serial_in_order = current_version | filter_is_serial,
//! processes items one at a time and in no particular order //! processes items one at a time and in no particular order
serial_out_of_order = current_version | filter_is_serial | filter_i s_out_of_order, serial_out_of_order = current_version | filter_is_serial | filter_i s_out_of_order,
//! @deprecated use serial_in_order instead //! @deprecated use serial_in_order instead
serial = serial_in_order serial = serial_in_order
}; };
protected: protected:
filter( bool is_serial_ ) : filter( bool is_serial_ ) :
next_filter_in_pipeline(not_in_pipeline()), next_filter_in_pipeline(not_in_pipeline()),
my_input_buffer(NULL), my_input_buffer(NULL),
my_filter_mode(static_cast<unsigned char>(is_serial_ ? serial : parallel)), my_filter_mode(static_cast<unsigned char>((is_serial_ ? serial : parallel) | exact_exception_propagation)),
prev_filter_in_pipeline(not_in_pipeline()), prev_filter_in_pipeline(not_in_pipeline()),
my_pipeline(NULL), my_pipeline(NULL),
next_segment(NULL) next_segment(NULL)
{} {}
filter( mode filter_mode ) : filter( mode filter_mode ) :
next_filter_in_pipeline(not_in_pipeline()), next_filter_in_pipeline(not_in_pipeline()),
my_input_buffer(NULL), my_input_buffer(NULL),
my_filter_mode(static_cast<unsigned char>(filter_mode)), my_filter_mode(static_cast<unsigned char>(filter_mode | exact_exception_propagation)),
prev_filter_in_pipeline(not_in_pipeline()), prev_filter_in_pipeline(not_in_pipeline()),
my_pipeline(NULL), my_pipeline(NULL),
next_segment(NULL) next_segment(NULL)
{} {}
public: public:
//! True if filter is serial. //! True if filter is serial.
bool is_serial() const { bool is_serial() const {
return bool( my_filter_mode & filter_is_serial ); return bool( my_filter_mode & filter_is_serial );
} }
skipping to change at line 179 skipping to change at line 196
enum result_type { enum result_type {
// item was processed // item was processed
success, success,
// item is currently not available // item is currently not available
item_not_available, item_not_available,
// there are no more items to process // there are no more items to process
end_of_stream end_of_stream
}; };
protected: protected:
thread_bound_filter(mode filter_mode): thread_bound_filter(mode filter_mode):
filter(static_cast<mode>(filter_mode | filter::filter_is_bound)) filter(static_cast<mode>(filter_mode | filter::filter_is_bound | filter::exact_exception_propagation))
{} {}
public: public:
//! If a data item is available, invoke operator() on that item. //! If a data item is available, invoke operator() on that item.
/** This interface is non-blocking. /** This interface is non-blocking.
Returns 'success' if an item was processed. Returns 'success' if an item was processed.
Returns 'item_not_available' if no item can be processed now Returns 'item_not_available' if no item can be processed now
but more may arrive in the future, or if token limit is reached. but more may arrive in the future, or if token limit is reached.
Returns 'end_of_stream' if there are no more items to process. */ Returns 'end_of_stream' if there are no more items to process. */
result_type __TBB_EXPORTED_METHOD try_process_item(); result_type __TBB_EXPORTED_METHOD try_process_item();
skipping to change at line 233 skipping to change at line 250
//! Remove all filters from the pipeline. //! Remove all filters from the pipeline.
void __TBB_EXPORTED_METHOD clear(); void __TBB_EXPORTED_METHOD clear();
private: private:
friend class internal::stage_task; friend class internal::stage_task;
friend class internal::pipeline_root_task; friend class internal::pipeline_root_task;
friend class filter; friend class filter;
friend class thread_bound_filter; friend class thread_bound_filter;
friend class internal::pipeline_cleaner; friend class internal::pipeline_cleaner;
friend class tbb::interface5::internal::pipeline_proxy;
//! Pointer to first filter in the pipeline. //! Pointer to first filter in the pipeline.
filter* filter_list; filter* filter_list;
//! Pointer to location where address of next filter to be added should be stored. //! Pointer to location where address of next filter to be added should be stored.
filter* filter_end; filter* filter_end;
//! task whose reference count is used to determine when all stages are done. //! task whose reference count is used to determine when all stages are done.
task* end_counter; task* end_counter;
skipping to change at line 267 skipping to change at line 285
//! Not used, but retained to satisfy old export files. //! Not used, but retained to satisfy old export files.
void __TBB_EXPORTED_METHOD inject_token( task& self ); void __TBB_EXPORTED_METHOD inject_token( task& self );
#if __TBB_TASK_GROUP_CONTEXT #if __TBB_TASK_GROUP_CONTEXT
//! Does clean up if pipeline is cancelled or exception occured //! Does clean up if pipeline is cancelled or exception occured
void clear_filters(); void clear_filters();
#endif #endif
}; };
//------------------------------------------------------------------------
// Support for lambda-friendly parallel_pipeline interface
//------------------------------------------------------------------------
namespace interface5 {
namespace internal {
template<typename T, typename U, typename Body> class concrete_filter;
}
class flow_control {
bool is_pipeline_stopped;
flow_control() { is_pipeline_stopped = false; }
template<typename T, typename U, typename Body> friend class internal::concrete_filter;
public:
void stop() { is_pipeline_stopped = true; }
};
//! @cond INTERNAL
namespace internal {
template<typename T, typename U, typename Body>
class concrete_filter: public tbb::filter {
Body my_body;
/*override*/ void* operator()(void* input) {
T* temp_input = (T*)input;
// Call user's operator()() here
void* output = (void*) new U(my_body(*temp_input));
delete temp_input;
return output;
}
public:
concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
};
template<typename U, typename Body>
class concrete_filter<void,U,Body>: public filter {
Body my_body;
/*override*/void* operator()(void*) {
flow_control control;
U temp_output = my_body(control);
void* output = control.is_pipeline_stopped ? NULL : (void*) new U(temp_output);
return output;
}
public:
concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
};
template<typename T, typename Body>
class concrete_filter<T,void,Body>: public filter {
Body my_body;
/*override*/ void* operator()(void* input) {
T* temp_input = (T*)input;
my_body(*temp_input);
delete temp_input;
return NULL;
}
public:
concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
};
template<typename Body>
class concrete_filter<void,void,Body>: public filter {
Body my_body;
/** Override privately because it is always called virtually */
/*override*/ void* operator()(void*) {
flow_control control;
my_body(control);
void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1;
return output;
}
public:
concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {}
};
//! The class that represents an object of the pipeline for parallel_pipeline().
/** It primarily serves as RAII class that deletes heap-allocated filter instances. */
class pipeline_proxy {
tbb::pipeline my_pipe;
public:
pipeline_proxy( const filter_t<void,void>& filter_chain );
~pipeline_proxy() {
while( filter* f = my_pipe.filter_list )
delete f; // filter destructor removes it from the pipeline
}
tbb::pipeline* operator->() { return &my_pipe; }
};
//! Abstract base class that represents a node in a parse tree underlying a filter_t.
/** These nodes are always heap-allocated and can be shared by filter_t objects. */
class filter_node: tbb::internal::no_copy {
/** Count must be atomic because it is hidden state for user, but might be shared by threads. */
tbb::atomic<intptr_t> ref_count;
protected:
filter_node() {
ref_count = 0;
#ifdef __TBB_TEST_FILTER_NODE_COUNT
++(__TBB_TEST_FILTER_NODE_COUNT);
#endif
}
public:
//! Add concrete_filter to pipeline
virtual void add_to( pipeline& ) = 0;
//! Increment reference count
void add_ref() {++ref_count;}
//! Decrement reference count and delete if it becomes zero.
void remove_ref() {
__TBB_ASSERT(ref_count>0,"ref_count underflow");
if( --ref_count==0 )
delete this;
}
virtual ~filter_node() {
#ifdef __TBB_TEST_FILTER_NODE_COUNT
--(__TBB_TEST_FILTER_NODE_COUNT);
#endif
}
};
//! Node in parse tree representing result of make_filter.
template<typename T, typename U, typename Body>
class filter_node_leaf: public filter_node {
const tbb::filter::mode mode;
const Body& body;
/*override*/void add_to( pipeline& p ) {
concrete_filter<T,U,Body>* f = new concrete_filter<T,U,Body>(mode,body);
p.add_filter( *f );
}
public:
filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {}
};
//! Node in parse tree representing join of two filters.
class filter_node_join: public filter_node {
friend class filter_node; // to suppress GCC 3.2 warnings
filter_node& left;
filter_node& right;
/*override*/~filter_node_join() {
left.remove_ref();
right.remove_ref();
}
/*override*/void add_to( pipeline& p ) {
left.add_to(p);
right.add_to(p);
}
public:
filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) {
left.add_ref();
right.add_ref();
}
};
} // namespace internal
//! @endcond
template<typename T, typename U, typename Body>
filter_t<T,U> make_filter(tbb::filter::mode mode, const Body& body) {
return new internal::filter_node_leaf<T,U,Body>(mode, body);
}
template<typename T, typename V, typename U>
filter_t<T,U> operator& (const filter_t<T,V>& left, const filter_t<V,U>& right) {
__TBB_ASSERT(left.root,"cannot use default-constructed filter_t as left argument of '&'");
__TBB_ASSERT(right.root,"cannot use default-constructed filter_t as right argument of '&'");
return new internal::filter_node_join(*left.root,*right.root);
}
//! Class representing a chain of type-safe pipeline filters
template<typename T, typename U>
class filter_t {
typedef internal::filter_node filter_node;
filter_node* root;
filter_t( filter_node* root_ ) : root(root_) {
root->add_ref();
}
friend class internal::pipeline_proxy;
template<typename T_, typename U_, typename Body>
friend filter_t<T_,U_> make_filter(tbb::filter::mode, const Body& );
template<typename T_, typename V_, typename U_>
friend filter_t<T_,U_> operator& (const filter_t<T_,V_>& , const filter_t<V_,U_>& );
public:
filter_t() : root(NULL) {}
filter_t( const filter_t<T,U>& rhs ) : root(rhs.root) {
if( root ) root->add_ref();
}
template<typename Body>
filter_t( tbb::filter::mode mode, const Body& body ) :
root( new internal::filter_node_leaf<T,U,Body>(mode, body) ) {
root->add_ref();
}
void operator=( const filter_t<T,U>& rhs ) {
// Order of operations below carefully chosen so that reference counts remain correct
// in unlikely event that remove_ref throws exception.
filter_node* old = root;
root = rhs.root;
if( root ) root->add_ref();
if( old ) old->remove_ref();
}
~filter_t() {
if( root ) root->remove_ref();
}
void clear() {
// Like operator= with filter_t() on right side.
if( root ) {
filter_node* old = root;
root = NULL;
old->remove_ref();
}
}
};
inline internal::pipeline_proxy::pipeline_proxy( const filter_t<void,void>& filter_chain ) : my_pipe() {
__TBB_ASSERT( filter_chain.root, "cannot apply parallel_pipeline to default-constructed filter_t" );
filter_chain.root->add_to(my_pipe);
}
inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain
#if __TBB_TASK_GROUP_CONTEXT
, tbb::task_group_context& context
#endif
) {
internal::pipeline_proxy pipe(filter_chain);
// tbb::pipeline::run() is called via the proxy
pipe->run(max_number_of_live_tokens
#if __TBB_TASK_GROUP_CONTEXT
, context
#endif
);
}
#if __TBB_TASK_GROUP_CONTEXT
inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t<void,void>& filter_chain) {
tbb::task_group_context context;
parallel_pipeline(max_number_of_live_tokens, filter_chain, context);
}
#endif // __TBB_TASK_GROUP_CONTEXT
} // interface5
using interface5::flow_control;
using interface5::filter_t;
using interface5::make_filter;
using interface5::parallel_pipeline;
} // tbb } // tbb
#endif /* __TBB_pipeline_H */ #endif /* __TBB_pipeline_H */
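A short sketch of the lambda-friendly interface defined above (requires a compiler with C++0x lambda support; squares_pipeline and the token limit of 8 are illustrative): three filters built with make_filter() are composed with operator& and executed by parallel_pipeline().

    #include "tbb/pipeline.h"
    #include <cstdio>

    void squares_pipeline() {
        int counter = 0;
        long sum = 0;
        tbb::parallel_pipeline( /*max_number_of_live_tokens=*/8,
            tbb::make_filter<void,int>( tbb::filter::serial_in_order,
                [&]( tbb::flow_control& fc ) -> int {
                    if( counter == 1000 ) { fc.stop(); return 0; }   // value is discarded after stop()
                    return counter++;
                } ) &
            tbb::make_filter<int,int>( tbb::filter::parallel,
                []( int x ) { return x*x; } ) &
            tbb::make_filter<int,void>( tbb::filter::serial_in_order,
                [&]( int x ) { sum += x; } ) );
        std::printf( "sum of squares: %ld\n", sum );
    }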
 End of changes. 7 change blocks. 
3 lines changed or deleted 295 lines changed or added


 queuing_mutex.h   queuing_mutex.h 
skipping to change at line 110 skipping to change at line 110
//! The pointer to the mutex owned, or NULL if not holding a mutex. //! The pointer to the mutex owned, or NULL if not holding a mutex.
queuing_mutex* mutex; queuing_mutex* mutex;
//! The pointer to the next competitor for a mutex //! The pointer to the next competitor for a mutex
scoped_lock *next; scoped_lock *next;
//! The local spin-wait variable //! The local spin-wait variable
/** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of
zero-initialization. Defining it as an entire word instead of zero-initialization. Defining it as an entire word instead of
a byte seems to help performance slightly. */ a byte seems to help performance slightly. */
internal::uintptr going; uintptr_t going;
}; };
void __TBB_EXPORTED_METHOD internal_construct(); void __TBB_EXPORTED_METHOD internal_construct();
// Mutex traits // Mutex traits
static const bool is_rw_mutex = false; static const bool is_rw_mutex = false;
static const bool is_recursive_mutex = false; static const bool is_recursive_mutex = false;
static const bool is_fair_mutex = true; static const bool is_fair_mutex = true;
friend class scoped_lock; friend class scoped_lock;
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 spin_mutex.h   spin_mutex.h 
skipping to change at line 67 skipping to change at line 67
#endif #endif
} }
//! Represents acquisition of a mutex. //! Represents acquisition of a mutex.
class scoped_lock : internal::no_copy { class scoped_lock : internal::no_copy {
private: private:
//! Points to currently held mutex, or NULL if no lock is held. //! Points to currently held mutex, or NULL if no lock is held.
spin_mutex* my_mutex; spin_mutex* my_mutex;
//! Value to store into spin_mutex::flag to unlock the mutex. //! Value to store into spin_mutex::flag to unlock the mutex.
internal::uintptr my_unlock_value; uintptr_t my_unlock_value;
//! Like acquire, but with ITT instrumentation. //! Like acquire, but with ITT instrumentation.
void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m ); void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m );
//! Like try_acquire, but with ITT instrumentation. //! Like try_acquire, but with ITT instrumentation.
bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m ); bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m );
//! Like release, but with ITT instrumentation. //! Like release, but with ITT instrumentation.
void __TBB_EXPORTED_METHOD internal_release(); void __TBB_EXPORTED_METHOD internal_release();
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 task.h   task.h 
skipping to change at line 54 skipping to change at line 54
#endif /* __TBB_TASK_GROUP_CONTEXT */ #endif /* __TBB_TASK_GROUP_CONTEXT */
// MSVC does not allow taking the address of a member that was defined // MSVC does not allow taking the address of a member that was defined
// privately in task_base and made public in class task via a using declaration. // privately in task_base and made public in class task via a using declaration.
#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3) #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
#define __TBB_TASK_BASE_ACCESS public #define __TBB_TASK_BASE_ACCESS public
#else #else
#define __TBB_TASK_BASE_ACCESS private #define __TBB_TASK_BASE_ACCESS private
#endif #endif
namespace internal {
class allocate_additional_child_of_proxy: no_assign {
//! No longer used, but retained for binary layout compatibility. Always NULL.
task* self;
task& parent;
public:
explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
void __TBB_EXPORTED_METHOD free( task& ) const;
};
}
namespace interface5 { namespace interface5 {
namespace internal { namespace internal {
//! Base class for methods that became static in TBB 3.0. //! Base class for methods that became static in TBB 3.0.
/** TBB's evolution caused the "this" argument for several methods to become obsolete. /** TBB's evolution caused the "this" argument for several methods to become obsolete.
However, for backwards binary compatibility, the new methods need distinct names, However, for backwards binary compatibility, the new methods need distinct names,
otherwise the One Definition Rule would be broken. Hence the new methods are otherwise the One Definition Rule would be broken. Hence the new methods are
defined in this private base class, and then exposed in class task via defined in this private base class, and then exposed in class task via
using declarations. */ using declarations. */
class task_base: tbb::internal::no_copy { class task_base: tbb::internal::no_copy {
__TBB_TASK_BASE_ACCESS: __TBB_TASK_BASE_ACCESS:
friend class tbb::task; friend class tbb::task;
#if !TBB_DEPRECATED_TASK_INTERFACE
            //! Schedule task for execution when a worker becomes available. //! Schedule task for execution when a worker becomes available.
            static void spawn( task& t ); static void spawn( task& t );
            //! Spawn multiple tasks and clear list. //! Spawn multiple tasks and clear list.
            static void spawn( task_list& list ); static void spawn( task_list& list );
#endif /* !TBB_DEPRECATED_TASK_INTERFACE */
#if !TBB_DEPRECATED_TASK_INTERFACE || __TBB_BUILD
            //! Like allocate_child, except that task's parent becomes "t", not this.
            /** Typically used in conjunction with schedule_to_reexecute to implement while loops.
                Atomically increments the reference count of t.parent() */
            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
                return tbb::internal::allocate_additional_child_of_proxy(t);
            }
//! Destroy a task. //! Destroy a task.
/** Usually, calling this method is unnecessary, because a task is /** Usually, calling this method is unnecessary, because a task is
implicitly deleted after its execute() method runs. However, implicitly deleted after its execute() method runs. However,
sometimes a task needs to be explicitly deallocated, such as sometimes a task needs to be explicitly deallocated, such as
when a root task is used as the parent in spawn_and_wait_for_all. */ when a root task is used as the parent in spawn_and_wait_for_all. */
static void __TBB_EXPORTED_FUNC destroy( task& victim ); static void __TBB_EXPORTED_FUNC destroy( task& victim );
#endif /* TBB_DEPRECATED_TASK_INTERFACE || __TBB_BUILD */
}; };
} // internal } // internal
} // interface5 } // interface5
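
To make the interface change above concrete, here is a minimal sketch of the blocking parent/child style using the static spawn form that class task now imports from task_base. MyLeafTask, run_tree, and the child count are illustrative names, not part of the diffed headers.

    #include "tbb/task.h"

    class MyLeafTask : public tbb::task {       // illustrative user task
    public:
        tbb::task* execute() {
            // ... real work would go here ...
            return NULL;                        // no continuation to bypass to
        }
    };

    void run_tree() {
        // One dummy root plus two children; ref_count is children + 1 because
        // the root itself waits in spawn_and_wait_for_all.
        tbb::empty_task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
        root.set_ref_count( 3 );
        tbb::task& a = *new( root.allocate_child() ) MyLeafTask;
        tbb::task& b = *new( root.allocate_child() ) MyLeafTask;
        tbb::task::spawn( a );                  // static form inherited from task_base
        root.spawn_and_wait_for_all( b );       // spawns b, then waits for both children
        tbb::task::destroy( root );             // a root used this way must be destroyed explicitly
    }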
//! @cond INTERNAL //! @cond INTERNAL
namespace internal { namespace internal {
class scheduler: no_copy { class scheduler: no_copy {
public: public:
//! For internal use only //! For internal use only
skipping to change at line 149 skipping to change at line 167
task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
void __TBB_EXPORTED_METHOD free( task& ) const; void __TBB_EXPORTED_METHOD free( task& ) const;
}; };
class allocate_child_proxy: no_assign { class allocate_child_proxy: no_assign {
public: public:
task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
void __TBB_EXPORTED_METHOD free( task& ) const; void __TBB_EXPORTED_METHOD free( task& ) const;
}; };
class allocate_additional_child_of_proxy: no_assign {
task& self;
task& parent;
public:
allocate_additional_child_of_proxy( task& self_, task& parent_ ) : self(self_), parent(parent_) {}
task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
void __TBB_EXPORTED_METHOD free( task& ) const;
};
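
The proxy classes above exist so that "new( proxy ) T(args)" can route allocation through the scheduler; each pairs with an overloaded operator new defined elsewhere in task.h. As a hedged illustration of the allocate_additional_child_of variant documented earlier (the "feeder" pattern for while-style loops), with illustrative class names and the assumption that FeederTask was itself spawned under a parent that is waiting:

    #include "tbb/task.h"

    class ItemTask : public tbb::task {         // illustrative
        int item;
    public:
        ItemTask( int i ) : item(i) {}
        tbb::task* execute() { /* process(item) */ return NULL; }
    };

    class FeederTask : public tbb::task {       // illustrative; assumed to run under a waiting parent
    public:
        tbb::task* execute() {
            for( int i = 0; i < 8; ++i ) {
                // The proxy's allocate() atomically bumps parent()->ref_count(),
                // so the parent cannot complete while these extra children run.
                tbb::task& c = *new( allocate_additional_child_of( *parent() ) ) ItemTask( i );
                spawn( c );
            }
            return NULL;
        }
    };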
//! Memory prefix to a task object. //! Memory prefix to a task object.
/** This class is internal to the library. /** This class is internal to the library.
Do not reference it directly, except within the library itself. Do not reference it directly, except within the library itself.
Fields are ordered in way that preserves backwards compatibility and yields Fields are ordered in way that preserves backwards compatibility and yields
good packing on typical 32-bit and 64-bit platforms. good packing on typical 32-bit and 64-bit platforms.
@ingroup task_scheduling */ @ingroup task_scheduling */
class task_prefix { class task_prefix {
private: private:
friend class tbb::task; friend class tbb::task;
friend class tbb::interface5::internal::task_base; friend class tbb::interface5::internal::task_base;
skipping to change at line 288 skipping to change at line 297
}; };
public: public:
enum kind_type { enum kind_type {
isolated, isolated,
bound bound
}; };
enum traits_type { enum traits_type {
exact_exception = 0x0001ul << traits_offset, exact_exception = 0x0001ul << traits_offset,
no_cancellation = 0x0002ul << traits_offset,
concurrent_wait = 0x0004ul << traits_offset, concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION #if TBB_USE_CAPTURED_EXCEPTION
default_traits = 0 default_traits = 0
#else #else
default_traits = exact_exception default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */ #endif /* !TBB_USE_CAPTURED_EXCEPTION */
}; };
private: private:
union { union {
skipping to change at line 480 skipping to change at line 488
executing, executing,
//! task to be rescheduled. //! task to be rescheduled.
reexecute, reexecute,
//! task is in ready pool, or is going to be put there, or was just taken off. //! task is in ready pool, or is going to be put there, or was just taken off.
ready, ready,
//! task object is freshly allocated or recycled. //! task object is freshly allocated or recycled.
allocated, allocated,
//! task object is on free list, or is going to be put there, or was just taken off. //! task object is on free list, or is going to be put there, or was just taken off.
freed, freed,
//! task to be recycled as continuation //! task to be recycled as continuation
recycle, recycle
//! task to be scheduled for starvation-resistant execution
to_enqueue
}; };
//------------------------------------------------------------------------ //------------------------------------------------------------------------
// Allocating tasks // Allocating tasks
//------------------------------------------------------------------------ //------------------------------------------------------------------------
//! Returns proxy for overloaded new that allocates a root task. //! Returns proxy for overloaded new that allocates a root task.
static internal::allocate_root_proxy allocate_root() { static internal::allocate_root_proxy allocate_root() {
return internal::allocate_root_proxy(); return internal::allocate_root_proxy();
} }
skipping to change at line 512 skipping to change at line 518
/** The continuation's parent becomes the parent of *this. */ /** The continuation's parent becomes the parent of *this. */
internal::allocate_continuation_proxy& allocate_continuation() { internal::allocate_continuation_proxy& allocate_continuation() {
return *reinterpret_cast<internal::allocate_continuation_proxy*>(this); return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
} }
//! Returns proxy for overloaded new that allocates a child task of *this. //! Returns proxy for overloaded new that allocates a child task of *this.
internal::allocate_child_proxy& allocate_child() { internal::allocate_child_proxy& allocate_child() {
return *reinterpret_cast<internal::allocate_child_proxy*>(this); return *reinterpret_cast<internal::allocate_child_proxy*>(this);
} }
    //! Like allocate_child, except that task's parent becomes "t", not this.
    /** Typically used in conjunction with schedule_to_reexecute to implement while loops.
        Atomically increments the reference count of t.parent() */
    internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
        return internal::allocate_additional_child_of_proxy(*this,t);
    }
    //! Define recommended static form via import from base class.
    using task_base::allocate_additional_child_of;
#if TBB_DEPRECATED_TASK_INTERFACE #if __TBB_DEPRECATED_TASK_INTERFACE
//! Destroy a task. //! Destroy a task.
/** Usually, calling this method is unnecessary, because a task is /** Usually, calling this method is unnecessary, because a task is
implicitly deleted after its execute() method runs. However, implicitly deleted after its execute() method runs. However,
sometimes a task needs to be explicitly deallocated, such as sometimes a task needs to be explicitly deallocated, such as
when a root task is used as the parent in spawn_and_wait_for_all. */ when a root task is used as the parent in spawn_and_wait_for_all. */
void __TBB_EXPORTED_METHOD destroy( task& t ); void __TBB_EXPORTED_METHOD destroy( task& t );
#else #else /* !__TBB_DEPRECATED_TASK_INTERFACE */
//! Define recommended static form via import from base class. //! Define recommended static form via import from base class.
using task_base::destroy; using task_base::destroy;
#endif /* TBB_DEPRECATED_TASK_INTERFACE */ #endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
//------------------------------------------------------------------------ //------------------------------------------------------------------------
// Recycling of tasks // Recycling of tasks
//------------------------------------------------------------------------ //------------------------------------------------------------------------
//! Change this to be a continuation of its former self. //! Change this to be a continuation of its former self.
/** The caller must guarantee that the task's refcount does not become zero until /** The caller must guarantee that the task's refcount does not become zero until
after the method execute() returns. Typically, this is done by having after the method execute() returns. Typically, this is done by having
method execute() return a pointer to a child of the task. If the guarantee method execute() return a pointer to a child of the task. If the guarantee
cannot be made, use method recycle_as_safe_continuation instead. cannot be made, use method recycle_as_safe_continuation instead.
skipping to change at line 613 skipping to change at line 615
//! Atomically decrement reference count. //! Atomically decrement reference count.
/** Has release semantics. */ /** Has release semantics. */
int decrement_ref_count() { int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
return int(internal_decrement_ref_count()); return int(internal_decrement_ref_count());
#else #else
return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1; return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
} }
#if TBB_DEPRECATED_TASK_INTERFACE
//! Schedule task for execution when a worker becomes available.
    /** After all children spawned so far finish their method task::execute,
        their parent's method task::execute may start running.  Therefore, it
        is important to ensure that at least one child has not completed until
        the parent is ready to run. */
void spawn( task& t );
//! Spawn multiple tasks and clear list.
void spawn( task_list& list );
#else
//! Define recommended static forms via import from base class. //! Define recommended static forms via import from base class.
using task_base::spawn; using task_base::spawn;
#endif /* TBB_DEPRECATED_TASK_INTERFACE */
//! Similar to spawn followed by wait_for_all, but more efficient. //! Similar to spawn followed by wait_for_all, but more efficient.
void spawn_and_wait_for_all( task& child ) { void spawn_and_wait_for_all( task& child ) {
prefix().owner->wait_for_all( *this, &child ); prefix().owner->wait_for_all( *this, &child );
} }
//! Similar to spawn followed by wait_for_all, but more efficient. //! Similar to spawn followed by wait_for_all, but more efficient.
void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list ); void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
//! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it. //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
skipping to change at line 793 skipping to change at line 783
return *result; return *result;
} }
//! Clear the list //! Clear the list
void clear() { void clear() {
first=NULL; first=NULL;
next_ptr=&first; next_ptr=&first;
} }
}; };
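
A short sketch of driving the list forms shown above: children are collected in a task_list and released with one call to the list overload of spawn_and_wait_for_all. The run_batch name reuses the illustrative MyLeafTask from the earlier sketch; the caller is assumed to allocate and later destroy the root.

    void run_batch( tbb::empty_task& root, int n ) {
        tbb::task_list batch;
        for( int i = 0; i < n; ++i )
            batch.push_back( *new( root.allocate_child() ) MyLeafTask );
        root.set_ref_count( n + 1 );            // n children + the waiting root
        root.spawn_and_wait_for_all( batch );   // spawns the whole list, then waits
    }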
#if TBB_DEPRECATED_TASK_INTERFACE inline void interface5::internal::task_base::spawn( task& t ) {
inline void task::spawn( task& t )
#else
inline void interface5::internal::task_base::spawn( task& t )
#endif
{
t.prefix().owner->spawn( t, t.prefix().next ); t.prefix().owner->spawn( t, t.prefix().next );
} }
#if TBB_DEPRECATED_TASK_INTERFACE inline void interface5::internal::task_base::spawn( task_list& list ) {
inline void task::spawn( task_list& list )
#else
inline void interface5::internal::task_base::spawn( task_list& list )
#endif
{
if( task* t = list.first ) { if( task* t = list.first ) {
t->prefix().owner->spawn( *t, *list.next_ptr ); t->prefix().owner->spawn( *t, *list.next_ptr );
list.clear(); list.clear();
} }
} }
inline void task::spawn_root_and_wait( task_list& root_list ) { inline void task::spawn_root_and_wait( task_list& root_list ) {
if( task* t = root_list.first ) { if( task* t = root_list.first ) {
t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr ); t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
root_list.clear(); root_list.clear();


 tbb_config.h   tbb_config.h 
skipping to change at line 174 skipping to change at line 174
//! Macro controlling EH usages in TBB tests //! Macro controlling EH usages in TBB tests
/** Some older versions of glibc crash when exception handling happens concurrently. **/ /** Some older versions of glibc crash when exception handling happens concurrently. **/
#define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1
#endif #endif
#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 #if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110
/** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/ /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/
#define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1
#endif #endif
#if __GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMPILER)
    /** A bug in GCC 3.3 with access to nested classes declared in protected area */
#define __TBB_GCC_3_3_PROTECTED_BROKEN 1
#endif
#if __FreeBSD__ #if __FreeBSD__
/** A bug in FreeBSD 8.0 results in kernel panic when there is contention /** A bug in FreeBSD 8.0 results in kernel panic when there is contention
on a mutex created with this attribute. **/ on a mutex created with this attribute. **/
#define __TBB_PRIO_INHERIT_BROKEN 1 #define __TBB_PRIO_INHERIT_BROKEN 1
/** A bug in FreeBSD 8.0 results in test hanging when an exception occurs /** A bug in FreeBSD 8.0 results in test hanging when an exception occurs
during (concurrent?) object construction by means of placement new operator. **/ during (concurrent?) object construction by means of placement new operator. **/
#define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1
#endif /* __FreeBSD__ */ #endif /* __FreeBSD__ */
#if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) #if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER)
/** The Intel compiler for IA-32 (Linux|Mac OS X) crashes or generates /** The Intel compiler for IA-32 (Linux|Mac OS X) crashes or generates
incorrect code when __asm__ arguments have a cast to volatile. **/ incorrect code when __asm__ arguments have a cast to volatile. **/
#define __TBB_ICC_ASM_VOLATILE_BROKEN 1 #define __TBB_ICC_ASM_VOLATILE_BROKEN 1
#endif #endif
#if __LRB__
#include "tbb_config_lrb.h"
#endif
#endif /* __TBB_tbb_config_H */ #endif /* __TBB_tbb_config_H */


 tbb_profiling.h   tbb_profiling.h 
skipping to change at line 33 skipping to change at line 33
file does not by itself cause the resulting executable to be covered by file does not by itself cause the resulting executable to be covered by
the GNU General Public License. This exception does not however the GNU General Public License. This exception does not however
invalidate any other reasons why the executable file might be covered by invalidate any other reasons why the executable file might be covered by
the GNU General Public License. the GNU General Public License.
*/ */
#ifndef __TBB_profiling_H #ifndef __TBB_profiling_H
#define __TBB_profiling_H #define __TBB_profiling_H
// Check if the tools support is enabled // Check if the tools support is enabled
#if (_WIN32||_WIN64||__linux__) && TBB_USE_THREADING_TOOLS #if (_WIN32||_WIN64||__linux__) && !__MINGW32__ && TBB_USE_THREADING_TOOLS
#if _WIN32||_WIN64 #if _WIN32||_WIN64
#include <stdlib.h> /* mbstowcs_s */ #include <stdlib.h> /* mbstowcs_s */
#endif #endif
#include "tbb_stddef.h" #include "tbb_stddef.h"
namespace tbb { namespace tbb {
namespace internal { namespace internal {
#if _WIN32||_WIN64 #if _WIN32||_WIN64
void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name ); void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name );
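
For illustration only: the internal hook declared above attaches a display name to a synchronization object for the Intel threading tools. Client code is normally expected to go through higher-level profiling wrappers rather than call internal entry points directly; the mutex and function names below are made up.

    #if (_WIN32||_WIN64) && TBB_USE_THREADING_TOOLS
    #include "tbb/spin_mutex.h"

    tbb::spin_mutex table_mutex;

    void name_mutex_for_tools() {
        // The Windows variant of the tools support takes a wide-character name.
        tbb::internal::itt_set_sync_name_v3( &table_mutex, L"table_mutex" );
    }
    #endif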


 tbb_stddef.h   tbb_stddef.h 
skipping to change at line 217 skipping to change at line 217
}; };
/** /**
* @cond INTERNAL * @cond INTERNAL
* @brief Identifiers declared inside namespace internal should never be used directly by client code. * @brief Identifiers declared inside namespace internal should never be used directly by client code.
*/ */
namespace internal { namespace internal {
using std::size_t; using std::size_t;
//! An unsigned integral type big enough to hold a pointer.
/** There's no guarantee by the C++ standard that a size_t is really big enough,
    but it happens to be for all platforms of interest. */
typedef size_t uintptr;
//! Compile-time constant that is upper bound on cache line/sector size. //! Compile-time constant that is upper bound on cache line/sector size.
/** It should be used only in situations where having a compile-time upper /** It should be used only in situations where having a compile-time upper
bound is more useful than a run-time exact answer. bound is more useful than a run-time exact answer.
@ingroup memory_allocation */ @ingroup memory_allocation */
const size_t NFS_MaxLineSize = 128; const size_t NFS_MaxLineSize = 128;
template<class T, int S> template<class T, int S>
struct padded_base : T { struct padded_base : T {
char pad[NFS_MaxLineSize - sizeof(T) % NFS_MaxLineSize]; char pad[NFS_MaxLineSize - sizeof(T) % NFS_MaxLineSize];
}; };
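
A standalone illustration (not the TBB API, and deliberately avoiding the internal namespace, which the comment above says client code must not use) of the padding idiom behind padded_base: rounding each element up to the cache-line bound keeps neighbouring elements on separate lines and avoids false sharing. All names below are illustrative.

    #include <cstddef>

    const std::size_t kLineSize = 128;          // same compile-time upper bound as NFS_MaxLineSize

    template<typename T>
    struct padded_copy : T {
        // Valid only when sizeof(T) is not already a multiple of kLineSize,
        // which is why TBB wraps this in a helper that handles that case.
        char pad[kLineSize - sizeof(T) % kLineSize];
    };

    struct counter { long value; };
    padded_copy<counter> per_thread_hits[8];    // each element occupies at least one full line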
