atomic.h

skipping to change at line 424

    T operator=( T rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
};
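
The comment above refers to two-phase name lookup: inside a class template, an unqualified name is not looked up in a dependent base class, so the inherited store_with_release has to be reached through this->. A minimal standalone illustration of the rule (class and member names here are hypothetical, not TBB's):

    #include <iostream>

    template<typename T>
    struct base {
        void store_with_release( T v ) { std::cout << "stored " << v << "\n"; }
    };

    template<typename T>
    struct derived : base<T> {
        void assign( T v ) {
            // store_with_release(v);     // rejected by strict ISO C++ compilers: unqualified
            //                            // names are not looked up in the dependent base base<T>
            this->store_with_release(v);  // "this->" makes the call dependent, deferring lookup
                                          // to instantiation time, when base<T> is known
        }
    };

    int main() {
        derived<int> d;
        d.assign(42);   // prints "stored 42"
    }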

#if __TBB_ATOMIC_CTORS
#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
        atomic() = default; \
        constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \
        \
        T operator=( T rhs ) {return store_with_release(rhs);} \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
    };
#else
#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
        T operator=( T rhs ) {return store_with_release(rhs);} \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \

skipping to change at line 457

#if _MSC_VER && !_WIN64
#if __TBB_ATOMIC_CTORS
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
   with an operator=(U) that explicitly converts the U to a T. Types T and U should be
   type synonyms on the platform. Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
        atomic() = default ; \
        constexpr atomic(T arg): internal::atomic_impl_with_arithmetic<T,T,char>(arg) {} \
        T operator=( U rhs ) {return store_with_release(T(rhs));} \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
    };
#else
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> { \
        T operator=( U rhs ) {return store_with_release(T(rhs));} \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;} \
    };
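
The comment block explains the intent; as a concrete illustration of the T/U pairing it describes (hypothetical invocations, not quoted from this hunk), the macro would be applied to a type together with its /Wp64-suspect synonym, so that operator=(U) performs the narrowing explicitly:

    // Illustrative invocations only: the actual instantiation list lives elsewhere in atomic.h.
    // On 32-bit MSVC, size_t and ptrdiff_t are synonyms of unsigned and int, but cl /Wp64
    // treats them as potentially 64-bit, so operator=(U) converts via T(rhs) to silence
    // the spurious truncation warning.
    __TBB_DECL_ATOMIC_ALT(unsigned, size_t)
    __TBB_DECL_ATOMIC_ALT(int, ptrdiff_t)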

End of changes. 2 change blocks; 2 lines changed or deleted, 2 lines changed or added.

task.h

skipping to change at line 374

    __itt_caller itt_caller;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    /** Read accesses to the field my_cancellation_requested are on the hot path inside
        the scheduler. This padding ensures that this field never shares the same cache
        line with a local variable that is frequently written to. **/
    char _leading_padding[internal::NFS_MaxLineSize
                          - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)];

-   //! Specifies whether cancellation was request for this task group.
+   //! Specifies whether cancellation was requested for this task group.
    uintptr_t my_cancellation_requested;

    //! Version for run-time checks and behavioral traits of the context.
    /** Version occupies low 16 bits, and traits (zero or more ORed enumerators
        from the traits_type enumerations) take the next 16 bits.
        Original (zeroth) version of the context did not support any traits. **/
    uintptr_t my_version_and_traits;

    //! Pointer to the container storing exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler instance that registered this context in its thread specific list.
    internal::generic_scheduler *my_owner;

    //! Internal state (combination of state flags).
    uintptr_t my_state;

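The _leading_padding comment above describes a standard false-sharing defence: size a filler array so that the frequently read field starts on its own cache line. A self-contained sketch of the same arithmetic (the struct, field names, and the 128-byte line size are illustrative assumptions, not taken from task.h):

    #include <cstddef>
    #include <cstdint>

    // Assumed "max cache line" figure, playing the role of internal::NFS_MaxLineSize.
    constexpr std::size_t cache_line_size = 128;

    struct context_like {
        // Members that happen to precede the hot field.
        void*          owner;
        std::uintptr_t node[2];

        // Filler sized so that the hot field below begins at a cache-line boundary,
        // mirroring the sizeof() subtraction used for _leading_padding.
        char leading_padding[cache_line_size - sizeof(void*) - 2 * sizeof(std::uintptr_t)];

        // Frequently *read* flag: keeping it off the writers' cache line avoids false sharing.
        std::uintptr_t cancellation_requested;
    };

    static_assert( offsetof(context_like, cancellation_requested) % cache_line_size == 0,
                   "hot field should start a new cache line" );
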
End of changes. 2 change blocks; 2 lines changed or deleted, 2 lines changed or added.

tbb_machine.h

skipping to change at line 216

        #include "machine/linux_ia32.h"
    #endif
#elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)
    #include "machine/icc_generic.h"
#elif defined(_M_IX86)
    #include "machine/windows_ia32.h"
#elif defined(_M_X64)
    #include "machine/windows_intel64.h"
#elif _XBOX
    #include "machine/xbox360_ppc.h"
+#elif _M_ARM
+    #include "machine/msvc_armv7.h"
#endif

#ifdef _MANAGED
#pragma managed(pop)
#endif

#elif __TBB_DEFINE_MIC

    #include "machine/mic_common.h"
    //TODO: check if ICC atomic intrinsics are available for MIC

skipping to change at line 451 (453 in the new version)

        const uint32_t big_comparand = surroundings | shifted_comparand ;
        const uint32_t big_value     = surroundings | shifted_value ;
        // __TBB_machine_cmpswp4 presumed to have full fence.
        // Cast shuts up /Wp64 warning
        const uint32_t big_result = (uint32_t)__TBB_machine_cmpswp4( aligned_ptr, big_value, big_comparand );
        if( big_result == big_comparand    // CAS succeeded
            || ((big_result ^ big_comparand) & mask) != 0) // CAS failed and the bits of interest have changed
        {
            return T((big_result & mask) >> bits_to_shift);
        }
-       else continue;                     // CAS failed but the bits of interest left unchanged
+       else continue;                     // CAS failed but the bits of interest were not changed
    }
}
#endif //__TBB_BIG_ENDIAN!=-1
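
__TBB_MaskedCompareAndSwap, whose tail appears above, emulates a sub-word CAS by embedding the operand in its enclosing 4-byte word and retrying only when the surrounding bytes interfered. A condensed sketch of the same idea written against std::atomic rather than __TBB_machine_cmpswp4 (the helper name and the little-endian byte indexing are assumptions for the example, not TBB code):

    #include <atomic>
    #include <cstdint>

    // Emulate CAS on one byte of a 4-byte word: keep the surrounding bytes as observed,
    // substitute the comparand/value into the target byte, and CAS the whole word.
    inline std::uint8_t byte_cas( std::atomic<std::uint32_t>& word, unsigned byte_index,
                                  std::uint8_t value, std::uint8_t comparand ) {
        const int shift = 8 * byte_index;
        const std::uint32_t mask = std::uint32_t(0xFFu) << shift;
        for(;;) {
            std::uint32_t observed = word.load(std::memory_order_relaxed);
            std::uint32_t expected = (observed & ~mask) | (std::uint32_t(comparand) << shift);
            std::uint32_t desired  = (observed & ~mask) | (std::uint32_t(value)     << shift);
            if( word.compare_exchange_strong(expected, desired) )
                return comparand;                                   // CAS succeeded
            // On failure, 'expected' holds the word the CAS actually saw.
            if( (expected ^ (std::uint32_t(comparand) << shift)) & mask )
                return std::uint8_t((expected & mask) >> shift);    // the byte itself differed
            // Only the surrounding bytes changed: retry, like the "else continue" above.
        }
    }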
template<size_t S, typename T>
inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand );

template<>
inline uint8_t __TBB_CompareAndSwapGeneric <1,uint8_t> (volatile void *ptr, uint8_t value, uint8_t comparand ) {
#if __TBB_USE_GENERIC_PART_WORD_CAS
    return __TBB_MaskedCompareAndSwap<uint8_t>((volatile uint8_t *)ptr,value,comparand);

skipping to change at line 582 (584 in the new version)

#if ! __TBB_USE_FENCED_ATOMICS
    #undef __TBB_machine_cmpswp8full_fence
#endif

#define __TBB_machine_store8 tbb::internal::__TBB_machine_generic_store8full_fence
#define __TBB_machine_load8  tbb::internal::__TBB_machine_generic_load8full_fence
#endif /* __TBB_USE_GENERIC_DWORD_LOAD_STORE */

#if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE
/** Fenced operations use volatile qualifier to prevent compiler from optimizing
-   them out, and on on architectures with weak memory ordering to induce compiler
+   them out, and on architectures with weak memory ordering to induce compiler
    to generate code with appropriate acquire/release semantics.
-   On architectures like IA32, Intel64 (and likely and Sparc TSO) volatile has
+   On architectures like IA32, Intel64 (and likely Sparc TSO) volatile has
    no effect on code gen, and consistency helpers serve as a compiler fence (the
-   latter being true for IA64/gcc as well to fix a bug in some gcc versions). **/
+   latter being true for IA64/gcc as well to fix a bug in some gcc versions).
+   This code assumes that the generated instructions will operate atomically,
+   which typically requires a type that can be moved in a single instruction,
+   cooperation from the compiler for effective use of such an instruction,
+   and appropriate alignment of the data. **/
template <typename T, size_t S>
struct machine_load_store {
    static T load_with_acquire ( const volatile T& location ) {
        T to_return = location;
        __TBB_acquire_consistency_helper();
        return to_return;
    }
    static void store_with_release ( volatile T &location, T value ) {
        __TBB_release_consistency_helper();
        location = value;

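For comparison, the guarantee that machine_load_store expresses with volatile accesses plus the consistency helpers is what C++11 states directly through std::atomic; a conceptual equivalent (a sketch, not a drop-in replacement for the TBB template) looks like this:

    #include <atomic>

    template<typename T>
    T load_with_acquire( const std::atomic<T>& location ) {
        // Acquire: no later reads/writes in this thread may be reordered before the load.
        return location.load(std::memory_order_acquire);
    }

    template<typename T>
    void store_with_release( std::atomic<T>& location, T value ) {
        // Release: no earlier reads/writes in this thread may be reordered after the store.
        location.store(value, std::memory_order_release);
    }

std::atomic also supplies the single-instruction atomicity and alignment that the comment above has to assume.
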
skipping to change at line 829 (835 in the new version)

using tbb::internal::__TBB_store_with_release;

// Mapping historically used names to the ones expected by atomic_load_store_traits
#define __TBB_load_acquire  __TBB_load_with_acquire
#define __TBB_store_release __TBB_store_with_release

#ifndef __TBB_Log2
inline intptr_t __TBB_Log2( uintptr_t x ) {
    if( x==0 ) return -1;
    intptr_t result = 0;
+
+#ifndef _M_ARM
    uintptr_t tmp;
+   if( sizeof(x)>4 && (tmp = ((uint64_t)x)>>32) ) { x=tmp; result += 32; }
+#endif
+   if( uintptr_t tmp = x>>16 ) { x=tmp; result += 16; }
+   if( uintptr_t tmp = x>>8 )  { x=tmp; result += 8; }
+   if( uintptr_t tmp = x>>4 )  { x=tmp; result += 4; }
+   if( uintptr_t tmp = x>>2 )  { x=tmp; result += 2; }
+
-   if( sizeof(x)>4 && (tmp = ((uint64_t)x)>>32)) { x=tmp; result += 32; }
-   if( (tmp = x>>16) ) { x=tmp; result += 16; }
-   if( (tmp = x>>8) )  { x=tmp; result += 8; }
-   if( (tmp = x>>4) )  { x=tmp; result += 4; }
-   if( (tmp = x>>2) )  { x=tmp; result += 2; }
    return (x&2)? result+1: result;
}
#endif
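
__TBB_Log2 computes floor(log2 x) by repeatedly discarding the low half of the remaining bits and accumulating the shift count; the final (x&2) test contributes the last bit. A few illustrative checks (a hypothetical test snippet, not part of the header):

    #include <cassert>

    void log2_examples() {
        assert( __TBB_Log2(0)  == -1 );  // documented sentinel for zero
        assert( __TBB_Log2(1)  ==  0 );
        assert( __TBB_Log2(2)  ==  1 );
        assert( __TBB_Log2(3)  ==  1 );  // floor, not rounding: log2(3) is about 1.58
        assert( __TBB_Log2(64) ==  6 );
        assert( __TBB_Log2(65) ==  6 );
    }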

#ifndef __TBB_AtomicOR
inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) {
    tbb::internal::atomic_backoff b;
    for(;;) {
        uintptr_t tmp = *(volatile uintptr_t *)operand;
        uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp);

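The loop body is cut off by the diff, but its shape is the classic read/CAS/retry idiom: reread the word, attempt to install tmp|addend, and back off until no other thread has changed the word in between. The same pattern in portable C++11 form (a sketch; the real function keeps using __TBB_CompareAndSwapW and atomic_backoff):

    #include <atomic>
    #include <cstdint>

    inline void atomic_or( std::atomic<std::uintptr_t>& operand, std::uintptr_t addend ) {
        std::uintptr_t observed = operand.load(std::memory_order_relaxed);
        // compare_exchange_weak refreshes 'observed' on failure, so the loop simply retries
        // until the OR is applied to an unmodified value. A production version would also
        // pause between attempts, as atomic_backoff does.
        while( !operand.compare_exchange_weak(observed, observed | addend) ) {}
    }
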
End of changes. 8 change blocks; 10 lines changed or deleted, 22 lines changed or added.

tbb_profiling.h

skipping to change at line 66

#else
        void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const char* name );
#endif
    } // namespace internal
} // namespace tbb

//! Macro __TBB_DEFINE_PROFILING_SET_NAME(T) defines "set_name" methods for sync objects of type T
/** Should be used in the "tbb" namespace only.
    Don't place semicolon after it to avoid compiler warnings. **/
#if _WIN32||_WIN64
    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type) \
        namespace profiling { \
            inline void set_name( sync_object_type& obj, const wchar_t* name ) { \
                tbb::internal::itt_set_sync_name_v3( &obj, name ); \
            } \
            inline void set_name( sync_object_type& obj, const char* name ) { \
                size_t len = tbb::internal::multibyte_to_widechar(NULL, name, 0); \
                wchar_t *wname = new wchar_t[len]; \
                tbb::internal::multibyte_to_widechar(wname, name, len); \
                set_name( obj, wname ); \
                delete[] wname; \
            } \
        }
#else /* !WIN */
    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type) \
        namespace profiling { \
            inline void set_name( sync_object_type& obj, const char* name ) { \
                tbb::internal::itt_set_sync_name_v3( &obj, name ); \
            } \
        }
#endif /* !WIN */

#else /* no tools support */

#if _WIN32||_WIN64
    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type) \
        namespace profiling { \
            inline void set_name( sync_object_type&, const wchar_t* ) {} \
            inline void set_name( sync_object_type&, const char* ) {} \
        }
#else /* !WIN */
    #define __TBB_DEFINE_PROFILING_SET_NAME(sync_object_type) \
        namespace profiling { \
            inline void set_name( sync_object_type&, const char* ) {} \
        }
#endif /* !WIN */

#endif /* no tools support */
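
In the Windows branch above, the char overload widens the name with a two-pass call to TBB's internal multibyte_to_widechar: first with a NULL destination to measure, then into a freshly allocated buffer. The same idiom with the standard std::mbstowcs standing in for the internal helper (a sketch with assumed locale handling, managing the buffer via std::wstring rather than new[]/delete[]):

    #include <cstdlib>   // std::mbstowcs
    #include <string>

    inline std::wstring widen( const char* name ) {
        std::size_t len = std::mbstowcs(NULL, name, 0);        // pass 1: measure only
        if( len == std::size_t(-1) ) return std::wstring();    // invalid multibyte sequence
        std::wstring wname(len, L'\0');
        std::mbstowcs(&wname[0], name, len);                   // pass 2: convert into the buffer
        return wname;
    }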

#include "atomic.h"
// Need these to work regardless of tools support
namespace tbb {

End of changes. 4 change blocks; 4 lines changed or deleted, 4 lines changed or added.

tbb_stddef.h

skipping to change at line 307

inline void poison_pointer( T*& p ) { p = reinterpret_cast<T*>(poisoned_ptr); }

/** Expected to be used in assertions only, thus no empty form is defined. **/
template<typename T>
inline bool is_poisoned( T* p ) { return p == reinterpret_cast<T*>(poisoned_ptr); }
#else
template<typename T>
inline void poison_pointer( T* ) {/*do nothing*/}
#endif /* !TBB_USE_ASSERT */

-//! Cast pointer from U* to T.
+//! Cast between unrelated pointer types.
/** This method should be used sparingly as a last resort for dealing with
    situations that inherently break strict ISO C++ aliasing rules. */
+// T is a pointer type because it will be explicitly provided by the programmer as a template argument;
+// U is a referent type to enable the compiler to check that "ptr" is a pointer, deducing U in the process.
template<typename T, typename U>
inline T punned_cast( U* ptr ) {
    uintptr_t x = reinterpret_cast<uintptr_t>(ptr);
    return reinterpret_cast<T>(x);
}

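A hypothetical use of punned_cast, illustrating why T is the full pointer type while U is deduced from the argument (namespace qualification omitted; the handle types are invented for the example):

    struct opaque_handle;   // what some external interface hands back
    struct sync_object;     // what the surrounding code actually works with

    inline sync_object* to_sync_object( opaque_handle* h ) {
        // One explicit, audited conversion point instead of scattered reinterpret_casts:
        // T = sync_object* is spelled out, U = opaque_handle is deduced, and the compiler
        // still rejects non-pointer arguments.
        return punned_cast<sync_object*>( h );
    }
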
//! Base class for types that should not be assigned.
class no_assign {
    // Deny assignment
    void operator=( const no_assign& );

End of changes. 2 change blocks; 1 line changed or deleted, 5 lines changed or added.