Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
queuing_rw_mutex.cpp
/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "tbb/queuing_rw_mutex.h"
#include "tbb/tbb_machine.h"
#include "tbb/tbb_stddef.h"
#include "itt_notify.h"

namespace tbb {

using namespace internal;

//! Flag bits in a state_t that specify information about a locking request.
enum state_t_flags {
    // NOTE: the numeric values are a reconstruction; assume each base state is a
    // distinct bit and each STATE_COMBINED_* mask is the OR of its component states.
    STATE_WRITER                 = 1<<0,
    STATE_READER                 = 1<<1,
    STATE_READER_UNBLOCKNEXT     = 1<<2,
    STATE_ACTIVEREADER           = 1<<3,
    STATE_UPGRADE_REQUESTED      = 1<<4,
    STATE_UPGRADE_WAITING        = 1<<5,
    STATE_UPGRADE_LOSER          = 1<<6,
    STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT,
    STATE_COMBINED_READER        = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER,
    STATE_COMBINED_UPGRADING     = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER
};

const unsigned char RELEASED = 0;
const unsigned char ACQUIRED = 1;

//! Try to acquire the internal lock
inline bool queuing_rw_mutex::scoped_lock::try_acquire_internal_lock()
{
    // Attempt to change my_internal_lock from RELEASED to ACQUIRED; true on success.
    return as_atomic(my_internal_lock).compare_and_swap<tbb::acquire>(ACQUIRED, RELEASED) == RELEASED;
}

//! Acquire the internal lock
inline void queuing_rw_mutex::scoped_lock::acquire_internal_lock()
{
    // Usually, we would use the test-test-and-set idiom here, with exponential backoff.
    // But so far, experiments indicate there is no value in doing so here.
    while( !try_acquire_internal_lock() ) {
        __TBB_Pause(1);
    }
}
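
For comparison, the test-test-and-set idiom with exponential backoff mentioned in the comment above (and deliberately not used here) would look roughly like the sketch below. It is written against std::atomic rather than TBB's machine primitives, and the function name ttas_acquire is illustrative only, not part of TBB.

// Hypothetical sketch only: test-test-and-set acquire with exponential backoff,
// shown to contrast with the simpler spin loop in acquire_internal_lock().
#include <atomic>
#include <thread>

inline void ttas_acquire( std::atomic<unsigned char>& lock_byte ) {
    int backoff = 1;                                   // grows geometrically, capped below
    for(;;) {
        // "test" phase: spin on plain loads so the cache line is not hammered by RMW traffic
        while( lock_byte.load(std::memory_order_relaxed) != 0 ) {
            for( int i = 0; i < backoff; ++i )
                std::this_thread::yield();             // stand-in for a pause instruction
            if( backoff < 64 ) backoff *= 2;           // exponential backoff
        }
        // "test-and-set" phase: attempt the actual atomic transition
        if( lock_byte.exchange(1, std::memory_order_acquire) == 0 )
            return;                                    // lock acquired
    }
}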

//! Release the internal lock
inline void queuing_rw_mutex::scoped_lock::release_internal_lock()
{
    __TBB_store_with_release(my_internal_lock, RELEASED);
}

//! Wait for internal lock to be released
inline void queuing_rw_mutex::scoped_lock::wait_for_release_of_internal_lock()
{
    spin_wait_until_eq(my_internal_lock, RELEASED);
}

//! A helper function: either wait for the internal lock to be released by another
//! thread, or release it ourselves, depending on the flag.
inline void queuing_rw_mutex::scoped_lock::unblock_or_wait_on_internal_lock( uintptr_t flag ) {
    if( flag )
        wait_for_release_of_internal_lock();
    else
        release_internal_lock();
}

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4311 4312)
#endif

//! A view of a T* with additional functionality for twiddling low-order bits.
template<typename T>
class tricky_atomic_pointer: no_copy {
public:
    typedef typename atomic_selector<sizeof(T*)>::word word;

    template<memory_semantics M>
    static T* fetch_and_add( T* volatile * location, word addend ) {
        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) );
    }
    template<memory_semantics M>
    static T* fetch_and_store( T* volatile * location, T* value ) {
        return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) );
    }
    template<memory_semantics M>
    static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) {
        return reinterpret_cast<T*>(
            atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value),
                                                          reinterpret_cast<word>(comparand))
        );
    }

    T* & ref;
    tricky_atomic_pointer( T*& original ) : ref(original) {};
    tricky_atomic_pointer( T* volatile & original ) : ref(original) {};
    T* operator&( word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 );
    }
    T* operator|( word operand2 ) const {
        return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 );
    }
};

typedef tricky_atomic_pointer<queuing_rw_mutex::scoped_lock> tricky_pointer;

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (pop)
#endif

//! Mask for low order bit of a pointer.
static const tricky_pointer::word FLAG = 0x1;

inline
uintptr_t get_flag( queuing_rw_mutex::scoped_lock* ptr ) {
    return uintptr_t(ptr) & FLAG;
}
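
The FLAG mask and get_flag() rely on scoped_lock objects being aligned to at least two bytes, so the low-order bit of a pointer to one is normally zero and can carry an extra marker (used below to tag links as "in use" or "wait"). A minimal standalone sketch of the same low-bit tagging idiom, independent of TBB's atomics, follows; the Node type and helper names are illustrative only.

// Hypothetical sketch only: tagging the low bit of a sufficiently aligned pointer.
#include <cassert>
#include <cstdint>

struct Node { int payload; };                      // any type with alignment >= 2

const std::uintptr_t TAG = 0x1;                    // plays the role of FLAG above

inline Node* tag( Node* p )     { return reinterpret_cast<Node*>( reinterpret_cast<std::uintptr_t>(p) | TAG ); }
inline Node* untag( Node* p )   { return reinterpret_cast<Node*>( reinterpret_cast<std::uintptr_t>(p) & ~TAG ); }
inline bool  has_tag( Node* p ) { return ( reinterpret_cast<std::uintptr_t>(p) & TAG ) != 0; }

int main() {
    Node n = { 42 };
    Node* marked = tag(&n);                        // pointer plus one bit of out-of-band state
    assert( has_tag(marked) );
    assert( untag(marked) == &n );                 // the original pointer is fully recoverable
    assert( untag(marked)->payload == 42 );
    return 0;
}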

//------------------------------------------------------------------------
// Methods of queuing_rw_mutex::scoped_lock
//------------------------------------------------------------------------

//! Acquire lock on the given mutex.
void queuing_rw_mutex::scoped_lock::acquire( queuing_rw_mutex& m, bool write )
{
    __TBB_ASSERT( !my_mutex, "scoped_lock is already holding a mutex");

    // Must set all fields before the fetch_and_store, because once the
    // fetch_and_store executes, *this becomes accessible to other threads.
    my_mutex = &m;
    __TBB_store_relaxed(my_prev , (scoped_lock*)0);
    __TBB_store_relaxed(my_next , (scoped_lock*)0);
    __TBB_store_relaxed(my_going, 0);
    my_state = state_t(write ? STATE_WRITER : STATE_READER);
    my_internal_lock = RELEASED;

    queuing_rw_mutex::scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this);

    if( write ) {       // Acquiring for write

        if( pred ) {
            ITT_NOTIFY(sync_prepare, my_mutex);
            pred = tricky_pointer(pred) & ~FLAG;
            __TBB_ASSERT( !( uintptr_t(pred) & FLAG ), "use of corrupted pointer!" );
#if TBB_USE_ASSERT
            __TBB_control_consistency_helper(); // on "m.q_tail"
            __TBB_ASSERT( !__TBB_load_relaxed(pred->my_next), "the predecessor has another successor!");
#endif
            __TBB_store_with_release(pred->my_next, this);
            spin_wait_until_eq(my_going, 1);
        }

    } else {            // Acquiring for read
#if DO_ITT_NOTIFY
        bool sync_prepare_done = false;
#endif
        if( pred ) {
            unsigned short pred_state;
            __TBB_ASSERT( !__TBB_load_relaxed(my_prev), "the predecessor is already set" );
            if( uintptr_t(pred) & FLAG ) {
                /* this is only possible if pred is an upgrading reader and it signals us to wait */
                pred_state = STATE_UPGRADE_WAITING;
                pred = tricky_pointer(pred) & ~FLAG;
            } else {
                // Load pred->my_state now, because once pred->my_next becomes
                // non-NULL, we must assume that *pred might be destroyed.
                pred_state = pred->my_state.compare_and_swap<tbb::acquire>(STATE_READER_UNBLOCKNEXT, STATE_READER);
            }
            __TBB_store_relaxed(my_prev, pred);
            __TBB_ASSERT( !( uintptr_t(pred) & FLAG ), "use of corrupted pointer!" );
#if TBB_USE_ASSERT
            __TBB_control_consistency_helper(); // on "m.q_tail"
            __TBB_ASSERT( !__TBB_load_relaxed(pred->my_next), "the predecessor has another successor!");
#endif
            __TBB_store_with_release(pred->my_next, this);
            if( pred_state != STATE_ACTIVEREADER ) {
#if DO_ITT_NOTIFY
                sync_prepare_done = true;
                ITT_NOTIFY(sync_prepare, my_mutex);
#endif
                spin_wait_until_eq(my_going, 1);
            }
        }

        // The protected state must have been acquired here before it can be further released to any other reader(s):
        unsigned short old_state = my_state.compare_and_swap<tbb::acquire>(STATE_ACTIVEREADER, STATE_READER);
        if( old_state!=STATE_READER ) {
#if DO_ITT_NOTIFY
            if( !sync_prepare_done )
                ITT_NOTIFY(sync_prepare, my_mutex);
#endif
            // Failed to become active reader -> need to unblock the next waiting reader first
            __TBB_ASSERT( my_state==STATE_READER_UNBLOCKNEXT, "unexpected state" );
            spin_wait_while_eq(my_next, (scoped_lock*)NULL);
            /* my_state must be changed before unblocking the successor; otherwise the successor
               might finish and another thread could observe our old state and be left blocked */
            my_state = STATE_ACTIVEREADER;
            __TBB_store_with_release(my_next->my_going, 1);
        }
    }

    ITT_NOTIFY(sync_acquired, my_mutex);

    // Force acquire so that user's critical section receives correct values
    // from processor that was previously in the user's critical section.
    __TBB_load_with_acquire(my_going);
}

//! Acquire lock on the given mutex if it is free (i.e. non-blocking).
bool queuing_rw_mutex::scoped_lock::try_acquire( queuing_rw_mutex& m, bool write )
{
    __TBB_ASSERT( !my_mutex, "scoped_lock is already holding a mutex");

    if( load<relaxed>(m.q_tail) )
        return false; // Someone already took the lock

    // Must set all fields before the fetch_and_store, because once the
    // fetch_and_store executes, *this becomes accessible to other threads.
    __TBB_store_relaxed(my_prev, (scoped_lock*)0);
    __TBB_store_relaxed(my_next, (scoped_lock*)0);
    __TBB_store_relaxed(my_going, 0); // TODO: remove dead assignment?
    my_state = state_t(write ? STATE_WRITER : STATE_ACTIVEREADER);
    my_internal_lock = RELEASED;

    // The CAS must have release semantics, because we are
    // "sending" the fields initialized above to other processors.
    if( m.q_tail.compare_and_swap<tbb::release>(this, NULL) )
        return false; // Someone already took the lock
    // Force acquire so that user's critical section receives correct values
    // from processor that was previously in the user's critical section.
    __TBB_load_with_acquire(my_going);
    my_mutex = &m;
    ITT_NOTIFY(sync_acquired, my_mutex);
    return true;
}

//! Release the lock.
void queuing_rw_mutex::scoped_lock::release()
{
    __TBB_ASSERT(my_mutex!=NULL, "no lock acquired");

    ITT_NOTIFY(sync_releasing, my_mutex);

    if( my_state == STATE_WRITER ) { // Acquired for write

        // The logic below is the same as "writerUnlock", but elides
        // "return" from the middle of the routine.
        // In the statement below, acquire semantics of reading my_next is required
        // so that following operations with fields of my_next are safe.
        scoped_lock* n = __TBB_load_with_acquire(my_next);
        if( !n ) {
            if( this == my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                // this was the only item in the queue, and the queue is now empty.
                goto done;
            }
            spin_wait_while_eq( my_next, (scoped_lock*)NULL );
            n = __TBB_load_with_acquire(my_next);
        }
        __TBB_store_relaxed(n->my_going, 2); // protect next queue node from being destroyed too early
        if( n->my_state==STATE_UPGRADE_WAITING ) {
            // the next waiting for upgrade means this writer was upgraded before.
            acquire_internal_lock();
            queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL);
            n->my_state = STATE_UPGRADE_LOSER;
            __TBB_store_with_release(n->my_going, 1);
            unblock_or_wait_on_internal_lock(get_flag(tmp));
        } else {
            __TBB_ASSERT( n->my_state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" );
            __TBB_ASSERT( !( uintptr_t(__TBB_load_relaxed(n->my_prev)) & FLAG ), "use of corrupted pointer!" );
            __TBB_store_relaxed(n->my_prev, (scoped_lock*)0);
            __TBB_store_with_release(n->my_going, 1);
        }

    } else { // Acquired for read

        queuing_rw_mutex::scoped_lock *tmp = NULL;
retry:
        // Addition to the original paper: Mark my_prev as in use
        queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG);

        if( pred ) {
            if( !(pred->try_acquire_internal_lock()) )
            {
                // Failed to acquire the lock on pred. The predecessor either unlinks or upgrades.
                // In the second case, it may or may not have seen my "in use" flag - need to check.
                tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred) | FLAG );
                if( !(uintptr_t(tmp) & FLAG) ) {
                    // Wait for the predecessor to change my_prev (e.g. during unlink)
                    spin_wait_while_eq( my_prev, tricky_pointer(pred)|FLAG );
                    // Now owner of pred is waiting for _us_ to release its lock
                    pred->release_internal_lock();
                }
                // else the "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do

                tmp = NULL;
                goto retry;
            }
            __TBB_ASSERT(pred && pred->my_internal_lock==ACQUIRED, "predecessor's lock is not acquired");
            __TBB_store_relaxed(my_prev, pred);
            acquire_internal_lock();

            __TBB_store_with_release(pred->my_next, static_cast<scoped_lock *>(NULL));

            if( !__TBB_load_relaxed(my_next) && this != my_mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) {
                spin_wait_while_eq( my_next, (void*)NULL );
            }
            __TBB_ASSERT( !get_flag(__TBB_load_relaxed(my_next)), "use of corrupted pointer" );

            // ensure acquire semantics of reading 'my_next'
            if( scoped_lock *const l_next = __TBB_load_with_acquire(my_next) ) { // I->next != nil, TODO: rename to n after clearing up and adapting the n in the comment two lines below
                // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0
                tmp = tricky_pointer::fetch_and_store<tbb::release>(&(l_next->my_prev), pred);
                // I->prev->next = I->next;
                __TBB_ASSERT(__TBB_load_relaxed(my_prev)==pred, NULL);
                __TBB_store_with_release(pred->my_next, my_next);
            }
            // Safe to release in the order opposite to acquiring, which makes the code simpler
            pred->release_internal_lock();

        } else { // No predecessor when we looked
            acquire_internal_lock(); // "exclusiveLock(&I->EL)"
            scoped_lock* n = __TBB_load_with_acquire(my_next);
            if( !n ) {
                if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) {
                    spin_wait_while_eq( my_next, (scoped_lock*)NULL );
                    n = __TBB_load_relaxed(my_next);
                } else {
                    goto unlock_self;
                }
            }
            __TBB_store_relaxed(n->my_going, 2); // protect next queue node from being destroyed too early
            tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), NULL);
            __TBB_store_with_release(n->my_going, 1);
        }
unlock_self:
        unblock_or_wait_on_internal_lock(get_flag(tmp));
    }
done:
    spin_wait_while_eq( my_going, 2 );

    initialize();
}

//! Downgrade writer to become a reader.
bool queuing_rw_mutex::scoped_lock::downgrade_to_reader()
{
    if ( my_state == STATE_ACTIVEREADER ) return true; // Already a reader

    ITT_NOTIFY(sync_releasing, my_mutex);
    my_state = STATE_READER;
    if( ! __TBB_load_relaxed(my_next) ) {
        // the following load of q_tail must not be reordered with setting STATE_READER above
        if( this==my_mutex->q_tail.load<full_fence>() ) {
            unsigned short old_state = my_state.compare_and_swap<tbb::release>(STATE_ACTIVEREADER, STATE_READER);
            if( old_state==STATE_READER )
                return true; // Downgrade completed
        }
        /* wait for the successor to register itself */
        spin_wait_while_eq( my_next, (void*)NULL );
    }
    scoped_lock *const n = __TBB_load_with_acquire(my_next);
    __TBB_ASSERT( n, "still no successor at this point!" );
    if( n->my_state & STATE_COMBINED_WAITINGREADER )
        __TBB_store_with_release(n->my_going, 1);
    else if( n->my_state==STATE_UPGRADE_WAITING )
        // the next waiting for upgrade means this writer was upgraded before.
        n->my_state = STATE_UPGRADE_LOSER;
    my_state = STATE_ACTIVEREADER;
    return true;
}

//! Upgrade reader to become a writer.
bool queuing_rw_mutex::scoped_lock::upgrade_to_writer()
{
    if ( my_state == STATE_WRITER ) return true; // Already a writer

    queuing_rw_mutex::scoped_lock * tmp;
    queuing_rw_mutex::scoped_lock * me = this;

    ITT_NOTIFY(sync_releasing, my_mutex);
    my_state = STATE_UPGRADE_REQUESTED;
requested:
    __TBB_ASSERT( !(uintptr_t(__TBB_load_relaxed(my_next)) & FLAG), "use of corrupted pointer!" );
    acquire_internal_lock();
    if( this != my_mutex->q_tail.compare_and_swap<tbb::release>(tricky_pointer(me)|FLAG, this) ) {
        spin_wait_while_eq( my_next, (void*)NULL );
        queuing_rw_mutex::scoped_lock * n;
        n = tricky_pointer::fetch_and_add<tbb::acquire>(&my_next, FLAG);
        unsigned short n_state = n->my_state;
        /* the successor may be blocked by our state; the best thing to do is to unblock it */
        if( n_state & STATE_COMBINED_WAITINGREADER )
            __TBB_store_with_release(n->my_going, 1);
        tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->my_prev), this);
        unblock_or_wait_on_internal_lock(get_flag(tmp));
        if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) {
            // save n|FLAG for simplicity of following comparisons
            tmp = tricky_pointer(n)|FLAG;
            for( atomic_backoff b; __TBB_load_relaxed(my_next)==tmp; b.pause() ) {
                if( my_state & STATE_COMBINED_UPGRADING ) {
                    if( __TBB_load_with_acquire(my_next)==tmp )
                        __TBB_store_relaxed(my_next, n);
                    goto waiting;
                }
            }
            __TBB_ASSERT(__TBB_load_relaxed(my_next) != (tricky_pointer(n)|FLAG), NULL);
            goto requested;
        } else {
            __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state");
            __TBB_ASSERT( (tricky_pointer(n)|FLAG) == __TBB_load_relaxed(my_next), NULL);
            __TBB_store_relaxed(my_next, n);
        }
    } else {
        /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */
        release_internal_lock();
    } // if( this != my_mutex->q_tail... )
    my_state.compare_and_swap<tbb::acquire>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);

waiting:
    __TBB_ASSERT( !( intptr_t(__TBB_load_relaxed(my_next)) & FLAG ), "use of corrupted pointer!" );
    __TBB_ASSERT( my_state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" );
    __TBB_ASSERT( me==this, NULL );
    ITT_NOTIFY(sync_prepare, my_mutex);
    /* if no one was blocked by the "corrupted" q_tail, turn it back */
    my_mutex->q_tail.compare_and_swap<tbb::release>( this, tricky_pointer(me)|FLAG );
    queuing_rw_mutex::scoped_lock * pred;
    pred = tricky_pointer::fetch_and_add<tbb::acquire>(&my_prev, FLAG);
    if( pred ) {
        bool success = pred->try_acquire_internal_lock();
        pred->my_state.compare_and_swap<tbb::release>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED);
        if( !success ) {
            tmp = tricky_pointer::compare_and_swap<tbb::release>(&my_prev, pred, tricky_pointer(pred)|FLAG );
            if( uintptr_t(tmp) & FLAG ) {
                spin_wait_while_eq(my_prev, pred);
                pred = __TBB_load_relaxed(my_prev);
            } else {
                spin_wait_while_eq( my_prev, tricky_pointer(pred)|FLAG );
                pred->release_internal_lock();
            }
        } else {
            __TBB_store_relaxed(my_prev, pred);
            pred->release_internal_lock();
            spin_wait_while_eq(my_prev, pred);
            pred = __TBB_load_relaxed(my_prev);
        }
        if( pred )
            goto waiting;
    } else {
        // restore the corrupted my_prev field for possible further use (e.g. if downgrading back to a reader)
        __TBB_store_relaxed(my_prev, pred);
    }
    __TBB_ASSERT( !pred && !__TBB_load_relaxed(my_prev), NULL );

    // additional lifetime issue prevention checks
    // wait for the successor to finish working with my fields
    wait_for_release_of_internal_lock();
    // now wait for the predecessor to finish working with my fields
    spin_wait_while_eq( my_going, 2 );

    // Acquire critical section indirectly from previous owner or directly from predecessor (TODO: not clear).
    __TBB_control_consistency_helper(); // on either "my_mutex->q_tail" or "my_going" (TODO: not clear)

    bool result = ( my_state != STATE_UPGRADE_LOSER );
    my_state = STATE_WRITER;
    __TBB_store_relaxed(my_going, 1);

    ITT_NOTIFY(sync_acquired, my_mutex);
    return result;
}

void queuing_rw_mutex::internal_construct() {
    ITT_SYNC_CREATE(this, _T("tbb::queuing_rw_mutex"), _T(""));
}

} // namespace tbb
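
For context, client code uses this mutex through the scoped_lock RAII interface declared in tbb/queuing_rw_mutex.h. The sketch below is a hypothetical usage example (the map and the lookup_or_insert function are placeholders, not part of TBB); it shows a read acquisition followed by an in-place upgrade, including the re-check that is required when upgrade_to_writer() returns false because the lock was temporarily released.

#include <map>
#include <string>
#include "tbb/queuing_rw_mutex.h"

std::map<std::string, int> table;            // placeholder shared data
tbb::queuing_rw_mutex table_mutex;

int lookup_or_insert( const std::string& key ) {
    // Acquire as a reader first (write = false).
    tbb::queuing_rw_mutex::scoped_lock lock( table_mutex, /*write=*/false );
    std::map<std::string, int>::iterator it = table.find(key);
    if( it != table.end() )
        return it->second;
    // Not found: upgrade to writer. A false return means the lock was released
    // and reacquired during the upgrade, so the lookup must be repeated.
    if( !lock.upgrade_to_writer() ) {
        it = table.find(key);
        if( it != table.end() )
            return it->second;
    }
    int value = int(table.size());
    table[key] = value;
    return value;                            // lock released by scoped_lock's destructor
}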