QuickCppLib 0.10
Eliminate all the tedious hassle when making state-of-the-art C++ 14 - 23 libraries!
Loading...
Searching...
No Matches
quickcpplib::_xxx::configurable_spinlock::spinlockbase< T > Struct Template Reference

#include "spinlock.hpp"

Public Types

typedef T value_type
 

Public Member Functions

constexpr spinlockbase () noexcept
 
 spinlockbase (const spinlockbase &)=delete
 
constexpr spinlockbase (spinlockbase &&) noexcept
 Atomically move constructs.
 
 ~spinlockbase ()
 
spinlockbase & operator= (const spinlockbase &)=delete
 
spinlockbase & operator= (spinlockbase &&)=delete
 
constexpr T load (memory_order o=memory_order_seq_cst) const noexcept
 Returns the raw atomic.
 
void store (T a, memory_order o=memory_order_seq_cst) noexcept
 Sets the raw atomic.
 
bool try_lock () noexcept
 If atomic is zero, sets to 1 and returns true, else false.
 
constexpr bool try_lock () const noexcept
 
bool try_lock (T &expected) noexcept
 If atomic equals expected, sets to 1 and returns true, else false with expected updated to actual value.
 
void unlock () noexcept
 Sets the atomic to zero.
 
constexpr bool int_yield (size_t) noexcept
 

Protected Attributes

atomic< T > v
 

Member Typedef Documentation

◆ value_type

template<typename T >
typedef T quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::value_type

Constructor & Destructor Documentation

◆ spinlockbase() [1/3]

template<typename T >
constexpr quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::spinlockbase ( )
inline constexpr noexcept
152 : v(0)
153 {
155#if QUICKCPPLIB_IN_THREAD_SANITIZER
156 v.store(0, memory_order_release);
157#endif
158 }
#define QUICKCPPLIB_ANNOTATE_RWLOCK_CREATE(p)
Definition config.hpp:102
atomic< T > v
Definition spinlock.hpp:143

◆ spinlockbase() [2/3]

template<typename T >
quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::spinlockbase ( const spinlockbase< T > &  )
delete

◆ spinlockbase() [3/3]

template<typename T >
constexpr quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::spinlockbase ( spinlockbase< T > &&  )
inline constexpr noexcept

Atomically move constructs.

165 : v(0)
166 {
168// v.store(o.v.exchange(0, memory_order_acq_rel));
169#if QUICKCPPLIB_IN_THREAD_SANITIZER
170 v.store(0, memory_order_release);
171#endif
172 }

◆ ~spinlockbase()

174 {
175#ifdef QUICKCPPLIB_ENABLE_VALGRIND
176 if(v.load(memory_order_acquire))
177 {
179 }
180#endif
182 }
#define QUICKCPPLIB_ANNOTATE_RWLOCK_DESTROY(p)
Definition config.hpp:103
#define QUICKCPPLIB_ANNOTATE_RWLOCK_RELEASED(p, s)
Definition config.hpp:105

Member Function Documentation

◆ operator=() [1/2]

template<typename T >
spinlockbase & quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::operator= ( const spinlockbase< T > &  )
delete

◆ operator=() [2/2]

template<typename T >
spinlockbase & quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::operator= ( spinlockbase< T > &&  )
delete

◆ load()

template<typename T >
constexpr T quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::load ( memory_order  o = memory_order_seq_cst) const
inline constexpr noexcept

Returns the raw atomic.

186{ return v.load(o); }

◆ store()

template<typename T >
void quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::store ( T  a,
memory_order  o = memory_order_seq_cst 
)
inline noexcept

Sets the raw atomic.

188{ v.store(a, o); }

◆ try_lock() [1/3]

template<typename T >
bool quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::try_lock ( )
inline noexcept

If atomic is zero, sets to 1 and returns true, else false.

191 {
192#if !QUICKCPPLIB_IN_THREAD_SANITIZER // no early outs for the sanitizer
193#ifdef QUICKCPPLIB_USE_VOLATILE_READ_FOR_AVOIDING_CMPXCHG
194 // MSVC's atomics always seq_cst, so use volatile read to create a true acquire
195 volatile T *_v = (volatile T *) &v;
196 if(*_v) // Avoid unnecessary cache line invalidation traffic
197 return false;
198#else
199 if(v.load(memory_order_relaxed)) // Avoid unnecessary cache line invalidation traffic
200 return false;
201#endif
202#endif
203#if 0 /* Disabled as CMPXCHG seems to have sped up on recent Intel */ // defined(__i386__) || defined(_M_IX86) ||
204 // defined(__x86_64__) || defined(_M_X64)
205 // Intel is a lot quicker if you use XCHG instead of CMPXCHG. ARM is definitely not!
206 T ret = v.exchange(1, memory_order_acquire);
207 if(!ret)
208#else
209 T expected = 0;
210 bool ret = v.compare_exchange_weak(expected, 1, memory_order_acquire, memory_order_relaxed);
211 if(ret)
212#endif
213 {
215 return true;
216 }
217 else return false;
218 }
#define QUICKCPPLIB_ANNOTATE_RWLOCK_ACQUIRED(p, s)
Definition config.hpp:104

◆ try_lock() [2/3]

template<typename T >
constexpr bool quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::try_lock ( ) const
inline constexpr noexcept
220 {
221 return v.load(memory_order_consume) ? false : true; // Avoid unnecessary cache line invalidation traffic
222 }

◆ try_lock() [3/3]

template<typename T >
bool quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::try_lock ( T &  expected)
inline noexcept

If atomic equals expected, sets to 1 and returns true, else false with expected updated to actual value.

225 {
226 T t(0);
227#if !QUICKCPPLIB_IN_THREAD_SANITIZER // no early outs for the sanitizer
228#ifdef QUICKCPPLIB_USE_VOLATILE_READ_FOR_AVOIDING_CMPXCHG
229 // MSVC's atomics always seq_cst, so use volatile read to create a true acquire
230 volatile T *_v = (volatile T *) &v;
231 if((t = *_v)) // Avoid unnecessary cache line invalidation traffic
232#else
233 t = v.load(memory_order_relaxed);
234 if(t) // Avoid unnecessary cache line invalidation traffic
235#endif
236 {
237 expected = t;
238 return false;
239 }
240#endif
241 bool ret = v.compare_exchange_weak(expected, 1, memory_order_acquire, memory_order_relaxed);
242 if(ret)
243 {
245 return true;
246 }
247 else
248 return false;
249 }

◆ unlock()

template<typename T >
void quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::unlock ( )
inline noexcept

Sets the atomic to zero.

252 {
253 // assert(v == 1);
255 v.store(0, memory_order_release);
256 }

◆ int_yield()

template<typename T >
constexpr bool quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::int_yield ( size_t  )
inline constexpr noexcept
257{ return false; }

Member Data Documentation

◆ v

template<typename T >
atomic<T> quickcpplib::_xxx::configurable_spinlock::spinlockbase< T >::v
protected

The documentation for this struct was generated from the following file: