@@ -76,7 +76,11 @@ namespace bx
#endif // BX_COMPILER
}

- inline int32_t atomicFetchAndAdd(volatile int32_t* _ptr, int32_t _add)
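+ /// Returns the value held before the addition.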
+ template<typename Ty>
+ inline Ty atomicFetchAndAdd(volatile Ty* _ptr, Ty _value);
+
+ template<>
+ inline int32_t atomicFetchAndAdd<int32_t>(volatile int32_t* _ptr, int32_t _add)
{
#if BX_COMPILER_MSVC
return _InterlockedExchangeAdd( (volatile long*)_ptr, _add);
@@ -85,7 +89,43 @@ namespace bx
#endif // BX_COMPILER_
}

- inline int32_t atomicAddAndFetch(volatile int32_t* _ptr, int32_t _add)
+ template<>
+ inline int64_t atomicFetchAndAdd<int64_t>(volatile int64_t* _ptr, int64_t _add)
+ {
+#if BX_COMPILER_MSVC
+ return _InterlockedExchangeAdd64( (volatile int64_t*)_ptr, _add);
+#else
+ return __sync_fetch_and_add(_ptr, _add);
+#endif // BX_COMPILER_
+ }
+
+ template<>
+ inline uint32_t atomicFetchAndAdd<uint32_t>(volatile uint32_t* _ptr, uint32_t _add)
+ {
+ return uint32_t(atomicFetchAndAdd<int32_t>( (volatile int32_t*)_ptr, int32_t(_add) ) );
+ }
+
+ template<>
+ inline uint64_t atomicFetchAndAdd<uint64_t>(volatile uint64_t* _ptr, uint64_t _add)
+ {
+ return uint64_t(atomicFetchAndAdd<int64_t>( (volatile int64_t*)_ptr, int64_t(_add) ) );
+ }
+
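+ /// Returns the resulting value after the addition.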
+ template<typename Ty>
+ inline Ty atomicAddAndFetch(volatile Ty* _ptr, Ty _value);
+
+ template<>
+ inline int32_t atomicAddAndFetch<int32_t>(volatile int32_t* _ptr, int32_t _add)
+ {
+#if BX_COMPILER_MSVC
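+ // No add-and-fetch intrinsic on MSVC: fetch-and-add returns the old value, so re-apply the addend.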
+ return atomicFetchAndAdd(_ptr, _add) + _add;
+#else
+ return __sync_add_and_fetch(_ptr, _add);
+#endif // BX_COMPILER_
+ }
+
+ template<>
+ inline int64_t atomicAddAndFetch<int64_t>(volatile int64_t* _ptr, int64_t _add)
{
#if BX_COMPILER_MSVC
return atomicFetchAndAdd(_ptr, _add) + _add;
@@ -94,7 +134,23 @@ namespace bx
#endif // BX_COMPILER_
}

- inline int32_t atomicFetchAndSub(volatile int32_t* _ptr, int32_t _sub)
+ template<>
+ inline uint32_t atomicAddAndFetch<uint32_t>(volatile uint32_t* _ptr, uint32_t _add)
+ {
+ return uint32_t(atomicAddAndFetch<int32_t>( (volatile int32_t*)_ptr, int32_t(_add) ) );
+ }
+
+ template<>
+ inline uint64_t atomicAddAndFetch<uint64_t>(volatile uint64_t* _ptr, uint64_t _add)
+ {
+ return uint64_t(atomicAddAndFetch<int64_t>( (volatile int64_t*)_ptr, int64_t(_add) ) );
+ }
+
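+ /// Returns the value held before the subtraction.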
+ template<typename Ty>
+ inline Ty atomicFetchAndSub(volatile Ty* _ptr, Ty _value);
+
+ template<>
+ inline int32_t atomicFetchAndSub<int32_t>(volatile int32_t* _ptr, int32_t _sub)
{
#if BX_COMPILER_MSVC
return atomicFetchAndAdd(_ptr, -_sub);
@@ -103,7 +159,33 @@ namespace bx
#endif // BX_COMPILER_
}

- inline int32_t atomicSubAndFetch(volatile int32_t* _ptr, int32_t _sub)
+ template<>
+ inline int64_t atomicFetchAndSub<int64_t>(volatile int64_t* _ptr, int64_t _sub)
+ {
+#if BX_COMPILER_MSVC
+ return atomicFetchAndAdd(_ptr, -_sub);
+#else
+ return __sync_fetch_and_sub(_ptr, _sub);
+#endif // BX_COMPILER_
+ }
+
+ template<>
+ inline uint32_t atomicFetchAndSub<uint32_t>(volatile uint32_t* _ptr, uint32_t _sub)
+ {
+ return uint32_t(atomicFetchAndSub<int32_t>( (volatile int32_t*)_ptr, int32_t(_sub) ) );
+ }
+
+ template<>
+ inline uint64_t atomicFetchAndSub<uint64_t>(volatile uint64_t* _ptr, uint64_t _sub)
+ {
+ return uint64_t(atomicFetchAndSub<int64_t>( (volatile int64_t*)_ptr, int64_t(_sub) ) );
+ }
+
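+ /// Returns the resulting value after the subtraction.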
+ template<typename Ty>
+ inline Ty atomicSubAndFetch(volatile Ty* _ptr, Ty _value);
+
+ template<>
+ inline int32_t atomicSubAndFetch<int32_t>(volatile int32_t* _ptr, int32_t _sub)
{
#if BX_COMPILER_MSVC
return atomicFetchAndAdd(_ptr, -_sub) - _sub;
@@ -112,16 +194,40 @@ namespace bx
#endif // BX_COMPILER_
}

+ template<>
+ inline int64_t atomicSubAndFetch<int64_t>(volatile int64_t* _ptr, int64_t _sub)
+ {
+#if BX_COMPILER_MSVC
+ return atomicFetchAndAdd(_ptr, -_sub) - _sub;
+#else
+ return __sync_sub_and_fetch(_ptr, _sub);
+#endif // BX_COMPILER_
+ }
+
+ template<>
+ inline uint32_t atomicSubAndFetch<uint32_t>(volatile uint32_t* _ptr, uint32_t _sub)
+ {
+ return uint32_t(atomicSubAndFetch<int32_t>( (volatile int32_t*)_ptr, int32_t(_sub) ) );
+ }
+
+ template<>
+ inline uint64_t atomicSubAndFetch<uint64_t>(volatile uint64_t* _ptr, uint64_t _sub)
+ {
+ return uint64_t(atomicSubAndFetch<int64_t>( (volatile int64_t*)_ptr, int64_t(_sub) ) );
+ }
+
/// Returns the resulting incremented value.
- inline int32_t atomicInc(volatile int32_t* _ptr)
+ template<typename Ty>
+ inline Ty atomicInc(volatile Ty* _ptr)
{
- return atomicAddAndFetch(_ptr, 1);
+ return atomicAddAndFetch(_ptr, Ty(1) );
}

/// Returns the resulting decremented value.
- inline int32_t atomicDec(volatile int32_t* _ptr)
+ template<typename Ty>
+ inline Ty atomicDec(volatile Ty* _ptr)
{
- return atomicSubAndFetch(_ptr, 1);
+ return atomicSubAndFetch(_ptr, Ty(1) );
}

///
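
For reference, a minimal usage sketch of the templated API this patch introduces. It is not part of the diff; the bx/cpu.h include path and the small smoke test around it are assumptions for illustration.

#include <bx/cpu.h> // assumed home of these atomics; adjust to your tree
#include <stdint.h>
#include <assert.h>

void atomicsSmokeTest()
{
	volatile uint32_t count  = 0;
	volatile int64_t  ticket = 0;

	// Ty is deduced from the pointer type, so existing int32_t call sites
	// keep compiling, and unsigned/64-bit counters now work as well.
	uint32_t after = bx::atomicInc(&count); // resulting value: 1

	// The int64_t() cast keeps deduction unambiguous: a plain 10 would
	// deduce Ty as both int64_t (from _ptr) and int (from _value).
	int64_t prev = bx::atomicFetchAndAdd(&ticket, int64_t(10) ); // value before: 0

	uint32_t now = bx::atomicDec(&count); // back to 0

	assert(1 == after);
	assert(0 == prev);
	assert(0 == now);
}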