
reduce clang warning spam

karroffel committed 7211fd604c · 7 years ago
2 changed files with 49 additions and 49 deletions:
  1. core/safe_refcount.cpp (+24 / −24)
  2. core/safe_refcount.h (+25 / −25)
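Context for the change: Clang flags every use of the `register` storage class with -Wdeprecated-register (the keyword was deprecated in C++11 and removed in C++17), so each of these parameter declarations emitted its own warning. The commit swaps `register` for `volatile` throughout the atomic helpers. A minimal sketch (not part of the commit, names hypothetical) that reproduces the warning the change silences:

// sketch.cpp -- hypothetical reproduction, not from the repository.
// Compile with: clang++ -std=c++11 -Wall -c sketch.cpp
// Emits: warning: 'register' storage class specifier is deprecated
//        [-Wdeprecated-register]
#include <cstdint>

uint32_t increment_old(register uint32_t *pw) {
	return __sync_add_and_fetch(pw, 1); // GCC/Clang atomic builtin
}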

core/safe_refcount.cpp (+24 / −24)

@@ -57,113 +57,113 @@
 			return m_val;                                                                   \
 	}
 
-_ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(register uint32_t *pw){
+_ALWAYS_INLINE_ uint32_t _atomic_conditional_increment_impl(volatile uint32_t *pw){
 
 	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONG, InterlockedCompareExchange, uint32_t)
 }
 
-_ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(register uint32_t *pw) {
+_ALWAYS_INLINE_ uint32_t _atomic_decrement_impl(volatile uint32_t *pw) {
 
 	return InterlockedDecrement((LONG volatile *)pw);
 }
 
-_ALWAYS_INLINE_ uint32_t _atomic_increment_impl(register uint32_t *pw) {
+_ALWAYS_INLINE_ uint32_t _atomic_increment_impl(volatile uint32_t *pw) {
 
 	return InterlockedIncrement((LONG volatile *)pw);
 }
 
-_ALWAYS_INLINE_ uint32_t _atomic_sub_impl(register uint32_t *pw, register uint32_t val) {
+_ALWAYS_INLINE_ uint32_t _atomic_sub_impl(volatile uint32_t *pw, volatile uint32_t val) {
 
 	return InterlockedExchangeAdd((LONG volatile *)pw, -(int32_t)val) - val;
 }
 
-_ALWAYS_INLINE_ uint32_t _atomic_add_impl(register uint32_t *pw, register uint32_t val) {
+_ALWAYS_INLINE_ uint32_t _atomic_add_impl(volatile uint32_t *pw, volatile uint32_t val) {
 
 	return InterlockedAdd((LONG volatile *)pw, val);
 }
 
-_ALWAYS_INLINE_ uint32_t _atomic_exchange_if_greater_impl(register uint32_t *pw, register uint32_t val){
+_ALWAYS_INLINE_ uint32_t _atomic_exchange_if_greater_impl(volatile uint32_t *pw, volatile uint32_t val){
 
 	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONG, InterlockedCompareExchange, uint32_t)
 }
 
-_ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(register uint64_t *pw){
+_ALWAYS_INLINE_ uint64_t _atomic_conditional_increment_impl(volatile uint64_t *pw){
 
 	ATOMIC_CONDITIONAL_INCREMENT_BODY(pw, LONGLONG, InterlockedCompareExchange64, uint64_t)
 }
 
-_ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(register uint64_t *pw) {
+_ALWAYS_INLINE_ uint64_t _atomic_decrement_impl(volatile uint64_t *pw) {
 
 	return InterlockedDecrement64((LONGLONG volatile *)pw);
 }
 
-_ALWAYS_INLINE_ uint64_t _atomic_increment_impl(register uint64_t *pw) {
+_ALWAYS_INLINE_ uint64_t _atomic_increment_impl(volatile uint64_t *pw) {
 
 	return InterlockedIncrement64((LONGLONG volatile *)pw);
 }
 
-_ALWAYS_INLINE_ uint64_t _atomic_sub_impl(register uint64_t *pw, register uint64_t val) {
+_ALWAYS_INLINE_ uint64_t _atomic_sub_impl(volatile uint64_t *pw, volatile uint64_t val) {
 
 	return InterlockedExchangeAdd64((LONGLONG volatile *)pw, -(int64_t)val) - val;
 }
 
-_ALWAYS_INLINE_ uint64_t _atomic_add_impl(register uint64_t *pw, register uint64_t val) {
+_ALWAYS_INLINE_ uint64_t _atomic_add_impl(volatile uint64_t *pw, volatile uint64_t val) {
 
 	return InterlockedAdd64((LONGLONG volatile *)pw, val);
 }
 
-_ALWAYS_INLINE_ uint64_t _atomic_exchange_if_greater_impl(register uint64_t *pw, register uint64_t val){
+_ALWAYS_INLINE_ uint64_t _atomic_exchange_if_greater_impl(volatile uint64_t *pw, volatile uint64_t val){
 
 	ATOMIC_EXCHANGE_IF_GREATER_BODY(pw, val, LONGLONG, InterlockedCompareExchange64, uint64_t)
 }
 
 // The actual advertised functions; they'll call the right implementation
 
-uint32_t atomic_conditional_increment(register uint32_t *pw) {
+uint32_t atomic_conditional_increment(volatile uint32_t *pw) {
 	return _atomic_conditional_increment_impl(pw);
 }
 
-uint32_t atomic_decrement(register uint32_t *pw) {
+uint32_t atomic_decrement(volatile uint32_t *pw) {
 	return _atomic_decrement_impl(pw);
 }
 
-uint32_t atomic_increment(register uint32_t *pw) {
+uint32_t atomic_increment(volatile uint32_t *pw) {
 	return _atomic_increment_impl(pw);
 }
 
-uint32_t atomic_sub(register uint32_t *pw, register uint32_t val) {
+uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val) {
 	return _atomic_sub_impl(pw, val);
 }
 
-uint32_t atomic_add(register uint32_t *pw, register uint32_t val) {
+uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val) {
 	return _atomic_add_impl(pw, val);
 }
 
-uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val) {
+uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val) {
 	return _atomic_exchange_if_greater_impl(pw, val);
 }
 
-uint64_t atomic_conditional_increment(register uint64_t *pw) {
+uint64_t atomic_conditional_increment(volatile uint64_t *pw) {
 	return _atomic_conditional_increment_impl(pw);
 }
 
-uint64_t atomic_decrement(register uint64_t *pw) {
+uint64_t atomic_decrement(volatile uint64_t *pw) {
 	return _atomic_decrement_impl(pw);
 }
 
-uint64_t atomic_increment(register uint64_t *pw) {
+uint64_t atomic_increment(volatile uint64_t *pw) {
 	return _atomic_increment_impl(pw);
 }
 
-uint64_t atomic_sub(register uint64_t *pw, register uint64_t val) {
+uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val) {
 	return _atomic_sub_impl(pw, val);
 }
 
-uint64_t atomic_add(register uint64_t *pw, register uint64_t val) {
+uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val) {
 	return _atomic_add_impl(pw, val);
 }
 
-uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val) {
+uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val) {
 	return _atomic_exchange_if_greater_impl(pw, val);
 }
 #endif
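For illustration (caller names hypothetical, not from this commit), the advertised functions above can now take the address of a volatile-qualified counter directly, with no cast at the call site:

#include "core/safe_refcount.h"

// Hypothetical caller sketch: with the volatile signatures, a refcount
// declared volatile is passed as-is.
static volatile uint32_t refcount = 1;

void grab_ref() {
	atomic_increment(&refcount);
}

bool drop_ref() {
	// atomic_decrement returns the new value; zero means last reference.
	return atomic_decrement(&refcount) == 0;
}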

core/safe_refcount.h (+25 / −25)

@@ -44,7 +44,7 @@
 /* Bogus implementation unaware of multiprocessing */
 
 template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
 
 	if (*pw == 0)
 		return 0;
@@ -55,7 +55,7 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
 }
 
 template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
 
 	(*pw)--;
 
@@ -63,7 +63,7 @@ static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
 }
 
 template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
 
 	(*pw)++;
 
@@ -71,7 +71,7 @@ static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
 }
 
 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
 
 	(*pw) -= val;
 
@@ -79,7 +79,7 @@ static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
 }
 
 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
 
 	(*pw) += val;
 
@@ -87,7 +87,7 @@ static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
 }
 
 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {
 
 	if (val > *pw)
 		*pw = val;
@@ -103,7 +103,7 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V v
 // Clang states it supports GCC atomic builtins.
 
 template <class T>
-static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_conditional_increment(volatile T *pw) {
 
 	while (true) {
 		T tmp = static_cast<T const volatile &>(*pw);
@@ -115,31 +115,31 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
 }
 
 template <class T>
-static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_decrement(volatile T *pw) {
 
 	return __sync_sub_and_fetch(pw, 1);
 }
 
 template <class T>
-static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
+static _ALWAYS_INLINE_ T atomic_increment(volatile T *pw) {
 
 	return __sync_add_and_fetch(pw, 1);
 }
 
 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_sub(volatile T *pw, volatile V val) {
 
 	return __sync_sub_and_fetch(pw, val);
 }
 
 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_add(volatile T *pw, volatile V val) {
 
 	return __sync_add_and_fetch(pw, val);
 }
 
 template <class T, class V>
-static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V val) {
+static _ALWAYS_INLINE_ T atomic_exchange_if_greater(volatile T *pw, volatile V val) {
 
 	while (true) {
 		T tmp = static_cast<T const volatile &>(*pw);
@@ -153,19 +153,19 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(register T *pw, register V v
 #elif defined(_MSC_VER)
 // For MSVC use a separate compilation unit to prevent windows.h from polluting
 // the global namespace.
-uint32_t atomic_conditional_increment(register uint32_t *pw);
-uint32_t atomic_decrement(register uint32_t *pw);
-uint32_t atomic_increment(register uint32_t *pw);
-uint32_t atomic_sub(register uint32_t *pw, register uint32_t val);
-uint32_t atomic_add(register uint32_t *pw, register uint32_t val);
-uint32_t atomic_exchange_if_greater(register uint32_t *pw, register uint32_t val);
-
-uint64_t atomic_conditional_increment(register uint64_t *pw);
-uint64_t atomic_decrement(register uint64_t *pw);
-uint64_t atomic_increment(register uint64_t *pw);
-uint64_t atomic_sub(register uint64_t *pw, register uint64_t val);
-uint64_t atomic_add(register uint64_t *pw, register uint64_t val);
-uint64_t atomic_exchange_if_greater(register uint64_t *pw, register uint64_t val);
+uint32_t atomic_conditional_increment(volatile uint32_t *pw);
+uint32_t atomic_decrement(volatile uint32_t *pw);
+uint32_t atomic_increment(volatile uint32_t *pw);
+uint32_t atomic_sub(volatile uint32_t *pw, volatile uint32_t val);
+uint32_t atomic_add(volatile uint32_t *pw, volatile uint32_t val);
+uint32_t atomic_exchange_if_greater(volatile uint32_t *pw, volatile uint32_t val);
+
+uint64_t atomic_conditional_increment(volatile uint64_t *pw);
+uint64_t atomic_decrement(volatile uint64_t *pw);
+uint64_t atomic_increment(volatile uint64_t *pw);
+uint64_t atomic_sub(volatile uint64_t *pw, volatile uint64_t val);
+uint64_t atomic_add(volatile uint64_t *pw, volatile uint64_t val);
+uint64_t atomic_exchange_if_greater(volatile uint64_t *pw, volatile uint64_t val);
 
 #else
 //no threads supported?
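A self-contained sketch of the GCC/Clang builtin path (assumes a compiler providing the __sync_* builtins; not part of the commit): the builtins accept volatile-qualified pointers, so the new signatures compile warning-free on that path as well.

#include <cstdint>

template <class T>
static inline T atomic_increment_sketch(volatile T *pw) {
	return __sync_add_and_fetch(pw, 1); // full-barrier add, returns new value
}

int main() {
	volatile uint64_t count = 0;
	atomic_increment_sketch(&count); // count is now 1
	return 0;
}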