SkAtomics.h (4.3 KB)
  1. /*
  2. * Copyright 2015 Google Inc.
  3. *
  4. * Use of this source code is governed by a BSD-style license that can be
  5. * found in the LICENSE file.
  6. */
  7. #ifndef SkAtomics_DEFINED
  8. #define SkAtomics_DEFINED
  9. // This file is not part of the public Skia API.
  10. #include "../private/SkNoncopyable.h"
  11. #include "SkTypes.h"
  12. #include <atomic>
  13. // ~~~~~~~~ APIs ~~~~~~~~~
// Mirror of std::memory_order. The enumerators must keep this exact order:
// the implementations below convert with a plain (std::memory_order) cast,
// which relies on the underlying values matching the standard enum.
enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};
// Atomic operations on a plain T*. The implementations below reinterpret the
// pointer as std::atomic<T>*, so T should be a small builtin-like type.

// Atomically loads *ptr. Returns by value.
template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

// Atomically stores the value into *ptr.
template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Atomically adds to *ptr; returns the value *before* the add
// (std::atomic_fetch_add semantics).
template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Strong compare-exchange: if *ptr == *expected, stores desired and returns
// true; otherwise writes the current value into *expected and returns false.
template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);
  32. // A little wrapper class for small T (think, builtins: int, float, void*) to
  33. // ensure they're always used atomically. This is our stand-in for std::atomic<T>.
  34. // !!! Please _really_ know what you're doing if you change default_memory_order. !!!
  35. template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
  36. class SkAtomic : SkNoncopyable {
  37. public:
  38. SkAtomic() {}
  39. explicit SkAtomic(const T& val) : fVal(val) {}
  40. // It is essential we return by value rather than by const&. fVal may change at any time.
  41. T load(sk_memory_order mo = default_memory_order) const {
  42. return sk_atomic_load(&fVal, mo);
  43. }
  44. void store(const T& val, sk_memory_order mo = default_memory_order) {
  45. sk_atomic_store(&fVal, val, mo);
  46. }
  47. // Alias for .load(default_memory_order).
  48. operator T() const {
  49. return this->load();
  50. }
  51. // Alias for .store(v, default_memory_order).
  52. T operator=(const T& v) {
  53. this->store(v);
  54. return v;
  55. }
  56. private:
  57. T fVal;
  58. };
  59. // ~~~~~~~~ Implementations ~~~~~~~~~
  60. template <typename T>
  61. T sk_atomic_load(const T* ptr, sk_memory_order mo) {
  62. SkASSERT(mo == sk_memory_order_relaxed ||
  63. mo == sk_memory_order_seq_cst ||
  64. mo == sk_memory_order_acquire ||
  65. mo == sk_memory_order_consume);
  66. const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
  67. return std::atomic_load_explicit(ap, (std::memory_order)mo);
  68. }
  69. template <typename T>
  70. void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
  71. SkASSERT(mo == sk_memory_order_relaxed ||
  72. mo == sk_memory_order_seq_cst ||
  73. mo == sk_memory_order_release);
  74. std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
  75. return std::atomic_store_explicit(ap, val, (std::memory_order)mo);
  76. }
  77. template <typename T>
  78. T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
  79. // All values of mo are valid.
  80. std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
  81. return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
  82. }
  83. template <typename T>
  84. bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
  85. sk_memory_order success,
  86. sk_memory_order failure) {
  87. // All values of success are valid.
  88. SkASSERT(failure == sk_memory_order_relaxed ||
  89. failure == sk_memory_order_seq_cst ||
  90. failure == sk_memory_order_acquire ||
  91. failure == sk_memory_order_consume);
  92. SkASSERT(failure <= success);
  93. std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
  94. return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
  95. (std::memory_order)success,
  96. (std::memory_order)failure);
  97. }
  98. // ~~~~~~~~ Legacy APIs ~~~~~~~~~
  99. // From here down we have shims for our old atomics API, to be weaned off of.
  100. // We use the default sequentially-consistent memory order to make things simple
  101. // and to match the practical reality of our old _sync and _win implementations.
  102. inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
  103. inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }
  104. #endif//SkAtomics_DEFINED