/* alnumeric.h -- inline numeric helpers: min/max/clamp for common types,
 * power-of-two and bit-counting utilities, and fast float/int conversions.
 */
  1. #ifndef AL_NUMERIC_H
  2. #define AL_NUMERIC_H
#include <cstddef>
#include <cstdint>
#include <cstring>

#ifdef HAVE_INTRIN_H
#include <intrin.h>
#endif
#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#endif

#include "opthelpers.h"
  12. inline constexpr int64_t operator "" _i64(unsigned long long int n) noexcept { return static_cast<int64_t>(n); }
  13. inline constexpr uint64_t operator "" _u64(unsigned long long int n) noexcept { return static_cast<uint64_t>(n); }
  14. constexpr inline float minf(float a, float b) noexcept
  15. { return ((a > b) ? b : a); }
  16. constexpr inline float maxf(float a, float b) noexcept
  17. { return ((a > b) ? a : b); }
  18. constexpr inline float clampf(float val, float min, float max) noexcept
  19. { return minf(max, maxf(min, val)); }
  20. constexpr inline double mind(double a, double b) noexcept
  21. { return ((a > b) ? b : a); }
  22. constexpr inline double maxd(double a, double b) noexcept
  23. { return ((a > b) ? a : b); }
  24. constexpr inline double clampd(double val, double min, double max) noexcept
  25. { return mind(max, maxd(min, val)); }
  26. constexpr inline unsigned int minu(unsigned int a, unsigned int b) noexcept
  27. { return ((a > b) ? b : a); }
  28. constexpr inline unsigned int maxu(unsigned int a, unsigned int b) noexcept
  29. { return ((a > b) ? a : b); }
  30. constexpr inline unsigned int clampu(unsigned int val, unsigned int min, unsigned int max) noexcept
  31. { return minu(max, maxu(min, val)); }
  32. constexpr inline int mini(int a, int b) noexcept
  33. { return ((a > b) ? b : a); }
  34. constexpr inline int maxi(int a, int b) noexcept
  35. { return ((a > b) ? a : b); }
  36. constexpr inline int clampi(int val, int min, int max) noexcept
  37. { return mini(max, maxi(min, val)); }
  38. constexpr inline int64_t mini64(int64_t a, int64_t b) noexcept
  39. { return ((a > b) ? b : a); }
  40. constexpr inline int64_t maxi64(int64_t a, int64_t b) noexcept
  41. { return ((a > b) ? a : b); }
  42. constexpr inline int64_t clampi64(int64_t val, int64_t min, int64_t max) noexcept
  43. { return mini64(max, maxi64(min, val)); }
  44. constexpr inline uint64_t minu64(uint64_t a, uint64_t b) noexcept
  45. { return ((a > b) ? b : a); }
  46. constexpr inline uint64_t maxu64(uint64_t a, uint64_t b) noexcept
  47. { return ((a > b) ? a : b); }
  48. constexpr inline uint64_t clampu64(uint64_t val, uint64_t min, uint64_t max) noexcept
  49. { return minu64(max, maxu64(min, val)); }
  50. constexpr inline size_t minz(size_t a, size_t b) noexcept
  51. { return ((a > b) ? b : a); }
  52. constexpr inline size_t maxz(size_t a, size_t b) noexcept
  53. { return ((a > b) ? a : b); }
  54. constexpr inline size_t clampz(size_t val, size_t min, size_t max) noexcept
  55. { return minz(max, maxz(min, val)); }
  56. /** Find the next power-of-2 for non-power-of-2 numbers. */
  57. inline uint32_t NextPowerOf2(uint32_t value) noexcept
  58. {
  59. if(value > 0)
  60. {
  61. value--;
  62. value |= value>>1;
  63. value |= value>>2;
  64. value |= value>>4;
  65. value |= value>>8;
  66. value |= value>>16;
  67. }
  68. return value+1;
  69. }
  70. /** Round up a value to the next multiple. */
  71. inline size_t RoundUp(size_t value, size_t r) noexcept
  72. {
  73. value += r-1;
  74. return value - (value%r);
  75. }
  76. /* Define CTZ macros (count trailing zeros), and POPCNT macros (population
  77. * count/count 1 bits), for 32- and 64-bit integers. The CTZ macros' results
  78. * are *UNDEFINED* if the value is 0.
  79. */
  80. #ifdef __GNUC__
  81. #define POPCNT32 __builtin_popcount
  82. #define CTZ32 __builtin_ctz
  83. #if SIZEOF_LONG == 8
  84. #define POPCNT64 __builtin_popcountl
  85. #define CTZ64 __builtin_ctzl
  86. #else
  87. #define POPCNT64 __builtin_popcountll
  88. #define CTZ64 __builtin_ctzll
  89. #endif
  90. #else
  91. /* There be black magics here. The popcnt method is derived from
  92. * https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  93. * while the ctz-utilizing-popcnt algorithm is shown here
  94. * http://www.hackersdelight.org/hdcodetxt/ntz.c.txt
  95. * as the ntz2 variant. These likely aren't the most efficient methods, but
  96. * they're good enough if the GCC built-ins aren't available.
  97. */
  98. inline int fallback_popcnt32(uint32_t v)
  99. {
  100. v = v - ((v >> 1) & 0x55555555u);
  101. v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
  102. v = (v + (v >> 4)) & 0x0f0f0f0fu;
  103. return (int)((v * 0x01010101u) >> 24);
  104. }
  105. #define POPCNT32 fallback_popcnt32
  106. inline int fallback_popcnt64(uint64_t v)
  107. {
  108. v = v - ((v >> 1) & 0x5555555555555555_u64);
  109. v = (v & 0x3333333333333333_u64) + ((v >> 2) & 0x3333333333333333_u64);
  110. v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0f_u64;
  111. return (int)((v * 0x0101010101010101_u64) >> 56);
  112. }
  113. #define POPCNT64 fallback_popcnt64
  114. #if defined(HAVE_BITSCANFORWARD64_INTRINSIC)
/* Count trailing zeros with the MSVC BitScanForward intrinsic. The result is
 * undefined for v == 0 (the scan fails and may leave idx with an unspecified
 * value despite the preset of 32).
 */
inline int msvc64_ctz32(uint32_t v)
{
    unsigned long idx = 32;
    _BitScanForward(&idx, v);
    return (int)idx;
}
  121. #define CTZ32 msvc64_ctz32
/* Count trailing zeros with the native 64-bit BitScanForward64 intrinsic.
 * The result is undefined for v == 0, matching the CTZ64 contract.
 */
inline int msvc64_ctz64(uint64_t v)
{
    unsigned long idx = 64;
    _BitScanForward64(&idx, v);
    return (int)idx;
}
  128. #define CTZ64 msvc64_ctz64
  129. #elif defined(HAVE_BITSCANFORWARD_INTRINSIC)
/* Count trailing zeros with the 32-bit BitScanForward intrinsic. The result
 * is undefined for v == 0, matching the CTZ32 contract.
 */
inline int msvc_ctz32(uint32_t v)
{
    unsigned long idx = 32;
    _BitScanForward(&idx, v);
    return (int)idx;
}
  136. #define CTZ32 msvc_ctz32
/* Compose a 64-bit count-trailing-zeros from two 32-bit scans: scan the low
 * dword first, and only when it is all zeros scan the high dword, offsetting
 * that result by 32. The result is undefined for v == 0.
 */
inline int msvc_ctz64(uint64_t v)
{
    unsigned long idx = 64;
    if(!_BitScanForward(&idx, (uint32_t)(v&0xffffffff)))
    {
        if(_BitScanForward(&idx, (uint32_t)(v>>32)))
            idx += 32;
    }
    return (int)idx;
}
  147. #define CTZ64 msvc_ctz64
  148. #else
  149. inline int fallback_ctz32(uint32_t value)
  150. { return POPCNT32(~value & (value - 1)); }
  151. #define CTZ32 fallback_ctz32
  152. inline int fallback_ctz64(uint64_t value)
  153. { return POPCNT64(~value & (value - 1)); }
  154. #define CTZ64 fallback_ctz64
  155. #endif
  156. #endif
/**
 * Fast float-to-int conversion. No particular rounding mode is assumed; the
 * IEEE-754 default is round-to-nearest with ties-to-even, though an app could
 * change it on its own threads. On some systems, a truncating conversion may
 * always be the fastest method.
 */
inline int fastf2i(float f) noexcept
{
#if defined(HAVE_SSE_INTRINSICS)
    /* SSE: cvtss2si converts using the current MXCSR rounding mode. */
    return _mm_cvt_ss2si(_mm_set_ss(f));

#elif defined(_MSC_VER) && defined(_M_IX86_FP)
    /* 32-bit MSVC: x87 fistp stores using the current FPU rounding mode. */
    int i;
    __asm fld f
    __asm fistp i
    return i;

#elif (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))
    int i;
#ifdef __SSE_MATH__
    /* SSE math available: use cvtss2si directly. */
    __asm__("cvtss2si %1, %0" : "=r"(i) : "x"(f));
#else
    /* x87 path: fistpl pops st(0), hence the "st" clobber. */
    __asm__ __volatile__("fistpl %0" : "=m"(i) : "t"(f) : "st");
#endif
    return i;

#else
    /* Generic fallback: a plain cast, which truncates rather than rounds. */
    return static_cast<int>(f);
#endif
}
  184. inline unsigned int fastf2u(float f) noexcept
  185. { return static_cast<unsigned int>(fastf2i(f)); }
  186. /** Converts float-to-int using standard behavior (truncation). */
  187. inline int float2int(float f) noexcept
  188. {
  189. #if defined(HAVE_SSE_INTRINSICS)
  190. return _mm_cvtt_ss2si(_mm_set_ss(f));
  191. #elif ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
  192. !defined(__SSE_MATH__)) || (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP == 0)
  193. int sign, shift, mant;
  194. union {
  195. float f;
  196. int i;
  197. } conv;
  198. conv.f = f;
  199. sign = (conv.i>>31) | 1;
  200. shift = ((conv.i>>23)&0xff) - (127+23);
  201. /* Over/underflow */
  202. if UNLIKELY(shift >= 31 || shift < -23)
  203. return 0;
  204. mant = (conv.i&0x7fffff) | 0x800000;
  205. if LIKELY(shift < 0)
  206. return (mant >> -shift) * sign;
  207. return (mant << shift) * sign;
  208. #else
  209. return static_cast<int>(f);
  210. #endif
  211. }
  212. inline unsigned int float2uint(float f) noexcept
  213. { return static_cast<unsigned int>(float2int(f)); }
  214. /** Converts double-to-int using standard behavior (truncation). */
  215. inline int double2int(double d) noexcept
  216. {
  217. #if defined(HAVE_SSE_INTRINSICS)
  218. return _mm_cvttsd_si32(_mm_set_sd(d));
  219. #elif ((defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
  220. !defined(__SSE2_MATH__)) || (defined(_MSC_VER) && defined(_M_IX86_FP) && _M_IX86_FP < 2)
  221. int sign, shift;
  222. int64_t mant;
  223. union {
  224. double d;
  225. int64_t i64;
  226. } conv;
  227. conv.d = d;
  228. sign = (conv.i64 >> 63) | 1;
  229. shift = ((conv.i64 >> 52) & 0x7ff) - (1023 + 52);
  230. /* Over/underflow */
  231. if UNLIKELY(shift >= 63 || shift < -52)
  232. return 0;
  233. mant = (conv.i64 & 0xfffffffffffff_i64) | 0x10000000000000_i64;
  234. if LIKELY(shift < 0)
  235. return (int)(mant >> -shift) * sign;
  236. return (int)(mant << shift) * sign;
  237. #else
  238. return static_cast<int>(d);
  239. #endif
  240. }
  241. /**
  242. * Rounds a float to the nearest integral value, according to the current
  243. * rounding mode. This is essentially an inlined version of rintf, although
  244. * makes fewer promises (e.g. -0 or -0.25 rounded to 0 may result in +0).
  245. */
  246. inline float fast_roundf(float f) noexcept
  247. {
  248. #if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__)) && \
  249. !defined(__SSE_MATH__)
  250. float out;
  251. __asm__ __volatile__("frndint" : "=t"(out) : "0"(f));
  252. return out;
  253. #else
  254. /* Integral limit, where sub-integral precision is not available for
  255. * floats.
  256. */
  257. static const float ilim[2]{
  258. 8388608.0f /* 0x1.0p+23 */,
  259. -8388608.0f /* -0x1.0p+23 */
  260. };
  261. unsigned int sign, expo;
  262. union {
  263. float f;
  264. unsigned int i;
  265. } conv;
  266. conv.f = f;
  267. sign = (conv.i>>31)&0x01;
  268. expo = (conv.i>>23)&0xff;
  269. if UNLIKELY(expo >= 150/*+23*/)
  270. {
  271. /* An exponent (base-2) of 23 or higher is incapable of sub-integral
  272. * precision, so it's already an integral value. We don't need to worry
  273. * about infinity or NaN here.
  274. */
  275. return f;
  276. }
  277. /* Adding the integral limit to the value (with a matching sign) forces a
  278. * result that has no sub-integral precision, and is consequently forced to
  279. * round to an integral value. Removing the integral limit then restores
  280. * the initial value rounded to the integral. The compiler should not
  281. * optimize this out because of non-associative rules on floating-point
  282. * math (as long as you don't use -fassociative-math,
  283. * -funsafe-math-optimizations, -ffast-math, or -Ofast, in which case this
  284. * may break).
  285. */
  286. f += ilim[sign];
  287. return f - ilim[sign];
  288. #endif
  289. }
  290. #endif /* AL_NUMERIC_H */