x86.h
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#ifndef VPX_PORTS_X86_H_
#define VPX_PORTS_X86_H_
#include <stdlib.h>

#if defined(_MSC_VER)
#include <intrin.h> /* For __cpuidex, __rdtsc */
#endif

#include "vpx_config.h"
#include "vpx/vpx_integer.h"

#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
  VPX_CPU_UNKNOWN = -1,
  VPX_CPU_AMD,
  VPX_CPU_AMD_OLD,
  VPX_CPU_CENTAUR,
  VPX_CPU_CYRIX,
  VPX_CPU_INTEL,
  VPX_CPU_NEXGEN,
  VPX_CPU_NSC,
  VPX_CPU_RISE,
  VPX_CPU_SIS,
  VPX_CPU_TRANSMETA,
  VPX_CPU_TRANSMETA_OLD,
  VPX_CPU_UMC,
  VPX_CPU_VIA,
  VPX_CPU_LAST
} vpx_cpu_t;
#if (defined(__GNUC__) && __GNUC__) || defined(__ANDROID__)
#if ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx)                      \
  __asm__ __volatile__("cpuid \n\t"                             \
                       : "=a"(ax), "=b"(bx), "=c"(cx), "=d"(dx) \
                       : "a"(func), "c"(func2));
#else
/* On 32-bit targets %ebx may be reserved as the PIC register, so preserve
 * it around the cpuid instruction. */
#define cpuid(func, func2, ax, bx, cx, dx)     \
  __asm__ __volatile__(                        \
      "mov %%ebx, %%edi \n\t"                  \
      "cpuid \n\t"                             \
      "xchg %%edi, %%ebx \n\t"                 \
      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
      : "a"(func), "c"(func2));
#endif
#elif defined(__SUNPRO_C) || \
    defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__ */
#if ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx)     \
  asm volatile(                                \
      "xchg %rsi, %rbx \n\t"                   \
      "cpuid \n\t"                             \
      "movl %ebx, %edi \n\t"                   \
      "xchg %rsi, %rbx \n\t"                   \
      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
      : "a"(func), "c"(func2));
#else
#define cpuid(func, func2, ax, bx, cx, dx)     \
  asm volatile(                                \
      "pushl %ebx \n\t"                        \
      "cpuid \n\t"                             \
      "movl %ebx, %edi \n\t"                   \
      "popl %ebx \n\t"                         \
      : "=a"(ax), "=D"(bx), "=c"(cx), "=d"(dx) \
      : "a"(func), "c"(func2));
#endif
#else /* end __SUNPRO__ */
#if ARCH_X86_64
#if defined(_MSC_VER) && _MSC_VER > 1500
#define cpuid(func, func2, a, b, c, d) \
  do {                                 \
    int regs[4];                       \
    __cpuidex(regs, func, func2);      \
    a = regs[0];                       \
    b = regs[1];                       \
    c = regs[2];                       \
    d = regs[3];                       \
  } while (0)
#else
/* Older MSVC toolchains lack __cpuidex; fall back to __cpuid, which ignores
 * the subleaf in func2. */
#define cpuid(func, func2, a, b, c, d) \
  do {                                 \
    int regs[4];                       \
    __cpuid(regs, func);               \
    a = regs[0];                       \
    b = regs[1];                       \
    c = regs[2];                       \
    d = regs[3];                       \
  } while (0)
#endif
#else
#define cpuid(func, func2, a, b, c, d)                              \
  __asm mov eax, func __asm mov ecx, func2 __asm cpuid __asm mov a, \
      eax __asm mov b, ebx __asm mov c, ecx __asm mov d, edx
#endif
#endif /* end others */
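
/* Illustrative usage sketch (not part of the original header): reading the
 * CPUID vendor string with the cpuid() macro defined above. The variable
 * names are hypothetical, and <string.h> is assumed for memcpy.
 *
 *   unsigned int max_leaf, vend_b, vend_c, vend_d;
 *   char vendor[13];
 *   cpuid(0, 0, max_leaf, vend_b, vend_c, vend_d);
 *   memcpy(vendor + 0, &vend_b, 4);  // e.g. "Genu" of "GenuineIntel"
 *   memcpy(vendor + 4, &vend_d, 4);  // note the EDX-before-ECX order
 *   memcpy(vendor + 8, &vend_c, 4);
 *   vendor[12] = '\0';
 */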
// NaCl has no support for xgetbv or the raw opcode.
#if !defined(__native_client__) && (defined(__i386__) || defined(__x86_64__))
static INLINE uint64_t xgetbv(void) {
  const uint32_t ecx = 0;
  uint32_t eax, edx;
  // Use the raw opcode for xgetbv for compatibility with older toolchains.
  __asm__ volatile(".byte 0x0f, 0x01, 0xd0\n"
                   : "=a"(eax), "=d"(edx)
                   : "c"(ecx));
  return ((uint64_t)edx << 32) | eax;
}
#elif (defined(_M_X64) || defined(_M_IX86)) && defined(_MSC_FULL_VER) && \
    _MSC_FULL_VER >= 160040219  // >= VS2010 SP1
#include <immintrin.h>
#define xgetbv() _xgetbv(0)
#elif defined(_MSC_VER) && defined(_M_IX86)
static INLINE uint64_t xgetbv(void) {
  uint32_t eax_, edx_;
  __asm {
    xor ecx, ecx  // ecx = 0
    // Use the raw opcode for xgetbv for compatibility with older toolchains.
    __asm _emit 0x0f __asm _emit 0x01 __asm _emit 0xd0
    mov eax_, eax
    mov edx_, edx
  }
  return ((uint64_t)edx_ << 32) | eax_;
}
#else
#define xgetbv() 0U  // no AVX for older x64 or unrecognized toolchains.
#endif
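
/* xgetbv(0) reads XCR0: bit 1 indicates the OS saves SSE (XMM) state and
 * bit 2 that it saves AVX (YMM) state, so masking with 0x6 (as
 * x86_simd_caps() does below) verifies OS support for the 256-bit register
 * file. A minimal check sketch, with a hypothetical variable name:
 *
 *   const uint64_t xcr0 = xgetbv();
 *   const int os_avx_ok = (xcr0 & 0x6) == 0x6;  // XMM + YMM state enabled
 */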
#if defined(_MSC_VER) && _MSC_VER >= 1700
#undef NOMINMAX
#define NOMINMAX
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_FAMILY_APP)
#define getenv(x) NULL  // no getenv in Windows Runtime
#endif
#endif
#define HAS_MMX 0x01
#define HAS_SSE 0x02
#define HAS_SSE2 0x04
#define HAS_SSE3 0x08
#define HAS_SSSE3 0x10
#define HAS_SSE4_1 0x20
#define HAS_AVX 0x40
#define HAS_AVX2 0x80
#ifndef BIT
#define BIT(n) (1 << (n))
#endif
static INLINE int x86_simd_caps(void) {
  unsigned int flags = 0;
  unsigned int mask = ~0;
  unsigned int max_cpuid_val, reg_eax, reg_ebx, reg_ecx, reg_edx;
  char *env;
  (void)reg_ebx;

  /* See if the CPU capabilities are being overridden by the environment */
  env = getenv("VPX_SIMD_CAPS");
  if (env && *env) return (int)strtol(env, NULL, 0);

  env = getenv("VPX_SIMD_CAPS_MASK");
  if (env && *env) mask = (unsigned int)strtoul(env, NULL, 0);

  /* Ensure that the CPUID instruction supports extended features */
  cpuid(0, 0, max_cpuid_val, reg_ebx, reg_ecx, reg_edx);
  if (max_cpuid_val < 1) return 0;

  /* Get the standard feature flags */
  cpuid(1, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);

  if (reg_edx & BIT(23)) flags |= HAS_MMX;
  if (reg_edx & BIT(25)) flags |= HAS_SSE;  /* aka xmm */
  if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */
  if (reg_ecx & BIT(0)) flags |= HAS_SSE3;
  if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;
  if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;

  // bits 27 (OSXSAVE) & 28 (256-bit AVX)
  if ((reg_ecx & (BIT(27) | BIT(28))) == (BIT(27) | BIT(28))) {
    if ((xgetbv() & 0x6) == 0x6) {
      flags |= HAS_AVX;
      if (max_cpuid_val >= 7) {
        /* Get the leaf 7 feature flags. Needed to check for AVX2 support */
        cpuid(7, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
        if (reg_ebx & BIT(5)) flags |= HAS_AVX2;
      }
    }
  }
  return flags & mask;
}
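
/* Illustrative usage sketch (not part of the original header): dispatching
 * on the returned capability flags. fast_avx2_path() and generic_path() are
 * hypothetical functions.
 *
 *   const int caps = x86_simd_caps();
 *   if (caps & HAS_AVX2) {
 *     fast_avx2_path();
 *   } else {
 *     generic_path();
 *   }
 *
 * Setting VPX_SIMD_CAPS=0 in the environment forces the generic path, and
 * VPX_SIMD_CAPS_MASK can disable individual features for testing.
 */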
// Note:
//   The 32-bit CPU cycle counter is lightweight for most function-level
//   performance measurements. For long-running functions (CPU time greater
//   than a couple of seconds), use the 64-bit counter so the count does not
//   wrap.

// 32-bit CPU cycle counter
static INLINE unsigned int x86_readtsc(void) {
#if defined(__GNUC__) && __GNUC__
  unsigned int tsc;
  __asm__ __volatile__("rdtsc\n\t" : "=a"(tsc) :);
  return tsc;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  unsigned int tsc;
  asm volatile("rdtsc\n\t" : "=a"(tsc) :);
  return tsc;
#else
#if ARCH_X86_64
  return (unsigned int)__rdtsc();
#else
  __asm rdtsc;
#endif
#endif
}
// 64-bit CPU cycle counter
static INLINE uint64_t x86_readtsc64(void) {
#if defined(__GNUC__) && __GNUC__
  uint32_t hi, lo;
  __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  uint32_t hi, lo;
  asm volatile("rdtsc\n\t" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
#else
#if ARCH_X86_64
  return (uint64_t)__rdtsc();
#else
  __asm rdtsc;
#endif
#endif
}
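
/* Illustrative usage sketch (not part of the original header): timing a code
 * region with the 64-bit counter. work_under_test() is a hypothetical
 * function, and the raw cycle delta is only meaningful on a single core with
 * a constant-rate TSC.
 *
 *   const uint64_t start = x86_readtsc64();
 *   work_under_test();
 *   const uint64_t cycles = x86_readtsc64() - start;
 */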
#if defined(__GNUC__) && __GNUC__
#define x86_pause_hint() __asm__ __volatile__("pause \n\t")
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define x86_pause_hint() asm volatile("pause \n\t")
#else
#if ARCH_X86_64
#define x86_pause_hint() _mm_pause()
#else
#define x86_pause_hint() __asm pause
#endif
#endif
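
/* Illustrative usage sketch (not part of the original header): the pause
 * hint belongs in the body of a spin-wait loop, where it reduces power use
 * and memory-order violations on hyper-threaded CPUs. `flag` is a
 * hypothetical variable set by another thread; in real code it should be
 * atomic or at least volatile.
 *
 *   while (!flag) {
 *     x86_pause_hint();
 *   }
 */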
#if defined(__GNUC__) && __GNUC__
static INLINE void x87_set_control_word(unsigned short mode) {
  __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
}
static INLINE unsigned short x87_get_control_word(void) {
  unsigned short mode;
  __asm__ __volatile__("fstcw %0\n\t" : "=m"(*&mode) :);
  return mode;
}
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
static INLINE void x87_set_control_word(unsigned short mode) {
  asm volatile("fldcw %0" : : "m"(*&mode));
}
static INLINE unsigned short x87_get_control_word(void) {
  unsigned short mode;
  asm volatile("fstcw %0\n\t" : "=m"(*&mode) :);
  return mode;
}
#elif ARCH_X86_64
/* No fldcw intrinsics on Windows x64, punt to external asm. */
extern void vpx_winx64_fldcw(unsigned short mode);
extern unsigned short vpx_winx64_fstcw(void);
#define x87_set_control_word vpx_winx64_fldcw
#define x87_get_control_word vpx_winx64_fstcw
#else
static INLINE void x87_set_control_word(unsigned short mode) {
  __asm { fldcw mode }
}
static INLINE unsigned short x87_get_control_word(void) {
  unsigned short mode;
  __asm { fstcw mode }
  return mode;
}
#endif
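
/* In the x87 control word, bits 8-9 (mask 0x300) form the precision-control
 * field; the value 0x200 selects 53-bit (double) precision. The function
 * below clears the field, sets double precision, and returns the previous
 * control word so the caller can restore it. */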
static INLINE unsigned int x87_set_double_precision(void) {
  unsigned int mode = x87_get_control_word();
  x87_set_control_word((mode & ~0x300) | 0x200);
  return mode;
}
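
/* Illustrative usage sketch (not part of the original header): save, switch,
 * and restore the x87 precision mode around a computation.
 *
 *   const unsigned int saved = x87_set_double_precision();
 *   ...code that requires 53-bit x87 precision...
 *   x87_set_control_word((unsigned short)saved);
 */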
extern void vpx_reset_mmx_state(void);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // VPX_PORTS_X86_H_