port.h

/* Copyright 2013 Google Inc. All Rights Reserved.
   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Macros for endianness, branch prediction and unaligned loads and stores. */

#ifndef BROTLI_ENC_PORT_H_
#define BROTLI_ENC_PORT_H_

#include <assert.h>
#include <string.h>  /* memcpy */

#include "../common/port.h"
#include "../common/types.h"

#if defined OS_LINUX || defined OS_CYGWIN
#include <endian.h>
#elif defined OS_FREEBSD
#include <machine/endian.h>
#elif defined OS_MACOSX
#include <machine/endian.h>
/* Let's try and follow the Linux convention */
#define __BYTE_ORDER BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#endif

/* Define the macro IS_LITTLE_ENDIAN, using the endian definitions from
   endian.h if it was included above. */
#ifdef __BYTE_ORDER
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define IS_LITTLE_ENDIAN
#endif
#else
#if defined(__LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif
#endif  /* __BYTE_ORDER */

#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif

/* Enable little-endian optimization for x64 architecture on Windows. */
#if (defined(_WIN32) || defined(_WIN64)) && defined(_M_X64)
#define IS_LITTLE_ENDIAN
#endif
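
/* Illustrative sketch (not part of the original header, kept compiled out
   via #if 0): how client code can branch on IS_LITTLE_ENDIAN to read a
   32-bit little-endian value. The helper name ExampleReadLE32 is
   hypothetical. */
#if 0
static BROTLI_INLINE uint32_t ExampleReadLE32(const uint8_t* bytes) {
#if defined(IS_LITTLE_ENDIAN)
  /* Host byte order already matches the wire format; copy the raw bytes. */
  uint32_t v;
  memcpy(&v, bytes, sizeof v);
  return v;
#else
  /* Assemble the value byte by byte on big-endian hosts. */
  return (uint32_t)bytes[0] | ((uint32_t)bytes[1] << 8) |
         ((uint32_t)bytes[2] << 16) | ((uint32_t)bytes[3] << 24);
#endif
}
#endif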

/* Portable handling of unaligned loads, stores, and copies.
   On some platforms, like ARM, the copy functions can be more efficient
   than a load and a store. */

#if defined(ARCH_PIII) || \
    defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)

/* x86 and x86-64 can perform unaligned loads/stores directly;
   modern PowerPC hardware can also do unaligned integer loads and stores;
   but note: the FPU still sends unaligned loads and stores to a trap handler!
*/

#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_LOAD64(_p) (*(const uint64_t *)(_p))

#define BROTLI_UNALIGNED_STORE32(_p, _val) \
  (*(uint32_t *)(_p) = (_val))
#define BROTLI_UNALIGNED_STORE64(_p, _val) \
  (*(uint64_t *)(_p) = (_val))

#elif defined(__arm__) && \
    !defined(__ARM_ARCH_5__) && \
    !defined(__ARM_ARCH_5T__) && \
    !defined(__ARM_ARCH_5TE__) && \
    !defined(__ARM_ARCH_5TEJ__) && \
    !defined(__ARM_ARCH_6__) && \
    !defined(__ARM_ARCH_6J__) && \
    !defined(__ARM_ARCH_6K__) && \
    !defined(__ARM_ARCH_6Z__) && \
    !defined(__ARM_ARCH_6ZK__) && \
    !defined(__ARM_ARCH_6T2__)

/* ARMv7 and newer support native unaligned accesses, but only of 16-bit
   and 32-bit values (not 64-bit); older versions either raise a fatal signal,
   do an unaligned read and rotate the words around a bit, or do the reads very
   slowly (trip through kernel mode). */

#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_STORE32(_p, _val) \
  (*(uint32_t *)(_p) = (_val))

static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}

#else

/* These functions are provided for architectures that don't support
   unaligned loads and stores. */

static BROTLI_INLINE uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof v);
}

static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}

#endif
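
/* Illustrative sketch (not part of the original header, compiled out via
   #if 0): copying an unaligned 64-bit word between byte buffers with the
   macros above. The function name is hypothetical. */
#if 0
static BROTLI_INLINE void ExampleCopyWord64(uint8_t* dst, const uint8_t* src) {
  /* Safe even when src/dst are not 8-byte aligned: each branch above either
     issues a direct unaligned access or falls back to memcpy. */
  BROTLI_UNALIGNED_STORE64(dst, BROTLI_UNALIGNED_LOAD64(src));
}
#endif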

/* BROTLI_RESTRICT maps to the C99 "restrict" qualifier where available
   (or the compiler-specific __restrict) and expands to nothing otherwise. */
#if !defined(__cplusplus) && !defined(c_plusplus) && __STDC_VERSION__ >= 199901L
#define BROTLI_RESTRICT restrict
#elif BROTLI_GCC_VERSION > 295 || defined(__llvm__)
#define BROTLI_RESTRICT __restrict
#else
#define BROTLI_RESTRICT
#endif
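
/* Illustrative sketch (not part of the original header, compiled out via
   #if 0): a hypothetical signature using BROTLI_RESTRICT to promise the
   compiler that the two buffers never alias. */
#if 0
static void ExampleCopy(uint8_t* BROTLI_RESTRICT dst,
                        const uint8_t* BROTLI_RESTRICT src, size_t n) {
  memcpy(dst, src, n);
}
#endif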

/* Instantiate type-safe min/max helpers for the value types used by the
   encoder; BROTLI_MIN/BROTLI_MAX below dispatch on the type name. */
#define _TEMPLATE(T) \
  static BROTLI_INLINE T brotli_min_ ## T (T a, T b) { return a < b ? a : b; } \
  static BROTLI_INLINE T brotli_max_ ## T (T a, T b) { return a > b ? a : b; }
_TEMPLATE(double) _TEMPLATE(float) _TEMPLATE(int)
_TEMPLATE(size_t) _TEMPLATE(uint32_t) _TEMPLATE(uint8_t)
#undef _TEMPLATE
#define BROTLI_MIN(T, A, B) (brotli_min_ ## T((A), (B)))
#define BROTLI_MAX(T, A, B) (brotli_max_ ## T((A), (B)))

#define BROTLI_SWAP(T, A, I, J) { \
  T __brotli_swap_tmp = (A)[(I)]; \
  (A)[(I)] = (A)[(J)]; \
  (A)[(J)] = __brotli_swap_tmp; \
}
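
/* Illustrative sketch (not part of the original header, compiled out via
   #if 0): the helpers above take an explicit type name as their first
   argument. The function and variable names are hypothetical. */
#if 0
static void ExampleUtilities(uint32_t* lengths, size_t n) {
  /* Expands to brotli_min_uint32_t(lengths[0], 255u). */
  uint32_t capped = BROTLI_MIN(uint32_t, lengths[0], 255u);
  size_t at_least_16 = BROTLI_MAX(size_t, n, (size_t)16);
  BROTLI_SWAP(uint32_t, lengths, 0, 1);  /* Swap the first two entries. */
  (void)capped;
  (void)at_least_16;
}
#endif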

/* Grow array A (allocated with memory manager M, element type T, current
   capacity C) until it can hold at least R elements, doubling as needed. */
#define BROTLI_ENSURE_CAPACITY(M, T, A, C, R) { \
  if (C < (R)) { \
    size_t _new_size = (C == 0) ? (R) : C; \
    T* new_array; \
    while (_new_size < (R)) _new_size *= 2; \
    new_array = BROTLI_ALLOC((M), T, _new_size); \
    if (!BROTLI_IS_OOM(M)) \
      memcpy(new_array, A, C * sizeof(T)); \
    BROTLI_FREE((M), A); \
    A = new_array; \
    C = _new_size; \
  } \
}
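
/* Illustrative sketch (not part of the original header, compiled out via
   #if 0): a typical append helper built on BROTLI_ENSURE_CAPACITY. The
   MemoryManager usage and all names here are hypothetical. */
#if 0
static void ExampleAppend(MemoryManager* m, uint32_t value,
                          uint32_t** data, size_t* size, size_t* capacity) {
  BROTLI_ENSURE_CAPACITY(m, uint32_t, *data, *capacity, *size + 1);
  if (BROTLI_IS_OOM(m)) return;  /* Allocation failed; nothing was appended. */
  (*data)[(*size)++] = value;
}
#endif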

#endif  /* BROTLI_ENC_PORT_H_ */