tvgSwRasterAvx.h
/*
 * Copyright (c) 2021 - 2024 the ThorVG project. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
  19. #ifdef THORVG_AVX_VECTOR_SUPPORT
  20. #include <immintrin.h>
  21. #define N_32BITS_IN_128REG 4
  22. #define N_32BITS_IN_256REG 8
  23. static inline __m128i ALPHA_BLEND(__m128i c, __m128i a)
  24. {
  25. //1. set the masks for the A/G and R/B channels
  26. auto AG = _mm_set1_epi32(0xff00ff00);
  27. auto RB = _mm_set1_epi32(0x00ff00ff);
  28. //2. mask the alpha vector - originally quartet [a, a, a, a]
  29. auto aAG = _mm_and_si128(a, AG);
  30. auto aRB = _mm_and_si128(a, RB);
  31. //3. calculate the alpha blending of the 2nd and 4th channel
  32. //- mask the color vector
  33. //- multiply it by the masked alpha vector
  34. //- add the correction to compensate bit shifting used instead of dividing by 255
  35. //- shift bits - corresponding to division by 256
  36. auto even = _mm_and_si128(c, RB);
  37. even = _mm_mullo_epi16(even, aRB);
  38. even =_mm_add_epi16(even, RB);
  39. even = _mm_srli_epi16(even, 8);
  40. //4. calculate the alpha blending of the 1st and 3rd channel:
  41. //- mask the color vector
  42. //- multiply it by the corresponding masked alpha vector and store the high bits of the result
  43. //- add the correction to compensate division by 256 instead of by 255 (next step)
  44. //- remove the low 8 bits to mimic the division by 256
  45. auto odd = _mm_and_si128(c, AG);
  46. odd = _mm_mulhi_epu16(odd, aAG);
  47. odd = _mm_add_epi16(odd, RB);
  48. odd = _mm_and_si128(odd, AG);
  49. //5. the final result
  50. return _mm_or_si128(odd, even);
  51. }
  52. static void avxRasterPixel32(uint32_t *dst, uint32_t val, uint32_t offset, int32_t len)
  53. {
  54. //1. calculate how many iterations we need to cover the length
  55. uint32_t iterations = len / N_32BITS_IN_256REG;
  56. uint32_t avxFilled = iterations * N_32BITS_IN_256REG;
  57. //2. set the beginning of the array
  58. dst += offset;
  59. //3. fill the octets
  60. for (uint32_t i = 0; i < iterations; ++i, dst += N_32BITS_IN_256REG) {
  61. _mm256_storeu_si256((__m256i*)dst, _mm256_set1_epi32(val));
  62. }
  63. //4. fill leftovers (in the first step we have to set the pointer to the place where the avx job is done)
  64. int32_t leftovers = len - avxFilled;
  65. while (leftovers--) *dst++ = val;
  66. }
  67. static bool avxRasterTranslucentRect(SwSurface* surface, const SwBBox& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
  68. {
  69. if (surface->channelSize != sizeof(uint32_t)) {
  70. TVGERR("SW_ENGINE", "Unsupported Channel Size = %d", surface->channelSize);
  71. return false;
  72. }
  73. auto color = surface->join(r, g, b, a);
  74. auto buffer = surface->buf32 + (region.min.y * surface->stride) + region.min.x;
  75. auto h = static_cast<uint32_t>(region.max.y - region.min.y);
  76. auto w = static_cast<uint32_t>(region.max.x - region.min.x);
  77. uint32_t ialpha = 255 - a;
  78. auto avxColor = _mm_set1_epi32(color);
  79. auto avxIalpha = _mm_set1_epi8(ialpha);
  80. for (uint32_t y = 0; y < h; ++y) {
  81. auto dst = &buffer[y * surface->stride];
  82. //1. fill the not aligned memory (for 128-bit registers a 16-bytes alignment is required)
  83. auto notAligned = ((uintptr_t)dst & 0xf) / 4;
  84. if (notAligned) {
  85. notAligned = (N_32BITS_IN_128REG - notAligned > w ? w : N_32BITS_IN_128REG - notAligned);
  86. for (uint32_t x = 0; x < notAligned; ++x, ++dst) {
  87. *dst = color + ALPHA_BLEND(*dst, ialpha);
  88. }
  89. }
  90. //2. fill the aligned memory - N_32BITS_IN_128REG pixels processed at once
  91. uint32_t iterations = (w - notAligned) / N_32BITS_IN_128REG;
  92. uint32_t avxFilled = iterations * N_32BITS_IN_128REG;
  93. auto avxDst = (__m128i*)dst;
  94. for (uint32_t x = 0; x < iterations; ++x, ++avxDst) {
  95. *avxDst = _mm_add_epi32(avxColor, ALPHA_BLEND(*avxDst, avxIalpha));
  96. }
  97. //3. fill the remaining pixels
  98. int32_t leftovers = w - notAligned - avxFilled;
  99. dst += avxFilled;
  100. while (leftovers--) {
  101. *dst = color + ALPHA_BLEND(*dst, ialpha);
  102. dst++;
  103. }
  104. }
  105. return true;
  106. }
static bool avxRasterTranslucentRle(SwSurface* surface, const SwRleData* rle, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
    //Blends a translucent color over every horizontal span of an RLE-encoded
    //shape: dst = src + dst * ialpha, where src is the (coverage-scaled) color.
    //Only 32-bit surfaces are handled; anything else is rejected.
    if (surface->channelSize != sizeof(uint32_t)) {
        TVGERR("SW_ENGINE", "Unsupported Channel Size = %d", surface->channelSize);
        return false;
    }
    auto color = surface->join(r, g, b, a);
    auto span = rle->spans;
    uint32_t src;
    for (uint32_t i = 0; i < rle->size; ++i) {
        auto dst = &surface->buf32[span->y * surface->stride + span->x];
        //attenuate the color by the span's coverage (anti-aliased edges);
        //NOTE(review): this is the scalar ALPHA_BLEND overload defined in project headers,
        //not the __m128i one above — confirm
        if (span->coverage < 255) src = ALPHA_BLEND(color, span->coverage);
        else src = color;
        //NOTE(review): IA() presumably extracts the inverse alpha (255 - A) of src;
        //it comes from the project headers — confirm
        auto ialpha = IA(src);
        //1. fill the not aligned memory (for 128-bit registers a 16-bytes alignment is required)
        auto notAligned = ((uintptr_t)dst & 0xf) / 4;
        if (notAligned) {
            //clamp the prologue to the span length for spans shorter than the alignment gap
            notAligned = (N_32BITS_IN_128REG - notAligned > span->len ? span->len : N_32BITS_IN_128REG - notAligned);
            for (uint32_t x = 0; x < notAligned; ++x, ++dst) {
                *dst = src + ALPHA_BLEND(*dst, ialpha);
            }
        }
        //2. fill the aligned memory using avx - N_32BITS_IN_128REG pixels processed at once
        //In order to avoid unneccessary avx variables declarations a check is made whether there are any iterations at all
        uint32_t iterations = (span->len - notAligned) / N_32BITS_IN_128REG;
        uint32_t avxFilled = 0;
        if (iterations > 0) {
            //broadcast once per span: src per 32-bit lane, inverse alpha per byte
            auto avxSrc = _mm_set1_epi32(src);
            auto avxIalpha = _mm_set1_epi8(ialpha);
            avxFilled = iterations * N_32BITS_IN_128REG;
            auto avxDst = (__m128i*)dst;
            for (uint32_t x = 0; x < iterations; ++x, ++avxDst) {
                //aligned load/store through *avxDst (dst was aligned by step 1)
                *avxDst = _mm_add_epi32(avxSrc, ALPHA_BLEND(*avxDst, avxIalpha));
            }
        }
        //3. fill the remaining pixels
        int32_t leftovers = span->len - notAligned - avxFilled;
        dst += avxFilled;
        while (leftovers--) {
            *dst = src + ALPHA_BLEND(*dst, ialpha);
            dst++;
        }
        ++span;
    }
    return true;
}
  153. #endif