mixer_sse3.c

/**
 * OpenAL cross platform audio library, SSE3 mixer functions
 *
 * Copyright (C) 2014 by Timothy Arceri <[email protected]>.
 * Copyright (C) 2015 by Chris Robinson <[email protected]>.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * Or go to http://www.gnu.org/copyleft/lgpl.html
 */
#include "config.h"

#include <xmmintrin.h>
#include <emmintrin.h>
#include <pmmintrin.h>

#include "alu.h"
#include "mixer_defs.h"

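/* 4-tap FIR resampler, SSE3 variant. The 32-bit fixed-point source position
 * (low FRACTIONBITS bits are the fraction) is split across four SIMD lanes so
 * that four output samples are produced per loop iteration: each lane loads
 * its own 4-sample window and coefficient row, and two rounds of _mm_hadd_ps
 * collapse the products into the four dot products stored to dst. Any
 * leftover samples (fewer than four) use the scalar resample_fir4() path.
 */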
const ALfloat *Resample_fir4_32_SSE3(const BsincState* UNUSED(state),
  const ALfloat *src, ALuint frac, ALuint increment,
  ALfloat *restrict dst, ALuint numsamples)
{
    const __m128i increment4 = _mm_set1_epi32(increment*4);
    const __m128i fracMask4 = _mm_set1_epi32(FRACTIONMASK);
    alignas(16) union { ALuint i[4]; float f[4]; } pos_;
    alignas(16) union { ALuint i[4]; float f[4]; } frac_;
    __m128i frac4, pos4;
    ALuint pos;
    ALuint i;

    InitiatePositionArrays(frac, increment, frac_.i, pos_.i, 4);

    frac4 = _mm_castps_si128(_mm_load_ps(frac_.f));
    pos4 = _mm_castps_si128(_mm_load_ps(pos_.f));

    /* Step the source pointer back one sample so each 4-tap window covers
     * src[pos-1]..src[pos+2] relative to the caller's buffer.
     */
    --src;
    for(i = 0;numsamples-i > 3;i += 4)
    {
        /* Load each lane's 4-sample input window and the coefficient row for
         * that lane's current fractional position. */
        const __m128 val0 = _mm_loadu_ps(&src[pos_.i[0]]);
        const __m128 val1 = _mm_loadu_ps(&src[pos_.i[1]]);
        const __m128 val2 = _mm_loadu_ps(&src[pos_.i[2]]);
        const __m128 val3 = _mm_loadu_ps(&src[pos_.i[3]]);
        __m128 k0 = _mm_load_ps(ResampleCoeffs.FIR4[frac_.i[0]]);
        __m128 k1 = _mm_load_ps(ResampleCoeffs.FIR4[frac_.i[1]]);
        __m128 k2 = _mm_load_ps(ResampleCoeffs.FIR4[frac_.i[2]]);
        __m128 k3 = _mm_load_ps(ResampleCoeffs.FIR4[frac_.i[3]]);
        __m128 out;

        /* Multiply, then use two rounds of horizontal adds to reduce the four
         * products per lane into four dot products. */
        k0 = _mm_mul_ps(k0, val0);
        k1 = _mm_mul_ps(k1, val1);
        k2 = _mm_mul_ps(k2, val2);
        k3 = _mm_mul_ps(k3, val3);

        k0 = _mm_hadd_ps(k0, k1);
        k2 = _mm_hadd_ps(k2, k3);
        out = _mm_hadd_ps(k0, k2);

        _mm_store_ps(&dst[i], out);

        /* Advance each lane's fixed-point position by four output samples'
         * worth of increment. */
        frac4 = _mm_add_epi32(frac4, increment4);
        pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
        frac4 = _mm_and_si128(frac4, fracMask4);

        _mm_store_ps(pos_.f, _mm_castsi128_ps(pos4));
        _mm_store_ps(frac_.f, _mm_castsi128_ps(frac4));
    }
    /* NOTE: These four elements represent the position *after* the last four
     * samples, so the lowest element is the next position to resample.
     */
    pos = pos_.i[0];
    frac = frac_.i[0];
    for(;i < numsamples;i++)
    {
        dst[i] = resample_fir4(src[pos], src[pos+1], src[pos+2], src[pos+3], frac);

        frac += increment;
        pos  += frac>>FRACTIONBITS;
        frac &= FRACTIONMASK;
    }
    return dst;
}
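
/* 8-tap FIR resampler, SSE3 variant. Same structure as the 4-tap version
 * above, except each lane's dot product covers eight samples: the inner j
 * loop computes partial horizontal sums for taps 0-3 and 4-7, which are then
 * added together. Leftover samples use the scalar resample_fir8() path.
 */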
const ALfloat *Resample_fir8_32_SSE3(const BsincState* UNUSED(state),
  const ALfloat *src, ALuint frac, ALuint increment,
  ALfloat *restrict dst, ALuint numsamples)
{
    const __m128i increment4 = _mm_set1_epi32(increment*4);
    const __m128i fracMask4 = _mm_set1_epi32(FRACTIONMASK);
    alignas(16) union { ALuint i[4]; float f[4]; } pos_;
    alignas(16) union { ALuint i[4]; float f[4]; } frac_;
    __m128i frac4, pos4;
    ALuint pos;
    ALuint i, j;

    InitiatePositionArrays(frac, increment, frac_.i, pos_.i, 4);

    frac4 = _mm_castps_si128(_mm_load_ps(frac_.f));
    pos4 = _mm_castps_si128(_mm_load_ps(pos_.f));

    /* Step the source pointer back three samples so each 8-tap window covers
     * src[pos-3]..src[pos+4] relative to the caller's buffer.
     */
    src -= 3;
    for(i = 0;numsamples-i > 3;i += 4)
    {
        __m128 out[2];
        /* Compute partial dot products for taps 0-3, then taps 4-7, across
         * all four lanes. */
        for(j = 0;j < 8;j+=4)
        {
            const __m128 val0 = _mm_loadu_ps(&src[pos_.i[0]+j]);
            const __m128 val1 = _mm_loadu_ps(&src[pos_.i[1]+j]);
            const __m128 val2 = _mm_loadu_ps(&src[pos_.i[2]+j]);
            const __m128 val3 = _mm_loadu_ps(&src[pos_.i[3]+j]);
            __m128 k0 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[0]][j]);
            __m128 k1 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[1]][j]);
            __m128 k2 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[2]][j]);
            __m128 k3 = _mm_load_ps(&ResampleCoeffs.FIR8[frac_.i[3]][j]);

            k0 = _mm_mul_ps(k0, val0);
            k1 = _mm_mul_ps(k1, val1);
            k2 = _mm_mul_ps(k2, val2);
            k3 = _mm_mul_ps(k3, val3);

            k0 = _mm_hadd_ps(k0, k1);
            k2 = _mm_hadd_ps(k2, k3);
            out[j>>2] = _mm_hadd_ps(k0, k2);
        }
        /* Sum the two partial results and store four output samples. */
        out[0] = _mm_add_ps(out[0], out[1]);
        _mm_store_ps(&dst[i], out[0]);

        frac4 = _mm_add_epi32(frac4, increment4);
        pos4 = _mm_add_epi32(pos4, _mm_srli_epi32(frac4, FRACTIONBITS));
        frac4 = _mm_and_si128(frac4, fracMask4);

        _mm_store_ps(pos_.f, _mm_castsi128_ps(pos4));
        _mm_store_ps(frac_.f, _mm_castsi128_ps(frac4));
    }
    pos = pos_.i[0];
    frac = frac_.i[0];
    for(;i < numsamples;i++)
    {
        dst[i] = resample_fir8(src[pos  ], src[pos+1], src[pos+2], src[pos+3],
                               src[pos+4], src[pos+5], src[pos+6], src[pos+7], frac);

        frac += increment;
        pos  += frac>>FRACTIONBITS;
        frac &= FRACTIONMASK;
    }
    return dst;
}
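
/* Usage sketch (illustrative only; everything here other than the two
 * resamplers above is a hypothetical caller). The source position is kept in
 * 32-bit fixed point with the low FRACTIONBITS bits as the fraction, so a
 * caller resampling at a pitch ratio of srcrate/dstrate would do roughly:
 *
 *     ALuint increment = (ALuint)(pitch * (1<<FRACTIONBITS));
 *     ALuint frac = 0;
 *     Resample_fir4_32_SSE3(NULL, srcdata, frac, increment, outbuf, outlen);
 *
 * srcdata must hold enough valid samples around each read position to cover
 * the FIR taps: one before and two after for the 4-tap path, three before and
 * four after for the 8-tap path.
 */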