  1. //$ nobt
  2. /* Copyright (c) 2013 Julien Pommier ( [email protected] )
  3. * Copyright (c) 2023 Christopher Robinson
  4. *
  5. * Based on original fortran 77 code from FFTPACKv4 from NETLIB
  6. * (http://www.netlib.org/fftpack), authored by Dr Paul Swarztrauber
  7. * of NCAR, in 1985.
  8. *
  9. * As confirmed by the NCAR fftpack software curators, the following
  10. * FFTPACKv5 license applies to FFTPACKv4 sources. My changes are
  11. * released under the same terms.
  12. *
  13. * FFTPACK license:
  14. *
  15. * http://www.cisl.ucar.edu/css/software/fftpack5/ftpk.html
  16. *
  17. * Copyright (c) 2004 the University Corporation for Atmospheric
  18. * Research ("UCAR"). All rights reserved. Developed by NCAR's
  19. * Computational and Information Systems Laboratory, UCAR,
  20. * www.cisl.ucar.edu.
  21. *
  22. * Redistribution and use of the Software in source and binary forms,
  23. * with or without modification, is permitted provided that the
  24. * following conditions are met:
  25. *
  26. * - Neither the names of NCAR's Computational and Information Systems
  27. * Laboratory, the University Corporation for Atmospheric Research,
  28. * nor the names of its sponsors or contributors may be used to
  29. * endorse or promote products derived from this Software without
  30. * specific prior written permission.
  31. *
  32. * - Redistributions of source code must retain the above copyright
  33. * notices, this list of conditions, and the disclaimer below.
  34. *
  35. * - Redistributions in binary form must reproduce the above copyright
  36. * notice, this list of conditions, and the disclaimer below in the
  37. * documentation and/or other materials provided with the
  38. * distribution.
  39. *
  40. * THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  41. * EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO THE WARRANTIES OF
  42. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  43. * NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
  44. * HOLDERS BE LIABLE FOR ANY CLAIM, INDIRECT, INCIDENTAL, SPECIAL,
  45. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  46. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  47. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
  48. * SOFTWARE.
  49. *
  50. *
 * PFFFT : a Pretty Fast FFT.
 *
 * This file is largely based on the original FFTPACK implementation, modified
 * in order to take advantage of the SIMD instructions of modern CPUs.
 */
  56. #include "pffft.h"
  57. #include <algorithm>
  58. #include <array>
  59. #include <cassert>
  60. #include <cmath>
  61. #include <cstdint>
  62. #include <cstdio>
  63. #include <cstring>
  64. #include <new>
  65. #include <utility>
  66. #include <vector>
  67. #include "albit.h"
  68. #include "almalloc.h"
  69. #include "alnumbers.h"
  70. #include "alnumeric.h"
  71. #include "alspan.h"
  72. #include "opthelpers.h"
  73. using uint = unsigned int;
  74. namespace {
  75. #if defined(__GNUC__) || defined(_MSC_VER)
  76. #define RESTRICT __restrict
  77. #else
  78. #define RESTRICT
  79. #endif
/* Vector support macros: the rest of the code is independent of
 * SSE/Altivec/NEON -- adding support for another platform with 4-element
 * vectors should only require changing these macros.
 */
  84. /* Define PFFFT_SIMD_DISABLE if you want to use scalar code instead of SIMD code */
  85. //#define PFFFT_SIMD_DISABLE
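/* For example, rather than editing this file, the scalar fallback can be
 * selected from the compiler command line: -DPFFFT_SIMD_DISABLE with
 * GCC/Clang, or /DPFFFT_SIMD_DISABLE with MSVC.
 */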
  86. #ifndef PFFFT_SIMD_DISABLE
  87. /*
  88. * Altivec support macros
  89. */
  90. #if defined(__ppc__) || defined(__ppc64__) || defined(__powerpc__) || defined(__powerpc64__)
  91. #include <altivec.h>
  92. using v4sf = vector float;
  93. constexpr uint SimdSize{4};
  94. force_inline v4sf vzero() noexcept { return (vector float)vec_splat_u8(0); }
  95. force_inline v4sf vmul(v4sf a, v4sf b) noexcept { return vec_madd(a, b, vzero()); }
  96. force_inline v4sf vadd(v4sf a, v4sf b) noexcept { return vec_add(a, b); }
  97. force_inline v4sf vmadd(v4sf a, v4sf b, v4sf c) noexcept { return vec_madd(a, b, c); }
  98. force_inline v4sf vsub(v4sf a, v4sf b) noexcept { return vec_sub(a, b); }
  99. force_inline v4sf ld_ps1(float a) noexcept { return vec_splats(a); }
  100. force_inline v4sf vset4(float a, float b, float c, float d) noexcept
  101. {
/* Is there a more efficient way to do this? */
  103. alignas(16) std::array<float,4> vals{{a, b, c, d}};
  104. return vec_ld(0, vals.data());
  105. }
  106. force_inline v4sf vinsert0(v4sf v, float a) noexcept { return vec_insert(a, v, 0); }
  107. force_inline float vextract0(v4sf v) noexcept { return vec_extract(v, 0); }
  108. force_inline void interleave2(v4sf in1, v4sf in2, v4sf &out1, v4sf &out2) noexcept
  109. {
  110. out1 = vec_mergeh(in1, in2);
  111. out2 = vec_mergel(in1, in2);
  112. }
  113. force_inline void uninterleave2(v4sf in1, v4sf in2, v4sf &out1, v4sf &out2) noexcept
  114. {
  115. out1 = vec_perm(in1, in2, (vector unsigned char){0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27});
  116. out2 = vec_perm(in1, in2, (vector unsigned char){4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31});
  118. }
  119. force_inline void vtranspose4(v4sf &x0, v4sf &x1, v4sf &x2, v4sf &x3) noexcept
  120. {
  121. v4sf y0{vec_mergeh(x0, x2)};
  122. v4sf y1{vec_mergel(x0, x2)};
  123. v4sf y2{vec_mergeh(x1, x3)};
  124. v4sf y3{vec_mergel(x1, x3)};
  125. x0 = vec_mergeh(y0, y2);
  126. x1 = vec_mergel(y0, y2);
  127. x2 = vec_mergeh(y1, y3);
  128. x3 = vec_mergel(y1, y3);
  129. }
  130. force_inline v4sf vswaphl(v4sf a, v4sf b) noexcept
  131. { return vec_perm(a,b, (vector unsigned char){16,17,18,19,20,21,22,23,8,9,10,11,12,13,14,15}); }
  132. /*
  133. * SSE1 support macros
  134. */
  135. #elif defined(__x86_64__) || defined(__SSE__) || defined(_M_X64) || \
  136. (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
  137. #include <xmmintrin.h>
  138. using v4sf = __m128;
/* Four floats per SIMD vector -- this is pretty much hardcoded in the
 * preprocess/finalize functions anyway, so enabling AVX with its 256-bit
 * vectors would take some extra work.
 */
  143. constexpr uint SimdSize{4};
  144. force_inline v4sf vzero() noexcept { return _mm_setzero_ps(); }
  145. force_inline v4sf vmul(v4sf a, v4sf b) noexcept { return _mm_mul_ps(a, b); }
  146. force_inline v4sf vadd(v4sf a, v4sf b) noexcept { return _mm_add_ps(a, b); }
  147. force_inline v4sf vmadd(v4sf a, v4sf b, v4sf c) noexcept { return _mm_add_ps(_mm_mul_ps(a,b), c); }
  148. force_inline v4sf vsub(v4sf a, v4sf b) noexcept { return _mm_sub_ps(a, b); }
  149. force_inline v4sf ld_ps1(float a) noexcept { return _mm_set1_ps(a); }
  150. force_inline v4sf vset4(float a, float b, float c, float d) noexcept
  151. { return _mm_setr_ps(a, b, c, d); }
  152. force_inline v4sf vinsert0(const v4sf v, const float a) noexcept
  153. { return _mm_move_ss(v, _mm_set_ss(a)); }
  154. force_inline float vextract0(v4sf v) noexcept
  155. { return _mm_cvtss_f32(v); }
  156. force_inline void interleave2(const v4sf in1, const v4sf in2, v4sf &out1, v4sf &out2) noexcept
  157. {
  158. out1 = _mm_unpacklo_ps(in1, in2);
  159. out2 = _mm_unpackhi_ps(in1, in2);
  160. }
  161. force_inline void uninterleave2(v4sf in1, v4sf in2, v4sf &out1, v4sf &out2) noexcept
  162. {
  163. out1 = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(2,0,2,0));
  164. out2 = _mm_shuffle_ps(in1, in2, _MM_SHUFFLE(3,1,3,1));
  165. }
  166. force_inline void vtranspose4(v4sf &x0, v4sf &x1, v4sf &x2, v4sf &x3) noexcept
  167. { _MM_TRANSPOSE4_PS(x0, x1, x2, x3); }
  168. force_inline v4sf vswaphl(v4sf a, v4sf b) noexcept
  169. { return _mm_shuffle_ps(b, a, _MM_SHUFFLE(3,2,1,0)); }
  170. /*
  171. * ARM NEON support macros
  172. */
  173. #elif defined(__ARM_NEON) || defined(__aarch64__) || defined(__arm64) || defined(_M_ARM64)
  174. #include <arm_neon.h>
  175. using v4sf = float32x4_t;
  176. constexpr uint SimdSize{4};
  177. force_inline v4sf vzero() noexcept { return vdupq_n_f32(0.0f); }
  178. force_inline v4sf vmul(v4sf a, v4sf b) noexcept { return vmulq_f32(a, b); }
  179. force_inline v4sf vadd(v4sf a, v4sf b) noexcept { return vaddq_f32(a, b); }
  180. force_inline v4sf vmadd(v4sf a, v4sf b, v4sf c) noexcept { return vmlaq_f32(c, a, b); }
  181. force_inline v4sf vsub(v4sf a, v4sf b) noexcept { return vsubq_f32(a, b); }
  182. force_inline v4sf ld_ps1(float a) noexcept { return vdupq_n_f32(a); }
  183. force_inline v4sf vset4(float a, float b, float c, float d) noexcept
  184. {
  185. float32x4_t ret{vmovq_n_f32(a)};
  186. ret = vsetq_lane_f32(b, ret, 1);
  187. ret = vsetq_lane_f32(c, ret, 2);
  188. ret = vsetq_lane_f32(d, ret, 3);
  189. return ret;
  190. }
  191. force_inline v4sf vinsert0(v4sf v, float a) noexcept
  192. { return vsetq_lane_f32(a, v, 0); }
  193. force_inline float vextract0(v4sf v) noexcept
  194. { return vgetq_lane_f32(v, 0); }
  195. force_inline void interleave2(v4sf in1, v4sf in2, v4sf &out1, v4sf &out2) noexcept
  196. {
  197. float32x4x2_t tmp{vzipq_f32(in1, in2)};
  198. out1 = tmp.val[0];
  199. out2 = tmp.val[1];
  200. }
  201. force_inline void uninterleave2(v4sf in1, v4sf in2, v4sf &out1, v4sf &out2) noexcept
  202. {
  203. float32x4x2_t tmp{vuzpq_f32(in1, in2)};
  204. out1 = tmp.val[0];
  205. out2 = tmp.val[1];
  206. }
  207. force_inline void vtranspose4(v4sf &x0, v4sf &x1, v4sf &x2, v4sf &x3) noexcept
  208. {
/* A marginally faster version using inline assembly:
 * asm("vtrn.32 %q0, %q1\n"
 *     "vtrn.32 %q2, %q3\n"
 *     "vswp %f0, %e2\n"
 *     "vswp %f1, %e3"
 *     : "+w"(x0), "+w"(x1), "+w"(x2), "+w"(x3)::);
 */
  216. float32x4x2_t t0_{vzipq_f32(x0, x2)};
  217. float32x4x2_t t1_{vzipq_f32(x1, x3)};
  218. float32x4x2_t u0_{vzipq_f32(t0_.val[0], t1_.val[0])};
  219. float32x4x2_t u1_{vzipq_f32(t0_.val[1], t1_.val[1])};
  220. x0 = u0_.val[0];
  221. x1 = u0_.val[1];
  222. x2 = u1_.val[0];
  223. x3 = u1_.val[1];
  224. }
  225. force_inline v4sf vswaphl(v4sf a, v4sf b) noexcept
  226. { return vcombine_f32(vget_low_f32(b), vget_high_f32(a)); }
  227. /*
  228. * Generic GCC vector macros
  229. */
  230. #elif defined(__GNUC__)
  231. using v4sf [[gnu::vector_size(16), gnu::aligned(16)]] = float;
  232. constexpr uint SimdSize{4};
  233. force_inline constexpr v4sf vzero() noexcept { return v4sf{0.0f, 0.0f, 0.0f, 0.0f}; }
  234. force_inline constexpr v4sf vmul(v4sf a, v4sf b) noexcept { return a * b; }
  235. force_inline constexpr v4sf vadd(v4sf a, v4sf b) noexcept { return a + b; }
  236. force_inline constexpr v4sf vmadd(v4sf a, v4sf b, v4sf c) noexcept { return a*b + c; }
  237. force_inline constexpr v4sf vsub(v4sf a, v4sf b) noexcept { return a - b; }
  238. force_inline constexpr v4sf ld_ps1(float a) noexcept { return v4sf{a, a, a, a}; }
  239. force_inline constexpr v4sf vset4(float a, float b, float c, float d) noexcept
  240. { return v4sf{a, b, c, d}; }
  241. force_inline constexpr v4sf vinsert0(v4sf v, float a) noexcept
  242. { return v4sf{a, v[1], v[2], v[3]}; }
  243. force_inline float vextract0(v4sf v) noexcept
  244. { return v[0]; }
  245. force_inline v4sf unpacklo(v4sf a, v4sf b) noexcept
  246. { return v4sf{a[0], b[0], a[1], b[1]}; }
  247. force_inline v4sf unpackhi(v4sf a, v4sf b) noexcept
  248. { return v4sf{a[2], b[2], a[3], b[3]}; }
  249. force_inline void interleave2(v4sf in1, v4sf in2, v4sf &out1, v4sf &out2) noexcept
  250. {
  251. out1 = unpacklo(in1, in2);
  252. out2 = unpackhi(in1, in2);
  253. }
  254. force_inline void uninterleave2(v4sf in1, v4sf in2, v4sf &out1, v4sf &out2) noexcept
  255. {
  256. out1 = v4sf{in1[0], in1[2], in2[0], in2[2]};
  257. out2 = v4sf{in1[1], in1[3], in2[1], in2[3]};
  258. }
  259. force_inline void vtranspose4(v4sf &x0, v4sf &x1, v4sf &x2, v4sf &x3) noexcept
  260. {
  261. v4sf tmp0{unpacklo(x0, x1)};
  262. v4sf tmp2{unpacklo(x2, x3)};
  263. v4sf tmp1{unpackhi(x0, x1)};
  264. v4sf tmp3{unpackhi(x2, x3)};
  265. x0 = v4sf{tmp0[0], tmp0[1], tmp2[0], tmp2[1]};
  266. x1 = v4sf{tmp0[2], tmp0[3], tmp2[2], tmp2[3]};
  267. x2 = v4sf{tmp1[0], tmp1[1], tmp3[0], tmp3[1]};
  268. x3 = v4sf{tmp1[2], tmp1[3], tmp3[2], tmp3[3]};
  269. }
  270. force_inline v4sf vswaphl(v4sf a, v4sf b) noexcept
  271. { return v4sf{b[0], b[1], a[2], a[3]}; }
  272. #else
#warning "building with SIMD disabled!"
  274. #define PFFFT_SIMD_DISABLE // fallback to scalar code
  275. #endif
  276. #endif /* PFFFT_SIMD_DISABLE */
// Fallback mode for when SIMD is not available: use scalar code instead.
  278. #ifdef PFFFT_SIMD_DISABLE
  279. using v4sf = float;
  280. constexpr uint SimdSize{1};
  281. force_inline constexpr v4sf vmul(v4sf a, v4sf b) noexcept { return a * b; }
  282. force_inline constexpr v4sf vadd(v4sf a, v4sf b) noexcept { return a + b; }
  283. force_inline constexpr v4sf vmadd(v4sf a, v4sf b, v4sf c) noexcept { return a*b + c; }
  284. force_inline constexpr v4sf vsub(v4sf a, v4sf b) noexcept { return a - b; }
  285. force_inline constexpr v4sf ld_ps1(float a) noexcept { return a; }
  286. #else
  287. [[maybe_unused]] inline
  288. auto valigned(const float *ptr) noexcept -> bool
  289. {
  290. static constexpr uintptr_t alignmask{SimdSize*sizeof(float) - 1};
  291. return (reinterpret_cast<uintptr_t>(ptr) & alignmask) == 0;
  292. }
  293. #endif
  294. // shortcuts for complex multiplications
  295. force_inline void vcplxmul(v4sf &ar, v4sf &ai, v4sf br, v4sf bi) noexcept
  296. {
  297. v4sf tmp{vmul(ar, bi)};
  298. ar = vsub(vmul(ar, br), vmul(ai, bi));
  299. ai = vmadd(ai, br, tmp);
  300. }
  301. force_inline void vcplxmulconj(v4sf &ar, v4sf &ai, v4sf br, v4sf bi) noexcept
  302. {
  303. v4sf tmp{vmul(ar, bi)};
  304. ar = vmadd(ai, bi, vmul(ar, br));
  305. ai = vsub(vmul(ai, br), tmp);
  306. }
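/* For reference, with a = ar + i*ai and b = br + i*bi:
 * vcplxmul computes a*b:           ar' = ar*br - ai*bi,  ai' = ai*br + ar*bi
 * vcplxmulconj computes a*conj(b): ar' = ar*br + ai*bi,  ai' = ai*br - ar*bi
 */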
  307. #if !defined(PFFFT_SIMD_DISABLE)
  308. inline void assertv4(const al::span<const float,4> v_f [[maybe_unused]],
  309. const float f0 [[maybe_unused]], const float f1 [[maybe_unused]],
  310. const float f2 [[maybe_unused]], const float f3 [[maybe_unused]])
  311. { assert(v_f[0] == f0 && v_f[1] == f1 && v_f[2] == f2 && v_f[3] == f3); }
  312. template<typename T, T ...N>
  313. constexpr auto make_float_array(std::integer_sequence<T,N...>)
  314. { return std::array{static_cast<float>(N)...}; }
  315. /* detect bugs with the vector support macros */
  316. [[maybe_unused]] void validate_pffft_simd()
  317. {
  318. using float4 = std::array<float,4>;
  319. static constexpr auto f = make_float_array(std::make_index_sequence<16>{});
  320. v4sf a0_v{vset4(f[ 0], f[ 1], f[ 2], f[ 3])};
  321. v4sf a1_v{vset4(f[ 4], f[ 5], f[ 6], f[ 7])};
  322. v4sf a2_v{vset4(f[ 8], f[ 9], f[10], f[11])};
  323. v4sf a3_v{vset4(f[12], f[13], f[14], f[15])};
  324. v4sf u_v{};
  325. auto t_v = vzero();
  326. auto t_f = al::bit_cast<float4>(t_v);
  327. printf("VZERO=[%2g %2g %2g %2g]\n", t_f[0], t_f[1], t_f[2], t_f[3]);
  328. assertv4(t_f, 0, 0, 0, 0);
  329. t_v = vadd(a1_v, a2_v);
  330. t_f = al::bit_cast<float4>(t_v);
  331. printf("VADD(4:7,8:11)=[%2g %2g %2g %2g]\n", t_f[0], t_f[1], t_f[2], t_f[3]);
  332. assertv4(t_f, 12, 14, 16, 18);
  333. t_v = vmul(a1_v, a2_v);
  334. t_f = al::bit_cast<float4>(t_v);
  335. printf("VMUL(4:7,8:11)=[%2g %2g %2g %2g]\n", t_f[0], t_f[1], t_f[2], t_f[3]);
  336. assertv4(t_f, 32, 45, 60, 77);
  337. t_v = vmadd(a1_v, a2_v, a0_v);
  338. t_f = al::bit_cast<float4>(t_v);
  339. printf("VMADD(4:7,8:11,0:3)=[%2g %2g %2g %2g]\n", t_f[0], t_f[1], t_f[2], t_f[3]);
  340. assertv4(t_f, 32, 46, 62, 80);
  341. interleave2(a1_v, a2_v, t_v, u_v);
  342. t_f = al::bit_cast<float4>(t_v);
  343. auto u_f = al::bit_cast<float4>(u_v);
  344. printf("INTERLEAVE2(4:7,8:11)=[%2g %2g %2g %2g] [%2g %2g %2g %2g]\n",
  345. t_f[0], t_f[1], t_f[2], t_f[3], u_f[0], u_f[1], u_f[2], u_f[3]);
  346. assertv4(t_f, 4, 8, 5, 9);
  347. assertv4(u_f, 6, 10, 7, 11);
  348. uninterleave2(a1_v, a2_v, t_v, u_v);
  349. t_f = al::bit_cast<float4>(t_v);
  350. u_f = al::bit_cast<float4>(u_v);
  351. printf("UNINTERLEAVE2(4:7,8:11)=[%2g %2g %2g %2g] [%2g %2g %2g %2g]\n",
  352. t_f[0], t_f[1], t_f[2], t_f[3], u_f[0], u_f[1], u_f[2], u_f[3]);
  353. assertv4(t_f, 4, 6, 8, 10);
  354. assertv4(u_f, 5, 7, 9, 11);
  355. t_v = ld_ps1(f[15]);
  356. t_f = al::bit_cast<float4>(t_v);
  357. printf("LD_PS1(15)=[%2g %2g %2g %2g]\n", t_f[0], t_f[1], t_f[2], t_f[3]);
  358. assertv4(t_f, 15, 15, 15, 15);
  359. t_v = vswaphl(a1_v, a2_v);
  360. t_f = al::bit_cast<float4>(t_v);
  361. printf("VSWAPHL(4:7,8:11)=[%2g %2g %2g %2g]\n", t_f[0], t_f[1], t_f[2], t_f[3]);
  362. assertv4(t_f, 8, 9, 6, 7);
  363. vtranspose4(a0_v, a1_v, a2_v, a3_v);
  364. auto a0_f = al::bit_cast<float4>(a0_v);
  365. auto a1_f = al::bit_cast<float4>(a1_v);
  366. auto a2_f = al::bit_cast<float4>(a2_v);
  367. auto a3_f = al::bit_cast<float4>(a3_v);
  368. printf("VTRANSPOSE4(0:3,4:7,8:11,12:15)=[%2g %2g %2g %2g] [%2g %2g %2g %2g] [%2g %2g %2g %2g] [%2g %2g %2g %2g]\n",
  369. a0_f[0], a0_f[1], a0_f[2], a0_f[3], a1_f[0], a1_f[1], a1_f[2], a1_f[3],
  370. a2_f[0], a2_f[1], a2_f[2], a2_f[3], a3_f[0], a3_f[1], a3_f[2], a3_f[3]);
  371. assertv4(a0_f, 0, 4, 8, 12);
  372. assertv4(a1_f, 1, 5, 9, 13);
  373. assertv4(a2_f, 2, 6, 10, 14);
  374. assertv4(a3_f, 3, 7, 11, 15);
  375. }
  376. #endif //!PFFFT_SIMD_DISABLE
  377. /* SSE and co like 16-bytes aligned pointers */
  378. /* with a 64-byte alignment, we are even aligned on L2 cache lines... */
  379. constexpr auto V4sfAlignment = size_t(64);
  380. constexpr auto V4sfAlignVal = std::align_val_t(V4sfAlignment);
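/* These are presumably meant for C++17 aligned allocation of the v4sf work
 * buffers, e.g. something like ::operator new[](count*sizeof(v4sf),
 * V4sfAlignVal), so the storage satisfies both the SIMD and cache-line
 * alignment.
 */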
  381. /* NOLINTBEGIN(cppcoreguidelines-pro-bounds-pointer-arithmetic)
  382. * FIXME: Converting this from raw pointers to spans or something will probably
  383. * need significant work to maintain performance, given non-sequential range-
  384. * checked accesses and lack of 'restrict' to indicate non-aliased memory. At
  385. * least, some tests should be done to check the impact of using range-checked
  386. * spans here before blindly switching.
  387. */
/*
 * passf2 and passb2 have been merged here, fsign = -1 for passf2, +1 for passb2
 */
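/* A note on the FFTPACK conventions shared by the passf*_ps passes below:
 * each pass performs one radix-ip stage of the transform. Roughly, 'l1' is
 * the number of butterfly groups, 'ido' the number of interleaved float slots
 * per radix branch within a group (two per complex value), 'cc' the input
 * (l1 groups of ip consecutive blocks of ido values), 'ch' the output (ip
 * planes of l1*ido values), and 'wa1' this stage's twiddle factors, with
 * wa2 = wa1 + ido and so on for the higher radices.
 */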
  391. NOINLINE void passf2_ps(const size_t ido, const size_t l1, const v4sf *cc, v4sf *RESTRICT ch,
  392. const float *const wa1, const float fsign)
  393. {
  394. const size_t l1ido{l1*ido};
  395. if(ido <= 2)
  396. {
  397. for(size_t k{0};k < l1ido;k += ido, ch += ido, cc += 2*ido)
  398. {
  399. ch[0] = vadd(cc[0], cc[ido+0]);
  400. ch[l1ido] = vsub(cc[0], cc[ido+0]);
  401. ch[1] = vadd(cc[1], cc[ido+1]);
  402. ch[l1ido + 1] = vsub(cc[1], cc[ido+1]);
  403. }
  404. }
  405. else
  406. {
  407. for(size_t k{0};k < l1ido;k += ido, ch += ido, cc += 2*ido)
  408. {
  409. for(size_t i{0};i < ido-1;i += 2)
  410. {
  411. v4sf tr2{vsub(cc[i+0], cc[i+ido+0])};
  412. v4sf ti2{vsub(cc[i+1], cc[i+ido+1])};
  413. v4sf wr{ld_ps1(wa1[i])}, wi{ld_ps1(wa1[i+1]*fsign)};
  414. ch[i] = vadd(cc[i+0], cc[i+ido+0]);
  415. ch[i+1] = vadd(cc[i+1], cc[i+ido+1]);
  416. vcplxmul(tr2, ti2, wr, wi);
  417. ch[i+l1ido] = tr2;
  418. ch[i+l1ido+1] = ti2;
  419. }
  420. }
  421. }
  422. }
/*
 * passf3 and passb3 have been merged here, fsign = -1 for passf3, +1 for passb3
 */
  426. NOINLINE void passf3_ps(const size_t ido, const size_t l1, const v4sf *cc, v4sf *RESTRICT ch,
  427. const float *const wa1, const float fsign)
  428. {
  429. assert(ido > 2);
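/* taur and taui are cos(2*pi/3) = -1/2 and sin(2*pi/3) = sqrt(3)/2, the
 * primitive third root of unity used by the radix-3 butterfly; the imaginary
 * part is flipped via fsign for the forward transform.
 */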
  430. const v4sf taur{ld_ps1(-0.5f)};
  431. const v4sf taui{ld_ps1(0.866025403784439f*fsign)};
  432. const size_t l1ido{l1*ido};
  433. const auto wa2 = wa1 + ido;
  434. for(size_t k{0};k < l1ido;k += ido, cc += 3*ido, ch +=ido)
  435. {
  436. for(size_t i{0};i < ido-1;i += 2)
  437. {
  438. v4sf tr2{vadd(cc[i+ido], cc[i+2*ido])};
  439. v4sf cr2{vmadd(taur, tr2, cc[i])};
  440. ch[i] = vadd(tr2, cc[i]);
  441. v4sf ti2{vadd(cc[i+ido+1], cc[i+2*ido+1])};
  442. v4sf ci2{vmadd(taur, ti2, cc[i+1])};
  443. ch[i+1] = vadd(cc[i+1], ti2);
  444. v4sf cr3{vmul(taui, vsub(cc[i+ido], cc[i+2*ido]))};
  445. v4sf ci3{vmul(taui, vsub(cc[i+ido+1], cc[i+2*ido+1]))};
  446. v4sf dr2{vsub(cr2, ci3)};
  447. v4sf dr3{vadd(cr2, ci3)};
  448. v4sf di2{vadd(ci2, cr3)};
  449. v4sf di3{vsub(ci2, cr3)};
  450. float wr1{wa1[i]}, wi1{fsign*wa1[i+1]}, wr2{wa2[i]}, wi2{fsign*wa2[i+1]};
  451. vcplxmul(dr2, di2, ld_ps1(wr1), ld_ps1(wi1));
  452. ch[i+l1ido] = dr2;
  453. ch[i+l1ido + 1] = di2;
  454. vcplxmul(dr3, di3, ld_ps1(wr2), ld_ps1(wi2));
  455. ch[i+2*l1ido] = dr3;
  456. ch[i+2*l1ido+1] = di3;
  457. }
  458. }
  459. } /* passf3 */
  460. NOINLINE void passf4_ps(const size_t ido, const size_t l1, const v4sf *cc, v4sf *RESTRICT ch,
  461. const float *const wa1, const float fsign)
  462. {
  463. /* fsign == -1 for forward transform and +1 for backward transform */
  464. const v4sf vsign{ld_ps1(fsign)};
  465. const size_t l1ido{l1*ido};
  466. if(ido == 2)
  467. {
  468. for(size_t k{0};k < l1ido;k += ido, ch += ido, cc += 4*ido)
  469. {
  470. v4sf tr1{vsub(cc[0], cc[2*ido + 0])};
  471. v4sf tr2{vadd(cc[0], cc[2*ido + 0])};
  472. v4sf ti1{vsub(cc[1], cc[2*ido + 1])};
  473. v4sf ti2{vadd(cc[1], cc[2*ido + 1])};
  474. v4sf ti4{vmul(vsub(cc[1*ido + 0], cc[3*ido + 0]), vsign)};
  475. v4sf tr4{vmul(vsub(cc[3*ido + 1], cc[1*ido + 1]), vsign)};
  476. v4sf tr3{vadd(cc[ido + 0], cc[3*ido + 0])};
  477. v4sf ti3{vadd(cc[ido + 1], cc[3*ido + 1])};
  478. ch[0*l1ido + 0] = vadd(tr2, tr3);
  479. ch[0*l1ido + 1] = vadd(ti2, ti3);
  480. ch[1*l1ido + 0] = vadd(tr1, tr4);
  481. ch[1*l1ido + 1] = vadd(ti1, ti4);
  482. ch[2*l1ido + 0] = vsub(tr2, tr3);
  483. ch[2*l1ido + 1] = vsub(ti2, ti3);
  484. ch[3*l1ido + 0] = vsub(tr1, tr4);
  485. ch[3*l1ido + 1] = vsub(ti1, ti4);
  486. }
  487. }
  488. else
  489. {
  490. const auto wa2 = wa1 + ido;
  491. const auto wa3 = wa2 + ido;
  492. for(size_t k{0};k < l1ido;k += ido, ch+=ido, cc += 4*ido)
  493. {
  494. for(size_t i{0};i < ido-1;i+=2)
  495. {
  496. v4sf tr1{vsub(cc[i + 0], cc[i + 2*ido + 0])};
  497. v4sf tr2{vadd(cc[i + 0], cc[i + 2*ido + 0])};
  498. v4sf ti1{vsub(cc[i + 1], cc[i + 2*ido + 1])};
  499. v4sf ti2{vadd(cc[i + 1], cc[i + 2*ido + 1])};
  500. v4sf tr4{vmul(vsub(cc[i + 3*ido + 1], cc[i + 1*ido + 1]), vsign)};
  501. v4sf ti4{vmul(vsub(cc[i + 1*ido + 0], cc[i + 3*ido + 0]), vsign)};
  502. v4sf tr3{vadd(cc[i + ido + 0], cc[i + 3*ido + 0])};
  503. v4sf ti3{vadd(cc[i + ido + 1], cc[i + 3*ido + 1])};
  504. ch[i] = vadd(tr2, tr3);
  505. v4sf cr3{vsub(tr2, tr3)};
  506. ch[i + 1] = vadd(ti2, ti3);
  507. v4sf ci3{vsub(ti2, ti3)};
  508. v4sf cr2{vadd(tr1, tr4)};
  509. v4sf cr4{vsub(tr1, tr4)};
  510. v4sf ci2{vadd(ti1, ti4)};
  511. v4sf ci4{vsub(ti1, ti4)};
  512. float wr1{wa1[i]}, wi1{fsign*wa1[i+1]};
  513. vcplxmul(cr2, ci2, ld_ps1(wr1), ld_ps1(wi1));
  514. float wr2{wa2[i]}, wi2{fsign*wa2[i+1]};
  515. ch[i + l1ido] = cr2;
  516. ch[i + l1ido + 1] = ci2;
  517. vcplxmul(cr3, ci3, ld_ps1(wr2), ld_ps1(wi2));
  518. float wr3{wa3[i]}, wi3{fsign*wa3[i+1]};
  519. ch[i + 2*l1ido] = cr3;
  520. ch[i + 2*l1ido + 1] = ci3;
  521. vcplxmul(cr4, ci4, ld_ps1(wr3), ld_ps1(wi3));
  522. ch[i + 3*l1ido] = cr4;
  523. ch[i + 3*l1ido + 1] = ci4;
  524. }
  525. }
  526. }
  527. } /* passf4 */
/*
 * passf5 and passb5 have been merged here, fsign = -1 for passf5, +1 for passb5
 */
  531. NOINLINE void passf5_ps(const size_t ido, const size_t l1, const v4sf *cc, v4sf *RESTRICT ch,
  532. const float *const wa1, const float fsign)
  533. {
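/* The radix-5 butterfly uses the primitive fifth roots of unity:
 * tr11 = cos(2*pi/5), ti11 = sin(2*pi/5), tr12 = cos(4*pi/5),
 * ti12 = sin(4*pi/5), with the imaginary parts flipped via fsign for the
 * forward transform.
 */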
  534. const v4sf tr11{ld_ps1(0.309016994374947f)};
  535. const v4sf tr12{ld_ps1(-0.809016994374947f)};
  536. const v4sf ti11{ld_ps1(0.951056516295154f*fsign)};
  537. const v4sf ti12{ld_ps1(0.587785252292473f*fsign)};
  538. auto cc_ref = [&cc,ido](size_t a_1, size_t a_2) noexcept -> auto&
  539. { return cc[(a_2-1)*ido + a_1 + 1]; };
  540. auto ch_ref = [&ch,ido,l1](size_t a_1, size_t a_3) noexcept -> auto&
  541. { return ch[(a_3-1)*l1*ido + a_1 + 1]; };
  542. assert(ido > 2);
  543. const auto wa2 = wa1 + ido;
  544. const auto wa3 = wa2 + ido;
  545. const auto wa4 = wa3 + ido;
  546. for(size_t k{0};k < l1;++k, cc += 5*ido, ch += ido)
  547. {
  548. for(size_t i{0};i < ido-1;i += 2)
  549. {
  550. v4sf ti5{vsub(cc_ref(i , 2), cc_ref(i , 5))};
  551. v4sf ti2{vadd(cc_ref(i , 2), cc_ref(i , 5))};
  552. v4sf ti4{vsub(cc_ref(i , 3), cc_ref(i , 4))};
  553. v4sf ti3{vadd(cc_ref(i , 3), cc_ref(i , 4))};
  554. v4sf tr5{vsub(cc_ref(i-1, 2), cc_ref(i-1, 5))};
  555. v4sf tr2{vadd(cc_ref(i-1, 2), cc_ref(i-1, 5))};
  556. v4sf tr4{vsub(cc_ref(i-1, 3), cc_ref(i-1, 4))};
  557. v4sf tr3{vadd(cc_ref(i-1, 3), cc_ref(i-1, 4))};
  558. ch_ref(i-1, 1) = vadd(cc_ref(i-1, 1), vadd(tr2, tr3));
  559. ch_ref(i , 1) = vadd(cc_ref(i , 1), vadd(ti2, ti3));
  560. v4sf cr2{vadd(cc_ref(i-1, 1), vmadd(tr11, tr2, vmul(tr12, tr3)))};
  561. v4sf ci2{vadd(cc_ref(i , 1), vmadd(tr11, ti2, vmul(tr12, ti3)))};
  562. v4sf cr3{vadd(cc_ref(i-1, 1), vmadd(tr12, tr2, vmul(tr11, tr3)))};
  563. v4sf ci3{vadd(cc_ref(i , 1), vmadd(tr12, ti2, vmul(tr11, ti3)))};
  564. v4sf cr5{vmadd(ti11, tr5, vmul(ti12, tr4))};
  565. v4sf ci5{vmadd(ti11, ti5, vmul(ti12, ti4))};
  566. v4sf cr4{vsub(vmul(ti12, tr5), vmul(ti11, tr4))};
  567. v4sf ci4{vsub(vmul(ti12, ti5), vmul(ti11, ti4))};
  568. v4sf dr3{vsub(cr3, ci4)};
  569. v4sf dr4{vadd(cr3, ci4)};
  570. v4sf di3{vadd(ci3, cr4)};
  571. v4sf di4{vsub(ci3, cr4)};
  572. v4sf dr5{vadd(cr2, ci5)};
  573. v4sf dr2{vsub(cr2, ci5)};
  574. v4sf di5{vsub(ci2, cr5)};
  575. v4sf di2{vadd(ci2, cr5)};
  576. float wr1{wa1[i]}, wi1{fsign*wa1[i+1]}, wr2{wa2[i]}, wi2{fsign*wa2[i+1]};
  577. float wr3{wa3[i]}, wi3{fsign*wa3[i+1]}, wr4{wa4[i]}, wi4{fsign*wa4[i+1]};
  578. vcplxmul(dr2, di2, ld_ps1(wr1), ld_ps1(wi1));
  579. ch_ref(i - 1, 2) = dr2;
  580. ch_ref(i, 2) = di2;
  581. vcplxmul(dr3, di3, ld_ps1(wr2), ld_ps1(wi2));
  582. ch_ref(i - 1, 3) = dr3;
  583. ch_ref(i, 3) = di3;
  584. vcplxmul(dr4, di4, ld_ps1(wr3), ld_ps1(wi3));
  585. ch_ref(i - 1, 4) = dr4;
  586. ch_ref(i, 4) = di4;
  587. vcplxmul(dr5, di5, ld_ps1(wr4), ld_ps1(wi4));
  588. ch_ref(i - 1, 5) = dr5;
  589. ch_ref(i, 5) = di5;
  590. }
  591. }
  592. }
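/* The radf*_ps/radb*_ps functions below are the forward and backward radix
 * passes for the real-valued transform, ported from FFTPACK's RADFn/RADBn
 * routines. They follow essentially the same l1/ido/wa conventions as the
 * complex passes above, but operate on FFTPACK's packed real-spectrum
 * ordering rather than on interleaved complex values.
 */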
  593. NOINLINE void radf2_ps(const size_t ido, const size_t l1, const v4sf *RESTRICT cc,
  594. v4sf *RESTRICT ch, const float *const wa1)
  595. {
  596. const size_t l1ido{l1*ido};
  597. for(size_t k{0};k < l1ido;k += ido)
  598. {
  599. v4sf a{cc[k]}, b{cc[k + l1ido]};
  600. ch[2*k] = vadd(a, b);
  601. ch[2*(k+ido)-1] = vsub(a, b);
  602. }
  603. if(ido < 2)
  604. return;
  605. if(ido != 2)
  606. {
  607. for(size_t k{0};k < l1ido;k += ido)
  608. {
  609. for(size_t i{2};i < ido;i += 2)
  610. {
  611. v4sf tr2{cc[i - 1 + k + l1ido]}, ti2{cc[i + k + l1ido]};
  612. v4sf br{cc[i - 1 + k]}, bi{cc[i + k]};
  613. vcplxmulconj(tr2, ti2, ld_ps1(wa1[i - 2]), ld_ps1(wa1[i - 1]));
  614. ch[i + 2*k] = vadd(bi, ti2);
  615. ch[2*(k+ido) - i] = vsub(ti2, bi);
  616. ch[i - 1 + 2*k] = vadd(br, tr2);
  617. ch[2*(k+ido) - i -1] = vsub(br, tr2);
  618. }
  619. }
  620. if((ido&1) == 1)
  621. return;
  622. }
  623. const v4sf minus_one{ld_ps1(-1.0f)};
  624. for(size_t k{0};k < l1ido;k += ido)
  625. {
  626. ch[2*k + ido] = vmul(minus_one, cc[ido-1 + k + l1ido]);
  627. ch[2*k + ido-1] = cc[k + ido-1];
  628. }
  629. } /* radf2 */
  630. NOINLINE void radb2_ps(const size_t ido, const size_t l1, const v4sf *cc, v4sf *RESTRICT ch,
  631. const float *const wa1)
  632. {
  633. const size_t l1ido{l1*ido};
  634. for(size_t k{0};k < l1ido;k += ido)
  635. {
  636. v4sf a{cc[2*k]};
  637. v4sf b{cc[2*(k+ido) - 1]};
  638. ch[k] = vadd(a, b);
  639. ch[k + l1ido] = vsub(a, b);
  640. }
  641. if(ido < 2)
  642. return;
  643. if(ido != 2)
  644. {
  645. for(size_t k{0};k < l1ido;k += ido)
  646. {
  647. for(size_t i{2};i < ido;i += 2)
  648. {
  649. v4sf a{cc[i-1 + 2*k]};
  650. v4sf b{cc[2*(k + ido) - i - 1]};
  651. v4sf c{cc[i+0 + 2*k]};
  652. v4sf d{cc[2*(k + ido) - i + 0]};
  653. ch[i-1 + k] = vadd(a, b);
  654. v4sf tr2{vsub(a, b)};
  655. ch[i+0 + k] = vsub(c, d);
  656. v4sf ti2{vadd(c, d)};
  657. vcplxmul(tr2, ti2, ld_ps1(wa1[i - 2]), ld_ps1(wa1[i - 1]));
  658. ch[i-1 + k + l1ido] = tr2;
  659. ch[i+0 + k + l1ido] = ti2;
  660. }
  661. }
  662. if((ido&1) == 1)
  663. return;
  664. }
  665. const v4sf minus_two{ld_ps1(-2.0f)};
  666. for(size_t k{0};k < l1ido;k += ido)
  667. {
  668. v4sf a{cc[2*k + ido-1]};
  669. v4sf b{cc[2*k + ido]};
  670. ch[k + ido-1] = vadd(a,a);
  671. ch[k + ido-1 + l1ido] = vmul(minus_two, b);
  672. }
  673. } /* radb2 */
  674. void radf3_ps(const size_t ido, const size_t l1, const v4sf *RESTRICT cc, v4sf *RESTRICT ch,
  675. const float *const wa1)
  676. {
  677. const v4sf taur{ld_ps1(-0.5f)};
  678. const v4sf taui{ld_ps1(0.866025403784439f)};
  679. for(size_t k{0};k < l1;++k)
  680. {
  681. v4sf cr2{vadd(cc[(k + l1)*ido], cc[(k + 2*l1)*ido])};
  682. ch[ (3*k )*ido] = vadd(cc[k*ido], cr2);
  683. ch[ (3*k + 2)*ido] = vmul(taui, vsub(cc[(k + l1*2)*ido], cc[(k + l1)*ido]));
  684. ch[ido-1 + (3*k + 1)*ido] = vmadd(taur, cr2, cc[k*ido]);
  685. }
  686. if(ido == 1)
  687. return;
  688. const auto wa2 = wa1 + ido;
  689. for(size_t k{0};k < l1;++k)
  690. {
  691. for(size_t i{2};i < ido;i += 2)
  692. {
  693. const size_t ic{ido - i};
  694. v4sf wr1{ld_ps1(wa1[i - 2])};
  695. v4sf wi1{ld_ps1(wa1[i - 1])};
  696. v4sf dr2{cc[i - 1 + (k + l1)*ido]};
  697. v4sf di2{cc[i + (k + l1)*ido]};
  698. vcplxmulconj(dr2, di2, wr1, wi1);
  699. v4sf wr2{ld_ps1(wa2[i - 2])};
  700. v4sf wi2{ld_ps1(wa2[i - 1])};
  701. v4sf dr3{cc[i - 1 + (k + l1*2)*ido]};
  702. v4sf di3{cc[i + (k + l1*2)*ido]};
  703. vcplxmulconj(dr3, di3, wr2, wi2);
  704. v4sf cr2{vadd(dr2, dr3)};
  705. v4sf ci2{vadd(di2, di3)};
  706. ch[i - 1 + 3*k*ido] = vadd(cc[i - 1 + k*ido], cr2);
  707. ch[i + 3*k*ido] = vadd(cc[i + k*ido], ci2);
  708. v4sf tr2{vmadd(taur, cr2, cc[i - 1 + k*ido])};
  709. v4sf ti2{vmadd(taur, ci2, cc[i + k*ido])};
  710. v4sf tr3{vmul(taui, vsub(di2, di3))};
  711. v4sf ti3{vmul(taui, vsub(dr3, dr2))};
  712. ch[i - 1 + (3*k + 2)*ido] = vadd(tr2, tr3);
  713. ch[ic - 1 + (3*k + 1)*ido] = vsub(tr2, tr3);
  714. ch[i + (3*k + 2)*ido] = vadd(ti2, ti3);
  715. ch[ic + (3*k + 1)*ido] = vsub(ti3, ti2);
  716. }
  717. }
  718. } /* radf3 */
  719. void radb3_ps(const size_t ido, const size_t l1, const v4sf *RESTRICT cc, v4sf *RESTRICT ch,
  720. const float *const wa1)
  721. {
  722. static constexpr float taur{-0.5f};
  723. static constexpr float taui{0.866025403784439f};
  724. static constexpr float taui_2{taui*2.0f};
  725. const v4sf vtaur{ld_ps1(taur)};
  726. const v4sf vtaui_2{ld_ps1(taui_2)};
  727. for(size_t k{0};k < l1;++k)
  728. {
  729. v4sf tr2 = cc[ido-1 + (3*k + 1)*ido];
  730. tr2 = vadd(tr2,tr2);
  731. v4sf cr2 = vmadd(vtaur, tr2, cc[3*k*ido]);
  732. ch[k*ido] = vadd(cc[3*k*ido], tr2);
  733. v4sf ci3 = vmul(vtaui_2, cc[(3*k + 2)*ido]);
  734. ch[(k + l1)*ido] = vsub(cr2, ci3);
  735. ch[(k + 2*l1)*ido] = vadd(cr2, ci3);
  736. }
  737. if(ido == 1)
  738. return;
  739. const auto wa2 = wa1 + ido;
  740. const v4sf vtaui{ld_ps1(taui)};
  741. for(size_t k{0};k < l1;++k)
  742. {
  743. for(size_t i{2};i < ido;i += 2)
  744. {
  745. const size_t ic{ido - i};
  746. v4sf tr2{vadd(cc[i - 1 + (3*k + 2)*ido], cc[ic - 1 + (3*k + 1)*ido])};
  747. v4sf cr2{vmadd(vtaur, tr2, cc[i - 1 + 3*k*ido])};
  748. ch[i - 1 + k*ido] = vadd(cc[i - 1 + 3*k*ido], tr2);
  749. v4sf ti2{vsub(cc[i + (3*k + 2)*ido], cc[ic + (3*k + 1)*ido])};
  750. v4sf ci2{vmadd(vtaur, ti2, cc[i + 3*k*ido])};
  751. ch[i + k*ido] = vadd(cc[i + 3*k*ido], ti2);
  752. v4sf cr3{vmul(vtaui, vsub(cc[i - 1 + (3*k + 2)*ido], cc[ic - 1 + (3*k + 1)*ido]))};
  753. v4sf ci3{vmul(vtaui, vadd(cc[i + (3*k + 2)*ido], cc[ic + (3*k + 1)*ido]))};
  754. v4sf dr2{vsub(cr2, ci3)};
  755. v4sf dr3{vadd(cr2, ci3)};
  756. v4sf di2{vadd(ci2, cr3)};
  757. v4sf di3{vsub(ci2, cr3)};
  758. vcplxmul(dr2, di2, ld_ps1(wa1[i-2]), ld_ps1(wa1[i-1]));
  759. ch[i - 1 + (k + l1)*ido] = dr2;
  760. ch[i + (k + l1)*ido] = di2;
  761. vcplxmul(dr3, di3, ld_ps1(wa2[i-2]), ld_ps1(wa2[i-1]));
  762. ch[i - 1 + (k + 2*l1)*ido] = dr3;
  763. ch[i + (k + 2*l1)*ido] = di3;
  764. }
  765. }
  766. } /* radb3 */
  767. NOINLINE void radf4_ps(const size_t ido, const size_t l1, const v4sf *RESTRICT cc,
  768. v4sf *RESTRICT ch, const float *const wa1)
  769. {
  770. const size_t l1ido{l1*ido};
  771. {
  772. const v4sf *RESTRICT cc_{cc}, *RESTRICT cc_end{cc + l1ido};
  773. v4sf *RESTRICT ch_{ch};
  774. while(cc != cc_end)
  775. {
// This loop represents between 25% and 40% of the total radf4_ps cost!
  777. v4sf a0{cc[0]}, a1{cc[l1ido]};
  778. v4sf a2{cc[2*l1ido]}, a3{cc[3*l1ido]};
  779. v4sf tr1{vadd(a1, a3)};
  780. v4sf tr2{vadd(a0, a2)};
  781. ch[2*ido-1] = vsub(a0, a2);
  782. ch[2*ido ] = vsub(a3, a1);
  783. ch[0 ] = vadd(tr1, tr2);
  784. ch[4*ido-1] = vsub(tr2, tr1);
  785. cc += ido; ch += 4*ido;
  786. }
  787. cc = cc_;
  788. ch = ch_;
  789. }
  790. if(ido < 2)
  791. return;
  792. if(ido != 2)
  793. {
  794. const auto wa2 = wa1 + ido;
  795. const auto wa3 = wa2 + ido;
  796. for(size_t k{0};k < l1ido;k += ido)
  797. {
  798. const v4sf *RESTRICT pc{cc + 1 + k};
  799. for(size_t i{2};i < ido;i += 2, pc += 2)
  800. {
  801. const size_t ic{ido - i};
  802. v4sf cr2{pc[1*l1ido+0]};
  803. v4sf ci2{pc[1*l1ido+1]};
  804. v4sf wr{ld_ps1(wa1[i - 2])};
  805. v4sf wi{ld_ps1(wa1[i - 1])};
  806. vcplxmulconj(cr2,ci2,wr,wi);
  807. v4sf cr3{pc[2*l1ido+0]};
  808. v4sf ci3{pc[2*l1ido+1]};
  809. wr = ld_ps1(wa2[i-2]);
  810. wi = ld_ps1(wa2[i-1]);
  811. vcplxmulconj(cr3, ci3, wr, wi);
  812. v4sf cr4{pc[3*l1ido]};
  813. v4sf ci4{pc[3*l1ido+1]};
  814. wr = ld_ps1(wa3[i-2]);
  815. wi = ld_ps1(wa3[i-1]);
  816. vcplxmulconj(cr4, ci4, wr, wi);
  817. /* at this point, on SSE, five of "cr2 cr3 cr4 ci2 ci3 ci4" should be loaded in registers */
  818. v4sf tr1{vadd(cr2,cr4)};
  819. v4sf tr4{vsub(cr4,cr2)};
  820. v4sf tr2{vadd(pc[0],cr3)};
  821. v4sf tr3{vsub(pc[0],cr3)};
  822. ch[i - 1 + 4*k ] = vadd(tr2,tr1);
  823. ch[ic - 1 + 4*k + 3*ido] = vsub(tr2,tr1); // at this point tr1 and tr2 can be disposed
  824. v4sf ti1{vadd(ci2,ci4)};
  825. v4sf ti4{vsub(ci2,ci4)};
  826. ch[i - 1 + 4*k + 2*ido] = vadd(tr3,ti4);
  827. ch[ic - 1 + 4*k + 1*ido] = vsub(tr3,ti4); // dispose tr3, ti4
  828. v4sf ti2{vadd(pc[1],ci3)};
  829. v4sf ti3{vsub(pc[1],ci3)};
  830. ch[i + 4*k ] = vadd(ti1, ti2);
  831. ch[ic + 4*k + 3*ido] = vsub(ti1, ti2);
  832. ch[i + 4*k + 2*ido] = vadd(tr4, ti3);
  833. ch[ic + 4*k + 1*ido] = vsub(tr4, ti3);
  834. }
  835. }
  836. if((ido&1) == 1)
  837. return;
  838. }
  839. const v4sf minus_hsqt2{ld_ps1(al::numbers::sqrt2_v<float> * -0.5f)};
  840. for(size_t k{0};k < l1ido;k += ido)
  841. {
  842. v4sf a{cc[ido-1 + k + l1ido]}, b{cc[ido-1 + k + 3*l1ido]};
  843. v4sf c{cc[ido-1 + k]}, d{cc[ido-1 + k + 2*l1ido]};
  844. v4sf ti1{vmul(minus_hsqt2, vadd(b, a))};
  845. v4sf tr1{vmul(minus_hsqt2, vsub(b, a))};
  846. ch[ido-1 + 4*k ] = vadd(c, tr1);
  847. ch[ido-1 + 4*k + 2*ido] = vsub(c, tr1);
  848. ch[ 4*k + 1*ido] = vsub(ti1, d);
  849. ch[ 4*k + 3*ido] = vadd(ti1, d);
  850. }
  851. } /* radf4 */
  852. NOINLINE void radb4_ps(const size_t ido, const size_t l1, const v4sf *RESTRICT cc,
  853. v4sf *RESTRICT ch, const float *const wa1)
  854. {
  855. const v4sf two{ld_ps1(2.0f)};
  856. const size_t l1ido{l1*ido};
  857. {
  858. const v4sf *RESTRICT cc_{cc}, *RESTRICT ch_end{ch + l1ido};
  859. v4sf *ch_{ch};
  860. while(ch != ch_end)
  861. {
  862. v4sf a{cc[0]}, b{cc[4*ido-1]};
  863. v4sf c{cc[2*ido]}, d{cc[2*ido-1]};
  864. v4sf tr3{vmul(two,d)};
  865. v4sf tr2{vadd(a,b)};
  866. v4sf tr1{vsub(a,b)};
  867. v4sf tr4{vmul(two,c)};
  868. ch[0*l1ido] = vadd(tr2, tr3);
  869. ch[2*l1ido] = vsub(tr2, tr3);
  870. ch[1*l1ido] = vsub(tr1, tr4);
  871. ch[3*l1ido] = vadd(tr1, tr4);
  872. cc += 4*ido; ch += ido;
  873. }
  874. cc = cc_; ch = ch_;
  875. }
  876. if(ido < 2)
  877. return;
  878. if(ido != 2)
  879. {
  880. const auto wa2 = wa1 + ido;
  881. const auto wa3 = wa2 + ido;
  882. for(size_t k{0};k < l1ido;k += ido)
  883. {
  884. const v4sf *RESTRICT pc{cc - 1 + 4*k};
  885. v4sf *RESTRICT ph{ch + k + 1};
  886. for(size_t i{2};i < ido;i += 2)
  887. {
  888. v4sf tr1{vsub(pc[ i], pc[4*ido - i])};
  889. v4sf tr2{vadd(pc[ i], pc[4*ido - i])};
  890. v4sf ti4{vsub(pc[2*ido + i], pc[2*ido - i])};
  891. v4sf tr3{vadd(pc[2*ido + i], pc[2*ido - i])};
  892. ph[0] = vadd(tr2, tr3);
  893. v4sf cr3{vsub(tr2, tr3)};
  894. v4sf ti3{vsub(pc[2*ido + i + 1], pc[2*ido - i + 1])};
  895. v4sf tr4{vadd(pc[2*ido + i + 1], pc[2*ido - i + 1])};
  896. v4sf cr2{vsub(tr1, tr4)};
  897. v4sf cr4{vadd(tr1, tr4)};
  898. v4sf ti1{vadd(pc[i + 1], pc[4*ido - i + 1])};
  899. v4sf ti2{vsub(pc[i + 1], pc[4*ido - i + 1])};
  900. ph[1] = vadd(ti2, ti3); ph += l1ido;
  901. v4sf ci3{vsub(ti2, ti3)};
  902. v4sf ci2{vadd(ti1, ti4)};
  903. v4sf ci4{vsub(ti1, ti4)};
  904. vcplxmul(cr2, ci2, ld_ps1(wa1[i-2]), ld_ps1(wa1[i-1]));
  905. ph[0] = cr2;
  906. ph[1] = ci2; ph += l1ido;
  907. vcplxmul(cr3, ci3, ld_ps1(wa2[i-2]), ld_ps1(wa2[i-1]));
  908. ph[0] = cr3;
  909. ph[1] = ci3; ph += l1ido;
  910. vcplxmul(cr4, ci4, ld_ps1(wa3[i-2]), ld_ps1(wa3[i-1]));
  911. ph[0] = cr4;
  912. ph[1] = ci4; ph = ph - 3*l1ido + 2;
  913. }
  914. }
  915. if((ido&1) == 1)
  916. return;
  917. }
  918. const v4sf minus_sqrt2{ld_ps1(-1.414213562373095f)};
  919. for(size_t k{0};k < l1ido;k += ido)
  920. {
  921. const size_t i0{4*k + ido};
  922. v4sf c{cc[i0-1]}, d{cc[i0 + 2*ido-1]};
  923. v4sf a{cc[i0+0]}, b{cc[i0 + 2*ido+0]};
  924. v4sf tr1{vsub(c,d)};
  925. v4sf tr2{vadd(c,d)};
  926. v4sf ti1{vadd(b,a)};
  927. v4sf ti2{vsub(b,a)};
  928. ch[ido-1 + k + 0*l1ido] = vadd(tr2,tr2);
  929. ch[ido-1 + k + 1*l1ido] = vmul(minus_sqrt2, vsub(ti1, tr1));
  930. ch[ido-1 + k + 2*l1ido] = vadd(ti2, ti2);
  931. ch[ido-1 + k + 3*l1ido] = vmul(minus_sqrt2, vadd(ti1, tr1));
  932. }
  933. } /* radb4 */
  934. void radf5_ps(const size_t ido, const size_t l1, const v4sf *RESTRICT cc, v4sf *RESTRICT ch,
  935. const float *const wa1)
  936. {
  937. const v4sf tr11{ld_ps1(0.309016994374947f)};
  938. const v4sf ti11{ld_ps1(0.951056516295154f)};
  939. const v4sf tr12{ld_ps1(-0.809016994374947f)};
  940. const v4sf ti12{ld_ps1(0.587785252292473f)};
  941. auto cc_ref = [&cc,l1,ido](size_t a_1, size_t a_2, size_t a_3) noexcept -> auto&
  942. { return cc[(a_3*l1 + a_2)*ido + a_1]; };
  943. auto ch_ref = [&ch,ido](size_t a_1, size_t a_2, size_t a_3) noexcept -> auto&
  944. { return ch[(a_3*5 + a_2)*ido + a_1]; };
  945. /* Parameter adjustments */
  946. ch -= 1 + ido * 6;
  947. cc -= 1 + ido * (1 + l1);
  948. const auto wa2 = wa1 + ido;
  949. const auto wa3 = wa2 + ido;
  950. const auto wa4 = wa3 + ido;
  951. /* Function Body */
  952. for(size_t k{1};k <= l1;++k)
  953. {
  954. v4sf cr2{vadd(cc_ref(1, k, 5), cc_ref(1, k, 2))};
  955. v4sf ci5{vsub(cc_ref(1, k, 5), cc_ref(1, k, 2))};
  956. v4sf cr3{vadd(cc_ref(1, k, 4), cc_ref(1, k, 3))};
  957. v4sf ci4{vsub(cc_ref(1, k, 4), cc_ref(1, k, 3))};
  958. ch_ref(1, 1, k) = vadd(cc_ref(1, k, 1), vadd(cr2, cr3));
  959. ch_ref(ido, 2, k) = vadd(cc_ref(1, k, 1), vmadd(tr11, cr2, vmul(tr12, cr3)));
  960. ch_ref(1, 3, k) = vmadd(ti11, ci5, vmul(ti12, ci4));
  961. ch_ref(ido, 4, k) = vadd(cc_ref(1, k, 1), vmadd(tr12, cr2, vmul(tr11, cr3)));
  962. ch_ref(1, 5, k) = vsub(vmul(ti12, ci5), vmul(ti11, ci4));
  963. //printf("pffft: radf5, k=%d ch_ref=%f, ci4=%f\n", k, ch_ref(1, 5, k), ci4);
  964. }
  965. if(ido == 1)
  966. return;
  967. const size_t idp2{ido + 2};
  968. for(size_t k{1};k <= l1;++k)
  969. {
  970. for(size_t i{3};i <= ido;i += 2)
  971. {
  972. const size_t ic{idp2 - i};
  973. v4sf dr2{ld_ps1(wa1[i-3])};
  974. v4sf di2{ld_ps1(wa1[i-2])};
  975. v4sf dr3{ld_ps1(wa2[i-3])};
  976. v4sf di3{ld_ps1(wa2[i-2])};
  977. v4sf dr4{ld_ps1(wa3[i-3])};
  978. v4sf di4{ld_ps1(wa3[i-2])};
  979. v4sf dr5{ld_ps1(wa4[i-3])};
  980. v4sf di5{ld_ps1(wa4[i-2])};
  981. vcplxmulconj(dr2, di2, cc_ref(i-1, k, 2), cc_ref(i, k, 2));
  982. vcplxmulconj(dr3, di3, cc_ref(i-1, k, 3), cc_ref(i, k, 3));
  983. vcplxmulconj(dr4, di4, cc_ref(i-1, k, 4), cc_ref(i, k, 4));
  984. vcplxmulconj(dr5, di5, cc_ref(i-1, k, 5), cc_ref(i, k, 5));
  985. v4sf cr2{vadd(dr2, dr5)};
  986. v4sf ci5{vsub(dr5, dr2)};
  987. v4sf cr5{vsub(di2, di5)};
  988. v4sf ci2{vadd(di2, di5)};
  989. v4sf cr3{vadd(dr3, dr4)};
  990. v4sf ci4{vsub(dr4, dr3)};
  991. v4sf cr4{vsub(di3, di4)};
  992. v4sf ci3{vadd(di3, di4)};
  993. ch_ref(i - 1, 1, k) = vadd(cc_ref(i - 1, k, 1), vadd(cr2, cr3));
  994. ch_ref(i, 1, k) = vsub(cc_ref(i, k, 1), vadd(ci2, ci3));
  995. v4sf tr2{vadd(cc_ref(i - 1, k, 1), vmadd(tr11, cr2, vmul(tr12, cr3)))};
  996. v4sf ti2{vsub(cc_ref(i, k, 1), vmadd(tr11, ci2, vmul(tr12, ci3)))};
  997. v4sf tr3{vadd(cc_ref(i - 1, k, 1), vmadd(tr12, cr2, vmul(tr11, cr3)))};
  998. v4sf ti3{vsub(cc_ref(i, k, 1), vmadd(tr12, ci2, vmul(tr11, ci3)))};
  999. v4sf tr5{vmadd(ti11, cr5, vmul(ti12, cr4))};
  1000. v4sf ti5{vmadd(ti11, ci5, vmul(ti12, ci4))};
  1001. v4sf tr4{vsub(vmul(ti12, cr5), vmul(ti11, cr4))};
  1002. v4sf ti4{vsub(vmul(ti12, ci5), vmul(ti11, ci4))};
  1003. ch_ref(i - 1, 3, k) = vsub(tr2, tr5);
  1004. ch_ref(ic - 1, 2, k) = vadd(tr2, tr5);
  1005. ch_ref(i , 3, k) = vadd(ti5, ti2);
  1006. ch_ref(ic , 2, k) = vsub(ti5, ti2);
  1007. ch_ref(i - 1, 5, k) = vsub(tr3, tr4);
  1008. ch_ref(ic - 1, 4, k) = vadd(tr3, tr4);
  1009. ch_ref(i , 5, k) = vadd(ti4, ti3);
  1010. ch_ref(ic , 4, k) = vsub(ti4, ti3);
  1011. }
  1012. }
  1013. } /* radf5 */
  1014. void radb5_ps(const size_t ido, const size_t l1, const v4sf *RESTRICT cc, v4sf *RESTRICT ch,
  1015. const float *const wa1)
  1016. {
  1017. const v4sf tr11{ld_ps1(0.309016994374947f)};
  1018. const v4sf ti11{ld_ps1(0.951056516295154f)};
  1019. const v4sf tr12{ld_ps1(-0.809016994374947f)};
  1020. const v4sf ti12{ld_ps1(0.587785252292473f)};
  1021. auto cc_ref = [&cc,ido](size_t a_1, size_t a_2, size_t a_3) noexcept -> auto&
  1022. { return cc[(a_3*5 + a_2)*ido + a_1]; };
  1023. auto ch_ref = [&ch,ido,l1](size_t a_1, size_t a_2, size_t a_3) noexcept -> auto&
  1024. { return ch[(a_3*l1 + a_2)*ido + a_1]; };
  1025. /* Parameter adjustments */
  1026. ch -= 1 + ido*(1 + l1);
  1027. cc -= 1 + ido*6;
  1028. const auto wa2 = wa1 + ido;
  1029. const auto wa3 = wa2 + ido;
  1030. const auto wa4 = wa3 + ido;
  1031. /* Function Body */
  1032. for(size_t k{1};k <= l1;++k)
  1033. {
  1034. v4sf ti5{vadd(cc_ref( 1, 3, k), cc_ref(1, 3, k))};
  1035. v4sf ti4{vadd(cc_ref( 1, 5, k), cc_ref(1, 5, k))};
  1036. v4sf tr2{vadd(cc_ref(ido, 2, k), cc_ref(ido, 2, k))};
  1037. v4sf tr3{vadd(cc_ref(ido, 4, k), cc_ref(ido, 4, k))};
  1038. ch_ref(1, k, 1) = vadd(cc_ref(1, 1, k), vadd(tr2, tr3));
  1039. v4sf cr2{vadd(cc_ref(1, 1, k), vmadd(tr11, tr2, vmul(tr12, tr3)))};
  1040. v4sf cr3{vadd(cc_ref(1, 1, k), vmadd(tr12, tr2, vmul(tr11, tr3)))};
  1041. v4sf ci5{vmadd(ti11, ti5, vmul(ti12, ti4))};
  1042. v4sf ci4{vsub(vmul(ti12, ti5), vmul(ti11, ti4))};
  1043. ch_ref(1, k, 2) = vsub(cr2, ci5);
  1044. ch_ref(1, k, 3) = vsub(cr3, ci4);
  1045. ch_ref(1, k, 4) = vadd(cr3, ci4);
  1046. ch_ref(1, k, 5) = vadd(cr2, ci5);
  1047. }
  1048. if(ido == 1)
  1049. return;
  1050. const size_t idp2{ido + 2};
  1051. for(size_t k{1};k <= l1;++k)
  1052. {
  1053. for(size_t i{3};i <= ido;i += 2)
  1054. {
  1055. const size_t ic{idp2 - i};
  1056. v4sf ti5{vadd(cc_ref(i , 3, k), cc_ref(ic , 2, k))};
  1057. v4sf ti2{vsub(cc_ref(i , 3, k), cc_ref(ic , 2, k))};
  1058. v4sf ti4{vadd(cc_ref(i , 5, k), cc_ref(ic , 4, k))};
  1059. v4sf ti3{vsub(cc_ref(i , 5, k), cc_ref(ic , 4, k))};
  1060. v4sf tr5{vsub(cc_ref(i-1, 3, k), cc_ref(ic-1, 2, k))};
  1061. v4sf tr2{vadd(cc_ref(i-1, 3, k), cc_ref(ic-1, 2, k))};
  1062. v4sf tr4{vsub(cc_ref(i-1, 5, k), cc_ref(ic-1, 4, k))};
  1063. v4sf tr3{vadd(cc_ref(i-1, 5, k), cc_ref(ic-1, 4, k))};
  1064. ch_ref(i - 1, k, 1) = vadd(cc_ref(i-1, 1, k), vadd(tr2, tr3));
  1065. ch_ref(i , k, 1) = vadd(cc_ref(i , 1, k), vadd(ti2, ti3));
  1066. v4sf cr2{vadd(cc_ref(i-1, 1, k), vmadd(tr11, tr2, vmul(tr12, tr3)))};
  1067. v4sf ci2{vadd(cc_ref(i , 1, k), vmadd(tr11, ti2, vmul(tr12, ti3)))};
  1068. v4sf cr3{vadd(cc_ref(i-1, 1, k), vmadd(tr12, tr2, vmul(tr11, tr3)))};
  1069. v4sf ci3{vadd(cc_ref(i , 1, k), vmadd(tr12, ti2, vmul(tr11, ti3)))};
  1070. v4sf cr5{vmadd(ti11, tr5, vmul(ti12, tr4))};
  1071. v4sf ci5{vmadd(ti11, ti5, vmul(ti12, ti4))};
  1072. v4sf cr4{vsub(vmul(ti12, tr5), vmul(ti11, tr4))};
  1073. v4sf ci4{vsub(vmul(ti12, ti5), vmul(ti11, ti4))};
  1074. v4sf dr3{vsub(cr3, ci4)};
  1075. v4sf dr4{vadd(cr3, ci4)};
  1076. v4sf di3{vadd(ci3, cr4)};
  1077. v4sf di4{vsub(ci3, cr4)};
  1078. v4sf dr5{vadd(cr2, ci5)};
  1079. v4sf dr2{vsub(cr2, ci5)};
  1080. v4sf di5{vsub(ci2, cr5)};
  1081. v4sf di2{vadd(ci2, cr5)};
  1082. vcplxmul(dr2, di2, ld_ps1(wa1[i-3]), ld_ps1(wa1[i-2]));
  1083. vcplxmul(dr3, di3, ld_ps1(wa2[i-3]), ld_ps1(wa2[i-2]));
  1084. vcplxmul(dr4, di4, ld_ps1(wa3[i-3]), ld_ps1(wa3[i-2]));
  1085. vcplxmul(dr5, di5, ld_ps1(wa4[i-3]), ld_ps1(wa4[i-2]));
  1086. ch_ref(i-1, k, 2) = dr2; ch_ref(i, k, 2) = di2;
  1087. ch_ref(i-1, k, 3) = dr3; ch_ref(i, k, 3) = di3;
  1088. ch_ref(i-1, k, 4) = dr4; ch_ref(i, k, 4) = di4;
  1089. ch_ref(i-1, k, 5) = dr5; ch_ref(i, k, 5) = di5;
  1090. }
  1091. }
  1092. } /* radb5 */
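/* rfftf1_ps, rfftb1_ps and cfftf1_ps below drive the individual radix passes:
 * each stage reads from 'in' and writes to 'out', ping-ponging between the
 * work1 and work2 buffers, and a pointer to whichever buffer holds the final
 * result is returned. 'ifac' holds the factorization produced by decompose(),
 * and 'wa' the twiddle-factor table built by rffti1_ps/cffti1_ps.
 */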
  1093. NOINLINE v4sf *rfftf1_ps(const size_t n, const v4sf *input_readonly, v4sf *work1, v4sf *work2,
  1094. const float *wa, const al::span<const uint,15> ifac)
  1095. {
  1096. assert(work1 != work2);
  1097. const v4sf *in{input_readonly};
  1098. v4sf *out{in == work2 ? work1 : work2};
  1099. const size_t nf{ifac[1]};
  1100. size_t l2{n};
  1101. size_t iw{n-1};
  1102. size_t k1{1};
  1103. while(true)
  1104. {
  1105. const size_t kh{nf - k1};
  1106. const size_t ip{ifac[kh + 2]};
  1107. const size_t l1{l2 / ip};
  1108. const size_t ido{n / l2};
  1109. iw -= (ip - 1)*ido;
  1110. switch(ip)
  1111. {
  1112. case 5:
  1113. radf5_ps(ido, l1, in, out, &wa[iw]);
  1114. break;
  1115. case 4:
  1116. radf4_ps(ido, l1, in, out, &wa[iw]);
  1117. break;
  1118. case 3:
  1119. radf3_ps(ido, l1, in, out, &wa[iw]);
  1120. break;
  1121. case 2:
  1122. radf2_ps(ido, l1, in, out, &wa[iw]);
  1123. break;
  1124. default:
  1125. assert(0);
  1126. }
  1127. if(++k1 > nf)
  1128. return out;
  1129. l2 = l1;
  1130. if(out == work2)
  1131. {
  1132. out = work1;
  1133. in = work2;
  1134. }
  1135. else
  1136. {
  1137. out = work2;
  1138. in = work1;
  1139. }
  1140. }
  1141. } /* rfftf1 */
  1142. NOINLINE v4sf *rfftb1_ps(const size_t n, const v4sf *input_readonly, v4sf *work1, v4sf *work2,
  1143. const float *wa, const al::span<const uint,15> ifac)
  1144. {
  1145. assert(work1 != work2);
  1146. const v4sf *in{input_readonly};
  1147. v4sf *out{in == work2 ? work1 : work2};
  1148. const size_t nf{ifac[1]};
  1149. size_t l1{1};
  1150. size_t iw{0};
  1151. size_t k1{1};
  1152. while(true)
  1153. {
  1154. const size_t ip{ifac[k1 + 1]};
  1155. const size_t l2{ip*l1};
  1156. const size_t ido{n / l2};
  1157. switch(ip)
  1158. {
  1159. case 5:
  1160. radb5_ps(ido, l1, in, out, &wa[iw]);
  1161. break;
  1162. case 4:
  1163. radb4_ps(ido, l1, in, out, &wa[iw]);
  1164. break;
  1165. case 3:
  1166. radb3_ps(ido, l1, in, out, &wa[iw]);
  1167. break;
  1168. case 2:
  1169. radb2_ps(ido, l1, in, out, &wa[iw]);
  1170. break;
  1171. default:
  1172. assert(0);
  1173. }
  1174. if(++k1 > nf)
  1175. return out;
  1176. l1 = l2;
  1177. iw += (ip - 1)*ido;
  1178. if(out == work2)
  1179. {
  1180. out = work1;
  1181. in = work2;
  1182. }
  1183. else
  1184. {
  1185. out = work2;
  1186. in = work1;
  1187. }
  1188. }
  1189. }
v4sf *cfftf1_ps(const size_t n, const v4sf *input_readonly, v4sf *work1, v4sf *work2,
    const float *wa, const al::span<const uint,15> ifac, const float fsign)
{
    assert(work1 != work2);

    const v4sf *in{input_readonly};
    v4sf *out{in == work2 ? work1 : work2};
    const size_t nf{ifac[1]};

    size_t l1{1}, iw{0};
    size_t k1{2};
    while(true)
    {
        const size_t ip{ifac[k1]};
        const size_t l2{ip*l1};
        const size_t ido{n / l2};
        const size_t idot{ido + ido};
        switch(ip)
        {
        case 5:
            passf5_ps(idot, l1, in, out, &wa[iw], fsign);
            break;
        case 4:
            passf4_ps(idot, l1, in, out, &wa[iw], fsign);
            break;
        case 3:
            passf3_ps(idot, l1, in, out, &wa[iw], fsign);
            break;
        case 2:
            passf2_ps(idot, l1, in, out, &wa[iw], fsign);
            break;
        default:
            assert(0);
        }
        if(++k1 > nf+1)
            return out;

        l1 = l2;
        iw += (ip - 1)*idot;
        if(out == work2)
        {
            out = work1;
            in = work2;
        }
        else
        {
            out = work2;
            in = work1;
        }
    }
}
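
/* Factorize n into the supported radices (tried in ntryh order). ifac[0]
 * receives n, ifac[1] the factor count, and the factors themselves are stored
 * from ifac[2] onward.
 */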
uint decompose(const uint n, const al::span<uint,15> ifac, const al::span<const uint,4> ntryh)
{
    uint nl{n}, nf{0};
    for(const uint ntry : ntryh)
    {
        while(nl != 1)
        {
            const uint nq{nl / ntry};
            const uint nr{nl % ntry};
            if(nr != 0) break;

            ifac[2+nf++] = ntry;
            nl = nq;
            if(ntry == 2 && nf != 1)
            {
                for(size_t i{2};i <= nf;++i)
                {
                    size_t ib{nf - i + 2};
                    ifac[ib + 1] = ifac[ib];
                }
                ifac[2] = 2;
            }
        }
    }
    ifac[0] = n;
    ifac[1] = nf;
    return nf;
}
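
/* Initialize the twiddle factors in wa[] and the factorization in ifac for a
 * real transform of length n (after FFTPACK's rffti1).
 */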
void rffti1_ps(const uint n, float *wa, const al::span<uint,15> ifac)
{
    static constexpr std::array ntryh{4u,2u,3u,5u};

    const uint nf{decompose(n, ifac, ntryh)};
    const double argh{2.0*al::numbers::pi / n};
    size_t is{0};
    size_t nfm1{nf - 1};
    size_t l1{1};
    for(size_t k1{0};k1 < nfm1;++k1)
    {
        const size_t ip{ifac[k1+2]};
        const size_t l2{l1*ip};
        const size_t ido{n / l2};
        const size_t ipm{ip - 1};
        size_t ld{0};
        for(size_t j{0};j < ipm;++j)
        {
            size_t i{is};
            ld += l1;
            const double argld{static_cast<double>(ld)*argh};
            double fi{0.0};
            for(size_t ii{2};ii < ido;ii += 2)
            {
                fi += 1.0;
                wa[i++] = static_cast<float>(std::cos(fi*argld));
                wa[i++] = static_cast<float>(std::sin(fi*argld));
            }
            is += ido;
        }
        l1 = l2;
    }
} /* rffti1 */
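
/* Initialize the twiddle factors in wa[] and the factorization in ifac for a
 * complex transform of length n (after FFTPACK's cffti1).
 */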
void cffti1_ps(const uint n, float *wa, const al::span<uint,15> ifac)
{
    static constexpr std::array ntryh{5u,3u,4u,2u};

    const uint nf{decompose(n, ifac, ntryh)};
    const double argh{2.0*al::numbers::pi / n};
    size_t i{1};
    size_t l1{1};
    for(size_t k1{0};k1 < nf;++k1)
    {
        const size_t ip{ifac[k1+2]};
        const size_t l2{l1*ip};
        const size_t ido{n / l2};
        const size_t idot{ido + ido + 2};
        const size_t ipm{ip - 1};
        size_t ld{0};
        for(size_t j{0};j < ipm;++j)
        {
            size_t i1{i};
            wa[i-1] = 1;
            wa[i] = 0;
            ld += l1;
            const double argld{static_cast<double>(ld)*argh};
            double fi{0.0};
            for(size_t ii{3};ii < idot;ii += 2)
            {
                fi += 1.0;
                wa[++i] = static_cast<float>(std::cos(fi*argld));
                wa[++i] = static_cast<float>(std::sin(fi*argld));
            }
            if(ip > 5)
            {
                wa[i1-1] = wa[i-1];
                wa[i1] = wa[i];
            }
        }
        l1 = l2;
    }
} /* cffti1 */

} // namespace

/* NOLINTNEXTLINE(clang-analyzer-optin.performance.Padding) */
struct PFFFT_Setup {
    uint N{};
    uint Ncvec{}; /* nb of complex simd vectors (N/4 if PFFFT_COMPLEX, N/8 if PFFFT_REAL) */
    std::array<uint,15> ifac{};
    pffft_transform_t transform{};

    float *twiddle{}; /* N/4 elements */
    al::span<v4sf> e; /* N/4*3 elements */

    alignas(V4sfAlignment) std::byte end;
};
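
/* Allocate a PFFFT_Setup in a single over-aligned block that also holds the
 * e twiddles for the SIMD passes and the FFTPACK twiddle table, precompute
 * both, and return nullptr if N cannot be decomposed into factors of 2, 3, 4
 * and 5.
 *
 * Rough usage sketch (assuming PFFFTSetupPtr behaves like a std::unique_ptr
 * and the buffers satisfy valigned()):
 *
 *   PFFFTSetupPtr setup{pffft_new_setup(1024, PFFFT_REAL)};
 *   pffft_transform_ordered(setup.get(), input, output, work, PFFFT_FORWARD);
 */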
PFFFTSetupPtr pffft_new_setup(unsigned int N, pffft_transform_t transform)
{
    assert(transform == PFFFT_REAL || transform == PFFFT_COMPLEX);
    assert(N > 0);

    /* unfortunately, the fft size must be a multiple of 16 for complex FFTs
     * and 32 for real FFTs -- a lot of stuff would need to be rewritten to
     * handle other cases (or maybe just switch to a scalar fft, I don't know..)
     */
    if(transform == PFFFT_REAL)
        assert((N%(2*SimdSize*SimdSize)) == 0);
    else
        assert((N%(SimdSize*SimdSize)) == 0);

    const uint Ncvec{(transform == PFFFT_REAL ? N/2 : N) / SimdSize};

    const size_t storelen{std::max(offsetof(PFFFT_Setup, end) + 2_zu*Ncvec*sizeof(v4sf),
        sizeof(PFFFT_Setup))};
    auto storage = static_cast<gsl::owner<std::byte*>>(::operator new[](storelen, V4sfAlignVal));
    al::span extrastore{&storage[offsetof(PFFFT_Setup, end)], 2_zu*Ncvec*sizeof(v4sf)};

    PFFFTSetupPtr s{::new(storage) PFFFT_Setup{}};
    s->N = N;
    s->transform = transform;
    s->Ncvec = Ncvec;

    const size_t ecount{2_zu*Ncvec*(SimdSize-1)/SimdSize};
    s->e = {std::launder(reinterpret_cast<v4sf*>(extrastore.data())), ecount};
    s->twiddle = std::launder(reinterpret_cast<float*>(&extrastore[ecount*sizeof(v4sf)]));

    if constexpr(SimdSize > 1)
    {
        auto e = std::vector<float>(s->e.size()*SimdSize, 0.0f);
        for(size_t k{0};k < s->Ncvec;++k)
        {
            const size_t i{k / SimdSize};
            const size_t j{k % SimdSize};
            for(size_t m{0};m < SimdSize-1;++m)
            {
                const double A{-2.0*al::numbers::pi*static_cast<double>((m+1)*k) / N};
                e[((i*3 + m)*2 + 0)*SimdSize + j] = static_cast<float>(std::cos(A));
                e[((i*3 + m)*2 + 1)*SimdSize + j] = static_cast<float>(std::sin(A));
            }
        }
        std::memcpy(s->e.data(), e.data(), e.size()*sizeof(float));
    }
    if(transform == PFFFT_REAL)
        rffti1_ps(N/SimdSize, s->twiddle, s->ifac);
    else
        cffti1_ps(N/SimdSize, s->twiddle, s->ifac);

    /* check that N is decomposable with allowed prime factors */
    size_t m{1};
    for(size_t k{0};k < s->ifac[1];++k)
        m *= s->ifac[2+k];
    if(m != N/SimdSize)
        s = nullptr;

    return s;
}

void pffft_destroy_setup(gsl::owner<PFFFT_Setup*> s) noexcept
{
    std::destroy_at(s);
    ::operator delete[](gsl::owner<void*>{s}, V4sfAlignVal);
}

#if !defined(PFFFT_SIMD_DISABLE)

namespace {

/* [0 0 1 2 3 4 5 6 7 8] -> [0 8 7 6 5 4 3 2 1] */
void reversed_copy(const size_t N, const v4sf *in, const int in_stride, v4sf *RESTRICT out)
{
    v4sf g0, g1;
    interleave2(in[0], in[1], g0, g1);
    in += in_stride;

    *--out = vswaphl(g0, g1); // [g0l, g0h], [g1l g1h] -> [g1l, g0h]
    for(size_t k{1};k < N;++k)
    {
        v4sf h0, h1;
        interleave2(in[0], in[1], h0, h1);
        in += in_stride;
        *--out = vswaphl(g1, h0);
        *--out = vswaphl(h0, h1);
        g1 = h1;
    }
    *--out = vswaphl(g1, g0);
}

void unreversed_copy(const size_t N, const v4sf *in, v4sf *RESTRICT out, const int out_stride)
{
    v4sf g0{in[0]}, g1{g0};
    ++in;
    for(size_t k{1};k < N;++k)
    {
        v4sf h0{*in++}; v4sf h1{*in++};
        g1 = vswaphl(g1, h0);
        h0 = vswaphl(h0, h1);
        uninterleave2(h0, g1, out[0], out[1]);
        out += out_stride;
        g1 = h1;
    }

    v4sf h0{*in++}, h1{g0};
    g1 = vswaphl(g1, h0);
    h0 = vswaphl(h0, h1);
    uninterleave2(h0, g1, out[0], out[1]);
}
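
/* Final pass of the complex forward transform: for each 4x4 block, transpose
 * the real and imaginary parts, multiply by the e twiddles, and apply the
 * cross-lane butterfly described by the matrix comment inside the loop.
 */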
void pffft_cplx_finalize(const size_t Ncvec, const v4sf *in, v4sf *RESTRICT out, const v4sf *e)
{
    assert(in != out);

    const size_t dk{Ncvec/SimdSize}; // number of 4x4 matrix blocks
    for(size_t k{0};k < dk;++k)
    {
        v4sf r0{in[8*k+0]}, i0{in[8*k+1]};
        v4sf r1{in[8*k+2]}, i1{in[8*k+3]};
        v4sf r2{in[8*k+4]}, i2{in[8*k+5]};
        v4sf r3{in[8*k+6]}, i3{in[8*k+7]};

        vtranspose4(r0,r1,r2,r3);
        vtranspose4(i0,i1,i2,i3);
        vcplxmul(r1,i1,e[k*6+0],e[k*6+1]);
        vcplxmul(r2,i2,e[k*6+2],e[k*6+3]);
        vcplxmul(r3,i3,e[k*6+4],e[k*6+5]);

        v4sf sr0{vadd(r0,r2)}, dr0{vsub(r0, r2)};
        v4sf sr1{vadd(r1,r3)}, dr1{vsub(r1, r3)};
        v4sf si0{vadd(i0,i2)}, di0{vsub(i0, i2)};
        v4sf si1{vadd(i1,i3)}, di1{vsub(i1, i3)};

        /* transformation for each column is:
         *
         * [1  1  1  1  0  0  0  0]   [r0]
         * [1  0 -1  0  0 -1  0  1]   [r1]
         * [1 -1  1 -1  0  0  0  0]   [r2]
         * [1  0 -1  0  0  1  0 -1]   [r3]
         * [0  0  0  0  1  1  1  1] * [i0]
         * [0  1  0 -1  1  0 -1  0]   [i1]
         * [0  0  0  0  1 -1  1 -1]   [i2]
         * [0 -1  0  1  1  0 -1  0]   [i3]
         */
        r0 = vadd(sr0, sr1); i0 = vadd(si0, si1);
        r1 = vadd(dr0, di1); i1 = vsub(di0, dr1);
        r2 = vsub(sr0, sr1); i2 = vsub(si0, si1);
        r3 = vsub(dr0, di1); i3 = vadd(di0, dr1);

        *out++ = r0; *out++ = i0; *out++ = r1; *out++ = i1;
        *out++ = r2; *out++ = i2; *out++ = r3; *out++ = i3;
    }
}
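
/* Inverse of pffft_cplx_finalize: apply the inverse cross-lane butterfly,
 * multiply by the conjugated e twiddles, and transpose back before the
 * backward FFTPACK passes.
 */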
void pffft_cplx_preprocess(const size_t Ncvec, const v4sf *in, v4sf *RESTRICT out, const v4sf *e)
{
    assert(in != out);

    const size_t dk{Ncvec/SimdSize}; // number of 4x4 matrix blocks
    for(size_t k{0};k < dk;++k)
    {
        v4sf r0{in[8*k+0]}, i0{in[8*k+1]};
        v4sf r1{in[8*k+2]}, i1{in[8*k+3]};
        v4sf r2{in[8*k+4]}, i2{in[8*k+5]};
        v4sf r3{in[8*k+6]}, i3{in[8*k+7]};

        v4sf sr0{vadd(r0,r2)}, dr0{vsub(r0, r2)};
        v4sf sr1{vadd(r1,r3)}, dr1{vsub(r1, r3)};
        v4sf si0{vadd(i0,i2)}, di0{vsub(i0, i2)};
        v4sf si1{vadd(i1,i3)}, di1{vsub(i1, i3)};

        r0 = vadd(sr0, sr1); i0 = vadd(si0, si1);
        r1 = vsub(dr0, di1); i1 = vadd(di0, dr1);
        r2 = vsub(sr0, sr1); i2 = vsub(si0, si1);
        r3 = vadd(dr0, di1); i3 = vsub(di0, dr1);

        vcplxmulconj(r1,i1,e[k*6+0],e[k*6+1]);
        vcplxmulconj(r2,i2,e[k*6+2],e[k*6+3]);
        vcplxmulconj(r3,i3,e[k*6+4],e[k*6+5]);

        vtranspose4(r0,r1,r2,r3);
        vtranspose4(i0,i1,i2,i3);

        *out++ = r0; *out++ = i0; *out++ = r1; *out++ = i1;
        *out++ = r2; *out++ = i2; *out++ = r3; *out++ = i3;
    }
}
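
/* Process one 4x4 block for the real forward transform's finalize step. in0
 * and in1 supply the r0/i0 column separately so the first block can
 * substitute zeros for it.
 */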
force_inline void pffft_real_finalize_4x4(const v4sf *in0, const v4sf *in1, const v4sf *in,
    const v4sf *e, v4sf *RESTRICT out)
{
    v4sf r0{*in0}, i0{*in1};
    v4sf r1{*in++}; v4sf i1{*in++};
    v4sf r2{*in++}; v4sf i2{*in++};
    v4sf r3{*in++}; v4sf i3{*in++};
    vtranspose4(r0,r1,r2,r3);
    vtranspose4(i0,i1,i2,i3);

    /* transformation for each column is:
     *
     * [1  1  1  1  0  0  0  0]   [r0]
     * [1  0 -1  0  0 -1  0  1]   [r1]
     * [1  0 -1  0  0  1  0 -1]   [r2]
     * [1 -1  1 -1  0  0  0  0]   [r3]
     * [0  0  0  0  1  1  1  1] * [i0]
     * [0 -1  0  1 -1  0  1  0]   [i1]
     * [0 -1  0  1  1  0 -1  0]   [i2]
     * [0  0  0  0 -1  1 -1  1]   [i3]
     */

    //cerr << "matrix initial, before e , REAL:\n 1: " << r0 << "\n 1: " << r1 << "\n 1: " << r2 << "\n 1: " << r3 << "\n";
    //cerr << "matrix initial, before e, IMAG :\n 1: " << i0 << "\n 1: " << i1 << "\n 1: " << i2 << "\n 1: " << i3 << "\n";

    vcplxmul(r1,i1,e[0],e[1]);
    vcplxmul(r2,i2,e[2],e[3]);
    vcplxmul(r3,i3,e[4],e[5]);

    //cerr << "matrix initial, real part:\n 1: " << r0 << "\n 1: " << r1 << "\n 1: " << r2 << "\n 1: " << r3 << "\n";
    //cerr << "matrix initial, imag part:\n 1: " << i0 << "\n 1: " << i1 << "\n 1: " << i2 << "\n 1: " << i3 << "\n";

    v4sf sr0{vadd(r0,r2)}, dr0{vsub(r0,r2)};
    v4sf sr1{vadd(r1,r3)}, dr1{vsub(r3,r1)};
    v4sf si0{vadd(i0,i2)}, di0{vsub(i0,i2)};
    v4sf si1{vadd(i1,i3)}, di1{vsub(i3,i1)};

    r0 = vadd(sr0, sr1);
    r3 = vsub(sr0, sr1);
    i0 = vadd(si0, si1);
    i3 = vsub(si1, si0);
    r1 = vadd(dr0, di1);
    r2 = vsub(dr0, di1);
    i1 = vsub(dr1, di0);
    i2 = vadd(dr1, di0);

    *out++ = r0;
    *out++ = i0;
    *out++ = r1;
    *out++ = i1;
    *out++ = r2;
    *out++ = i2;
    *out++ = r3;
    *out++ = i3;
}
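
/* Reorder and combine the FFTPACK real-transform output into pffft's internal
 * z-domain layout. The X(0), X(N/4), X(N/2) and X(3N/4) bins are rebuilt
 * separately from the first and last input vectors, per the matrix comment
 * below.
 */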
NOINLINE void pffft_real_finalize(const size_t Ncvec, const v4sf *in, v4sf *RESTRICT out,
    const v4sf *e)
{
    static constexpr float s{al::numbers::sqrt2_v<float>/2.0f};

    assert(in != out);
    const size_t dk{Ncvec/SimdSize}; // number of 4x4 matrix blocks
    /* fftpack order is f0r f1r f1i f2r f2i ... f(n-1)r f(n-1)i f(n)r */

    const v4sf zero{vzero()};
    const auto cr = al::bit_cast<std::array<float,SimdSize>>(in[0]);
    const auto ci = al::bit_cast<std::array<float,SimdSize>>(in[Ncvec*2-1]);
    pffft_real_finalize_4x4(&zero, &zero, in+1, e, out);

    /* [cr0 cr1 cr2 cr3 ci0 ci1 ci2 ci3]
     *
     * [Xr(1)   ] [1   1   1   1   0   0   0   0]
     * [Xr(N/4) ] [0   0   0   0   1   s   0  -s]
     * [Xr(N/2) ] [1   0  -1   0   0   0   0   0]
     * [Xr(3N/4)] [0   0   0   0   1  -s   0   s]
     * [Xi(1)   ] [1  -1   1  -1   0   0   0   0]
     * [Xi(N/4) ] [0   0   0   0   0  -s  -1  -s]
     * [Xi(N/2) ] [0  -1   0   1   0   0   0   0]
     * [Xi(3N/4)] [0   0   0   0   0  -s   1  -s]
     */
    const float xr0{(cr[0]+cr[2]) + (cr[1]+cr[3])}; out[0] = vinsert0(out[0], xr0);
    const float xi0{(cr[0]+cr[2]) - (cr[1]+cr[3])}; out[1] = vinsert0(out[1], xi0);
    const float xr2{(cr[0]-cr[2])};                 out[4] = vinsert0(out[4], xr2);
    const float xi2{(cr[3]-cr[1])};                 out[5] = vinsert0(out[5], xi2);
    const float xr1{ ci[0] + s*(ci[1]-ci[3])};      out[2] = vinsert0(out[2], xr1);
    const float xi1{-ci[2] - s*(ci[1]+ci[3])};      out[3] = vinsert0(out[3], xi1);
    const float xr3{ ci[0] - s*(ci[1]-ci[3])};      out[6] = vinsert0(out[6], xr3);
    const float xi3{ ci[2] - s*(ci[1]+ci[3])};      out[7] = vinsert0(out[7], xi3);

    for(size_t k{1};k < dk;++k)
        pffft_real_finalize_4x4(&in[8*k-1], &in[8*k+0], in + 8*k+1, e + k*6, out + k*8);
}
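
/* Process one 4x4 block for the real backward transform's preprocess step.
 * When 'first' is set the r0/i0 column is skipped; it is rebuilt separately
 * in pffft_real_preprocess.
 */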
force_inline void pffft_real_preprocess_4x4(const v4sf *in, const v4sf *e, v4sf *RESTRICT out,
    const bool first)
{
    v4sf r0{in[0]}, i0{in[1]}, r1{in[2]}, i1{in[3]};
    v4sf r2{in[4]}, i2{in[5]}, r3{in[6]}, i3{in[7]};

    /* transformation for each column is:
     *
     * [1  1  1  1  0  0  0  0]   [r0]
     * [1  0  0 -1  0 -1 -1  0]   [r1]
     * [1 -1 -1  1  0  0  0  0]   [r2]
     * [1  0  0 -1  0  1  1  0]   [r3]
     * [0  0  0  0  1 -1  1 -1] * [i0]
     * [0 -1  1  0  1  0  0  1]   [i1]
     * [0  0  0  0  1  1 -1 -1]   [i2]
     * [0  1 -1  0  1  0  0  1]   [i3]
     */
    v4sf sr0{vadd(r0,r3)}, dr0{vsub(r0,r3)};
    v4sf sr1{vadd(r1,r2)}, dr1{vsub(r1,r2)};
    v4sf si0{vadd(i0,i3)}, di0{vsub(i0,i3)};
    v4sf si1{vadd(i1,i2)}, di1{vsub(i1,i2)};

    r0 = vadd(sr0, sr1);
    r2 = vsub(sr0, sr1);
    r1 = vsub(dr0, si1);
    r3 = vadd(dr0, si1);
    i0 = vsub(di0, di1);
    i2 = vadd(di0, di1);
    i1 = vsub(si0, dr1);
    i3 = vadd(si0, dr1);

    vcplxmulconj(r1,i1,e[0],e[1]);
    vcplxmulconj(r2,i2,e[2],e[3]);
    vcplxmulconj(r3,i3,e[4],e[5]);

    vtranspose4(r0,r1,r2,r3);
    vtranspose4(i0,i1,i2,i3);

    if(!first)
    {
        *out++ = r0;
        *out++ = i0;
    }
    *out++ = r1;
    *out++ = i1;
    *out++ = r2;
    *out++ = i2;
    *out++ = r3;
    *out++ = i3;
}
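
/* Inverse of pffft_real_finalize: convert pffft's internal z-domain layout
 * back into FFTPACK order ahead of the backward real passes, rebuilding the
 * special first and last output vectors from the per-lane X values per the
 * matrix comment below.
 */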
NOINLINE void pffft_real_preprocess(const size_t Ncvec, const v4sf *in, v4sf *RESTRICT out,
    const v4sf *e)
{
    static constexpr float sqrt2{al::numbers::sqrt2_v<float>};

    assert(in != out);
    const size_t dk{Ncvec/SimdSize}; // number of 4x4 matrix blocks
    /* fftpack order is f0r f1r f1i f2r f2i ... f(n-1)r f(n-1)i f(n)r */

    std::array<float,SimdSize> Xr{}, Xi{};
    for(size_t k{0};k < SimdSize;++k)
    {
        Xr[k] = vextract0(in[2*k]);
        Xi[k] = vextract0(in[2*k + 1]);
    }

    pffft_real_preprocess_4x4(in, e, out+1, true); // will write only 6 values

    /* [Xr0 Xr1 Xr2 Xr3 Xi0 Xi1 Xi2 Xi3]
     *
     * [cr0] [1   0   2   0   1   0   0   0]
     * [cr1] [1   0   0   0  -1   0  -2   0]
     * [cr2] [1   0  -2   0   1   0   0   0]
     * [cr3] [1   0   0   0  -1   0   2   0]
     * [ci0] [0   2   0   2   0   0   0   0]
     * [ci1] [0   s   0  -s   0  -s   0  -s]
     * [ci2] [0   0   0   0   0  -2   0   2]
     * [ci3] [0  -s   0   s   0  -s   0  -s]
     */
    for(size_t k{1};k < dk;++k)
        pffft_real_preprocess_4x4(in+8*k, e + k*6, out-1+k*8, false);

    const float cr0{(Xr[0]+Xi[0]) + 2*Xr[2]};
    const float cr1{(Xr[0]-Xi[0]) - 2*Xi[2]};
    const float cr2{(Xr[0]+Xi[0]) - 2*Xr[2]};
    const float cr3{(Xr[0]-Xi[0]) + 2*Xi[2]};
    out[0] = vset4(cr0, cr1, cr2, cr3);

    const float ci0{ 2*(Xr[1]+Xr[3])};
    const float ci1{ sqrt2*(Xr[1]-Xr[3]) - sqrt2*(Xi[1]+Xi[3])};
    const float ci2{ 2*(Xi[3]-Xi[1])};
    const float ci3{-sqrt2*(Xr[1]-Xr[3]) - sqrt2*(Xi[1]+Xi[3])};
    out[2*Ncvec-1] = vset4(ci0, ci1, ci2, ci3);
}
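
/* Common driver for the SIMD transforms: runs the FFTPACK passes plus the
 * finalize/preprocess steps, ping-ponging between the output and scratch
 * buffers, and reorders the result when 'ordered' is requested.
 */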
void pffft_transform_internal(const PFFFT_Setup *setup, const v4sf *vinput, v4sf *voutput,
    v4sf *scratch, const pffft_direction_t direction, const bool ordered)
{
    assert(scratch != nullptr);
    assert(voutput != scratch);

    const size_t Ncvec{setup->Ncvec};
    const bool nf_odd{(setup->ifac[1]&1) != 0};

    std::array buff{voutput, scratch};
    bool ib{nf_odd != ordered};
    if(direction == PFFFT_FORWARD)
    {
        /* Swap the initial work buffer for forward FFTs, which helps avoid an
         * extra copy for output.
         */
        ib = !ib;
        if(setup->transform == PFFFT_REAL)
        {
            ib = (rfftf1_ps(Ncvec*2, vinput, buff[ib], buff[!ib], setup->twiddle, setup->ifac) == buff[1]);
            pffft_real_finalize(Ncvec, buff[ib], buff[!ib], setup->e.data());
        }
        else
        {
            v4sf *tmp{buff[ib]};
            for(size_t k=0; k < Ncvec; ++k)
                uninterleave2(vinput[k*2], vinput[k*2+1], tmp[k*2], tmp[k*2+1]);

            ib = (cfftf1_ps(Ncvec, buff[ib], buff[!ib], buff[ib], setup->twiddle, setup->ifac, -1.0f) == buff[1]);
            pffft_cplx_finalize(Ncvec, buff[ib], buff[!ib], setup->e.data());
        }
        if(ordered)
            pffft_zreorder(setup, reinterpret_cast<float*>(buff[!ib]),
                reinterpret_cast<float*>(buff[ib]), PFFFT_FORWARD);
        else
            ib = !ib;
    }
    else
    {
        if(vinput == buff[ib])
            ib = !ib; // may happen when finput == foutput

        if(ordered)
        {
            pffft_zreorder(setup, reinterpret_cast<const float*>(vinput),
                reinterpret_cast<float*>(buff[ib]), PFFFT_BACKWARD);
            vinput = buff[ib];
            ib = !ib;
        }
        if(setup->transform == PFFFT_REAL)
        {
            pffft_real_preprocess(Ncvec, vinput, buff[ib], setup->e.data());
            ib = (rfftb1_ps(Ncvec*2, buff[ib], buff[0], buff[1], setup->twiddle, setup->ifac) == buff[1]);
        }
        else
        {
            pffft_cplx_preprocess(Ncvec, vinput, buff[ib], setup->e.data());
            ib = (cfftf1_ps(Ncvec, buff[ib], buff[0], buff[1], setup->twiddle, setup->ifac, +1.0f) == buff[1]);
            for(size_t k{0};k < Ncvec;++k)
                interleave2(buff[ib][k*2], buff[ib][k*2+1], buff[ib][k*2], buff[ib][k*2+1]);
        }
    }

    if(buff[ib] != voutput)
    {
        /* extra copy required -- this situation should only happen when finput == foutput */
        assert(vinput==voutput);
        for(size_t k{0};k < Ncvec;++k)
        {
            v4sf a{buff[ib][2*k]}, b{buff[ib][2*k+1]};
            voutput[2*k] = a; voutput[2*k+1] = b;
        }
    }
}

} // namespace
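
/* Convert between pffft's internal z-domain layout and the canonical, ordered
 * layout: PFFFT_FORWARD produces the ordered layout, PFFFT_BACKWARD converts
 * it back.
 */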
void pffft_zreorder(const PFFFT_Setup *setup, const float *in, float *out,
    pffft_direction_t direction)
{
    assert(in != out);

    const size_t N{setup->N}, Ncvec{setup->Ncvec};
    const v4sf *vin{reinterpret_cast<const v4sf*>(in)};
    v4sf *RESTRICT vout{reinterpret_cast<v4sf*>(out)};
    if(setup->transform == PFFFT_REAL)
    {
        const size_t dk{N/32};
        if(direction == PFFFT_FORWARD)
        {
            for(size_t k{0};k < dk;++k)
            {
                interleave2(vin[k*8 + 0], vin[k*8 + 1], vout[2*(0*dk + k) + 0], vout[2*(0*dk + k) + 1]);
                interleave2(vin[k*8 + 4], vin[k*8 + 5], vout[2*(2*dk + k) + 0], vout[2*(2*dk + k) + 1]);
            }
            reversed_copy(dk, vin+2, 8, vout + N/SimdSize/2);
            reversed_copy(dk, vin+6, 8, vout + N/SimdSize);
        }
        else
        {
            for(size_t k{0};k < dk;++k)
            {
                uninterleave2(vin[2*(0*dk + k) + 0], vin[2*(0*dk + k) + 1], vout[k*8 + 0], vout[k*8 + 1]);
                uninterleave2(vin[2*(2*dk + k) + 0], vin[2*(2*dk + k) + 1], vout[k*8 + 4], vout[k*8 + 5]);
            }
            unreversed_copy(dk, vin + N/SimdSize/4, vout + N/SimdSize - 6, -8);
            unreversed_copy(dk, vin + 3_uz*N/SimdSize/4, vout + N/SimdSize - 2, -8);
        }
    }
    else
    {
        if(direction == PFFFT_FORWARD)
        {
            for(size_t k{0};k < Ncvec;++k)
            {
                size_t kk{(k/4) + (k%4)*(Ncvec/4)};
                interleave2(vin[k*2], vin[k*2+1], vout[kk*2], vout[kk*2+1]);
            }
        }
        else
        {
            for(size_t k{0};k < Ncvec;++k)
            {
                size_t kk{(k/4) + (k%4)*(Ncvec/4)};
                uninterleave2(vin[kk*2], vin[kk*2+1], vout[k*2], vout[k*2+1]);
            }
        }
    }
}
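
/* Multiply the z-domain spectra a and b, scale the products, and accumulate
 * into ab. For real transforms the packed DC and Nyquist terms are handled
 * separately as plain real products.
 */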
void pffft_zconvolve_scale_accumulate(const PFFFT_Setup *s, const float *a, const float *b,
    float *ab, float scaling)
{
    const size_t Ncvec{s->Ncvec};
    const v4sf *RESTRICT va{reinterpret_cast<const v4sf*>(a)};
    const v4sf *RESTRICT vb{reinterpret_cast<const v4sf*>(b)};
    v4sf *RESTRICT vab{reinterpret_cast<v4sf*>(ab)};

#ifdef __arm__
    __builtin_prefetch(va);
    __builtin_prefetch(vb);
    __builtin_prefetch(vab);
    __builtin_prefetch(va+2);
    __builtin_prefetch(vb+2);
    __builtin_prefetch(vab+2);
    __builtin_prefetch(va+4);
    __builtin_prefetch(vb+4);
    __builtin_prefetch(vab+4);
    __builtin_prefetch(va+6);
    __builtin_prefetch(vb+6);
    __builtin_prefetch(vab+6);
#ifndef __clang__
#define ZCONVOLVE_USING_INLINE_NEON_ASM
#endif
#endif

    const float ar1{vextract0(va[0])};
    const float ai1{vextract0(va[1])};
    const float br1{vextract0(vb[0])};
    const float bi1{vextract0(vb[1])};
    const float abr1{vextract0(vab[0])};
    const float abi1{vextract0(vab[1])};
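
    /* Note: the ARM block above defines ZCONVOLVE_USING_INLINE_NEON_ASM, while
     * the check below tests ZCONVOLVE_USING_INLINE_ASM. Unless the latter is
     * defined elsewhere, the inline asm branch is not compiled and the generic
     * SIMD loop is used instead.
     */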
#ifdef ZCONVOLVE_USING_INLINE_ASM
    /* Inline asm version, unfortunately miscompiled by clang 3.2, at least on
     * Ubuntu. So this will be restricted to GCC.
     *
     * Does it still miscompile with Clang? Is it even needed with today's
     * optimizers?
     */
    const float *a_{a}, *b_{b}; float *ab_{ab};
    size_t N{Ncvec};
    asm volatile("mov r8, %2 \n"
        "vdup.f32 q15, %4 \n"
        "1: \n"
        "pld [%0,#64] \n"
        "pld [%1,#64] \n"
        "pld [%2,#64] \n"
        "pld [%0,#96] \n"
        "pld [%1,#96] \n"
        "pld [%2,#96] \n"
        "vld1.f32 {q0,q1}, [%0,:128]! \n"
        "vld1.f32 {q4,q5}, [%1,:128]! \n"
        "vld1.f32 {q2,q3}, [%0,:128]! \n"
        "vld1.f32 {q6,q7}, [%1,:128]! \n"
        "vld1.f32 {q8,q9}, [r8,:128]! \n"
        "vmul.f32 q10, q0, q4 \n"
        "vmul.f32 q11, q0, q5 \n"
        "vmul.f32 q12, q2, q6 \n"
        "vmul.f32 q13, q2, q7 \n"
        "vmls.f32 q10, q1, q5 \n"
        "vmla.f32 q11, q1, q4 \n"
        "vld1.f32 {q0,q1}, [r8,:128]! \n"
        "vmls.f32 q12, q3, q7 \n"
        "vmla.f32 q13, q3, q6 \n"
        "vmla.f32 q8, q10, q15 \n"
        "vmla.f32 q9, q11, q15 \n"
        "vmla.f32 q0, q12, q15 \n"
        "vmla.f32 q1, q13, q15 \n"
        "vst1.f32 {q8,q9},[%2,:128]! \n"
        "vst1.f32 {q0,q1},[%2,:128]! \n"
        "subs %3, #2 \n"
        "bne 1b \n"
        : "+r"(a_), "+r"(b_), "+r"(ab_), "+r"(N) : "r"(scaling) : "r8", "q0","q1","q2","q3","q4","q5","q6","q7","q8","q9", "q10","q11","q12","q13","q15","memory");

#else

    /* Default routine, works fine for non-arm cpus with current compilers. */
    const v4sf vscal{ld_ps1(scaling)};
    for(size_t i{0};i < Ncvec;i += 2)
    {
        v4sf ar4{va[2*i+0]}, ai4{va[2*i+1]};
        v4sf br4{vb[2*i+0]}, bi4{vb[2*i+1]};
        vcplxmul(ar4, ai4, br4, bi4);
        vab[2*i+0] = vmadd(ar4, vscal, vab[2*i+0]);
        vab[2*i+1] = vmadd(ai4, vscal, vab[2*i+1]);
        ar4 = va[2*i+2]; ai4 = va[2*i+3];
        br4 = vb[2*i+2]; bi4 = vb[2*i+3];
        vcplxmul(ar4, ai4, br4, bi4);
        vab[2*i+2] = vmadd(ar4, vscal, vab[2*i+2]);
        vab[2*i+3] = vmadd(ai4, vscal, vab[2*i+3]);
    }
#endif

    if(s->transform == PFFFT_REAL)
    {
        vab[0] = vinsert0(vab[0], abr1 + ar1*br1*scaling);
        vab[1] = vinsert0(vab[1], abi1 + ai1*bi1*scaling);
    }
}
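
/* Same as pffft_zconvolve_scale_accumulate, but without the scale factor. */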
void pffft_zconvolve_accumulate(const PFFFT_Setup *s, const float *a, const float *b, float *ab)
{
    const size_t Ncvec{s->Ncvec};
    const v4sf *RESTRICT va{reinterpret_cast<const v4sf*>(a)};
    const v4sf *RESTRICT vb{reinterpret_cast<const v4sf*>(b)};
    v4sf *RESTRICT vab{reinterpret_cast<v4sf*>(ab)};

#ifdef __arm__
    __builtin_prefetch(va);
    __builtin_prefetch(vb);
    __builtin_prefetch(vab);
    __builtin_prefetch(va+2);
    __builtin_prefetch(vb+2);
    __builtin_prefetch(vab+2);
    __builtin_prefetch(va+4);
    __builtin_prefetch(vb+4);
    __builtin_prefetch(vab+4);
    __builtin_prefetch(va+6);
    __builtin_prefetch(vb+6);
    __builtin_prefetch(vab+6);
#endif

    const float ar1{vextract0(va[0])};
    const float ai1{vextract0(va[1])};
    const float br1{vextract0(vb[0])};
    const float bi1{vextract0(vb[1])};
    const float abr1{vextract0(vab[0])};
    const float abi1{vextract0(vab[1])};

    /* No inline assembly for this version. I'm not familiar enough with NEON
     * assembly, and I don't know that it's needed with today's optimizers.
     */
    for(size_t i{0};i < Ncvec;i += 2)
    {
        v4sf ar4{va[2*i+0]}, ai4{va[2*i+1]};
        v4sf br4{vb[2*i+0]}, bi4{vb[2*i+1]};
        vcplxmul(ar4, ai4, br4, bi4);
        vab[2*i+0] = vadd(ar4, vab[2*i+0]);
        vab[2*i+1] = vadd(ai4, vab[2*i+1]);
        ar4 = va[2*i+2]; ai4 = va[2*i+3];
        br4 = vb[2*i+2]; bi4 = vb[2*i+3];
        vcplxmul(ar4, ai4, br4, bi4);
        vab[2*i+2] = vadd(ar4, vab[2*i+2]);
        vab[2*i+3] = vadd(ai4, vab[2*i+3]);
    }

    if(s->transform == PFFFT_REAL)
    {
        vab[0] = vinsert0(vab[0], abr1 + ar1*br1);
        vab[1] = vinsert0(vab[1], abi1 + ai1*bi1);
    }
}

void pffft_transform(const PFFFT_Setup *setup, const float *input, float *output, float *work,
    pffft_direction_t direction)
{
    assert(valigned(input) && valigned(output) && valigned(work));
    pffft_transform_internal(setup, reinterpret_cast<const v4sf*>(al::assume_aligned<16>(input)),
        reinterpret_cast<v4sf*>(al::assume_aligned<16>(output)),
        reinterpret_cast<v4sf*>(al::assume_aligned<16>(work)), direction, false);
}

void pffft_transform_ordered(const PFFFT_Setup *setup, const float *input, float *output,
    float *work, pffft_direction_t direction)
{
    assert(valigned(input) && valigned(output) && valigned(work));
    pffft_transform_internal(setup, reinterpret_cast<const v4sf*>(al::assume_aligned<16>(input)),
        reinterpret_cast<v4sf*>(al::assume_aligned<16>(output)),
        reinterpret_cast<v4sf*>(al::assume_aligned<16>(work)), direction, true);
}

#else // defined(PFFFT_SIMD_DISABLE)

// standard routine using scalar floats, without SIMD stuff.

namespace {

void pffft_transform_internal(const PFFFT_Setup *setup, const float *input, float *output,
    float *scratch, const pffft_direction_t direction, bool ordered)
{
    const size_t Ncvec{setup->Ncvec};
    const bool nf_odd{(setup->ifac[1]&1) != 0};

    assert(scratch != nullptr);

    /* z-domain data for complex transforms is already ordered without SIMD. */
    if(setup->transform == PFFFT_COMPLEX)
        ordered = false;

    float *buff[2]{output, scratch};
    bool ib{nf_odd != ordered};
    if(direction == PFFFT_FORWARD)
    {
        if(setup->transform == PFFFT_REAL)
            ib = (rfftf1_ps(Ncvec*2, input, buff[ib], buff[!ib], setup->twiddle, setup->ifac) == buff[1]);
        else
            ib = (cfftf1_ps(Ncvec, input, buff[ib], buff[!ib], setup->twiddle, setup->ifac, -1.0f) == buff[1]);
        if(ordered)
        {
            pffft_zreorder(setup, buff[ib], buff[!ib], PFFFT_FORWARD);
            ib = !ib;
        }
    }
    else
    {
        if(input == buff[ib])
            ib = !ib; // may happen when finput == foutput

        if(ordered)
        {
            pffft_zreorder(setup, input, buff[ib], PFFFT_BACKWARD);
            input = buff[ib];
            ib = !ib;
        }
        if(setup->transform == PFFFT_REAL)
            ib = (rfftb1_ps(Ncvec*2, input, buff[ib], buff[!ib], setup->twiddle, setup->ifac) == buff[1]);
        else
            ib = (cfftf1_ps(Ncvec, input, buff[ib], buff[!ib], setup->twiddle, setup->ifac, +1.0f) == buff[1]);
    }
    if(buff[ib] != output)
    {
        // extra copy required -- this situation should only happen when finput == foutput
        assert(input==output);
        for(size_t k{0};k < Ncvec;++k)
        {
            float a{buff[ib][2*k]}, b{buff[ib][2*k+1]};
            output[2*k] = a; output[2*k+1] = b;
        }
    }
}

} // namespace

void pffft_zreorder(const PFFFT_Setup *setup, const float *in, float *RESTRICT out,
    pffft_direction_t direction)
{
    const size_t N{setup->N};
    if(setup->transform == PFFFT_COMPLEX)
    {
        for(size_t k{0};k < 2*N;++k)
            out[k] = in[k];
        return;
    }
    else if(direction == PFFFT_FORWARD)
    {
        float x_N{in[N-1]};
        for(size_t k{N-1};k > 1;--k)
            out[k] = in[k-1];
        out[0] = in[0];
        out[1] = x_N;
    }
    else
    {
        float x_N{in[1]};
        for(size_t k{1};k < N-1;++k)
            out[k] = in[k+1];
        out[0] = in[0];
        out[N-1] = x_N;
    }
}

void pffft_zconvolve_scale_accumulate(const PFFFT_Setup *s, const float *a, const float *b,
    float *ab, float scaling)
{
    size_t Ncvec{s->Ncvec};

    if(s->transform == PFFFT_REAL)
    {
        // take care of the fftpack ordering
        ab[0] += a[0]*b[0]*scaling;
        ab[2*Ncvec-1] += a[2*Ncvec-1]*b[2*Ncvec-1]*scaling;
        ++ab; ++a; ++b; --Ncvec;
    }
    for(size_t i{0};i < Ncvec;++i)
    {
        float ar{a[2*i+0]}, ai{a[2*i+1]};
        const float br{b[2*i+0]}, bi{b[2*i+1]};
        vcplxmul(ar, ai, br, bi);
        ab[2*i+0] += ar*scaling;
        ab[2*i+1] += ai*scaling;
    }
}

void pffft_zconvolve_accumulate(const PFFFT_Setup *s, const float *a, const float *b, float *ab)
{
    size_t Ncvec{s->Ncvec};

    if(s->transform == PFFFT_REAL)
    {
        // take care of the fftpack ordering
        ab[0] += a[0]*b[0];
        ab[2*Ncvec-1] += a[2*Ncvec-1]*b[2*Ncvec-1];
        ++ab; ++a; ++b; --Ncvec;
    }
    for(size_t i{0};i < Ncvec;++i)
    {
        float ar{a[2*i+0]}, ai{a[2*i+1]};
        const float br{b[2*i+0]}, bi{b[2*i+1]};
        vcplxmul(ar, ai, br, bi);
        ab[2*i+0] += ar;
        ab[2*i+1] += ai;
    }
}

void pffft_transform(const PFFFT_Setup *setup, const float *input, float *output, float *work,
    pffft_direction_t direction)
{
    pffft_transform_internal(setup, input, output, work, direction, false);
}

void pffft_transform_ordered(const PFFFT_Setup *setup, const float *input, float *output,
    float *work, pffft_direction_t direction)
{
    pffft_transform_internal(setup, input, output, work, direction, true);
}

#endif /* defined(PFFFT_SIMD_DISABLE) */
/* NOLINTEND(cppcoreguidelines-pro-bounds-pointer-arithmetic) */