misc.h 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254
  1. /********************************************************************
  2. * *
  3. * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
  4. * *
  5. * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
  6. * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
  7. * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
  8. * *
  9. * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
  10. * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
  11. * *
  12. ********************************************************************
  13. function: miscellaneous math and prototypes
  14. ********************************************************************/
  15. #ifndef _V_RANDOM_H_
  16. #define _V_RANDOM_H_
  17. #include "ivorbiscodec.h"
  18. #include "os.h"
  19. #ifdef _LOW_ACCURACY_
  20. # define X(n) (((((n)>>22)+1)>>1) - ((((n)>>22)+1)>>9))
  21. # define LOOKUP_T const unsigned char
  22. #else
  23. # define X(n) (n)
  24. # define LOOKUP_T const ogg_int32_t
  25. #endif
  26. #include "asm_arm.h"
  27. #include <stdlib.h> /* for abs() */
  28. #ifndef _V_WIDE_MATH
  29. #define _V_WIDE_MATH
  30. #ifndef _LOW_ACCURACY_
  31. /* 64 bit multiply */
  32. #if !(defined WIN32 && defined WINCE)
  33. #include <sys/types.h>
  34. #endif
  35. #if BYTE_ORDER==LITTLE_ENDIAN
  36. union magic {
  37. struct {
  38. ogg_int32_t lo;
  39. ogg_int32_t hi;
  40. } halves;
  41. ogg_int64_t whole;
  42. };
  43. #endif
  44. /*
  45. #if BYTE_ORDER==BIG_ENDIAN
  46. union magic {
  47. struct {
  48. ogg_int32_t hi;
  49. ogg_int32_t lo;
  50. } halves;
  51. ogg_int64_t whole;
  52. };
  53. #endif
  54. */
/* Signed 32x32->64 multiply returning the high 32 bits of the product,
 * i.e. (x*y)>>32.  The union pulls the high half out of the 64-bit
 * result without needing a 64-bit shift. */
STIN ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return magic.halves.hi;
}
/* Q31 fixed-point multiply: (x*y)>>31, formed by doubling the >>32
 * product.  NOTE(review): the <<1 assumes the doubled high half stays
 * in 32-bit range for the operand magnitudes used by the codec —
 * confirm against call sites. */
STIN ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}
/* Fixed-point multiply returning (x*y)>>15: bits 15..46 of the 64-bit
 * product, assembled from the low half (logical shift right by 15,
 * hence the unsigned cast) OR'd with the high half shifted left 17. */
STIN ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
  68. #else
  69. /* 32 bit multiply, more portable but less accurate */
  70. /*
  71. * Note: Precision is biased towards the first argument therefore ordering
  72. * is important. Shift values were chosen for the best sound quality after
  73. * many listening tests.
  74. */
  75. /*
  76. * For MULT32 and MULT31: The second argument is always a lookup table
  77. * value already preshifted from 31 to 8 bits. We therefore take the
  78. * opportunity to save on text space and use unsigned char for those
  79. * tables in this case.
  80. */
/* Low-accuracy (x*y)>>32 in pure 32-bit arithmetic: y arrives from a
 * lookup table already shifted down 23 bits; x gives up the remaining
 * 9 bits, so the shifts total 32. */
STIN ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 9) * y; /* y preshifted >>23 */
}
/* Low-accuracy (x*y)>>31: y preshifted down 23 bits, x contributes the
 * remaining 8, totalling 31. */
STIN ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 8) * y; /* y preshifted >>23 */
}
/* Low-accuracy (x*y)>>15: y preshifted down 9 bits, x contributes the
 * remaining 6, totalling 15. */
STIN ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 6) * y; /* y preshifted >>9 */
}
  90. #endif
/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory. Might or might not be beneficial
 * depending on the architecture and compiler.
 */
#define MB()
  97. /*
  98. * The XPROD functions are meant to optimize the cross products found all
  99. * over the place in mdct.c by forcing memory operation ordering to avoid
  100. * unnecessary register reloads as soon as memory is being written to.
  101. * However this is only beneficial on CPUs with a sane number of general
  102. * purpose registers which exclude the Intel x86. On Intel, better let the
  103. * compiler actually reload registers directly from original memory by using
  104. * macros.
  105. */
  106. #ifdef __i386__
  107. #define XPROD32(_a, _b, _t, _v, _x, _y) \
  108. { *(_x)=MULT32(_a,_t)+MULT32(_b,_v); \
  109. *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
  110. #define XPROD31(_a, _b, _t, _v, _x, _y) \
  111. { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
  112. *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
  113. #define XNPROD31(_a, _b, _t, _v, _x, _y) \
  114. { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
  115. *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
  116. #else
  117. STIN void XPROD32(ogg_int32_t a, ogg_int32_t b,
  118. ogg_int32_t t, ogg_int32_t v,
  119. ogg_int32_t *x, ogg_int32_t *y)
  120. {
  121. *x = MULT32(a, t) + MULT32(b, v);
  122. *y = MULT32(b, t) - MULT32(a, v);
  123. }
  124. STIN void XPROD31(ogg_int32_t a, ogg_int32_t b,
  125. ogg_int32_t t, ogg_int32_t v,
  126. ogg_int32_t *x, ogg_int32_t *y)
  127. {
  128. *x = MULT31(a, t) + MULT31(b, v);
  129. *y = MULT31(b, t) - MULT31(a, v);
  130. }
  131. STIN void XNPROD31(ogg_int32_t a, ogg_int32_t b,
  132. ogg_int32_t t, ogg_int32_t v,
  133. ogg_int32_t *x, ogg_int32_t *y)
  134. {
  135. *x = MULT31(a, t) - MULT31(b, v);
  136. *y = MULT31(b, t) + MULT31(a, v);
  137. }
  138. #endif
  139. #endif
  140. #ifndef _V_CLIP_MATH
  141. #define _V_CLIP_MATH
  142. STIN ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  143. int ret=x;
  144. ret-= ((x<=32767)-1)&(x-32767);
  145. ret-= ((x>=-32768)-1)&(x+32768);
  146. return(ret);
  147. }
  148. #endif
  149. STIN ogg_int32_t VFLOAT_MULT(ogg_int32_t a,ogg_int32_t ap,
  150. ogg_int32_t b,ogg_int32_t bp,
  151. ogg_int32_t *p){
  152. if(a && b){
  153. #ifndef _LOW_ACCURACY_
  154. *p=ap+bp+32;
  155. return MULT32(a,b);
  156. #else
  157. *p=ap+bp+31;
  158. return (a>>15)*(b>>16);
  159. #endif
  160. }else
  161. return 0;
  162. }
  163. int _ilog(unsigned int);
/* Multiply pseudo-float (a, ap) by a plain integer i: normalise i into
 * a mantissa/exponent pair (shift i up so its magnitude occupies the
 * top bits, exponent ip compensating), then defer to VFLOAT_MULT.
 * NOTE(review): abs(i) is UB for i == INT_MIN, and i<<-ip left-shifts
 * a negative value when i < 0 (formally UB in C) — both inherited from
 * the original code; confirm callers never hit these cases. */
STIN ogg_int32_t VFLOAT_MULTI(ogg_int32_t a,ogg_int32_t ap,
                              ogg_int32_t i,
                              ogg_int32_t *p){
  /* _ilog gives the bit position of i's MSB, so ip <= 0 here */
  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}
/* Add two pseudo-floats (mantissa a, exponent ap) + (b, bp), storing
 * the result's exponent in *p and returning its mantissa.  The smaller
 * operand is aligned to the larger with round-to-nearest, both are
 * pre-dropped one bit so the sum cannot overflow, and the result is
 * renormalised by one bit if it lost significance. */
STIN ogg_int32_t VFLOAT_ADD(ogg_int32_t a,ogg_int32_t ap,
                            ogg_int32_t b,ogg_int32_t bp,
                            ogg_int32_t *p){
  /* A zero mantissa passes the other operand through untouched. */
  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
  if(ap>bp){
    /* a dominates: result exponent is ap+1, both mantissas give up a
       bit of headroom so a+b below cannot overflow */
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;  /* align b with round-to-nearest */
    }else{
      b=0;  /* too small to contribute; also avoids a >=32-bit shift (UB) */
    }
  }else{
    /* b dominates (or exponents equal): mirror of the branch above */
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }

  a+=b;
  /* Renormalise: if the top two bits agree (both 0 or both 1) the sum
     fits in one bit less, so shift the mantissa up and drop *p by one. */
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}
  208. #endif