avg.c

/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <stdlib.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"

// Rounded average of an 8x8 block of 8-bit pixels; p is the stride.
unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) {
  int i, j;
  int sum = 0;
  for (i = 0; i < 8; ++i, s += p)
    for (j = 0; j < 8; sum += s[j], ++j) {
    }

  return (sum + 32) >> 6;
}

// Rounded average of a 4x4 block of 8-bit pixels; p is the stride.
unsigned int vpx_avg_4x4_c(const uint8_t *s, int p) {
  int i, j;
  int sum = 0;
  for (i = 0; i < 4; ++i, s += p)
    for (j = 0; j < 4; sum += s[j], ++j) {
    }

  return (sum + 8) >> 4;
}
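
// Usage sketch (illustrative only, kept out of the build by the #if 0 guard):
// computing the rounded average of an 8x8 block. The buffer, its gradient
// contents, and the packed stride of 8 are hypothetical.
#if 0
static unsigned int example_block_average(void) {
  uint8_t block[8 * 8];
  int i;
  // Values 0..63 sum to 2016, so the rounded average is (2016 + 32) >> 6 = 32.
  for (i = 0; i < 8 * 8; ++i) block[i] = (uint8_t)i;
  return vpx_avg_8x8_c(block, 8);  // stride equals width for a packed block
}
#endif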
// 1-D 8-point Hadamard transform of one column of src_diff, computed with
// three butterfly stages; the output coefficients are written in a permuted
// order.
// src_diff: first pass, 9 bit, dynamic range [-255, 255]
//           second pass, 12 bit, dynamic range [-2040, 2040]
static void hadamard_col8(const int16_t *src_diff, int src_stride,
                          int16_t *coeff) {
  int16_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
  int16_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
  int16_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
  int16_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
  int16_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
  int16_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
  int16_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
  int16_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];

  int16_t c0 = b0 + b2;
  int16_t c1 = b1 + b3;
  int16_t c2 = b0 - b2;
  int16_t c3 = b1 - b3;
  int16_t c4 = b4 + b6;
  int16_t c5 = b5 + b7;
  int16_t c6 = b4 - b6;
  int16_t c7 = b5 - b7;

  coeff[0] = c0 + c4;
  coeff[7] = c1 + c5;
  coeff[3] = c2 + c6;
  coeff[4] = c3 + c7;
  coeff[2] = c0 - c4;
  coeff[6] = c1 - c5;
  coeff[1] = c2 - c6;
  coeff[5] = c3 - c7;
}
// The order of the output coefficients of the Hadamard transform is not
// important. For optimization purposes the final transpose may be skipped.
void vpx_hadamard_8x8_c(const int16_t *src_diff, int src_stride,
                        tran_low_t *coeff) {
  int idx;
  int16_t buffer[64];
  int16_t buffer2[64];
  int16_t *tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    hadamard_col8(src_diff, src_stride, tmp_buf);  // src_diff: 9 bit
                                                   // dynamic range [-255, 255]
    tmp_buf += 8;
    ++src_diff;
  }

  tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    hadamard_col8(tmp_buf, 8, buffer2 + 8 * idx);  // tmp_buf: 12 bit
                                                   // dynamic range [-2040, 2040]
                                                   // buffer2: 15 bit
                                                   // dynamic range [-16320, 16320]
    ++tmp_buf;
  }

  for (idx = 0; idx < 64; ++idx) coeff[idx] = (tran_low_t)buffer2[idx];
}
// In place 16x16 2D Hadamard transform
void vpx_hadamard_16x16_c(const int16_t *src_diff, int src_stride,
                          tran_low_t *coeff) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    // src_diff: 9 bit, dynamic range [-255, 255]
    const int16_t *src_ptr =
        src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8;
    vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
  }

  // coeff: 15 bit, dynamic range [-16320, 16320]
  for (idx = 0; idx < 64; ++idx) {
    tran_low_t a0 = coeff[0];
    tran_low_t a1 = coeff[64];
    tran_low_t a2 = coeff[128];
    tran_low_t a3 = coeff[192];

    tran_low_t b0 = (a0 + a1) >> 1;  // (a0 + a1): 16 bit, [-32640, 32640]
    tran_low_t b1 = (a0 - a1) >> 1;  // b0-b3: 15 bit, dynamic range
    tran_low_t b2 = (a2 + a3) >> 1;  // [-16320, 16320]
    tran_low_t b3 = (a2 - a3) >> 1;

    coeff[0] = b0 + b2;  // 16 bit, [-32640, 32640]
    coeff[64] = b1 + b3;
    coeff[128] = b0 - b2;
    coeff[192] = b1 - b3;

    ++coeff;
  }
}
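
// Worked example (illustrative only, kept out of the build): for a constant
// residual v, each 8x8 stage above leaves 64 * v in its DC coefficient, and
// the cross-block stage combines the four DCs as ((64v + 64v) >> 1) +
// ((64v + 64v) >> 1) = 128 * v. The constant 3 below is an arbitrary choice.
#if 0
static tran_low_t example_hadamard_16x16_dc(void) {
  int16_t diff[16 * 16];
  tran_low_t coeff[16 * 16];
  int i;
  for (i = 0; i < 16 * 16; ++i) diff[i] = 3;  // constant residual, v = 3
  vpx_hadamard_16x16_c(diff, 16, coeff);
  return coeff[0];  // 128 * 3 = 384
}
#endif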
// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
int vpx_satd_c(const tran_low_t *coeff, int length) {
  int i;
  int satd = 0;
  for (i = 0; i < length; ++i) satd += abs(coeff[i]);

  // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024]
  return satd;
}
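
// Usage sketch (illustrative only, kept out of the build): one way to form an
// 8x8 SATD-style cost is to Hadamard-transform a pixel-domain residual and
// sum the absolute coefficients. The buffer names and the packed stride of 8
// are hypothetical.
#if 0
static int example_satd_8x8(const uint8_t *src, const uint8_t *pred) {
  int16_t diff[8 * 8];
  tran_low_t coeff[8 * 8];
  int r, c;
  // Residual: 9 bit, dynamic range [-255, 255].
  for (r = 0; r < 8; ++r)
    for (c = 0; c < 8; ++c) diff[r * 8 + c] = src[r * 8 + c] - pred[r * 8 + c];
  vpx_hadamard_8x8_c(diff, 8, coeff);
  return vpx_satd_c(coeff, 8 * 8);
}
#endif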
// Integer projection onto row vectors.
// height: value range {16, 32, 64}.
void vpx_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref,
                       const int ref_stride, const int height) {
  int idx;
  const int norm_factor = height >> 1;
  for (idx = 0; idx < 16; ++idx) {
    int i;
    hbuf[idx] = 0;
    // hbuf[idx]: 14 bit, dynamic range [0, 16320].
    for (i = 0; i < height; ++i) hbuf[idx] += ref[i * ref_stride];
    // hbuf[idx]: 9 bit, dynamic range [0, 510].
    hbuf[idx] /= norm_factor;
    ++ref;
  }
}

// width: value range {16, 32, 64}.
int16_t vpx_int_pro_col_c(const uint8_t *ref, const int width) {
  int idx;
  int16_t sum = 0;
  // sum: 14 bit, dynamic range [0, 16320]
  for (idx = 0; idx < width; ++idx) sum += ref[idx];

  return sum;
}
// ref: [0 - 510]
// src: [0 - 510]
// bwl: {2, 3, 4}
int vpx_vector_var_c(const int16_t *ref, const int16_t *src, const int bwl) {
  int i;
  int width = 4 << bwl;
  int sse = 0, mean = 0, var;

  for (i = 0; i < width; ++i) {
    int diff = ref[i] - src[i];  // diff: dynamic range [-510, 510], 10 bits.
    mean += diff;                // mean: dynamic range 16 bits.
    sse += diff * diff;          // sse: dynamic range 26 bits.
  }

  // (mean * mean): dynamic range 31 bits.
  var = sse - ((mean * mean) >> (bwl + 2));
  return var;
}
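
// Usage sketch (illustrative only, kept out of the build): a coarse match cost
// between two 16x16 blocks built from their 1-D projections. bwl = 2 selects a
// 16-element vector (4 << 2 == 16). The buffer names and the packed stride of
// 16 are hypothetical.
#if 0
static int example_projection_cost(const uint8_t *src, const uint8_t *ref) {
  int16_t src_proj[16];
  int16_t ref_proj[16];
  // Each entry is a column sum over 16 rows, divided by norm_factor (8).
  vpx_int_pro_row_c(src_proj, src, 16, 16);
  vpx_int_pro_row_c(ref_proj, ref, 16, 16);
  // Squared-difference energy of the projections, less a mean correction.
  return vpx_vector_var_c(ref_proj, src_proj, 2);
}
#endif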
// Minimum and maximum per-pixel absolute difference between two 8x8 blocks
// s and d, with strides p and dp respectively.
void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp,
                      int *min, int *max) {
  int i, j;
  *min = 255;
  *max = 0;
  for (i = 0; i < 8; ++i, s += p, d += dp) {
    for (j = 0; j < 8; ++j) {
      int diff = abs(s[j] - d[j]);
      *min = diff < *min ? diff : *min;
      *max = diff > *max ? diff : *max;
    }
  }
}
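
// Usage sketch (illustrative only, kept out of the build): one possible use of
// the min/max absolute difference is a quick "well predicted" test for an 8x8
// block. The threshold of 16 and the buffer names are hypothetical.
#if 0
static int example_block_closely_matches(const uint8_t *src, int src_stride,
                                         const uint8_t *pred, int pred_stride) {
  int min_diff, max_diff;
  vpx_minmax_8x8_c(src, src_stride, pred, pred_stride, &min_diff, &max_diff);
  return max_diff <= 16;  // every pixel differs by at most the threshold
}
#endif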
#if CONFIG_VP9_HIGHBITDEPTH
// High bit-depth variants of the functions above; the uint8_t pointers are
// converted back to 16-bit pixel pointers with CONVERT_TO_SHORTPTR().
unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) {
  int i, j;
  int sum = 0;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  for (i = 0; i < 8; ++i, s += p)
    for (j = 0; j < 8; sum += s[j], ++j) {
    }

  return (sum + 32) >> 6;
}

unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) {
  int i, j;
  int sum = 0;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  for (i = 0; i < 4; ++i, s += p)
    for (j = 0; j < 4; sum += s[j], ++j) {
    }

  return (sum + 8) >> 4;
}

void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
                             int dp, int *min, int *max) {
  int i, j;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  const uint16_t *d = CONVERT_TO_SHORTPTR(d8);
  *min = 255;
  *max = 0;
  for (i = 0; i < 8; ++i, s += p, d += dp) {
    for (j = 0; j < 8; ++j) {
      int diff = abs(s[j] - d[j]);
      *min = diff < *min ? diff : *min;
      *max = diff > *max ? diff : *max;
    }
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH