/* vp9_denoiser.c */
  1. /*
  2. * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
  10. #include <assert.h>
  11. #include <limits.h>
  12. #include <math.h>
  13. #include "./vpx_dsp_rtcd.h"
  14. #include "vpx_dsp/vpx_dsp_common.h"
  15. #include "vpx_scale/yv12config.h"
  16. #include "vpx/vpx_integer.h"
  17. #include "vp9/common/vp9_reconinter.h"
  18. #include "vp9/encoder/vp9_context_tree.h"
  19. #include "vp9/encoder/vp9_denoiser.h"
  20. #include "vp9/encoder/vp9_encoder.h"
  21. #ifdef OUTPUT_YUV_DENOISED
  22. static void make_grayscale(YV12_BUFFER_CONFIG *yuv);
  23. #endif
  24. static int absdiff_thresh(BLOCK_SIZE bs, int increase_denoising) {
  25. (void)bs;
  26. return 3 + (increase_denoising ? 1 : 0);
  27. }
  28. static int delta_thresh(BLOCK_SIZE bs, int increase_denoising) {
  29. (void)bs;
  30. (void)increase_denoising;
  31. return 4;
  32. }
  33. static int noise_motion_thresh(BLOCK_SIZE bs, int increase_denoising) {
  34. (void)bs;
  35. (void)increase_denoising;
  36. return 625;
  37. }
  38. static unsigned int sse_thresh(BLOCK_SIZE bs, int increase_denoising) {
  39. return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 80 : 40);
  40. }
  41. static int sse_diff_thresh(BLOCK_SIZE bs, int increase_denoising,
  42. int motion_magnitude) {
  43. if (motion_magnitude > noise_motion_thresh(bs, increase_denoising)) {
  44. if (increase_denoising)
  45. return (1 << num_pels_log2_lookup[bs]) << 2;
  46. else
  47. return 0;
  48. } else {
  49. return (1 << num_pels_log2_lookup[bs]) << 4;
  50. }
  51. }
  52. static int total_adj_weak_thresh(BLOCK_SIZE bs, int increase_denoising) {
  53. return (1 << num_pels_log2_lookup[bs]) * (increase_denoising ? 3 : 2);
  54. }
// TODO(jackychen): If increase_denoising is enabled in the future,
// we might need to update the code for calculating 'total_adj' in
// case the C code is not bit-exact with corresponding sse2 code.
// Core temporal filter (C reference; per the TODO above it must stay
// bit-exact with the SSE2 version, so do not reorder or simplify the
// arithmetic).  Blends the motion-compensated running average 'mc_avg' into
// the source 'sig', writing the denoised pixels to 'avg'.  Returns
// FILTER_BLOCK when 'avg' should be used, COPY_BLOCK when the caller should
// keep the original signal.
int vp9_denoiser_filter_c(const uint8_t *sig, int sig_stride,
                          const uint8_t *mc_avg, int mc_avg_stride,
                          uint8_t *avg, int avg_stride, int increase_denoising,
                          BLOCK_SIZE bs, int motion_magnitude) {
  int r, c;
  const uint8_t *sig_start = sig;
  const uint8_t *mc_avg_start = mc_avg;
  uint8_t *avg_start = avg;
  int diff, adj, absdiff, delta;
  // Adjustment steps, indexed by |mc_avg - sig| magnitude bucket.
  int adj_val[] = { 3, 4, 6 };
  // Running sum of all adjustments applied, used to detect over-filtering.
  int total_adj = 0;
  int shift_inc = 1;
  // If motion_magnitude is small, making the denoiser more aggressive by
  // increasing the adjustment for each level. Add another increment for
  // blocks that are labeled for increase denoising.
  if (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) {
    if (increase_denoising) {
      shift_inc = 2;
    }
    adj_val[0] += shift_inc;
    adj_val[1] += shift_inc;
    adj_val[2] += shift_inc;
  }
  // First attempt to apply a strong temporal denoising filter.
  for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
    for (c = 0; c < (4 << b_width_log2_lookup[bs]); ++c) {
      diff = mc_avg[c] - sig[c];
      absdiff = abs(diff);
      if (absdiff <= absdiff_thresh(bs, increase_denoising)) {
        // Small difference: copy the motion-compensated value directly.
        avg[c] = mc_avg[c];
        total_adj += diff;
      } else {
        // Larger difference: move the source toward mc_avg by a bucketed step.
        switch (absdiff) {
          case 4:
          case 5:
          case 6:
          case 7: adj = adj_val[0]; break;
          case 8:
          case 9:
          case 10:
          case 11:
          case 12:
          case 13:
          case 14:
          case 15: adj = adj_val[1]; break;
          default: adj = adj_val[2];
        }
        if (diff > 0) {
          avg[c] = VPXMIN(UINT8_MAX, sig[c] + adj);
          total_adj += adj;
        } else {
          avg[c] = VPXMAX(0, sig[c] - adj);
          total_adj -= adj;
        }
      }
    }
    sig += sig_stride;
    avg += avg_stride;
    mc_avg += mc_avg_stride;
  }
  // If the strong filter did not modify the signal too much, we're all set.
  if (abs(total_adj) <= total_adj_strong_thresh(bs, increase_denoising)) {
    return FILTER_BLOCK;
  }
  // Otherwise, we try to dampen the filter if the delta is not too high.
  delta = ((abs(total_adj) - total_adj_strong_thresh(bs, increase_denoising)) >>
           num_pels_log2_lookup[bs]) +
          1;
  if (delta >= delta_thresh(bs, increase_denoising)) {
    return COPY_BLOCK;
  }
  // Second pass: pull each denoised pixel back toward the original signal by
  // at most 'delta' to reduce the net adjustment.
  mc_avg = mc_avg_start;
  avg = avg_start;
  sig = sig_start;
  for (r = 0; r < (4 << b_height_log2_lookup[bs]); ++r) {
    for (c = 0; c < (4 << b_width_log2_lookup[bs]); ++c) {
      diff = mc_avg[c] - sig[c];
      adj = abs(diff);
      if (adj > delta) {
        adj = delta;
      }
      if (diff > 0) {
        // Diff positive means we made positive adjustment above
        // (in first try/attempt), so now make negative adjustment to bring
        // denoised signal down.
        avg[c] = VPXMAX(0, avg[c] - adj);
        total_adj -= adj;
      } else {
        // Diff negative means we made negative adjustment above
        // (in first try/attempt), so now make positive adjustment to bring
        // denoised signal up.
        avg[c] = VPXMIN(UINT8_MAX, avg[c] + adj);
        total_adj += adj;
      }
    }
    sig += sig_stride;
    avg += avg_stride;
    mc_avg += mc_avg_stride;
  }
  // We can use the filter if it has been sufficiently dampened
  if (abs(total_adj) <= total_adj_weak_thresh(bs, increase_denoising)) {
    return FILTER_BLOCK;
  }
  return COPY_BLOCK;
}
  163. static uint8_t *block_start(uint8_t *framebuf, int stride, int mi_row,
  164. int mi_col) {
  165. return framebuf + (stride * mi_row << 3) + (mi_col << 3);
  166. }
// Decides whether the block at (mi_row, mi_col) should be denoised and, if
// so, builds the motion-compensated prediction of the denoiser's
// running-average buffer into denoiser->mc_running_avg_y.  It temporarily
// repoints the MACROBLOCKD plane buffers at the denoiser's buffers, runs the
// regular inter predictor, then restores all saved state.  Returns
// FILTER_BLOCK when the motion-compensated buffer is ready for filtering,
// COPY_BLOCK otherwise.
static VP9_DENOISER_DECISION perform_motion_compensation(
    VP9_COMMON *const cm, VP9_DENOISER *denoiser, MACROBLOCK *mb, BLOCK_SIZE bs,
    int increase_denoising, int mi_row, int mi_col, PICK_MODE_CONTEXT *ctx,
    int motion_magnitude, int is_skin, int *zeromv_filter, int consec_zeromv,
    int num_spatial_layers, int width, int lst_fb_idx, int gld_fb_idx,
    int use_svc) {
  // SSE advantage of the best new-mv candidate over zero-mv; 0 when no
  // new-mv candidate was evaluated (newmv_sse still at its UINT_MAX reset).
  const int sse_diff = (ctx->newmv_sse == UINT_MAX)
                           ? 0
                           : ((int)ctx->zeromv_sse - (int)ctx->newmv_sse);
  int frame;
  MACROBLOCKD *filter_mbd = &mb->e_mbd;
  MODE_INFO *mi = filter_mbd->mi[0];
  MODE_INFO saved_mi;
  int i;
  struct buf_2d saved_dst[MAX_MB_PLANE];
  struct buf_2d saved_pre[MAX_MB_PLANE];
  RefBuffer *saved_block_refs[2];
  MV_REFERENCE_FRAME saved_frame;
  frame = ctx->best_reference_frame;
  // Save the mode info so every early-out below can restore it.
  saved_mi = *mi;
  // Skin blocks with any motion (or only a short zero-mv run) are left
  // untouched to avoid smearing faces.
  if (is_skin && (motion_magnitude > 0 || consec_zeromv < 4)) return COPY_BLOCK;
  // Avoid denoising small blocks. When noise > kDenLow or frame width > 480,
  // denoise 16x16 blocks.
  if (bs == BLOCK_8X8 || bs == BLOCK_8X16 || bs == BLOCK_16X8 ||
      (bs == BLOCK_16X16 && width > 480 &&
       denoiser->denoising_level <= kDenLow))
    return COPY_BLOCK;
  // If the best reference frame uses inter-prediction and there is enough of a
  // difference in sum-squared-error, use it.
  if (frame != INTRA_FRAME && frame != ALTREF_FRAME &&
      (frame != GOLDEN_FRAME || num_spatial_layers == 1) &&
      sse_diff > sse_diff_thresh(bs, increase_denoising, motion_magnitude)) {
    mi->ref_frame[0] = ctx->best_reference_frame;
    mi->mode = ctx->best_sse_inter_mode;
    mi->mv[0] = ctx->best_sse_mv;
  } else {
    // Otherwise, use the zero reference frame.
    frame = ctx->best_zeromv_reference_frame;
    ctx->newmv_sse = ctx->zeromv_sse;
    // Bias to last reference.
    // NOTE(review): the condition below parses as
    // zeromv_lastref_sse < ((5 * zeromv_sse) >> 2), i.e. LAST wins when its
    // zero-mv SSE is within 25% of the best zero-mv SSE (the odd spacing is
    // formatter damage, not an operator change).
    if (num_spatial_layers > 1 || frame == ALTREF_FRAME ||
        (frame != LAST_FRAME &&
         ((ctx->zeromv_lastref_sse<(5 * ctx->zeromv_sse)>> 2) ||
          denoiser->denoising_level >= kDenHigh))) {
      frame = LAST_FRAME;
      ctx->newmv_sse = ctx->zeromv_lastref_sse;
    }
    mi->ref_frame[0] = frame;
    mi->mode = ZEROMV;
    mi->mv[0].as_int = 0;
    ctx->best_sse_inter_mode = ZEROMV;
    ctx->best_sse_mv.as_int = 0;
    *zeromv_filter = 1;
    if (denoiser->denoising_level > kDenMedium) {
      motion_magnitude = 0;
    }
  }
  saved_frame = frame;
  // When using SVC, we need to map REF_FRAME to the frame buffer index.
  if (use_svc) {
    if (frame == LAST_FRAME)
      frame = lst_fb_idx + 1;
    else if (frame == GOLDEN_FRAME)
      frame = gld_fb_idx + 1;
  }
  if (ctx->newmv_sse > sse_thresh(bs, increase_denoising)) {
    // Restore everything to its original state
    *mi = saved_mi;
    return COPY_BLOCK;
  }
  if (motion_magnitude > (noise_motion_thresh(bs, increase_denoising) << 3)) {
    // Restore everything to its original state
    *mi = saved_mi;
    return COPY_BLOCK;
  }
  // We will restore these after motion compensation.
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    saved_pre[i] = filter_mbd->plane[i].pre[0];
    saved_dst[i] = filter_mbd->plane[i].dst;
  }
  saved_block_refs[0] = filter_mbd->block_refs[0];
  // Set the pointers in the MACROBLOCKD to point to the buffers in the denoiser
  // struct.
  filter_mbd->plane[0].pre[0].buf =
      block_start(denoiser->running_avg_y[frame].y_buffer,
                  denoiser->running_avg_y[frame].y_stride, mi_row, mi_col);
  filter_mbd->plane[0].pre[0].stride = denoiser->running_avg_y[frame].y_stride;
  filter_mbd->plane[1].pre[0].buf =
      block_start(denoiser->running_avg_y[frame].u_buffer,
                  denoiser->running_avg_y[frame].uv_stride, mi_row, mi_col);
  filter_mbd->plane[1].pre[0].stride = denoiser->running_avg_y[frame].uv_stride;
  filter_mbd->plane[2].pre[0].buf =
      block_start(denoiser->running_avg_y[frame].v_buffer,
                  denoiser->running_avg_y[frame].uv_stride, mi_row, mi_col);
  filter_mbd->plane[2].pre[0].stride = denoiser->running_avg_y[frame].uv_stride;
  filter_mbd->plane[0].dst.buf =
      block_start(denoiser->mc_running_avg_y.y_buffer,
                  denoiser->mc_running_avg_y.y_stride, mi_row, mi_col);
  filter_mbd->plane[0].dst.stride = denoiser->mc_running_avg_y.y_stride;
  filter_mbd->plane[1].dst.buf =
      block_start(denoiser->mc_running_avg_y.u_buffer,
                  denoiser->mc_running_avg_y.uv_stride, mi_row, mi_col);
  filter_mbd->plane[1].dst.stride = denoiser->mc_running_avg_y.uv_stride;
  filter_mbd->plane[2].dst.buf =
      block_start(denoiser->mc_running_avg_y.v_buffer,
                  denoiser->mc_running_avg_y.uv_stride, mi_row, mi_col);
  filter_mbd->plane[2].dst.stride = denoiser->mc_running_avg_y.uv_stride;
  set_ref_ptrs(cm, filter_mbd, saved_frame, NONE);
  // Only the luma plane is predicted; chroma is handled elsewhere.
  vp9_build_inter_predictors_sby(filter_mbd, mi_row, mi_col, bs);
  // Restore everything to its original state
  *mi = saved_mi;
  filter_mbd->block_refs[0] = saved_block_refs[0];
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    filter_mbd->plane[i].pre[0] = saved_pre[i];
    filter_mbd->plane[i].dst = saved_dst[i];
  }
  return FILTER_BLOCK;
}
// Top-level per-block denoising entry point.  Computes the motion magnitude
// and (optionally) skin detection for the block, asks
// perform_motion_compensation() whether filtering is possible, runs the
// temporal filter, and finally copies the winning pixels (denoised or
// original) between the source and the INTRA_FRAME running-average buffer.
// The decision is reported through *denoiser_decision.
void vp9_denoiser_denoise(VP9_COMP *cpi, MACROBLOCK *mb, int mi_row, int mi_col,
                          BLOCK_SIZE bs, PICK_MODE_CONTEXT *ctx,
                          VP9_DENOISER_DECISION *denoiser_decision) {
  int mv_col, mv_row;
  int motion_magnitude = 0;
  int zeromv_filter = 0;
  VP9_DENOISER *denoiser = &cpi->denoiser;
  VP9_DENOISER_DECISION decision = COPY_BLOCK;
  YV12_BUFFER_CONFIG avg = denoiser->running_avg_y[INTRA_FRAME];
  YV12_BUFFER_CONFIG mc_avg = denoiser->mc_running_avg_y;
  uint8_t *avg_start = block_start(avg.y_buffer, avg.y_stride, mi_row, mi_col);
  uint8_t *mc_avg_start =
      block_start(mc_avg.y_buffer, mc_avg.y_stride, mi_row, mi_col);
  struct buf_2d src = mb->plane[0].src;
  int is_skin = 0;
  int increase_denoising = 0;
  int consec_zeromv = 0;
  mv_col = ctx->best_sse_mv.as_mv.col;
  mv_row = ctx->best_sse_mv.as_mv.row;
  // Squared magnitude of the best new-mv candidate.
  motion_magnitude = mv_row * mv_row + mv_col * mv_col;
  if (cpi->use_skin_detection && bs <= BLOCK_32X32 &&
      denoiser->denoising_level < kDenHigh) {
    int motion_level = (motion_magnitude < 16) ? 0 : 1;
    // If motion for current block is small/zero, compute consec_zeromv for
    // skin detection (early exit in skin detection is done for large
    // consec_zeromv when current block has small/zero motion).
    consec_zeromv = 0;
    if (motion_level == 0) {
      VP9_COMMON *const cm = &cpi->common;
      int j, i;
      // Loop through the 8x8 sub-blocks.
      const int bw = num_8x8_blocks_wide_lookup[bs];
      const int bh = num_8x8_blocks_high_lookup[bs];
      // Clamp to the frame edge for blocks that overhang it.
      const int xmis = VPXMIN(cm->mi_cols - mi_col, bw);
      const int ymis = VPXMIN(cm->mi_rows - mi_row, bh);
      const int block_index = mi_row * cm->mi_cols + mi_col;
      // Start high and take the minimum over the sub-blocks.
      consec_zeromv = 100;
      for (i = 0; i < ymis; i++) {
        for (j = 0; j < xmis; j++) {
          int bl_index = block_index + i * cm->mi_cols + j;
          consec_zeromv = VPXMIN(cpi->consec_zero_mv[bl_index], consec_zeromv);
          // No need to keep checking 8x8 blocks if any of the sub-blocks
          // has small consec_zeromv (since threshold for no_skin based on
          // zero/small motion in skin detection is high, i.e, > 4).
          if (consec_zeromv < 4) {
            i = ymis;
            j = xmis;
          }
        }
      }
    }
    // TODO(marpan): Compute skin detection over sub-blocks.
    is_skin = vp9_compute_skin_block(
        mb->plane[0].src.buf, mb->plane[1].src.buf, mb->plane[2].src.buf,
        mb->plane[0].src.stride, mb->plane[1].src.stride, bs, consec_zeromv,
        motion_level);
  }
  if (!is_skin && denoiser->denoising_level == kDenHigh) increase_denoising = 1;
  if (denoiser->denoising_level >= kDenLow && !ctx->sb_skip_denoising)
    decision = perform_motion_compensation(
        &cpi->common, denoiser, mb, bs, increase_denoising, mi_row, mi_col, ctx,
        motion_magnitude, is_skin, &zeromv_filter, consec_zeromv,
        cpi->svc.number_spatial_layers, cpi->Source->y_width, cpi->lst_fb_idx,
        cpi->gld_fb_idx, cpi->use_svc);
  if (decision == FILTER_BLOCK) {
    decision = vp9_denoiser_filter(src.buf, src.stride, mc_avg_start,
                                   mc_avg.y_stride, avg_start, avg.y_stride,
                                   increase_denoising, bs, motion_magnitude);
  }
  if (decision == FILTER_BLOCK) {
    // Filtering accepted: overwrite the source with the denoised pixels.
    vpx_convolve_copy(avg_start, avg.y_stride, src.buf, src.stride, NULL, 0, 0,
                      0, 0, num_4x4_blocks_wide_lookup[bs] << 2,
                      num_4x4_blocks_high_lookup[bs] << 2);
  } else {  // COPY_BLOCK
    // Filtering rejected: refresh the running average with the original.
    vpx_convolve_copy(src.buf, src.stride, avg_start, avg.y_stride, NULL, 0, 0,
                      0, 0, num_4x4_blocks_wide_lookup[bs] << 2,
                      num_4x4_blocks_high_lookup[bs] << 2);
  }
  *denoiser_decision = decision;
  if (decision == FILTER_BLOCK && zeromv_filter == 1)
    *denoiser_decision = FILTER_ZEROMV_BLOCK;
}
  367. static void copy_frame(YV12_BUFFER_CONFIG *const dest,
  368. const YV12_BUFFER_CONFIG *const src) {
  369. int r;
  370. const uint8_t *srcbuf = src->y_buffer;
  371. uint8_t *destbuf = dest->y_buffer;
  372. assert(dest->y_width == src->y_width);
  373. assert(dest->y_height == src->y_height);
  374. for (r = 0; r < dest->y_height; ++r) {
  375. memcpy(destbuf, srcbuf, dest->y_width);
  376. destbuf += dest->y_stride;
  377. srcbuf += src->y_stride;
  378. }
  379. }
  380. static void swap_frame_buffer(YV12_BUFFER_CONFIG *const dest,
  381. YV12_BUFFER_CONFIG *const src) {
  382. uint8_t *tmp_buf = dest->y_buffer;
  383. assert(dest->y_width == src->y_width);
  384. assert(dest->y_height == src->y_height);
  385. dest->y_buffer = src->y_buffer;
  386. src->y_buffer = tmp_buf;
  387. }
  388. void vp9_denoiser_update_frame_info(
  389. VP9_DENOISER *denoiser, YV12_BUFFER_CONFIG src, FRAME_TYPE frame_type,
  390. int refresh_alt_ref_frame, int refresh_golden_frame, int refresh_last_frame,
  391. int alt_fb_idx, int gld_fb_idx, int lst_fb_idx, int resized,
  392. int svc_base_is_key) {
  393. // Copy source into denoised reference buffers on KEY_FRAME or
  394. // if the just encoded frame was resized. For SVC, copy source if the base
  395. // spatial layer was key frame.
  396. if (frame_type == KEY_FRAME || resized != 0 || denoiser->reset ||
  397. svc_base_is_key) {
  398. int i;
  399. // Start at 1 so as not to overwrite the INTRA_FRAME
  400. for (i = 1; i < denoiser->num_ref_frames; ++i) {
  401. if (denoiser->running_avg_y[i].buffer_alloc != NULL)
  402. copy_frame(&denoiser->running_avg_y[i], &src);
  403. }
  404. denoiser->reset = 0;
  405. return;
  406. }
  407. // If more than one refresh occurs, must copy frame buffer.
  408. if ((refresh_alt_ref_frame + refresh_golden_frame + refresh_last_frame) > 1) {
  409. if (refresh_alt_ref_frame) {
  410. copy_frame(&denoiser->running_avg_y[alt_fb_idx + 1],
  411. &denoiser->running_avg_y[INTRA_FRAME]);
  412. }
  413. if (refresh_golden_frame) {
  414. copy_frame(&denoiser->running_avg_y[gld_fb_idx + 1],
  415. &denoiser->running_avg_y[INTRA_FRAME]);
  416. }
  417. if (refresh_last_frame) {
  418. copy_frame(&denoiser->running_avg_y[lst_fb_idx + 1],
  419. &denoiser->running_avg_y[INTRA_FRAME]);
  420. }
  421. } else {
  422. if (refresh_alt_ref_frame) {
  423. swap_frame_buffer(&denoiser->running_avg_y[alt_fb_idx + 1],
  424. &denoiser->running_avg_y[INTRA_FRAME]);
  425. }
  426. if (refresh_golden_frame) {
  427. swap_frame_buffer(&denoiser->running_avg_y[gld_fb_idx + 1],
  428. &denoiser->running_avg_y[INTRA_FRAME]);
  429. }
  430. if (refresh_last_frame) {
  431. swap_frame_buffer(&denoiser->running_avg_y[lst_fb_idx + 1],
  432. &denoiser->running_avg_y[INTRA_FRAME]);
  433. }
  434. }
  435. }
  436. void vp9_denoiser_reset_frame_stats(PICK_MODE_CONTEXT *ctx) {
  437. ctx->zeromv_sse = UINT_MAX;
  438. ctx->newmv_sse = UINT_MAX;
  439. ctx->zeromv_lastref_sse = UINT_MAX;
  440. ctx->best_sse_mv.as_int = 0;
  441. }
  442. void vp9_denoiser_update_frame_stats(MODE_INFO *mi, unsigned int sse,
  443. PREDICTION_MODE mode,
  444. PICK_MODE_CONTEXT *ctx) {
  445. if (mi->mv[0].as_int == 0 && sse < ctx->zeromv_sse) {
  446. ctx->zeromv_sse = sse;
  447. ctx->best_zeromv_reference_frame = mi->ref_frame[0];
  448. if (mi->ref_frame[0] == LAST_FRAME) ctx->zeromv_lastref_sse = sse;
  449. }
  450. if (mi->mv[0].as_int != 0 && sse < ctx->newmv_sse) {
  451. ctx->newmv_sse = sse;
  452. ctx->best_sse_inter_mode = mode;
  453. ctx->best_sse_mv = mi->mv[0];
  454. ctx->best_reference_frame = mi->ref_frame[0];
  455. }
  456. }
  457. static int vp9_denoiser_realloc_svc_helper(VP9_COMMON *cm,
  458. VP9_DENOISER *denoiser, int fb_idx) {
  459. int fail = 0;
  460. if (denoiser->running_avg_y[fb_idx].buffer_alloc == NULL) {
  461. fail =
  462. vpx_alloc_frame_buffer(&denoiser->running_avg_y[fb_idx], cm->width,
  463. cm->height, cm->subsampling_x, cm->subsampling_y,
  464. #if CONFIG_VP9_HIGHBITDEPTH
  465. cm->use_highbitdepth,
  466. #endif
  467. VP9_ENC_BORDER_IN_PIXELS, 0);
  468. if (fail) {
  469. vp9_denoiser_free(denoiser);
  470. return 1;
  471. }
  472. }
  473. return 0;
  474. }
  475. int vp9_denoiser_realloc_svc(VP9_COMMON *cm, VP9_DENOISER *denoiser,
  476. int refresh_alt, int refresh_gld, int refresh_lst,
  477. int alt_fb_idx, int gld_fb_idx, int lst_fb_idx) {
  478. int fail = 0;
  479. if (refresh_alt) {
  480. // Increase the frame buffer index by 1 to map it to the buffer index in the
  481. // denoiser.
  482. fail = vp9_denoiser_realloc_svc_helper(cm, denoiser, alt_fb_idx + 1);
  483. if (fail) return 1;
  484. }
  485. if (refresh_gld) {
  486. fail = vp9_denoiser_realloc_svc_helper(cm, denoiser, gld_fb_idx + 1);
  487. if (fail) return 1;
  488. }
  489. if (refresh_lst) {
  490. fail = vp9_denoiser_realloc_svc_helper(cm, denoiser, lst_fb_idx + 1);
  491. if (fail) return 1;
  492. }
  493. return 0;
  494. }
  495. int vp9_denoiser_alloc(VP9_COMMON *cm, int use_svc, VP9_DENOISER *denoiser,
  496. int width, int height, int ssx, int ssy,
  497. #if CONFIG_VP9_HIGHBITDEPTH
  498. int use_highbitdepth,
  499. #endif
  500. int border) {
  501. int i, fail, init_num_ref_frames;
  502. const int legacy_byte_alignment = 0;
  503. assert(denoiser != NULL);
  504. denoiser->num_ref_frames = use_svc ? SVC_REF_FRAMES : NONSVC_REF_FRAMES;
  505. init_num_ref_frames = use_svc ? MAX_REF_FRAMES : NONSVC_REF_FRAMES;
  506. CHECK_MEM_ERROR(
  507. cm, denoiser->running_avg_y,
  508. vpx_calloc(denoiser->num_ref_frames, sizeof(denoiser->running_avg_y[0])));
  509. for (i = 0; i < init_num_ref_frames; ++i) {
  510. fail = vpx_alloc_frame_buffer(&denoiser->running_avg_y[i], width, height,
  511. ssx, ssy,
  512. #if CONFIG_VP9_HIGHBITDEPTH
  513. use_highbitdepth,
  514. #endif
  515. border, legacy_byte_alignment);
  516. if (fail) {
  517. vp9_denoiser_free(denoiser);
  518. return 1;
  519. }
  520. #ifdef OUTPUT_YUV_DENOISED
  521. make_grayscale(&denoiser->running_avg_y[i]);
  522. #endif
  523. }
  524. fail = vpx_alloc_frame_buffer(&denoiser->mc_running_avg_y, width, height, ssx,
  525. ssy,
  526. #if CONFIG_VP9_HIGHBITDEPTH
  527. use_highbitdepth,
  528. #endif
  529. border, legacy_byte_alignment);
  530. if (fail) {
  531. vp9_denoiser_free(denoiser);
  532. return 1;
  533. }
  534. fail = vpx_alloc_frame_buffer(&denoiser->last_source, width, height, ssx, ssy,
  535. #if CONFIG_VP9_HIGHBITDEPTH
  536. use_highbitdepth,
  537. #endif
  538. border, legacy_byte_alignment);
  539. if (fail) {
  540. vp9_denoiser_free(denoiser);
  541. return 1;
  542. }
  543. #ifdef OUTPUT_YUV_DENOISED
  544. make_grayscale(&denoiser->running_avg_y[i]);
  545. #endif
  546. denoiser->frame_buffer_initialized = 1;
  547. denoiser->denoising_level = kDenLow;
  548. denoiser->prev_denoising_level = kDenLow;
  549. denoiser->reset = 0;
  550. return 0;
  551. }
  552. void vp9_denoiser_free(VP9_DENOISER *denoiser) {
  553. int i;
  554. if (denoiser == NULL) {
  555. return;
  556. }
  557. denoiser->frame_buffer_initialized = 0;
  558. for (i = 0; i < denoiser->num_ref_frames; ++i) {
  559. vpx_free_frame_buffer(&denoiser->running_avg_y[i]);
  560. }
  561. vpx_free(denoiser->running_avg_y);
  562. denoiser->running_avg_y = NULL;
  563. vpx_free_frame_buffer(&denoiser->mc_running_avg_y);
  564. vpx_free_frame_buffer(&denoiser->last_source);
  565. }
  566. void vp9_denoiser_set_noise_level(VP9_DENOISER *denoiser, int noise_level) {
  567. denoiser->denoising_level = noise_level;
  568. if (denoiser->denoising_level > kDenLowLow &&
  569. denoiser->prev_denoising_level == kDenLowLow)
  570. denoiser->reset = 1;
  571. else
  572. denoiser->reset = 0;
  573. denoiser->prev_denoising_level = denoiser->denoising_level;
  574. }
  575. // Scale/increase the partition threshold
  576. // for denoiser speed-up.
  577. int64_t vp9_scale_part_thresh(int64_t threshold, VP9_DENOISER_LEVEL noise_level,
  578. int content_state, int temporal_layer_id) {
  579. if ((content_state == kLowSadLowSumdiff) ||
  580. (content_state == kHighSadLowSumdiff) ||
  581. (content_state == kLowVarHighSumdiff) || (noise_level == kDenHigh) ||
  582. (temporal_layer_id != 0)) {
  583. int64_t scaled_thr =
  584. (temporal_layer_id < 2) ? (3 * threshold) >> 1 : (7 * threshold) >> 2;
  585. return scaled_thr;
  586. } else {
  587. return (5 * threshold) >> 2;
  588. }
  589. }
  590. // Scale/increase the ac skip threshold for
  591. // denoiser speed-up.
  592. int64_t vp9_scale_acskip_thresh(int64_t threshold,
  593. VP9_DENOISER_LEVEL noise_level, int abs_sumdiff,
  594. int temporal_layer_id) {
  595. if (noise_level >= kDenLow && abs_sumdiff < 5)
  596. return threshold *=
  597. (noise_level == kDenLow) ? 2 : (temporal_layer_id == 2) ? 10 : 6;
  598. else
  599. return threshold;
  600. }
  601. #ifdef OUTPUT_YUV_DENOISED
  602. static void make_grayscale(YV12_BUFFER_CONFIG *yuv) {
  603. int r, c;
  604. uint8_t *u = yuv->u_buffer;
  605. uint8_t *v = yuv->v_buffer;
  606. for (r = 0; r < yuv->uv_height; ++r) {
  607. for (c = 0; c < yuv->uv_width; ++c) {
  608. u[c] = UINT8_MAX / 2;
  609. v[c] = UINT8_MAX / 2;
  610. }
  611. u += yuv->uv_stride;
  612. v += yuv->uv_stride;
  613. }
  614. }
  615. #endif