/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"

#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_decodeframe.h"

#include "vpx_dsp/vpx_dsp_common.h"
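
// Note: the syntax elements in this file are decoded with the boolean
// arithmetic decoder (vpx_reader) by walking probability trees such as
// vp9_intra_mode_tree. When xd->counts is non-NULL the decoded symbols are
// also tallied so the frame context can be adapted after the frame is
// decoded; a NULL counts pointer (used when backward adaptation is disabled)
// skips that bookkeeping.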

static PREDICTION_MODE read_intra_mode(vpx_reader *r, const vpx_prob *p) {
  return (PREDICTION_MODE)vpx_read_tree(r, vp9_intra_mode_tree, p);
}

static PREDICTION_MODE read_intra_mode_y(VP9_COMMON *cm, MACROBLOCKD *xd,
                                         vpx_reader *r, int size_group) {
  const PREDICTION_MODE y_mode =
      read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
  FRAME_COUNTS *counts = xd->counts;
  if (counts)
    ++counts->y_mode[size_group][y_mode];
  return y_mode;
}

static PREDICTION_MODE read_intra_mode_uv(VP9_COMMON *cm, MACROBLOCKD *xd,
                                          vpx_reader *r,
                                          PREDICTION_MODE y_mode) {
  const PREDICTION_MODE uv_mode = read_intra_mode(r,
                                                  cm->fc->uv_mode_prob[y_mode]);
  FRAME_COUNTS *counts = xd->counts;
  if (counts)
    ++counts->uv_mode[y_mode][uv_mode];
  return uv_mode;
}

static PREDICTION_MODE read_inter_mode(VP9_COMMON *cm, MACROBLOCKD *xd,
                                       vpx_reader *r, int ctx) {
  const int mode = vpx_read_tree(r, vp9_inter_mode_tree,
                                 cm->fc->inter_mode_probs[ctx]);
  FRAME_COUNTS *counts = xd->counts;
  if (counts)
    ++counts->inter_mode[ctx][mode];
  return NEARESTMV + mode;
}

static int read_segment_id(vpx_reader *r, const struct segmentation *seg) {
  return vpx_read_tree(r, vp9_segment_tree, seg->tree_probs);
}
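
// The selected transform size is coded as a truncated unary value over at
// most three binary decisions, each with its own context-dependent
// probability. The first bit separates TX_4X4 from larger sizes; further
// bits are only read while larger sizes are still permitted by max_tx_size.
// For example, with max_tx_size == TX_32X32 the bit patterns 0, 10, 110 and
// 111 decode to TX_4X4, TX_8X8, TX_16X16 and TX_32X32 respectively.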

static TX_SIZE read_selected_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
                                     TX_SIZE max_tx_size, vpx_reader *r) {
  FRAME_COUNTS *counts = xd->counts;
  const int ctx = get_tx_size_context(xd);
  const vpx_prob *tx_probs = get_tx_probs(max_tx_size, ctx, &cm->fc->tx_probs);
  int tx_size = vpx_read(r, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    tx_size += vpx_read(r, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      tx_size += vpx_read(r, tx_probs[2]);
  }

  if (counts)
    ++get_tx_counts(max_tx_size, ctx, &counts->tx)[tx_size];
  return (TX_SIZE)tx_size;
}

static INLINE TX_SIZE read_tx_size(VP9_COMMON *cm, MACROBLOCKD *xd,
                                   int allow_select, vpx_reader *r) {
  TX_MODE tx_mode = cm->tx_mode;
  BLOCK_SIZE bsize = xd->mi[0]->sb_type;
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  if (allow_select && tx_mode == TX_MODE_SELECT && bsize >= BLOCK_8X8)
    return read_selected_tx_size(cm, xd, max_tx_size, r);
  else
    return VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]);
}
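
// A block may span several 8x8 mode-info units, each with its own entry in
// the frame-level segment map. dec_get_segment_id() resolves the ambiguity
// by taking the minimum id over every unit the block covers, and
// set_segment_id() writes a single id back to all of them.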

static int dec_get_segment_id(const VP9_COMMON *cm, const uint8_t *segment_ids,
                              int mi_offset, int x_mis, int y_mis) {
  int x, y, segment_id = INT_MAX;

  for (y = 0; y < y_mis; y++)
    for (x = 0; x < x_mis; x++)
      segment_id =
          VPXMIN(segment_id, segment_ids[mi_offset + y * cm->mi_cols + x]);

  assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);
  return segment_id;
}

static void set_segment_id(VP9_COMMON *cm, int mi_offset,
                           int x_mis, int y_mis, int segment_id) {
  int x, y;

  assert(segment_id >= 0 && segment_id < MAX_SEGMENTS);

  for (y = 0; y < y_mis; y++)
    for (x = 0; x < x_mis; x++)
      cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
}

static void copy_segment_id(const VP9_COMMON *cm,
                            const uint8_t *last_segment_ids,
                            uint8_t *current_segment_ids,
                            int mi_offset, int x_mis, int y_mis) {
  int x, y;

  for (y = 0; y < y_mis; y++)
    for (x = 0; x < x_mis; x++)
      current_segment_ids[mi_offset + y * cm->mi_cols + x] = last_segment_ids ?
          last_segment_ids[mi_offset + y * cm->mi_cols + x] : 0;
}

static int read_intra_segment_id(VP9_COMMON *const cm, int mi_offset,
                                 int x_mis, int y_mis,
                                 vpx_reader *r) {
  struct segmentation *const seg = &cm->seg;
  int segment_id;

  if (!seg->enabled)
    return 0;  // Default for disabled segmentation

  if (!seg->update_map) {
    copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
                    mi_offset, x_mis, y_mis);
    return 0;
  }

  segment_id = read_segment_id(r, seg);
  set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
  return segment_id;
}

static int read_inter_segment_id(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                 int mi_row, int mi_col, vpx_reader *r,
                                 int x_mis, int y_mis) {
  struct segmentation *const seg = &cm->seg;
  MODE_INFO *const mi = xd->mi[0];
  int predicted_segment_id, segment_id;
  const int mi_offset = mi_row * cm->mi_cols + mi_col;

  if (!seg->enabled)
    return 0;  // Default for disabled segmentation

  predicted_segment_id = cm->last_frame_seg_map ?
      dec_get_segment_id(cm, cm->last_frame_seg_map, mi_offset, x_mis, y_mis) :
      0;

  if (!seg->update_map) {
    copy_segment_id(cm, cm->last_frame_seg_map, cm->current_frame_seg_map,
                    mi_offset, x_mis, y_mis);
    return predicted_segment_id;
  }

  if (seg->temporal_update) {
    const vpx_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
    mi->seg_id_predicted = vpx_read(r, pred_prob);
    segment_id = mi->seg_id_predicted ? predicted_segment_id
                                      : read_segment_id(r, seg);
  } else {
    segment_id = read_segment_id(r, seg);
  }
  set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
  return segment_id;
}

static int read_skip(VP9_COMMON *cm, const MACROBLOCKD *xd,
                     int segment_id, vpx_reader *r) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int ctx = vp9_get_skip_context(xd);
    const int skip = vpx_read(r, cm->fc->skip_probs[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts)
      ++counts->skip[ctx][skip];
    return skip;
  }
}
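
// On intra-only (key) frames the y modes are read with fixed, neighbor
// conditioned probabilities: get_y_mode_probs() selects a table from the
// modes of the above and left blocks rather than from the adaptive frame
// context, so no counts are accumulated here. Sub-8x8 blocks carry two or
// four distinct modes in mi->bmi[], and mi->mode is set to the mode of the
// bottom-right 4x4 unit so that neighboring blocks read a consistent value.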

static void read_intra_frame_mode_info(VP9_COMMON *const cm,
                                       MACROBLOCKD *const xd,
                                       int mi_row, int mi_col, vpx_reader *r,
                                       int x_mis, int y_mis) {
  MODE_INFO *const mi = xd->mi[0];
  const MODE_INFO *above_mi = xd->above_mi;
  const MODE_INFO *left_mi = xd->left_mi;
  const BLOCK_SIZE bsize = mi->sb_type;
  int i;
  const int mi_offset = mi_row * cm->mi_cols + mi_col;

  mi->segment_id = read_intra_segment_id(cm, mi_offset, x_mis, y_mis, r);
  mi->skip = read_skip(cm, xd, mi->segment_id, r);
  mi->tx_size = read_tx_size(cm, xd, 1, r);
  mi->ref_frame[0] = INTRA_FRAME;
  mi->ref_frame[1] = NONE;

  switch (bsize) {
    case BLOCK_4X4:
      for (i = 0; i < 4; ++i)
        mi->bmi[i].as_mode =
            read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, i));
      mi->mode = mi->bmi[3].as_mode;
      break;
    case BLOCK_4X8:
      mi->bmi[0].as_mode = mi->bmi[2].as_mode =
          read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
      mi->bmi[1].as_mode = mi->bmi[3].as_mode = mi->mode =
          read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 1));
      break;
    case BLOCK_8X4:
      mi->bmi[0].as_mode = mi->bmi[1].as_mode =
          read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 0));
      mi->bmi[2].as_mode = mi->bmi[3].as_mode = mi->mode =
          read_intra_mode(r, get_y_mode_probs(mi, above_mi, left_mi, 2));
      break;
    default:
      mi->mode = read_intra_mode(r,
                                 get_y_mode_probs(mi, above_mi, left_mi, 0));
  }

  mi->uv_mode = read_intra_mode(r, vp9_kf_uv_mode_prob[mi->mode]);
}
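
// A motion vector component is coded in 1/8-pel units as a sign bit, a
// magnitude class, the class-dependent integer bits d, a 2-bit fractional
// part fr (quarter pels) and an optional high-precision bit hp (eighth
// pels). The decoded magnitude is
//   mag = class_base + ((d << 3) | (fr << 1) | hp) + 1,
// where class_base is 0 for MV_CLASS_0 and CLASS0_SIZE << (mv_class + 2)
// otherwise. For example, class 0 with d = 1, fr = 3 and hp = 1 yields
// ((1 << 3) | (3 << 1) | 1) + 1 = 16, i.e. an offset of two full pels.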

static int read_mv_component(vpx_reader *r,
                             const nmv_component *mvcomp, int usehp) {
  int mag, d, fr, hp;
  const int sign = vpx_read(r, mvcomp->sign);
  const int mv_class = vpx_read_tree(r, vp9_mv_class_tree, mvcomp->classes);
  const int class0 = mv_class == MV_CLASS_0;

  // Integer part
  if (class0) {
    d = vpx_read_tree(r, vp9_mv_class0_tree, mvcomp->class0);
    mag = 0;
  } else {
    int i;
    const int n = mv_class + CLASS0_BITS - 1;  // number of bits
    d = 0;
    for (i = 0; i < n; ++i)
      d |= vpx_read(r, mvcomp->bits[i]) << i;
    mag = CLASS0_SIZE << (mv_class + 2);
  }

  // Fractional part
  fr = vpx_read_tree(r, vp9_mv_fp_tree, class0 ? mvcomp->class0_fp[d]
                                               : mvcomp->fp);

  // High precision part (if hp is not used, it defaults to 1)
  hp = usehp ? vpx_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp)
             : 1;

  // Result
  mag += ((d << 3) | (fr << 1) | hp) + 1;
  return sign ? -mag : mag;
}
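
// The row/col pair shares a jointly coded type that says which components
// are nonzero, so a purely horizontal or vertical motion vector spends bits
// on only one component. The decoded values are deltas added to the
// reference MV, and eighth-pel precision is used only when the frame allows
// it and use_mv_hp() approves the reference MV.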

static INLINE void read_mv(vpx_reader *r, MV *mv, const MV *ref,
                           const nmv_context *ctx,
                           nmv_context_counts *counts, int allow_hp) {
  const MV_JOINT_TYPE joint_type =
      (MV_JOINT_TYPE)vpx_read_tree(r, vp9_mv_joint_tree, ctx->joints);
  const int use_hp = allow_hp && use_mv_hp(ref);
  MV diff = {0, 0};

  if (mv_joint_vertical(joint_type))
    diff.row = read_mv_component(r, &ctx->comps[0], use_hp);

  if (mv_joint_horizontal(joint_type))
    diff.col = read_mv_component(r, &ctx->comps[1], use_hp);

  vp9_inc_mv(&diff, counts);

  mv->row = ref->row + diff.row;
  mv->col = ref->col + diff.col;
}

static REFERENCE_MODE read_block_reference_mode(VP9_COMMON *cm,
                                                const MACROBLOCKD *xd,
                                                vpx_reader *r) {
  if (cm->reference_mode == REFERENCE_MODE_SELECT) {
    const int ctx = vp9_get_reference_mode_context(cm, xd);
    const REFERENCE_MODE mode =
        (REFERENCE_MODE)vpx_read(r, cm->fc->comp_inter_prob[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts)
      ++counts->comp_inter[ctx][mode];
    return mode;  // SINGLE_REFERENCE or COMPOUND_REFERENCE
  } else {
    return cm->reference_mode;
  }
}

// Read the reference frame(s)
static void read_ref_frames(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            vpx_reader *r,
                            int segment_id, MV_REFERENCE_FRAME ref_frame[2]) {
  FRAME_CONTEXT *const fc = cm->fc;
  FRAME_COUNTS *counts = xd->counts;

  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    ref_frame[0] = (MV_REFERENCE_FRAME)get_segdata(&cm->seg, segment_id,
                                                   SEG_LVL_REF_FRAME);
    ref_frame[1] = NONE;
  } else {
    const REFERENCE_MODE mode = read_block_reference_mode(cm, xd, r);
    // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
    if (mode == COMPOUND_REFERENCE) {
      const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
      const int ctx = vp9_get_pred_context_comp_ref_p(cm, xd);
      const int bit = vpx_read(r, fc->comp_ref_prob[ctx]);
      if (counts)
        ++counts->comp_ref[ctx][bit];
      ref_frame[idx] = cm->comp_fixed_ref;
      ref_frame[!idx] = cm->comp_var_ref[bit];
    } else if (mode == SINGLE_REFERENCE) {
      const int ctx0 = vp9_get_pred_context_single_ref_p1(xd);
      const int bit0 = vpx_read(r, fc->single_ref_prob[ctx0][0]);
      if (counts)
        ++counts->single_ref[ctx0][0][bit0];
      if (bit0) {
        const int ctx1 = vp9_get_pred_context_single_ref_p2(xd);
        const int bit1 = vpx_read(r, fc->single_ref_prob[ctx1][1]);
        if (counts)
          ++counts->single_ref[ctx1][1][bit1];
        ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
      } else {
        ref_frame[0] = LAST_FRAME;
      }

      ref_frame[1] = NONE;
    } else {
      assert(0 && "Invalid prediction mode.");
    }
  }
}

// TODO(slavarnway): Move this decoder version of
// vp9_get_pred_context_switchable_interp() to vp9_pred_common.h and update the
// encoder.
//
// Returns a context number for the given MB prediction signal
static int dec_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
  // Note:
  // The mode info data structure has a one element border above and to the
  // left of the entries corresponding to real macroblocks.
  // The prediction flags in these dummy entries are initialized to 0.
  const MODE_INFO *const left_mi = xd->left_mi;
  const int left_type = left_mi ? left_mi->interp_filter : SWITCHABLE_FILTERS;
  const MODE_INFO *const above_mi = xd->above_mi;
  const int above_type = above_mi ? above_mi->interp_filter
                                  : SWITCHABLE_FILTERS;

  if (left_type == above_type)
    return left_type;
  else if (left_type == SWITCHABLE_FILTERS)
    return above_type;
  else if (above_type == SWITCHABLE_FILTERS)
    return left_type;
  else
    return SWITCHABLE_FILTERS;
}

static INLINE INTERP_FILTER read_switchable_interp_filter(
    VP9_COMMON *const cm, MACROBLOCKD *const xd,
    vpx_reader *r) {
  const int ctx = dec_get_pred_context_switchable_interp(xd);
  const INTERP_FILTER type =
      (INTERP_FILTER)vpx_read_tree(r, vp9_switchable_interp_tree,
                                   cm->fc->switchable_interp_prob[ctx]);
  FRAME_COUNTS *counts = xd->counts;
  if (counts)
    ++counts->switchable_interp[ctx][type];
  return type;
}

static void read_intra_block_mode_info(VP9_COMMON *const cm,
                                       MACROBLOCKD *const xd, MODE_INFO *mi,
                                       vpx_reader *r) {
  const BLOCK_SIZE bsize = mi->sb_type;
  int i;

  switch (bsize) {
    case BLOCK_4X4:
      for (i = 0; i < 4; ++i)
        mi->bmi[i].as_mode = read_intra_mode_y(cm, xd, r, 0);
      mi->mode = mi->bmi[3].as_mode;
      break;
    case BLOCK_4X8:
      mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd,
                                                                  r, 0);
      mi->bmi[1].as_mode = mi->bmi[3].as_mode = mi->mode =
          read_intra_mode_y(cm, xd, r, 0);
      break;
    case BLOCK_8X4:
      mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd,
                                                                  r, 0);
      mi->bmi[2].as_mode = mi->bmi[3].as_mode = mi->mode =
          read_intra_mode_y(cm, xd, r, 0);
      break;
    default:
      mi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]);
  }

  mi->uv_mode = read_intra_mode_uv(cm, xd, r, mi->mode);

  // Initialize interp_filter here so we do not have to check for inter block
  // modes in dec_get_pred_context_switchable_interp()
  mi->interp_filter = SWITCHABLE_FILTERS;

  mi->ref_frame[0] = INTRA_FRAME;
  mi->ref_frame[1] = NONE;
}

static INLINE int is_mv_valid(const MV *mv) {
  return mv->row > MV_LOW && mv->row < MV_UPP &&
         mv->col > MV_LOW && mv->col < MV_UPP;
}

static INLINE void copy_mv_pair(int_mv *dst, const int_mv *src) {
  memcpy(dst, src, sizeof(*dst) * 2);
}

static INLINE void zero_mv_pair(int_mv *dst) {
  memset(dst, 0, sizeof(*dst) * 2);
}
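
// Fills mv[] for the block according to the coded prediction mode and
// returns 1 on success. Only NEWMV actually reads MV residuals from the
// bitstream (one per reference); NEARESTMV/NEARMV copy the already derived
// candidate, and ZEROMV clears the pair. A return value of 0 means a decoded
// vector fell outside the legal (MV_LOW, MV_UPP) range, which the callers
// fold into xd->corrupted.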

static INLINE int assign_mv(VP9_COMMON *cm, MACROBLOCKD *xd,
                            PREDICTION_MODE mode,
                            int_mv mv[2], int_mv ref_mv[2],
                            int_mv near_nearest_mv[2],
                            int is_compound, int allow_hp, vpx_reader *r) {
  int i;
  int ret = 1;

  switch (mode) {
    case NEWMV: {
      FRAME_COUNTS *counts = xd->counts;
      nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
      for (i = 0; i < 1 + is_compound; ++i) {
        read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts,
                allow_hp);
        ret = ret && is_mv_valid(&mv[i].as_mv);
      }
      break;
    }
    case NEARMV:
    case NEARESTMV: {
      copy_mv_pair(mv, near_nearest_mv);
      break;
    }
    case ZEROMV: {
      zero_mv_pair(mv);
      break;
    }
    default: {
      return 0;
    }
  }
  return ret;
}

static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               int segment_id, vpx_reader *r) {
  if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
  } else {
    const int ctx = get_intra_inter_context(xd);
    const int is_inter = vpx_read(r, cm->fc->intra_inter_prob[ctx]);
    FRAME_COUNTS *counts = xd->counts;
    if (counts)
      ++counts->intra_inter[ctx][is_inter];
    return is_inter;
  }
}

static void dec_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *best_mv,
                                  int refmv_count) {
  int i;

  // Make sure all the candidates are properly clamped etc
  for (i = 0; i < refmv_count; ++i) {
    lower_mv_precision(&mvlist[i].as_mv, allow_hp);
    *best_mv = mvlist[i];
  }
}

static void fpm_sync(void *const data, int mi_row) {
  VP9Decoder *const pbi = (VP9Decoder *)data;
  vp9_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
                       mi_row << MI_BLOCK_SIZE_LOG2);
}

// This macro is used to add a motion vector to the mv_ref list if it isn't
// already in the list. If it's the second motion vector or early_break
// it will also skip all additional processing and jump to Done!
// Note that both macros reference early_break, a local variable defined in
// the enclosing function.
#define ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done) \
  do { \
    if (refmv_count) { \
      if ((mv).as_int != (mv_ref_list)[0].as_int) { \
        (mv_ref_list)[(refmv_count)] = (mv); \
        refmv_count++; \
        goto Done; \
      } \
    } else { \
      (mv_ref_list)[(refmv_count)++] = (mv); \
      if (early_break) \
        goto Done; \
    } \
  } while (0)

// If either reference frame is different, not INTRA, and they
// are different from each other, scale and add the mv to our list.
#define IF_DIFF_REF_FRAME_ADD_MV_EB(mbmi, ref_frame, ref_sign_bias, \
                                    refmv_count, mv_ref_list, Done) \
  do { \
    if (is_inter_block(mbmi)) { \
      if ((mbmi)->ref_frame[0] != ref_frame) \
        ADD_MV_REF_LIST_EB(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \
                           refmv_count, mv_ref_list, Done); \
      if (has_second_ref(mbmi) && \
          (mbmi)->ref_frame[1] != ref_frame && \
          (mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) \
        ADD_MV_REF_LIST_EB(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \
                           refmv_count, mv_ref_list, Done); \
    } \
  } while (0)

// This function searches the neighborhood of a given MB/SB
// to try and find candidate reference vectors.
static int dec_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                            PREDICTION_MODE mode, MV_REFERENCE_FRAME ref_frame,
                            const POSITION *const mv_ref_search,
                            int_mv *mv_ref_list,
                            int mi_row, int mi_col, int block, int is_sub8x8,
                            find_mv_refs_sync sync, void *const data) {
  const int *ref_sign_bias = cm->ref_frame_sign_bias;
  int i, refmv_count = 0;
  int different_ref_found = 0;
  const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ?
      cm->prev_frame->mvs + mi_row * cm->mi_cols + mi_col : NULL;
  const TileInfo *const tile = &xd->tile;
  // If mode is nearestmv or newmv (uses nearestmv as a reference) then stop
  // searching after the first mv is found.
  const int early_break = (mode != NEARMV);

  // Blank the reference vector list
  memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);

  i = 0;
  if (is_sub8x8) {
    // If the size < 8x8 we get the mv from the bmi substructure for the
    // nearest two blocks.
    for (i = 0; i < 2; ++i) {
      const POSITION *const mv_ref = &mv_ref_search[i];
      if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
        const MODE_INFO *const candidate_mi =
            xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
        different_ref_found = 1;

        if (candidate_mi->ref_frame[0] == ref_frame)
          ADD_MV_REF_LIST_EB(
              get_sub_block_mv(candidate_mi, 0, mv_ref->col, block),
              refmv_count, mv_ref_list, Done);
        else if (candidate_mi->ref_frame[1] == ref_frame)
          ADD_MV_REF_LIST_EB(
              get_sub_block_mv(candidate_mi, 1, mv_ref->col, block),
              refmv_count, mv_ref_list, Done);
      }
    }
  }

  // Check the rest of the neighbors in much the same way
  // as before except we don't need to keep track of sub blocks or
  // mode counts.
  for (; i < MVREF_NEIGHBOURS; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate =
          xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
      different_ref_found = 1;

      if (candidate->ref_frame[0] == ref_frame)
        ADD_MV_REF_LIST_EB(candidate->mv[0], refmv_count, mv_ref_list, Done);
      else if (candidate->ref_frame[1] == ref_frame)
        ADD_MV_REF_LIST_EB(candidate->mv[1], refmv_count, mv_ref_list, Done);
    }
  }

  // TODO(hkuang): Remove this sync after fixing pthread_cond_broadcast
  // on windows platform. The sync here is unnecessary if use_prev_frame_mvs
  // is 0. But after removing it, there will be hang in the unit test on windows
  // due to several threads waiting for a thread's signal.
#if defined(_WIN32) && !HAVE_PTHREAD_H
  if (cm->frame_parallel_decode && sync != NULL) {
    sync(data, mi_row);
  }
#endif

  // Check the last frame's mode and mv info.
  if (prev_frame_mvs) {
    // Synchronize here for frame parallel decode if sync function is provided.
    if (cm->frame_parallel_decode && sync != NULL) {
      sync(data, mi_row);
    }

    if (prev_frame_mvs->ref_frame[0] == ref_frame) {
      ADD_MV_REF_LIST_EB(prev_frame_mvs->mv[0], refmv_count, mv_ref_list, Done);
    } else if (prev_frame_mvs->ref_frame[1] == ref_frame) {
      ADD_MV_REF_LIST_EB(prev_frame_mvs->mv[1], refmv_count, mv_ref_list, Done);
    }
  }

  // Since we couldn't find 2 mvs from the same reference frame
  // go back through the neighbors and find motion vectors from
  // different reference frames.
  if (different_ref_found) {
    for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
      const POSITION *mv_ref = &mv_ref_search[i];
      if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
        const MODE_INFO *const candidate =
            xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];

        // If the candidate is INTRA we don't want to consider its mv.
        IF_DIFF_REF_FRAME_ADD_MV_EB(candidate, ref_frame, ref_sign_bias,
                                    refmv_count, mv_ref_list, Done);
      }
    }
  }

  // Since we still don't have a candidate we'll try the last frame.
  if (prev_frame_mvs) {
    if (prev_frame_mvs->ref_frame[0] != ref_frame &&
        prev_frame_mvs->ref_frame[0] > INTRA_FRAME) {
      int_mv mv = prev_frame_mvs->mv[0];
      if (ref_sign_bias[prev_frame_mvs->ref_frame[0]] !=
          ref_sign_bias[ref_frame]) {
        mv.as_mv.row *= -1;
        mv.as_mv.col *= -1;
      }
      ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done);
    }

    if (prev_frame_mvs->ref_frame[1] > INTRA_FRAME &&
        prev_frame_mvs->ref_frame[1] != ref_frame &&
        prev_frame_mvs->mv[1].as_int != prev_frame_mvs->mv[0].as_int) {
      int_mv mv = prev_frame_mvs->mv[1];
      if (ref_sign_bias[prev_frame_mvs->ref_frame[1]] !=
          ref_sign_bias[ref_frame]) {
        mv.as_mv.row *= -1;
        mv.as_mv.col *= -1;
      }
      ADD_MV_REF_LIST_EB(mv, refmv_count, mv_ref_list, Done);
    }
  }

  if (mode == NEARMV)
    refmv_count = MAX_MV_REF_CANDIDATES;
  else
    // we only care about the nearestmv for the remaining modes
    refmv_count = 1;

 Done:
  // Clamp vectors
  for (i = 0; i < refmv_count; ++i)
    clamp_mv_ref(&mv_ref_list[i].as_mv, xd);

  return refmv_count;
}
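
// For sub-8x8 partitions, MV candidates for the later 4x4 blocks in an 8x8
// unit are derived from the MVs already decoded for the earlier blocks:
// with NEARESTMV, blocks 1 and 2 reuse block 0's MV and block 3 reuses
// block 2's; with NEARMV, the first candidate (earlier blocks first, then
// the spatial list) that differs from that MV is chosen, defaulting to zero.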

static void append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      const POSITION *const mv_ref_search,
                                      PREDICTION_MODE b_mode, int block,
                                      int ref, int mi_row, int mi_col,
                                      int_mv *best_sub8x8) {
  int_mv mv_list[MAX_MV_REF_CANDIDATES];
  MODE_INFO *const mi = xd->mi[0];
  b_mode_info *bmi = mi->bmi;
  int n;
  int refmv_count;

  assert(MAX_MV_REF_CANDIDATES == 2);

  refmv_count = dec_find_mv_refs(cm, xd, b_mode, mi->ref_frame[ref],
                                 mv_ref_search, mv_list, mi_row, mi_col, block,
                                 1, NULL, NULL);

  switch (block) {
    case 0:
      best_sub8x8->as_int = mv_list[refmv_count - 1].as_int;
      break;
    case 1:
    case 2:
      if (b_mode == NEARESTMV) {
        best_sub8x8->as_int = bmi[0].as_mv[ref].as_int;
      } else {
        best_sub8x8->as_int = 0;
        for (n = 0; n < refmv_count; ++n)
          if (bmi[0].as_mv[ref].as_int != mv_list[n].as_int) {
            best_sub8x8->as_int = mv_list[n].as_int;
            break;
          }
      }
      break;
    case 3:
      if (b_mode == NEARESTMV) {
        best_sub8x8->as_int = bmi[2].as_mv[ref].as_int;
      } else {
        int_mv candidates[2 + MAX_MV_REF_CANDIDATES];
        candidates[0] = bmi[1].as_mv[ref];
        candidates[1] = bmi[0].as_mv[ref];
        candidates[2] = mv_list[0];
        candidates[3] = mv_list[1];

        best_sub8x8->as_int = 0;
        for (n = 0; n < 2 + MAX_MV_REF_CANDIDATES; ++n)
          if (bmi[2].as_mv[ref].as_int != candidates[n].as_int) {
            best_sub8x8->as_int = candidates[n].as_int;
            break;
          }
      }
      break;
    default:
      assert(0 && "Invalid block index.");
  }
}

static uint8_t get_mode_context(const VP9_COMMON *cm, const MACROBLOCKD *xd,
                                const POSITION *const mv_ref_search,
                                int mi_row, int mi_col) {
  int i;
  int context_counter = 0;
  const TileInfo *const tile = &xd->tile;

  // Get mode count from nearest 2 blocks
  for (i = 0; i < 2; ++i) {
    const POSITION *const mv_ref = &mv_ref_search[i];
    if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
      const MODE_INFO *const candidate =
          xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
      // Keep counts for entropy encoding.
      context_counter += mode_2_counter[candidate->mode];
    }
  }

  return counter_to_context[context_counter];
}
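
// Decoding order for an inter block: reference frame(s), inter mode, MV
// reference candidates, interpolation filter, then the MVs themselves.
// Blocks smaller than 8x8 repeat the mode/MV decode per 4x4 sub-block and
// finally mirror the last sub-block's mode and MV into mi so that
// neighboring blocks see consistent prediction info.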

static void read_inter_block_mode_info(VP9Decoder *const pbi,
                                       MACROBLOCKD *const xd,
                                       MODE_INFO *const mi,
                                       int mi_row, int mi_col, vpx_reader *r) {
  VP9_COMMON *const cm = &pbi->common;
  const BLOCK_SIZE bsize = mi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  int_mv best_ref_mvs[2];
  int ref, is_compound;
  uint8_t inter_mode_ctx;
  const POSITION *const mv_ref_search = mv_ref_blocks[bsize];

  read_ref_frames(cm, xd, r, mi->segment_id, mi->ref_frame);
  is_compound = has_second_ref(mi);
  inter_mode_ctx = get_mode_context(cm, xd, mv_ref_search, mi_row, mi_col);

  if (segfeature_active(&cm->seg, mi->segment_id, SEG_LVL_SKIP)) {
    mi->mode = ZEROMV;
    if (bsize < BLOCK_8X8) {
      vpx_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
                         "Invalid usage of segment feature on small blocks");
      return;
    }
  } else {
    if (bsize >= BLOCK_8X8)
      mi->mode = read_inter_mode(cm, xd, r, inter_mode_ctx);
    else
      // Sub 8x8 blocks use the nearestmv as a ref_mv if the b_mode is NEWMV.
      // Setting mode to NEARESTMV forces the search to stop after the
      // nearestmv has been found. After b_modes have been read, mode will be
      // overwritten by the last b_mode.
      mi->mode = NEARESTMV;

    if (mi->mode != ZEROMV) {
      for (ref = 0; ref < 1 + is_compound; ++ref) {
        int_mv tmp_mvs[MAX_MV_REF_CANDIDATES];
        const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
        int refmv_count;

        refmv_count = dec_find_mv_refs(cm, xd, mi->mode, frame, mv_ref_search,
                                       tmp_mvs, mi_row, mi_col, -1, 0,
                                       fpm_sync, (void *)pbi);

        dec_find_best_ref_mvs(allow_hp, tmp_mvs, &best_ref_mvs[ref],
                              refmv_count);
      }
    }
  }

  mi->interp_filter = (cm->interp_filter == SWITCHABLE)
                          ? read_switchable_interp_filter(cm, xd, r)
                          : cm->interp_filter;

  if (bsize < BLOCK_8X8) {
    const int num_4x4_w = 1 << xd->bmode_blocks_wl;
    const int num_4x4_h = 1 << xd->bmode_blocks_hl;
    int idx, idy;
    PREDICTION_MODE b_mode;
    int_mv best_sub8x8[2];
    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int j = idy * 2 + idx;
        b_mode = read_inter_mode(cm, xd, r, inter_mode_ctx);

        if (b_mode == NEARESTMV || b_mode == NEARMV) {
          for (ref = 0; ref < 1 + is_compound; ++ref)
            append_sub8x8_mvs_for_idx(cm, xd, mv_ref_search, b_mode, j, ref,
                                      mi_row, mi_col, &best_sub8x8[ref]);
        }

        if (!assign_mv(cm, xd, b_mode, mi->bmi[j].as_mv, best_ref_mvs,
                       best_sub8x8, is_compound, allow_hp, r)) {
          xd->corrupted |= 1;
          break;
        }

        if (num_4x4_h == 2)
          mi->bmi[j + 2] = mi->bmi[j];
        if (num_4x4_w == 2)
          mi->bmi[j + 1] = mi->bmi[j];
      }
    }

    mi->mode = b_mode;

    copy_mv_pair(mi->mv, mi->bmi[3].as_mv);
  } else {
    xd->corrupted |= !assign_mv(cm, xd, mi->mode, mi->mv, best_ref_mvs,
                                best_ref_mvs, is_compound, allow_hp, r);
  }
}

static void read_inter_frame_mode_info(VP9Decoder *const pbi,
                                       MACROBLOCKD *const xd,
                                       int mi_row, int mi_col, vpx_reader *r,
                                       int x_mis, int y_mis) {
  VP9_COMMON *const cm = &pbi->common;
  MODE_INFO *const mi = xd->mi[0];
  int inter_block;

  mi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r, x_mis,
                                         y_mis);
  mi->skip = read_skip(cm, xd, mi->segment_id, r);
  inter_block = read_is_inter_block(cm, xd, mi->segment_id, r);
  mi->tx_size = read_tx_size(cm, xd, !mi->skip || !inter_block, r);

  if (inter_block)
    read_inter_block_mode_info(pbi, xd, mi, mi_row, mi_col, r);
  else
    read_intra_block_mode_info(cm, xd, mi, r);
}

static INLINE void copy_ref_frame_pair(MV_REFERENCE_FRAME *dst,
                                       const MV_REFERENCE_FRAME *src) {
  memcpy(dst, src, sizeof(*dst) * 2);
}
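
// Entry point for per-block mode info parsing. On inter frames the decoded
// reference frames and MVs are also copied into cm->cur_frame->mvs for every
// covered mode-info unit; that buffer becomes prev_frame->mvs when the next
// frame is decoded and feeds the temporal candidates in dec_find_mv_refs().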

void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
                        int mi_row, int mi_col, vpx_reader *r,
                        int x_mis, int y_mis) {
  VP9_COMMON *const cm = &pbi->common;
  MODE_INFO *const mi = xd->mi[0];
  MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;

  if (frame_is_intra_only(cm)) {
    read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r, x_mis, y_mis);
  } else {
    read_inter_frame_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);

    for (h = 0; h < y_mis; ++h) {
      for (w = 0; w < x_mis; ++w) {
        MV_REF *const mv = frame_mvs + w;
        copy_ref_frame_pair(mv->ref_frame, mi->ref_frame);
        copy_mv_pair(mv->mv, mi->mv);
      }
      frame_mvs += cm->mi_cols;
    }
  }
#if CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
  if ((xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) &&
      (xd->above_mi == NULL || xd->left_mi == NULL) &&
      !is_inter_block(mi) && need_top_left[mi->uv_mode])
    assert(0);
#endif  // CONFIG_BETTER_HW_COMPATIBILITY && CONFIG_VP9_HIGHBITDEPTH
}