- /*
- * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
- #include "./vpx_dsp_rtcd.h"
- #include "vpx_ports/mem.h"
- #include "vpx_dsp/mips/macros_msa.h"
- #include "vpx_dsp/variance.h"
- static const uint8_t bilinear_filters_msa[8][2] = {
- { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
- { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 },
- };
- #define CALC_MSE_AVG_B(src, ref, var, sub) \
- { \
- v16u8 src_l0_m, src_l1_m; \
- v8i16 res_l0_m, res_l1_m; \
- \
- ILVRL_B2_UB(src, ref, src_l0_m, src_l1_m); \
- HSUB_UB2_SH(src_l0_m, src_l1_m, res_l0_m, res_l1_m); \
- DPADD_SH2_SW(res_l0_m, res_l1_m, res_l0_m, res_l1_m, var, var); \
- \
- sub += res_l0_m + res_l1_m; \
- }
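- /* CALC_MSE_AVG_B above widens the src/ref bytes, accumulates the squared
- * differences into 'var' and the signed differences into 'sub', giving the
- * SSE and the difference sum in a single pass. */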
- #define VARIANCE_WxH(sse, diff, shift) sse - (((uint32_t)diff * diff) >> shift)
- #define VARIANCE_LARGE_WxH(sse, diff, shift) \
- sse - (((int64_t)diff * diff) >> shift)
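- /* variance = sse - (sum * sum) / (width * height), with shift set to
- * log2(width * height). The _LARGE_ form keeps the squared sum in 64 bits
- * to avoid overflow for blocks of 512 pixels and larger. */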
- static uint32_t avg_sse_diff_4width_msa(const uint8_t *src_ptr,
- int32_t src_stride,
- const uint8_t *ref_ptr,
- int32_t ref_stride,
- const uint8_t *sec_pred, int32_t height,
- int32_t *diff) {
- int32_t ht_cnt;
- uint32_t src0, src1, src2, src3;
- uint32_t ref0, ref1, ref2, ref3;
- v16u8 pred, src = { 0 };
- v16u8 ref = { 0 };
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- for (ht_cnt = (height >> 2); ht_cnt--;) {
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- LW4(src_ptr, src_stride, src0, src1, src2, src3);
- src_ptr += (4 * src_stride);
- LW4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
- ref_ptr += (4 * ref_stride);
- INSERT_W4_UB(src0, src1, src2, src3, src);
- INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
- src = __msa_aver_u_b(src, pred);
- CALC_MSE_AVG_B(src, ref, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t avg_sse_diff_8width_msa(const uint8_t *src_ptr,
- int32_t src_stride,
- const uint8_t *ref_ptr,
- int32_t ref_stride,
- const uint8_t *sec_pred, int32_t height,
- int32_t *diff) {
- int32_t ht_cnt;
- v16u8 src0, src1, src2, src3;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 pred0, pred1;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- for (ht_cnt = (height >> 2); ht_cnt--;) {
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB4(src_ptr, src_stride, src0, src1, src2, src3);
- src_ptr += (4 * src_stride);
- LD_UB4(ref_ptr, ref_stride, ref0, ref1, ref2, ref3);
- ref_ptr += (4 * ref_stride);
- PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2, src0, src1,
- ref0, ref1);
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg);
- CALC_MSE_AVG_B(src1, ref1, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t avg_sse_diff_16width_msa(const uint8_t *src_ptr,
- int32_t src_stride,
- const uint8_t *ref_ptr,
- int32_t ref_stride,
- const uint8_t *sec_pred,
- int32_t height, int32_t *diff) {
- int32_t ht_cnt;
- v16u8 src, ref, pred;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- for (ht_cnt = (height >> 2); ht_cnt--;) {
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- src = LD_UB(src_ptr);
- src_ptr += src_stride;
- ref = LD_UB(ref_ptr);
- ref_ptr += ref_stride;
- src = __msa_aver_u_b(src, pred);
- CALC_MSE_AVG_B(src, ref, var, avg);
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- src = LD_UB(src_ptr);
- src_ptr += src_stride;
- ref = LD_UB(ref_ptr);
- ref_ptr += ref_stride;
- src = __msa_aver_u_b(src, pred);
- CALC_MSE_AVG_B(src, ref, var, avg);
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- src = LD_UB(src_ptr);
- src_ptr += src_stride;
- ref = LD_UB(ref_ptr);
- ref_ptr += ref_stride;
- src = __msa_aver_u_b(src, pred);
- CALC_MSE_AVG_B(src, ref, var, avg);
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- src = LD_UB(src_ptr);
- src_ptr += src_stride;
- ref = LD_UB(ref_ptr);
- ref_ptr += ref_stride;
- src = __msa_aver_u_b(src, pred);
- CALC_MSE_AVG_B(src, ref, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t avg_sse_diff_32width_msa(const uint8_t *src_ptr,
- int32_t src_stride,
- const uint8_t *ref_ptr,
- int32_t ref_stride,
- const uint8_t *sec_pred,
- int32_t height, int32_t *diff) {
- int32_t ht_cnt;
- v16u8 src0, src1, ref0, ref1, pred0, pred1;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- for (ht_cnt = (height >> 2); ht_cnt--;) {
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB2(src_ptr, 16, src0, src1);
- src_ptr += src_stride;
- LD_UB2(ref_ptr, 16, ref0, ref1);
- ref_ptr += ref_stride;
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg);
- CALC_MSE_AVG_B(src1, ref1, var, avg);
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB2(src_ptr, 16, src0, src1);
- src_ptr += src_stride;
- LD_UB2(ref_ptr, 16, ref0, ref1);
- ref_ptr += ref_stride;
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg);
- CALC_MSE_AVG_B(src1, ref1, var, avg);
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB2(src_ptr, 16, src0, src1);
- src_ptr += src_stride;
- LD_UB2(ref_ptr, 16, ref0, ref1);
- ref_ptr += ref_stride;
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg);
- CALC_MSE_AVG_B(src1, ref1, var, avg);
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB2(src_ptr, 16, src0, src1);
- src_ptr += src_stride;
- LD_UB2(ref_ptr, 16, ref0, ref1);
- ref_ptr += ref_stride;
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg);
- CALC_MSE_AVG_B(src1, ref1, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t avg_sse_diff_32x64_msa(const uint8_t *src_ptr,
- int32_t src_stride,
- const uint8_t *ref_ptr,
- int32_t ref_stride,
- const uint8_t *sec_pred, int32_t *diff) {
- int32_t ht_cnt;
- v16u8 src0, src1, ref0, ref1, pred0, pred1;
- v8i16 avg0 = { 0 };
- v8i16 avg1 = { 0 };
- v4i32 vec, var = { 0 };
- for (ht_cnt = 16; ht_cnt--;) {
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB2(src_ptr, 16, src0, src1);
- src_ptr += src_stride;
- LD_UB2(ref_ptr, 16, ref0, ref1);
- ref_ptr += ref_stride;
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg0);
- CALC_MSE_AVG_B(src1, ref1, var, avg1);
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB2(src_ptr, 16, src0, src1);
- src_ptr += src_stride;
- LD_UB2(ref_ptr, 16, ref0, ref1);
- ref_ptr += ref_stride;
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg0);
- CALC_MSE_AVG_B(src1, ref1, var, avg1);
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB2(src_ptr, 16, src0, src1);
- src_ptr += src_stride;
- LD_UB2(ref_ptr, 16, ref0, ref1);
- ref_ptr += ref_stride;
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg0);
- CALC_MSE_AVG_B(src1, ref1, var, avg1);
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB2(src_ptr, 16, src0, src1);
- src_ptr += src_stride;
- LD_UB2(ref_ptr, 16, ref0, ref1);
- ref_ptr += ref_stride;
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg0);
- CALC_MSE_AVG_B(src1, ref1, var, avg1);
- }
- vec = __msa_hadd_s_w(avg0, avg0);
- vec += __msa_hadd_s_w(avg1, avg1);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t avg_sse_diff_64x32_msa(const uint8_t *src_ptr,
- int32_t src_stride,
- const uint8_t *ref_ptr,
- int32_t ref_stride,
- const uint8_t *sec_pred, int32_t *diff) {
- int32_t ht_cnt;
- v16u8 src0, src1, src2, src3;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 pred0, pred1, pred2, pred3;
- v8i16 avg0 = { 0 };
- v8i16 avg1 = { 0 };
- v4i32 vec, var = { 0 };
- for (ht_cnt = 16; ht_cnt--;) {
- LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
- sec_pred += 64;
- LD_UB4(src_ptr, 16, src0, src1, src2, src3);
- src_ptr += src_stride;
- LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
- ref_ptr += ref_stride;
- AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1,
- src2, src3);
- CALC_MSE_AVG_B(src0, ref0, var, avg0);
- CALC_MSE_AVG_B(src2, ref2, var, avg0);
- CALC_MSE_AVG_B(src1, ref1, var, avg1);
- CALC_MSE_AVG_B(src3, ref3, var, avg1);
- LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
- sec_pred += 64;
- LD_UB4(src_ptr, 16, src0, src1, src2, src3);
- src_ptr += src_stride;
- LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
- ref_ptr += ref_stride;
- AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1,
- src2, src3);
- CALC_MSE_AVG_B(src0, ref0, var, avg0);
- CALC_MSE_AVG_B(src2, ref2, var, avg0);
- CALC_MSE_AVG_B(src1, ref1, var, avg1);
- CALC_MSE_AVG_B(src3, ref3, var, avg1);
- }
- vec = __msa_hadd_s_w(avg0, avg0);
- vec += __msa_hadd_s_w(avg1, avg1);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t avg_sse_diff_64x64_msa(const uint8_t *src_ptr,
- int32_t src_stride,
- const uint8_t *ref_ptr,
- int32_t ref_stride,
- const uint8_t *sec_pred, int32_t *diff) {
- int32_t ht_cnt;
- v16u8 src0, src1, src2, src3;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 pred0, pred1, pred2, pred3;
- v8i16 avg0 = { 0 };
- v8i16 avg1 = { 0 };
- v8i16 avg2 = { 0 };
- v8i16 avg3 = { 0 };
- v4i32 vec, var = { 0 };
- for (ht_cnt = 32; ht_cnt--;) {
- LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
- sec_pred += 64;
- LD_UB4(src_ptr, 16, src0, src1, src2, src3);
- src_ptr += src_stride;
- LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
- ref_ptr += ref_stride;
- AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1,
- src2, src3);
- CALC_MSE_AVG_B(src0, ref0, var, avg0);
- CALC_MSE_AVG_B(src1, ref1, var, avg1);
- CALC_MSE_AVG_B(src2, ref2, var, avg2);
- CALC_MSE_AVG_B(src3, ref3, var, avg3);
- LD_UB4(sec_pred, 16, pred0, pred1, pred2, pred3);
- sec_pred += 64;
- LD_UB4(src_ptr, 16, src0, src1, src2, src3);
- src_ptr += src_stride;
- LD_UB4(ref_ptr, 16, ref0, ref1, ref2, ref3);
- ref_ptr += ref_stride;
- AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3, src0, src1,
- src2, src3);
- CALC_MSE_AVG_B(src0, ref0, var, avg0);
- CALC_MSE_AVG_B(src1, ref1, var, avg1);
- CALC_MSE_AVG_B(src2, ref2, var, avg2);
- CALC_MSE_AVG_B(src3, ref3, var, avg3);
- }
- vec = __msa_hadd_s_w(avg0, avg0);
- vec += __msa_hadd_s_w(avg1, avg1);
- vec += __msa_hadd_s_w(avg2, avg2);
- vec += __msa_hadd_s_w(avg3, avg3);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
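- /* The sub_pixel_sse_diff_*width_h_msa helpers below apply a 2-tap horizontal
- * bilinear filter (rounded by FILTER_BITS) to src before accumulating the
- * SSE and difference sum against dst. */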
- static uint32_t sub_pixel_sse_diff_4width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- uint32_t ref0, ref1, ref2, ref3;
- v16u8 filt0, ref = { 0 };
- v16i8 src0, src1, src2, src3;
- v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v8u16 vec0, vec1, vec2, vec3;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src, src_stride, src0, src1, src2, src3);
- src += (4 * src_stride);
- LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
- VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
- VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
- DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
- vec2, vec3);
- SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
- PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1,
- src2, src3);
- ILVEV_W2_SB(src0, src1, src2, src3, src0, src2);
- src0 = (v16i8)__msa_ilvev_d((v2i64)src2, (v2i64)src0);
- CALC_MSE_AVG_B(src0, ref, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_8width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 filt0, out, ref0, ref1, ref2, ref3;
- v16i8 src0, src1, src2, src3;
- v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v8u16 vec0, vec1, vec2, vec3;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src, src_stride, src0, src1, src2, src3);
- src += (4 * src_stride);
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
- VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
- VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
- DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
- vec2, vec3);
- SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
- PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1,
- src2, src3);
- out = (v16u8)__msa_ilvev_d((v2i64)src1, (v2i64)src0);
- CALC_MSE_AVG_B(out, ref0, var, avg);
- out = (v16u8)__msa_ilvev_d((v2i64)src3, (v2i64)src2);
- CALC_MSE_AVG_B(out, ref1, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_16width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v16u8 dst0, dst1, dst2, dst3, filt0;
- v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8u16 out0, out1, out2, out3, out4, out5, out6, out7;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src, src_stride, src0, src2, src4, src6);
- LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
- src += (4 * src_stride);
- LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
- dst += (4 * dst_stride);
- VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
- VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
- VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);
- VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7);
- DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
- out2, out3);
- DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
- out6, out7);
- SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
- SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
- PCKEV_B4_SB(out1, out0, out3, out2, out5, out4, out7, out6, src0, src1,
- src2, src3);
- CALC_MSE_AVG_B(src0, dst0, var, avg);
- CALC_MSE_AVG_B(src1, dst1, var, avg);
- CALC_MSE_AVG_B(src2, dst2, var, avg);
- CALC_MSE_AVG_B(src3, dst3, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_32width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[2];
- for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
- sse += sub_pixel_sse_diff_16width_h_msa(src, src_stride, dst, dst_stride,
- filter, height, &diff0[loop_cnt]);
- src += 16;
- dst += 16;
- }
- *diff = diff0[0] + diff0[1];
- return sse;
- }
- static uint32_t sub_pixel_sse_diff_64width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[4];
- for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
- sse += sub_pixel_sse_diff_16width_h_msa(src, src_stride, dst, dst_stride,
- filter, height, &diff0[loop_cnt]);
- src += 16;
- dst += 16;
- }
- *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
- return sse;
- }
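- /* The *_v_msa variants below apply the 2-tap filter vertically instead,
- * interleaving consecutive rows and carrying one row of context (src0 = src4)
- * across loop iterations. */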
- static uint32_t sub_pixel_sse_diff_4width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- uint32_t ref0, ref1, ref2, ref3;
- v16u8 src0, src1, src2, src3, src4, out;
- v16u8 src10_r, src32_r, src21_r, src43_r;
- v16u8 ref = { 0 };
- v16u8 src2110, src4332;
- v16u8 filt0;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- v8u16 tmp0, tmp1;
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
- ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
- src32_r, src43_r);
- ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
- DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- CALC_MSE_AVG_B(out, ref, var, avg);
- src0 = src4;
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_8width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 ref0, ref1, ref2, ref3;
- v8u16 vec0, vec1, vec2, vec3;
- v8u16 tmp0, tmp1, tmp2, tmp3;
- v16u8 filt0;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
- ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
- vec3);
- DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
- tmp2, tmp3);
- SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg);
- CALC_MSE_AVG_B(src1, ref1, var, avg);
- src0 = src4;
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_16width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 out0, out1, out2, out3;
- v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8u16 tmp0, tmp1, tmp2, tmp3;
- v16u8 filt0;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2);
- ILVL_B2_UB(src1, src0, src2, src1, vec1, vec3);
- DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- ILVR_B2_UB(src3, src2, src4, src3, vec4, vec6);
- ILVL_B2_UB(src3, src2, src4, src3, vec5, vec7);
- DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
- SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
- out1 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
- DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out2 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
- SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
- out3 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
- src0 = src4;
- CALC_MSE_AVG_B(out0, ref0, var, avg);
- CALC_MSE_AVG_B(out1, ref1, var, avg);
- CALC_MSE_AVG_B(out2, ref2, var, avg);
- CALC_MSE_AVG_B(out3, ref3, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_32width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[2];
- for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
- sse += sub_pixel_sse_diff_16width_v_msa(src, src_stride, dst, dst_stride,
- filter, height, &diff0[loop_cnt]);
- src += 16;
- dst += 16;
- }
- *diff = diff0[0] + diff0[1];
- return sse;
- }
- static uint32_t sub_pixel_sse_diff_64width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter, int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[4];
- for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
- sse += sub_pixel_sse_diff_16width_v_msa(src, src_stride, dst, dst_stride,
- filter, height, &diff0[loop_cnt]);
- src += 16;
- dst += 16;
- }
- *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
- return sse;
- }
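- /* The *_hv_msa variants below filter horizontally first (hz_out*), then
- * vertically across those intermediate rows, before the SSE/diff step. */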
- static uint32_t sub_pixel_sse_diff_4width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
- int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- uint32_t ref0, ref1, ref2, ref3;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 out, ref = { 0 };
- v16u8 filt_vt, filt_hz, vec0, vec1;
- v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20 };
- v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4;
- v8u16 tmp0, tmp1;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter_horiz);
- filt_hz = (v16u8)__msa_fill_h(filtval);
- filtval = LH(filter_vert);
- filt_vt = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
- hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, FILTER_BITS);
- hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, FILTER_BITS);
- hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
- hz_out1 = (v8u16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8);
- hz_out3 = (v8u16)__msa_pckod_d((v2i64)hz_out4, (v2i64)hz_out2);
- ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- CALC_MSE_AVG_B(out, ref, var, avg);
- src0 = src4;
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_8width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
- int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 out0, out1;
- v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v8u16 hz_out0, hz_out1;
- v8u16 tmp0, tmp1, tmp2, tmp3;
- v16u8 filt_vt, filt_hz, vec0;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter_horiz);
- filt_hz = (v16u8)__msa_fill_h(filtval);
- filtval = LH(filter_vert);
- filt_vt = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
- hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
- vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
- tmp0 = __msa_dotp_u_h(vec0, filt_vt);
- hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
- vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
- tmp1 = __msa_dotp_u_h(vec0, filt_vt);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
- vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
- tmp2 = __msa_dotp_u_h(vec0, filt_vt);
- hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
- vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
- tmp3 = __msa_dotp_u_h(vec0, filt_vt);
- SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- CALC_MSE_AVG_B(out0, ref0, var, avg);
- CALC_MSE_AVG_B(out1, ref1, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_16width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
- int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 filt_hz, filt_vt, vec0, vec1;
- v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v8u16 hz_out0, hz_out1, hz_out2, hz_out3;
- v8u16 tmp0, tmp1;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter_horiz);
- filt_hz = (v16u8)__msa_fill_h(filtval);
- filtval = LH(filter_vert);
- filt_vt = (v16u8)__msa_fill_h(filtval);
- LD_UB2(src, 8, src0, src1);
- src += src_stride;
- hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
- hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src0, src2, src4, src6);
- LD_UB4(src + 8, src_stride, src1, src3, src5, src7);
- src += (4 * src_stride);
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
- hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
- ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- src0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
- hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
- ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- src1 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
- hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, FILTER_BITS);
- ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- src2 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, FILTER_BITS);
- hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, FILTER_BITS);
- ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- src3 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- CALC_MSE_AVG_B(src0, ref0, var, avg);
- CALC_MSE_AVG_B(src1, ref1, var, avg);
- CALC_MSE_AVG_B(src2, ref2, var, avg);
- CALC_MSE_AVG_B(src3, ref3, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_sse_diff_32width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
- int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[2];
- for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
- sse += sub_pixel_sse_diff_16width_hv_msa(src, src_stride, dst, dst_stride,
- filter_horiz, filter_vert, height,
- &diff0[loop_cnt]);
- src += 16;
- dst += 16;
- }
- *diff = diff0[0] + diff0[1];
- return sse;
- }
- static uint32_t sub_pixel_sse_diff_64width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *filter_horiz, const uint8_t *filter_vert,
- int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[4];
- for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
- sse += sub_pixel_sse_diff_16width_hv_msa(src, src_stride, dst, dst_stride,
- filter_horiz, filter_vert, height,
- &diff0[loop_cnt]);
- src += 16;
- dst += 16;
- }
- *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
- return sse;
- }
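- /* The sub_pixel_avg_sse_diff_* helpers below additionally average the
- * filtered pixels with sec_pred (read as a contiguous buffer whose row
- * stride equals the block width) before comparing against dst; they back
- * the vpx_sub_pixel_avg_variance* entry points defined further down. */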
- static uint32_t sub_pixel_avg_sse_diff_4width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- uint32_t ref0, ref1, ref2, ref3;
- v16u8 out, pred, filt0, ref = { 0 };
- v16i8 src0, src1, src2, src3;
- v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v8u16 vec0, vec1, vec2, vec3;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src, src_stride, src0, src1, src2, src3);
- src += (4 * src_stride);
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
- VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
- VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
- DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
- vec2, vec3);
- SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
- PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1,
- src2, src3);
- ILVEV_W2_SB(src0, src1, src2, src3, src0, src2);
- out = (v16u8)__msa_ilvev_d((v2i64)src2, (v2i64)src0);
- out = __msa_aver_u_b(out, pred);
- CALC_MSE_AVG_B(out, ref, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_avg_sse_diff_8width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 out, pred, filt0;
- v16u8 ref0, ref1, ref2, ref3;
- v16i8 src0, src1, src2, src3;
- v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v8u16 vec0, vec1, vec2, vec3;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src, src_stride, src0, src1, src2, src3);
- src += (4 * src_stride);
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
- VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
- VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
- DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
- vec2, vec3);
- SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
- PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, src0, src1,
- src2, src3);
- out = (v16u8)__msa_ilvev_d((v2i64)src1, (v2i64)src0);
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- out = __msa_aver_u_b(out, pred);
- CALC_MSE_AVG_B(out, ref0, var, avg);
- out = (v16u8)__msa_ilvev_d((v2i64)src3, (v2i64)src2);
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- out = __msa_aver_u_b(out, pred);
- CALC_MSE_AVG_B(out, ref1, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t subpel_avg_ssediff_16w_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff, int32_t width) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16i8 src0, src1, src2, src3, src4, src5, src6, src7;
- v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v16u8 dst0, dst1, dst2, dst3;
- v16u8 tmp0, tmp1, tmp2, tmp3;
- v16u8 pred0, pred1, pred2, pred3, filt0;
- v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8u16 out0, out1, out2, out3, out4, out5, out6, out7;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_SB4(src, src_stride, src0, src2, src4, src6);
- LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
- src += (4 * src_stride);
- LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
- dst += (4 * dst_stride);
- LD_UB4(sec_pred, width, pred0, pred1, pred2, pred3);
- sec_pred += (4 * width);
- VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
- VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
- VSHF_B2_UH(src4, src4, src5, src5, mask, mask, vec4, vec5);
- VSHF_B2_UH(src6, src6, src7, src7, mask, mask, vec6, vec7);
- DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0, out1,
- out2, out3);
- DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, out4, out5,
- out6, out7);
- SRARI_H4_UH(out0, out1, out2, out3, FILTER_BITS);
- SRARI_H4_UH(out4, out5, out6, out7, FILTER_BITS);
- PCKEV_B4_UB(out1, out0, out3, out2, out5, out4, out7, out6, tmp0, tmp1,
- tmp2, tmp3);
- AVER_UB4_UB(tmp0, pred0, tmp1, pred1, tmp2, pred2, tmp3, pred3, tmp0, tmp1,
- tmp2, tmp3);
- CALC_MSE_AVG_B(tmp0, dst0, var, avg);
- CALC_MSE_AVG_B(tmp1, dst1, var, avg);
- CALC_MSE_AVG_B(tmp2, dst2, var, avg);
- CALC_MSE_AVG_B(tmp3, dst3, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_avg_sse_diff_16width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- return subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride,
- sec_pred, filter, height, diff, 16);
- }
- static uint32_t sub_pixel_avg_sse_diff_32width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[2];
- for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
- sse +=
- subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, sec_pred,
- filter, height, &diff0[loop_cnt], 32);
- src += 16;
- dst += 16;
- sec_pred += 16;
- }
- *diff = diff0[0] + diff0[1];
- return sse;
- }
- static uint32_t sub_pixel_avg_sse_diff_64width_h_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[4];
- for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
- sse +=
- subpel_avg_ssediff_16w_h_msa(src, src_stride, dst, dst_stride, sec_pred,
- filter, height, &diff0[loop_cnt], 64);
- src += 16;
- dst += 16;
- sec_pred += 16;
- }
- *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
- return sse;
- }
- static uint32_t sub_pixel_avg_sse_diff_4width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- uint32_t ref0, ref1, ref2, ref3;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 src10_r, src32_r, src21_r, src43_r;
- v16u8 out, pred, ref = { 0 };
- v16u8 src2110, src4332, filt0;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- v8u16 tmp0, tmp1;
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
- ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src10_r, src21_r,
- src32_r, src43_r);
- ILVR_D2_UB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
- DOTP_UB2_UH(src2110, src4332, filt0, filt0, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- out = __msa_aver_u_b(out, pred);
- CALC_MSE_AVG_B(out, ref, var, avg);
- src0 = src4;
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_avg_sse_diff_8width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 pred0, pred1, filt0;
- v8u16 vec0, vec1, vec2, vec3;
- v8u16 tmp0, tmp1, tmp2, tmp3;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
- ILVR_B4_UH(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, vec2,
- vec3);
- DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1,
- tmp2, tmp3);
- SRARI_H4_UH(tmp0, tmp1, tmp2, tmp3, FILTER_BITS);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
- AVER_UB2_UB(src0, pred0, src1, pred1, src0, src1);
- CALC_MSE_AVG_B(src0, ref0, var, avg);
- CALC_MSE_AVG_B(src1, ref1, var, avg);
- src0 = src4;
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t subpel_avg_ssediff_16w_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff, int32_t width) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 pred0, pred1, pred2, pred3;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 out0, out1, out2, out3, filt0;
- v8u16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
- v8u16 tmp0, tmp1, tmp2, tmp3;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter);
- filt0 = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- LD_UB4(sec_pred, width, pred0, pred1, pred2, pred3);
- sec_pred += (4 * width);
- ILVR_B2_UH(src1, src0, src2, src1, vec0, vec2);
- ILVL_B2_UH(src1, src0, src2, src1, vec1, vec3);
- DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- ILVR_B2_UH(src3, src2, src4, src3, vec4, vec6);
- ILVL_B2_UH(src3, src2, src4, src3, vec5, vec7);
- DOTP_UB2_UH(vec2, vec3, filt0, filt0, tmp2, tmp3);
- SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
- out1 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
- DOTP_UB2_UH(vec4, vec5, filt0, filt0, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out2 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- DOTP_UB2_UH(vec6, vec7, filt0, filt0, tmp2, tmp3);
- SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
- out3 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2);
- src0 = src4;
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3, out0, out1,
- out2, out3);
- CALC_MSE_AVG_B(out0, ref0, var, avg);
- CALC_MSE_AVG_B(out1, ref1, var, avg);
- CALC_MSE_AVG_B(out2, ref2, var, avg);
- CALC_MSE_AVG_B(out3, ref3, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_avg_sse_diff_16width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- return subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride,
- sec_pred, filter, height, diff, 16);
- }
- static uint32_t sub_pixel_avg_sse_diff_32width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[2];
- for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
- sse +=
- subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, sec_pred,
- filter, height, &diff0[loop_cnt], 32);
- src += 16;
- dst += 16;
- sec_pred += 16;
- }
- *diff = diff0[0] + diff0[1];
- return sse;
- }
- static uint32_t sub_pixel_avg_sse_diff_64width_v_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter,
- int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[4];
- for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
- sse +=
- subpel_avg_ssediff_16w_v_msa(src, src_stride, dst, dst_stride, sec_pred,
- filter, height, &diff0[loop_cnt], 64);
- src += 16;
- dst += 16;
- sec_pred += 16;
- }
- *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
- return sse;
- }
- static uint32_t sub_pixel_avg_sse_diff_4width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
- const uint8_t *filter_vert, int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- uint32_t ref0, ref1, ref2, ref3;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20 };
- v16u8 filt_hz, filt_vt, vec0, vec1;
- v16u8 out, pred, ref = { 0 };
- v8u16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, tmp0, tmp1;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter_horiz);
- filt_hz = (v16u8)__msa_fill_h(filtval);
- filtval = LH(filter_vert);
- filt_vt = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- pred = LD_UB(sec_pred);
- sec_pred += 16;
- LW4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- INSERT_W4_UB(ref0, ref1, ref2, ref3, ref);
- hz_out0 = HORIZ_2TAP_FILT_UH(src0, src1, mask, filt_hz, FILTER_BITS);
- hz_out2 = HORIZ_2TAP_FILT_UH(src2, src3, mask, filt_hz, FILTER_BITS);
- hz_out4 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
- hz_out1 = (v8u16)__msa_sldi_b((v16i8)hz_out2, (v16i8)hz_out0, 8);
- hz_out3 = (v8u16)__msa_pckod_d((v2i64)hz_out4, (v2i64)hz_out2);
- ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- out = __msa_aver_u_b(out, pred);
- CALC_MSE_AVG_B(out, ref, var, avg);
- src0 = src4;
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_avg_sse_diff_8width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
- const uint8_t *filter_vert, int32_t height, int32_t *diff) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 src0, src1, src2, src3, src4;
- v16u8 pred0, pred1, out0, out1;
- v16u8 filt_hz, filt_vt, vec0;
- v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v8u16 hz_out0, hz_out1, tmp0, tmp1, tmp2, tmp3;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter_horiz);
- filt_hz = (v16u8)__msa_fill_h(filtval);
- filtval = LH(filter_vert);
- filt_vt = (v16u8)__msa_fill_h(filtval);
- src0 = LD_UB(src);
- src += src_stride;
- hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src1, src2, src3, src4);
- src += (4 * src_stride);
- LD_UB2(sec_pred, 16, pred0, pred1);
- sec_pred += 32;
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- PCKEV_D2_UB(ref1, ref0, ref3, ref2, ref0, ref1);
- hz_out1 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
- vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
- tmp0 = __msa_dotp_u_h(vec0, filt_vt);
- hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
- vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
- tmp1 = __msa_dotp_u_h(vec0, filt_vt);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- hz_out1 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
- vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0);
- tmp2 = __msa_dotp_u_h(vec0, filt_vt);
- hz_out0 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
- vec0 = (v16u8)__msa_ilvev_b((v16i8)hz_out0, (v16i8)hz_out1);
- tmp3 = __msa_dotp_u_h(vec0, filt_vt);
- SRARI_H2_UH(tmp2, tmp3, FILTER_BITS);
- PCKEV_B2_UB(tmp1, tmp0, tmp3, tmp2, out0, out1);
- AVER_UB2_UB(out0, pred0, out1, pred1, out0, out1);
- CALC_MSE_AVG_B(out0, ref0, var, avg);
- CALC_MSE_AVG_B(out1, ref1, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t subpel_avg_ssediff_16w_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
- const uint8_t *filter_vert, int32_t height, int32_t *diff, int32_t width) {
- int16_t filtval;
- uint32_t loop_cnt;
- v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
- v16u8 ref0, ref1, ref2, ref3;
- v16u8 pred0, pred1, pred2, pred3;
- v16u8 out0, out1, out2, out3;
- v16u8 filt_hz, filt_vt, vec0, vec1;
- v16u8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
- v8u16 hz_out0, hz_out1, hz_out2, hz_out3, tmp0, tmp1;
- v8i16 avg = { 0 };
- v4i32 vec, var = { 0 };
- filtval = LH(filter_horiz);
- filt_hz = (v16u8)__msa_fill_h(filtval);
- filtval = LH(filter_vert);
- filt_vt = (v16u8)__msa_fill_h(filtval);
- LD_UB2(src, 8, src0, src1);
- src += src_stride;
- hz_out0 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
- hz_out2 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
- for (loop_cnt = (height >> 2); loop_cnt--;) {
- LD_UB4(src, src_stride, src0, src2, src4, src6);
- LD_UB4(src + 8, src_stride, src1, src3, src5, src7);
- src += (4 * src_stride);
- LD_UB4(sec_pred, width, pred0, pred1, pred2, pred3);
- sec_pred += (4 * width);
- hz_out1 = HORIZ_2TAP_FILT_UH(src0, src0, mask, filt_hz, FILTER_BITS);
- hz_out3 = HORIZ_2TAP_FILT_UH(src1, src1, mask, filt_hz, FILTER_BITS);
- ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- hz_out0 = HORIZ_2TAP_FILT_UH(src2, src2, mask, filt_hz, FILTER_BITS);
- hz_out2 = HORIZ_2TAP_FILT_UH(src3, src3, mask, filt_hz, FILTER_BITS);
- ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out1 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- hz_out1 = HORIZ_2TAP_FILT_UH(src4, src4, mask, filt_hz, FILTER_BITS);
- hz_out3 = HORIZ_2TAP_FILT_UH(src5, src5, mask, filt_hz, FILTER_BITS);
- ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out2 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- hz_out0 = HORIZ_2TAP_FILT_UH(src6, src6, mask, filt_hz, FILTER_BITS);
- hz_out2 = HORIZ_2TAP_FILT_UH(src7, src7, mask, filt_hz, FILTER_BITS);
- ILVEV_B2_UB(hz_out1, hz_out0, hz_out3, hz_out2, vec0, vec1);
- DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1);
- SRARI_H2_UH(tmp0, tmp1, FILTER_BITS);
- out3 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0);
- LD_UB4(dst, dst_stride, ref0, ref1, ref2, ref3);
- dst += (4 * dst_stride);
- AVER_UB4_UB(out0, pred0, out1, pred1, out2, pred2, out3, pred3, out0, out1,
- out2, out3);
- CALC_MSE_AVG_B(out0, ref0, var, avg);
- CALC_MSE_AVG_B(out1, ref1, var, avg);
- CALC_MSE_AVG_B(out2, ref2, var, avg);
- CALC_MSE_AVG_B(out3, ref3, var, avg);
- }
- vec = __msa_hadd_s_w(avg, avg);
- *diff = HADD_SW_S32(vec);
- return HADD_SW_S32(var);
- }
- static uint32_t sub_pixel_avg_sse_diff_16width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
- const uint8_t *filter_vert, int32_t height, int32_t *diff) {
- return subpel_avg_ssediff_16w_hv_msa(src, src_stride, dst, dst_stride,
- sec_pred, filter_horiz, filter_vert,
- height, diff, 16);
- }
- static uint32_t sub_pixel_avg_sse_diff_32width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
- const uint8_t *filter_vert, int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[2];
- for (loop_cnt = 0; loop_cnt < 2; ++loop_cnt) {
- sse += subpel_avg_ssediff_16w_hv_msa(src, src_stride, dst, dst_stride,
- sec_pred, filter_horiz, filter_vert,
- height, &diff0[loop_cnt], 32);
- src += 16;
- dst += 16;
- sec_pred += 16;
- }
- *diff = diff0[0] + diff0[1];
- return sse;
- }
- static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa(
- const uint8_t *src, int32_t src_stride, const uint8_t *dst,
- int32_t dst_stride, const uint8_t *sec_pred, const uint8_t *filter_horiz,
- const uint8_t *filter_vert, int32_t height, int32_t *diff) {
- uint32_t loop_cnt, sse = 0;
- int32_t diff0[4];
- for (loop_cnt = 0; loop_cnt < 4; ++loop_cnt) {
- sse += subpel_avg_ssediff_16w_hv_msa(src, src_stride, dst, dst_stride,
- sec_pred, filter_horiz, filter_vert,
- height, &diff0[loop_cnt], 64);
- src += 16;
- dst += 16;
- sec_pred += 16;
- }
- *diff = diff0[0] + diff0[1] + diff0[2] + diff0[3];
- return sse;
- }
- #define VARIANCE_4Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 4);
- #define VARIANCE_4Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 5);
- #define VARIANCE_8Wx4H(sse, diff) VARIANCE_WxH(sse, diff, 5);
- #define VARIANCE_8Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 6);
- #define VARIANCE_8Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 7);
- #define VARIANCE_16Wx8H(sse, diff) VARIANCE_WxH(sse, diff, 7);
- #define VARIANCE_16Wx16H(sse, diff) VARIANCE_WxH(sse, diff, 8);
- #define VARIANCE_16Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9);
- #define VARIANCE_32Wx16H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 9);
- #define VARIANCE_32Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 10);
- #define VARIANCE_32Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
- #define VARIANCE_64Wx32H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 11);
- #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12);
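- /* The shift amounts above equal log2(width * height), e.g. 16x16 -> 8.
- * The macro below selects the h/v/hv helper from the xoffset/yoffset pair
- * and falls back to vpx_variance##wd##x##ht##_msa when both offsets are 0. */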
- #define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \
- uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa( \
- const uint8_t *src, int32_t src_stride, int32_t xoffset, \
- int32_t yoffset, const uint8_t *ref, int32_t ref_stride, \
- uint32_t *sse) { \
- int32_t diff; \
- uint32_t var; \
- const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \
- const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \
- \
- if (yoffset) { \
- if (xoffset) { \
- *sse = sub_pixel_sse_diff_##wd##width_hv_msa( \
- src, src_stride, ref, ref_stride, h_filter, v_filter, ht, &diff); \
- } else { \
- *sse = sub_pixel_sse_diff_##wd##width_v_msa( \
- src, src_stride, ref, ref_stride, v_filter, ht, &diff); \
- } \
- \
- var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
- } else { \
- if (xoffset) { \
- *sse = sub_pixel_sse_diff_##wd##width_h_msa( \
- src, src_stride, ref, ref_stride, h_filter, ht, &diff); \
- \
- var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \
- } else { \
- var = vpx_variance##wd##x##ht##_msa(src, src_stride, ref, ref_stride, \
- sse); \
- } \
- } \
- \
- return var; \
- }
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 4);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(4, 8);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 4);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 8);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(8, 16);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 8);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 16);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(16, 32);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 16);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 32);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(32, 64);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 32);
- VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64);
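- /* Illustrative call of one generated function (not part of this file):
- *   uint32_t sse, var;
- *   var = vpx_sub_pixel_variance16x16_msa(src, src_stride, xoffset, yoffset,
- *                                         ref, ref_stride, &sse);
- * with 0 <= xoffset, yoffset < 8 indexing bilinear_filters_msa. */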
- #define VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht) \
- uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa( \
- const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \
- int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \
- uint32_t *sse, const uint8_t *sec_pred) { \
- int32_t diff; \
- const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \
- const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \
- \
- if (yoffset) { \
- if (xoffset) { \
- *sse = sub_pixel_avg_sse_diff_##wd##width_hv_msa( \
- src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, \
- v_filter, ht, &diff); \
- } else { \
- *sse = sub_pixel_avg_sse_diff_##wd##width_v_msa( \
- src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, v_filter, ht, \
- &diff); \
- } \
- } else { \
- if (xoffset) { \
- *sse = sub_pixel_avg_sse_diff_##wd##width_h_msa( \
- src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, ht, \
- &diff); \
- } else { \
- *sse = avg_sse_diff_##wd##width_msa(src_ptr, src_stride, ref_ptr, \
- ref_stride, sec_pred, ht, &diff); \
- } \
- } \
- \
- return VARIANCE_##wd##Wx##ht##H(*sse, diff); \
- }
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 4);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(4, 8);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 4);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 8);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(8, 16);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 8);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 16);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(16, 32);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16);
- VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32);
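- /* 32x64 is written out by hand because its no-filter path calls
- * avg_sse_diff_32x64_msa, which takes no height argument. */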
- uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr,
- int32_t src_stride,
- int32_t xoffset, int32_t yoffset,
- const uint8_t *ref_ptr,
- int32_t ref_stride, uint32_t *sse,
- const uint8_t *sec_pred) {
- int32_t diff;
- const uint8_t *h_filter = bilinear_filters_msa[xoffset];
- const uint8_t *v_filter = bilinear_filters_msa[yoffset];
- if (yoffset) {
- if (xoffset) {
- *sse = sub_pixel_avg_sse_diff_32width_hv_msa(
- src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter,
- v_filter, 64, &diff);
- } else {
- *sse = sub_pixel_avg_sse_diff_32width_v_msa(src_ptr, src_stride, ref_ptr,
- ref_stride, sec_pred,
- v_filter, 64, &diff);
- }
- } else {
- if (xoffset) {
- *sse = sub_pixel_avg_sse_diff_32width_h_msa(src_ptr, src_stride, ref_ptr,
- ref_stride, sec_pred,
- h_filter, 64, &diff);
- } else {
- *sse = avg_sse_diff_32x64_msa(src_ptr, src_stride, ref_ptr, ref_stride,
- sec_pred, &diff);
- }
- }
- return VARIANCE_32Wx64H(*sse, diff);
- }
- #define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht) \
- uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa( \
- const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \
- int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \
- uint32_t *sse, const uint8_t *sec_pred) { \
- int32_t diff; \
- const uint8_t *h_filter = bilinear_filters_msa[xoffset]; \
- const uint8_t *v_filter = bilinear_filters_msa[yoffset]; \
- \
- if (yoffset) { \
- if (xoffset) { \
- *sse = sub_pixel_avg_sse_diff_64width_hv_msa( \
- src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, \
- v_filter, ht, &diff); \
- } else { \
- *sse = sub_pixel_avg_sse_diff_64width_v_msa( \
- src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, v_filter, ht, \
- &diff); \
- } \
- } else { \
- if (xoffset) { \
- *sse = sub_pixel_avg_sse_diff_64width_h_msa( \
- src_ptr, src_stride, ref_ptr, ref_stride, sec_pred, h_filter, ht, \
- &diff); \
- } else { \
- *sse = avg_sse_diff_64x##ht##_msa(src_ptr, src_stride, ref_ptr, \
- ref_stride, sec_pred, &diff); \
- } \
- } \
- \
- return VARIANCE_64Wx##ht##H(*sse, diff); \
- }
- VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(32);
- VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(64);