- /*
- * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
- #include <arm_neon.h>
- #include <string.h>
- #include "./vpx_config.h"
- #include "vpx_dsp/arm/mem_neon.h"
- #include "vpx_ports/mem.h"
- static const int8_t vp8_sub_pel_filters[8][8] = {
- { 0, 0, 128, 0, 0, 0, 0, 0 }, /* note that 1/8 pel positions are */
- { 0, -6, 123, 12, -1, 0, 0, 0 }, /* just as per alpha -0.5 bicubic */
- { 2, -11, 108, 36, -8, 1, 0, 0 }, /* New 1/4 pel 6 tap filter */
- { 0, -9, 93, 50, -6, 0, 0, 0 },
- { 3, -16, 77, 77, -16, 3, 0, 0 }, /* New 1/2 pel 6 tap filter */
- { 0, -6, 50, 93, -9, 0, 0, 0 },
- { 1, -8, 36, 108, -11, 2, 0, 0 }, /* New 1/4 pel 6 tap filter */
- { 0, -1, 12, 123, -6, 0, 0, 0 },
- };
- // This table is derived from vp8/common/filter.c:vp8_sub_pel_filters.
- // Apply abs() to all the values. Elements 0, 2, 3, and 5 are always positive.
- // Elements 1 and 4 are either 0 or negative. The code accounts for this with
- // multiply/accumulates which either add or subtract as needed. The other
- // functions will be updated to use this table later.
- // It is also expanded to 8 elements to allow loading into 64-bit NEON
- // registers.
- static const uint8_t abs_filters[8][8] = {
- { 0, 0, 128, 0, 0, 0, 0, 0 }, { 0, 6, 123, 12, 1, 0, 0, 0 },
- { 2, 11, 108, 36, 8, 1, 0, 0 }, { 0, 9, 93, 50, 6, 0, 0, 0 },
- { 3, 16, 77, 77, 16, 3, 0, 0 }, { 0, 6, 50, 93, 9, 0, 0, 0 },
- { 1, 8, 36, 108, 11, 2, 0, 0 }, { 0, 1, 12, 123, 6, 0, 0, 0 },
- };
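- // A minimal scalar reference model of the filter (an illustrative sketch,
- // not part of the original file and not called by the library; the name
- // sixtap_pixel_ref is hypothetical). It shows the arithmetic the NEON code
- // below implements: taps 1 and 4 of abs_filters[] are subtracted to restore
- // the sign dropped by abs(), and the sum is rounded, shifted right by 7 and
- // clamped to [0, 255], which is what vqrshrun_n_s16(d, 7) does.
- static INLINE unsigned char sixtap_pixel_ref(const unsigned char *src,
-                                              const uint8_t *f) {
-   // src points 2 pixels before the output position, matching the
-   // src_ptr -= 2 adjustments in the functions below.
-   const int sum = src[0] * f[0] - src[1] * f[1] + src[2] * f[2] +
-                   src[3] * f[3] - src[4] * f[4] + src[5] * f[5];
-   const int out = (sum + 64) >> 7;  // Round, then shift by the filter shift.
-   return (unsigned char)(out < 0 ? 0 : (out > 255 ? 255 : out));
- }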
- static INLINE uint8x8_t load_and_shift(const unsigned char *a) {
- return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
- }
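- // filter_add_accumulate and filter_sub_accumulate gather the first 4 bytes
- // of each 8-byte half of 'a' and 'b' (one source row per half) into a
- // single d register, then multiply-accumulate (or multiply-subtract) by one
- // filter tap, so each call applies a tap to 4 pixels from each of 4 rows.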
- static INLINE void filter_add_accumulate(const uint8x16_t a, const uint8x16_t b,
- const uint8x8_t filter, uint16x8_t *c,
- uint16x8_t *d) {
- const uint32x2x2_t a_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a)),
- vreinterpret_u32_u8(vget_high_u8(a)));
- const uint32x2x2_t b_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b)),
- vreinterpret_u32_u8(vget_high_u8(b)));
- *c = vmlal_u8(*c, vreinterpret_u8_u32(a_shuf.val[0]), filter);
- *d = vmlal_u8(*d, vreinterpret_u8_u32(b_shuf.val[0]), filter);
- }
- static INLINE void filter_sub_accumulate(const uint8x16_t a, const uint8x16_t b,
- const uint8x8_t filter, uint16x8_t *c,
- uint16x8_t *d) {
- const uint32x2x2_t a_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(a)),
- vreinterpret_u32_u8(vget_high_u8(a)));
- const uint32x2x2_t b_shuf = vzip_u32(vreinterpret_u32_u8(vget_low_u8(b)),
- vreinterpret_u32_u8(vget_high_u8(b)));
- *c = vmlsl_u8(*c, vreinterpret_u8_u32(a_shuf.val[0]), filter);
- *d = vmlsl_u8(*d, vreinterpret_u8_u32(b_shuf.val[0]), filter);
- }
- static INLINE void yonly4x4(const unsigned char *src, int src_stride,
- int filter_offset, unsigned char *dst,
- int dst_stride) {
- uint8x8_t a0, a1, a2, a3, a4, a5, a6, a7, a8;
- uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7, b8;
- uint16x8_t c0, c1, c2, c3;
- int16x8_t d0, d1;
- uint8x8_t e0, e1;
- const uint8x8_t filter = vld1_u8(abs_filters[filter_offset]);
- const uint8x8_t filter0 = vdup_lane_u8(filter, 0);
- const uint8x8_t filter1 = vdup_lane_u8(filter, 1);
- const uint8x8_t filter2 = vdup_lane_u8(filter, 2);
- const uint8x8_t filter3 = vdup_lane_u8(filter, 3);
- const uint8x8_t filter4 = vdup_lane_u8(filter, 4);
- const uint8x8_t filter5 = vdup_lane_u8(filter, 5);
- src -= src_stride * 2;
- // Shift the even rows to allow using 'vext' to combine the vectors. armv8
- // has vcopy_lane which would be interesting. This started as just a
- // horrible workaround for clang adding alignment hints to 32bit loads:
- // https://llvm.org/bugs/show_bug.cgi?id=24421
- // But it turns out it is almost identical to casting the loads.
- a0 = load_and_shift(src);
- src += src_stride;
- a1 = vld1_u8(src);
- src += src_stride;
- a2 = load_and_shift(src);
- src += src_stride;
- a3 = vld1_u8(src);
- src += src_stride;
- a4 = load_and_shift(src);
- src += src_stride;
- a5 = vld1_u8(src);
- src += src_stride;
- a6 = load_and_shift(src);
- src += src_stride;
- a7 = vld1_u8(src);
- src += src_stride;
- a8 = vld1_u8(src);
- // Combine the rows so we can operate on 8 at a time.
- b0 = vext_u8(a0, a1, 4);
- b2 = vext_u8(a2, a3, 4);
- b4 = vext_u8(a4, a5, 4);
- b6 = vext_u8(a6, a7, 4);
- b8 = a8;
- // To keep with the 8-at-a-time theme, combine *alternate* rows. This
- // allows combining the odd rows with the even.
- b1 = vext_u8(b0, b2, 4);
- b3 = vext_u8(b2, b4, 4);
- b5 = vext_u8(b4, b6, 4);
- b7 = vext_u8(b6, b8, 4);
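- // After the shuffles, b[n] holds source rows n and n+1 (4 pixels each), so
- // each multiply below applies one tap to two output rows at a time.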
- // Multiply and expand to 16 bits.
- c0 = vmull_u8(b0, filter0);
- c1 = vmull_u8(b2, filter0);
- c2 = vmull_u8(b5, filter5);
- c3 = vmull_u8(b7, filter5);
- // Multiply, subtract and accumulate for filters 1 and 4 (the negative
- // ones).
- c0 = vmlsl_u8(c0, b4, filter4);
- c1 = vmlsl_u8(c1, b6, filter4);
- c2 = vmlsl_u8(c2, b1, filter1);
- c3 = vmlsl_u8(c3, b3, filter1);
- // Add more positive ones. vmlal should really return a signed type.
- // It's doing signed math internally, as evidenced by the fact we can do
- // subtractions followed by more additions. Ideally we could use
- // vqmlal/sl but that instruction doesn't exist. Might be able to
- // shoehorn vqdmlal/vqdmlsl in here but it would take some effort.
- c0 = vmlal_u8(c0, b2, filter2);
- c1 = vmlal_u8(c1, b4, filter2);
- c2 = vmlal_u8(c2, b3, filter3);
- c3 = vmlal_u8(c3, b5, filter3);
- // Use signed saturation math because vmlsl may have left some negative
- // numbers in there.
- d0 = vqaddq_s16(vreinterpretq_s16_u16(c2), vreinterpretq_s16_u16(c0));
- d1 = vqaddq_s16(vreinterpretq_s16_u16(c3), vreinterpretq_s16_u16(c1));
- // Use signed again because numbers like -200 need to be saturated to 0.
- e0 = vqrshrun_n_s16(d0, 7);
- e1 = vqrshrun_n_s16(d1, 7);
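- // Scatter the 16 filtered pixels to dst as four 4-byte rows.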
- store_unaligned_u8q(dst, dst_stride, vcombine_u8(e0, e1));
- }
- void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
- int xoffset, int yoffset,
- unsigned char *dst_ptr, int dst_pitch) {
- uint8x16_t s0, s1, s2, s3, s4;
- uint64x2_t s01, s23;
- // Variables to hold src[] elements for the given filter[]
- uint8x8_t s0_f5, s1_f5, s2_f5, s3_f5, s4_f5;
- uint8x8_t s4_f1, s4_f2, s4_f3, s4_f4;
- uint8x16_t s01_f0, s23_f0;
- uint64x2_t s01_f3, s23_f3;
- uint32x2x2_t s01_f3_q, s23_f3_q, s01_f5_q, s23_f5_q;
- // Accumulator variables.
- uint16x8_t d0123, d4567, d89;
- uint16x8_t d0123_a, d4567_a, d89_a;
- int16x8_t e0123, e4567, e89;
- // Second pass intermediates.
- uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7, b8;
- uint16x8_t c0, c1, c2, c3;
- int16x8_t d0, d1;
- uint8x8_t e0, e1;
- uint8x8_t filter, filter0, filter1, filter2, filter3, filter4, filter5;
- if (xoffset == 0) { // Second pass only.
- yonly4x4(src_ptr, src_pixels_per_line, yoffset, dst_ptr, dst_pitch);
- return;
- }
- if (yoffset == 0) { // First pass only.
- src_ptr -= 2;
- } else { // Add context for the second pass. 2 extra lines on top.
- src_ptr -= 2 + (src_pixels_per_line * 2);
- }
- filter = vld1_u8(abs_filters[xoffset]);
- filter0 = vdup_lane_u8(filter, 0);
- filter1 = vdup_lane_u8(filter, 1);
- filter2 = vdup_lane_u8(filter, 2);
- filter3 = vdup_lane_u8(filter, 3);
- filter4 = vdup_lane_u8(filter, 4);
- filter5 = vdup_lane_u8(filter, 5);
- // 2 bytes of context, 4 bytes of src values, 3 bytes of context, 7 bytes of
- // garbage. So much effort for that last single bit.
- // The low values of each pair are for filter0.
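- // Byte layout of each 16-byte load, with '.' marking the unused bytes:
- // [ c c | s s s s | c c c | . . . . . . . ]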
- s0 = vld1q_u8(src_ptr);
- src_ptr += src_pixels_per_line;
- s1 = vld1q_u8(src_ptr);
- src_ptr += src_pixels_per_line;
- s2 = vld1q_u8(src_ptr);
- src_ptr += src_pixels_per_line;
- s3 = vld1q_u8(src_ptr);
- src_ptr += src_pixels_per_line;
- // Shift to extract values for filter[5]
- // If src[] is 0, this puts:
- // 3 4 5 6 7 8 9 10 in s0_f5
- // Can't use vshr.u64 because it crosses the double word boundary.
- s0_f5 = vext_u8(vget_low_u8(s0), vget_high_u8(s0), 5);
- s1_f5 = vext_u8(vget_low_u8(s1), vget_high_u8(s1), 5);
- s2_f5 = vext_u8(vget_low_u8(s2), vget_high_u8(s2), 5);
- s3_f5 = vext_u8(vget_low_u8(s3), vget_high_u8(s3), 5);
- s01_f0 = vcombine_u8(vget_low_u8(s0), vget_low_u8(s1));
- s23_f0 = vcombine_u8(vget_low_u8(s2), vget_low_u8(s3));
- s01_f5_q = vzip_u32(vreinterpret_u32_u8(s0_f5), vreinterpret_u32_u8(s1_f5));
- s23_f5_q = vzip_u32(vreinterpret_u32_u8(s2_f5), vreinterpret_u32_u8(s3_f5));
- d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5);
- d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5);
- // Keep original src data as 64 bits to simplify shifting and extracting.
- s01 = vreinterpretq_u64_u8(s01_f0);
- s23 = vreinterpretq_u64_u8(s23_f0);
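- // vshrq_n_u64(sXX, 8 * k) exposes, in the low 4 bytes of each lane, the 4
- // pixels starting k bytes into the row, i.e. columns k - 2 through k + 1.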
- // 3 4 5 6 * filter0
- filter_add_accumulate(s01_f0, s23_f0, filter0, &d0123, &d4567);
- // Shift over one to use -1, 0, 1, 2 for filter1
- // -1 0 1 2 * filter1
- filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 8)),
- vreinterpretq_u8_u64(vshrq_n_u64(s23, 8)), filter1,
- &d0123, &d4567);
- // 2 3 4 5 * filter4
- filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 32)),
- vreinterpretq_u8_u64(vshrq_n_u64(s23, 32)), filter4,
- &d0123, &d4567);
- // 0 1 2 3 * filter2
- filter_add_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 16)),
- vreinterpretq_u8_u64(vshrq_n_u64(s23, 16)), filter2,
- &d0123, &d4567);
- // 1 2 3 4 * filter3
- s01_f3 = vshrq_n_u64(s01, 24);
- s23_f3 = vshrq_n_u64(s23, 24);
- s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),
- vreinterpret_u32_u64(vget_high_u64(s01_f3)));
- s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)),
- vreinterpret_u32_u64(vget_high_u64(s23_f3)));
- // Accumulate into different registers so it can use saturated addition.
- d0123_a = vmull_u8(vreinterpret_u8_u32(s01_f3_q.val[0]), filter3);
- d4567_a = vmull_u8(vreinterpret_u8_u32(s23_f3_q.val[0]), filter3);
- e0123 =
- vqaddq_s16(vreinterpretq_s16_u16(d0123), vreinterpretq_s16_u16(d0123_a));
- e4567 =
- vqaddq_s16(vreinterpretq_s16_u16(d4567), vreinterpretq_s16_u16(d4567_a));
- // Shift and narrow.
- b0 = vqrshrun_n_s16(e0123, 7);
- b2 = vqrshrun_n_s16(e4567, 7);
- if (yoffset == 0) { // firstpass_filter4x4_only
- store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(b0, b2));
- return;
- }
- // Load additional context when doing both filters.
- s0 = vld1q_u8(src_ptr);
- src_ptr += src_pixels_per_line;
- s1 = vld1q_u8(src_ptr);
- src_ptr += src_pixels_per_line;
- s2 = vld1q_u8(src_ptr);
- src_ptr += src_pixels_per_line;
- s3 = vld1q_u8(src_ptr);
- src_ptr += src_pixels_per_line;
- s4 = vld1q_u8(src_ptr);
- s0_f5 = vext_u8(vget_low_u8(s0), vget_high_u8(s0), 5);
- s1_f5 = vext_u8(vget_low_u8(s1), vget_high_u8(s1), 5);
- s2_f5 = vext_u8(vget_low_u8(s2), vget_high_u8(s2), 5);
- s3_f5 = vext_u8(vget_low_u8(s3), vget_high_u8(s3), 5);
- s4_f5 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 5);
- // 3 4 5 6 * filter0
- s01_f0 = vcombine_u8(vget_low_u8(s0), vget_low_u8(s1));
- s23_f0 = vcombine_u8(vget_low_u8(s2), vget_low_u8(s3));
- s01_f5_q = vzip_u32(vreinterpret_u32_u8(s0_f5), vreinterpret_u32_u8(s1_f5));
- s23_f5_q = vzip_u32(vreinterpret_u32_u8(s2_f5), vreinterpret_u32_u8(s3_f5));
- // But this time, instead of 16 pixels to filter, there are 20, so an extra
- // run is needed with a doubleword register.
- d0123 = vmull_u8(vreinterpret_u8_u32(s01_f5_q.val[0]), filter5);
- d4567 = vmull_u8(vreinterpret_u8_u32(s23_f5_q.val[0]), filter5);
- d89 = vmull_u8(s4_f5, filter5);
- // Save a copy as u64 for shifting.
- s01 = vreinterpretq_u64_u8(s01_f0);
- s23 = vreinterpretq_u64_u8(s23_f0);
- filter_add_accumulate(s01_f0, s23_f0, filter0, &d0123, &d4567);
- d89 = vmlal_u8(d89, vget_low_u8(s4), filter0);
- filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 8)),
- vreinterpretq_u8_u64(vshrq_n_u64(s23, 8)), filter1,
- &d0123, &d4567);
- s4_f1 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 1);
- d89 = vmlsl_u8(d89, s4_f1, filter1);
- filter_sub_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 32)),
- vreinterpretq_u8_u64(vshrq_n_u64(s23, 32)), filter4,
- &d0123, &d4567);
- s4_f4 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 4);
- d89 = vmlsl_u8(d89, s4_f4, filter4);
- filter_add_accumulate(vreinterpretq_u8_u64(vshrq_n_u64(s01, 16)),
- vreinterpretq_u8_u64(vshrq_n_u64(s23, 16)), filter2,
- &d0123, &d4567);
- s4_f2 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 2);
- d89 = vmlal_u8(d89, s4_f2, filter2);
- s01_f3 = vshrq_n_u64(s01, 24);
- s23_f3 = vshrq_n_u64(s23, 24);
- s01_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s01_f3)),
- vreinterpret_u32_u64(vget_high_u64(s01_f3)));
- s23_f3_q = vzip_u32(vreinterpret_u32_u64(vget_low_u64(s23_f3)),
- vreinterpret_u32_u64(vget_high_u64(s23_f3)));
- s4_f3 = vext_u8(vget_low_u8(s4), vget_high_u8(s4), 3);
- d0123_a = vmull_u8(vreinterpret_u8_u32(s01_f3_q.val[0]), filter3);
- d4567_a = vmull_u8(vreinterpret_u8_u32(s23_f3_q.val[0]), filter3);
- d89_a = vmull_u8(s4_f3, filter3);
- e0123 =
- vqaddq_s16(vreinterpretq_s16_u16(d0123), vreinterpretq_s16_u16(d0123_a));
- e4567 =
- vqaddq_s16(vreinterpretq_s16_u16(d4567), vreinterpretq_s16_u16(d4567_a));
- e89 = vqaddq_s16(vreinterpretq_s16_u16(d89), vreinterpretq_s16_u16(d89_a));
- b4 = vqrshrun_n_s16(e0123, 7);
- b6 = vqrshrun_n_s16(e4567, 7);
- b8 = vqrshrun_n_s16(e89, 7);
- // Second pass: 4x4
- filter = vld1_u8(abs_filters[yoffset]);
- filter0 = vdup_lane_u8(filter, 0);
- filter1 = vdup_lane_u8(filter, 1);
- filter2 = vdup_lane_u8(filter, 2);
- filter3 = vdup_lane_u8(filter, 3);
- filter4 = vdup_lane_u8(filter, 4);
- filter5 = vdup_lane_u8(filter, 5);
- b1 = vext_u8(b0, b2, 4);
- b3 = vext_u8(b2, b4, 4);
- b5 = vext_u8(b4, b6, 4);
- b7 = vext_u8(b6, b8, 4);
- c0 = vmull_u8(b0, filter0);
- c1 = vmull_u8(b2, filter0);
- c2 = vmull_u8(b5, filter5);
- c3 = vmull_u8(b7, filter5);
- c0 = vmlsl_u8(c0, b4, filter4);
- c1 = vmlsl_u8(c1, b6, filter4);
- c2 = vmlsl_u8(c2, b1, filter1);
- c3 = vmlsl_u8(c3, b3, filter1);
- c0 = vmlal_u8(c0, b2, filter2);
- c1 = vmlal_u8(c1, b4, filter2);
- c2 = vmlal_u8(c2, b3, filter3);
- c3 = vmlal_u8(c3, b5, filter3);
- d0 = vqaddq_s16(vreinterpretq_s16_u16(c2), vreinterpretq_s16_u16(c0));
- d1 = vqaddq_s16(vreinterpretq_s16_u16(c3), vreinterpretq_s16_u16(c1));
- e0 = vqrshrun_n_s16(d0, 7);
- e1 = vqrshrun_n_s16(d1, 7);
- store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));
- }
- void vp8_sixtap_predict8x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
- int xoffset, int yoffset,
- unsigned char *dst_ptr, int dst_pitch) {
- unsigned char *src;
- uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
- uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8;
- uint8x8_t d27u8, d28u8, d29u8, d30u8, d31u8;
- int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
- uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
- uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
- int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
- int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
- uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8;
- if (xoffset == 0) { // secondpass_filter8x4_only
- // load second_pass filter
- dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
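- // Take the absolute value of the signed taps so they can be used with
- // unsigned multiplies; vmlsl below restores the sign of taps 1 and 4.
- // This is the runtime equivalent of the precomputed abs_filters[] table.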
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
- // load src data
- src = src_ptr - src_pixels_per_line * 2;
- d22u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d23u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d24u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d25u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d26u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d27u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d28u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d29u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d30u8 = vld1_u8(src);
- q3u16 = vmull_u8(d22u8, d0u8);
- q4u16 = vmull_u8(d23u8, d0u8);
- q5u16 = vmull_u8(d24u8, d0u8);
- q6u16 = vmull_u8(d25u8, d0u8);
- q3u16 = vmlsl_u8(q3u16, d23u8, d1u8);
- q4u16 = vmlsl_u8(q4u16, d24u8, d1u8);
- q5u16 = vmlsl_u8(q5u16, d25u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d26u8, d1u8);
- q3u16 = vmlsl_u8(q3u16, d26u8, d4u8);
- q4u16 = vmlsl_u8(q4u16, d27u8, d4u8);
- q5u16 = vmlsl_u8(q5u16, d28u8, d4u8);
- q6u16 = vmlsl_u8(q6u16, d29u8, d4u8);
- q3u16 = vmlal_u8(q3u16, d24u8, d2u8);
- q4u16 = vmlal_u8(q4u16, d25u8, d2u8);
- q5u16 = vmlal_u8(q5u16, d26u8, d2u8);
- q6u16 = vmlal_u8(q6u16, d27u8, d2u8);
- q3u16 = vmlal_u8(q3u16, d27u8, d5u8);
- q4u16 = vmlal_u8(q4u16, d28u8, d5u8);
- q5u16 = vmlal_u8(q5u16, d29u8, d5u8);
- q6u16 = vmlal_u8(q6u16, d30u8, d5u8);
- q7u16 = vmull_u8(d25u8, d3u8);
- q8u16 = vmull_u8(d26u8, d3u8);
- q9u16 = vmull_u8(d27u8, d3u8);
- q10u16 = vmull_u8(d28u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q3s16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q9s16 = vqaddq_s16(q9s16, q5s16);
- q10s16 = vqaddq_s16(q10s16, q6s16);
- d6u8 = vqrshrun_n_s16(q7s16, 7);
- d7u8 = vqrshrun_n_s16(q8s16, 7);
- d8u8 = vqrshrun_n_s16(q9s16, 7);
- d9u8 = vqrshrun_n_s16(q10s16, 7);
- vst1_u8(dst_ptr, d6u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d7u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d8u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d9u8);
- return;
- }
- // load first_pass filter
- dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
- // First pass: output_height lines x output_width columns (9x8)
- if (yoffset == 0) // firstpass_filter8x4_only
- src = src_ptr - 2;
- else
- src = src_ptr - 2 - (src_pixels_per_line * 2);
- q3u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q4u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q5u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q6u8 = vld1q_u8(src);
- q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
- q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
- q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
- q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
- q7u16 = vmlsl_u8(q7u16, d28u8, d1u8);
- q8u16 = vmlsl_u8(q8u16, d29u8, d1u8);
- q9u16 = vmlsl_u8(q9u16, d30u8, d1u8);
- q10u16 = vmlsl_u8(q10u16, d31u8, d1u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
- q7u16 = vmlsl_u8(q7u16, d28u8, d4u8);
- q8u16 = vmlsl_u8(q8u16, d29u8, d4u8);
- q9u16 = vmlsl_u8(q9u16, d30u8, d4u8);
- q10u16 = vmlsl_u8(q10u16, d31u8, d4u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
- q7u16 = vmlal_u8(q7u16, d28u8, d2u8);
- q8u16 = vmlal_u8(q8u16, d29u8, d2u8);
- q9u16 = vmlal_u8(q9u16, d30u8, d2u8);
- q10u16 = vmlal_u8(q10u16, d31u8, d2u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
- q7u16 = vmlal_u8(q7u16, d28u8, d5u8);
- q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
- q9u16 = vmlal_u8(q9u16, d30u8, d5u8);
- q10u16 = vmlal_u8(q10u16, d31u8, d5u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
- q3u16 = vmull_u8(d28u8, d3u8);
- q4u16 = vmull_u8(d29u8, d3u8);
- q5u16 = vmull_u8(d30u8, d3u8);
- q6u16 = vmull_u8(d31u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q3s16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q9s16 = vqaddq_s16(q9s16, q5s16);
- q10s16 = vqaddq_s16(q10s16, q6s16);
- d22u8 = vqrshrun_n_s16(q7s16, 7);
- d23u8 = vqrshrun_n_s16(q8s16, 7);
- d24u8 = vqrshrun_n_s16(q9s16, 7);
- d25u8 = vqrshrun_n_s16(q10s16, 7);
- if (yoffset == 0) { // firstpass_filter8x4_only
- vst1_u8(dst_ptr, d22u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d23u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d24u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d25u8);
- return;
- }
- // First pass on the remaining 5 lines of data.
- src += src_pixels_per_line;
- q3u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q4u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q5u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q6u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q7u8 = vld1q_u8(src);
- q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
- q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
- q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
- q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
- q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1);
- q8u16 = vmlsl_u8(q8u16, d27u8, d1u8);
- q9u16 = vmlsl_u8(q9u16, d28u8, d1u8);
- q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
- q11u16 = vmlsl_u8(q11u16, d30u8, d1u8);
- q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4);
- q8u16 = vmlsl_u8(q8u16, d27u8, d4u8);
- q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
- q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
- q11u16 = vmlsl_u8(q11u16, d30u8, d4u8);
- q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2);
- q8u16 = vmlal_u8(q8u16, d27u8, d2u8);
- q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
- q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
- q11u16 = vmlal_u8(q11u16, d30u8, d2u8);
- q12u16 = vmlal_u8(q12u16, d31u8, d2u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5);
- q8u16 = vmlal_u8(q8u16, d27u8, d5u8);
- q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
- q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
- q11u16 = vmlal_u8(q11u16, d30u8, d5u8);
- q12u16 = vmlal_u8(q12u16, d31u8, d5u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3);
- q3u16 = vmull_u8(d27u8, d3u8);
- q4u16 = vmull_u8(d28u8, d3u8);
- q5u16 = vmull_u8(d29u8, d3u8);
- q6u16 = vmull_u8(d30u8, d3u8);
- q7u16 = vmull_u8(d31u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q11s16 = vreinterpretq_s16_u16(q11u16);
- q12s16 = vreinterpretq_s16_u16(q12u16);
- q8s16 = vqaddq_s16(q8s16, q3s16);
- q9s16 = vqaddq_s16(q9s16, q4s16);
- q10s16 = vqaddq_s16(q10s16, q5s16);
- q11s16 = vqaddq_s16(q11s16, q6s16);
- q12s16 = vqaddq_s16(q12s16, q7s16);
- d26u8 = vqrshrun_n_s16(q8s16, 7);
- d27u8 = vqrshrun_n_s16(q9s16, 7);
- d28u8 = vqrshrun_n_s16(q10s16, 7);
- d29u8 = vqrshrun_n_s16(q11s16, 7);
- d30u8 = vqrshrun_n_s16(q12s16, 7);
- // Second pass: 8x4
- dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
- q3u16 = vmull_u8(d22u8, d0u8);
- q4u16 = vmull_u8(d23u8, d0u8);
- q5u16 = vmull_u8(d24u8, d0u8);
- q6u16 = vmull_u8(d25u8, d0u8);
- q3u16 = vmlsl_u8(q3u16, d23u8, d1u8);
- q4u16 = vmlsl_u8(q4u16, d24u8, d1u8);
- q5u16 = vmlsl_u8(q5u16, d25u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d26u8, d1u8);
- q3u16 = vmlsl_u8(q3u16, d26u8, d4u8);
- q4u16 = vmlsl_u8(q4u16, d27u8, d4u8);
- q5u16 = vmlsl_u8(q5u16, d28u8, d4u8);
- q6u16 = vmlsl_u8(q6u16, d29u8, d4u8);
- q3u16 = vmlal_u8(q3u16, d24u8, d2u8);
- q4u16 = vmlal_u8(q4u16, d25u8, d2u8);
- q5u16 = vmlal_u8(q5u16, d26u8, d2u8);
- q6u16 = vmlal_u8(q6u16, d27u8, d2u8);
- q3u16 = vmlal_u8(q3u16, d27u8, d5u8);
- q4u16 = vmlal_u8(q4u16, d28u8, d5u8);
- q5u16 = vmlal_u8(q5u16, d29u8, d5u8);
- q6u16 = vmlal_u8(q6u16, d30u8, d5u8);
- q7u16 = vmull_u8(d25u8, d3u8);
- q8u16 = vmull_u8(d26u8, d3u8);
- q9u16 = vmull_u8(d27u8, d3u8);
- q10u16 = vmull_u8(d28u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q3s16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q9s16 = vqaddq_s16(q9s16, q5s16);
- q10s16 = vqaddq_s16(q10s16, q6s16);
- d6u8 = vqrshrun_n_s16(q7s16, 7);
- d7u8 = vqrshrun_n_s16(q8s16, 7);
- d8u8 = vqrshrun_n_s16(q9s16, 7);
- d9u8 = vqrshrun_n_s16(q10s16, 7);
- vst1_u8(dst_ptr, d6u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d7u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d8u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d9u8);
- return;
- }
- void vp8_sixtap_predict8x8_neon(unsigned char *src_ptr, int src_pixels_per_line,
- int xoffset, int yoffset,
- unsigned char *dst_ptr, int dst_pitch) {
- unsigned char *src, *tmpp;
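- // Scratch for the first-pass output: 8 rows x 8 columns. The 5 extra
- // context rows for the second pass are filtered later and kept in
- // registers rather than stored here.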
- unsigned char tmp[64];
- int i;
- uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
- uint8x8_t d18u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8, d25u8;
- uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
- int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
- uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
- uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
- int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
- int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
- uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q9u8, q10u8, q11u8, q12u8;
- if (xoffset == 0) { // secondpass_filter8x8_only
- // load second_pass filter
- dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
- // load src data
- src = src_ptr - src_pixels_per_line * 2;
- d18u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d19u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d20u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d21u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d22u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d23u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d24u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d25u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d26u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d27u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d28u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d29u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d30u8 = vld1_u8(src);
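- // Each iteration filters 4 of the 8 output rows; the register rotation at
- // the bottom slides the 9-row source window down by 4 rows.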
- for (i = 2; i > 0; i--) {
- q3u16 = vmull_u8(d18u8, d0u8);
- q4u16 = vmull_u8(d19u8, d0u8);
- q5u16 = vmull_u8(d20u8, d0u8);
- q6u16 = vmull_u8(d21u8, d0u8);
- q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
- q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
- q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
- q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
- q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
- q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
- q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
- q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
- q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
- q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
- q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
- q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
- q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
- q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
- q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
- q7u16 = vmull_u8(d21u8, d3u8);
- q8u16 = vmull_u8(d22u8, d3u8);
- q9u16 = vmull_u8(d23u8, d3u8);
- q10u16 = vmull_u8(d24u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q3s16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q9s16 = vqaddq_s16(q9s16, q5s16);
- q10s16 = vqaddq_s16(q10s16, q6s16);
- d6u8 = vqrshrun_n_s16(q7s16, 7);
- d7u8 = vqrshrun_n_s16(q8s16, 7);
- d8u8 = vqrshrun_n_s16(q9s16, 7);
- d9u8 = vqrshrun_n_s16(q10s16, 7);
- d18u8 = d22u8;
- d19u8 = d23u8;
- d20u8 = d24u8;
- d21u8 = d25u8;
- d22u8 = d26u8;
- d23u8 = d27u8;
- d24u8 = d28u8;
- d25u8 = d29u8;
- d26u8 = d30u8;
- vst1_u8(dst_ptr, d6u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d7u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d8u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d9u8);
- dst_ptr += dst_pitch;
- }
- return;
- }
- // load first_pass filter
- dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
- // First pass: output_height lines x output_width columns (13x8)
- if (yoffset == 0) // firstpass_filter8x8_only
- src = src_ptr - 2;
- else
- src = src_ptr - 2 - (src_pixels_per_line * 2);
- tmpp = tmp;
- for (i = 2; i > 0; i--) {
- q3u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q4u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q5u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q6u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- __builtin_prefetch(src);
- __builtin_prefetch(src + src_pixels_per_line);
- __builtin_prefetch(src + src_pixels_per_line * 2);
- q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
- q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
- q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
- q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
- q7u16 = vmlsl_u8(q7u16, d28u8, d1u8);
- q8u16 = vmlsl_u8(q8u16, d29u8, d1u8);
- q9u16 = vmlsl_u8(q9u16, d30u8, d1u8);
- q10u16 = vmlsl_u8(q10u16, d31u8, d1u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
- q7u16 = vmlsl_u8(q7u16, d28u8, d4u8);
- q8u16 = vmlsl_u8(q8u16, d29u8, d4u8);
- q9u16 = vmlsl_u8(q9u16, d30u8, d4u8);
- q10u16 = vmlsl_u8(q10u16, d31u8, d4u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
- q7u16 = vmlal_u8(q7u16, d28u8, d2u8);
- q8u16 = vmlal_u8(q8u16, d29u8, d2u8);
- q9u16 = vmlal_u8(q9u16, d30u8, d2u8);
- q10u16 = vmlal_u8(q10u16, d31u8, d2u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
- q7u16 = vmlal_u8(q7u16, d28u8, d5u8);
- q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
- q9u16 = vmlal_u8(q9u16, d30u8, d5u8);
- q10u16 = vmlal_u8(q10u16, d31u8, d5u8);
- d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
- d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
- d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
- d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
- q3u16 = vmull_u8(d28u8, d3u8);
- q4u16 = vmull_u8(d29u8, d3u8);
- q5u16 = vmull_u8(d30u8, d3u8);
- q6u16 = vmull_u8(d31u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q3s16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q9s16 = vqaddq_s16(q9s16, q5s16);
- q10s16 = vqaddq_s16(q10s16, q6s16);
- d22u8 = vqrshrun_n_s16(q7s16, 7);
- d23u8 = vqrshrun_n_s16(q8s16, 7);
- d24u8 = vqrshrun_n_s16(q9s16, 7);
- d25u8 = vqrshrun_n_s16(q10s16, 7);
- if (yoffset == 0) { // firstpass_filter8x8_only
- vst1_u8(dst_ptr, d22u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d23u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d24u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d25u8);
- dst_ptr += dst_pitch;
- } else {
- vst1_u8(tmpp, d22u8);
- tmpp += 8;
- vst1_u8(tmpp, d23u8);
- tmpp += 8;
- vst1_u8(tmpp, d24u8);
- tmpp += 8;
- vst1_u8(tmpp, d25u8);
- tmpp += 8;
- }
- }
- if (yoffset == 0) return;
- // First pass on the remaining 5 lines of data.
- q3u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q4u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q5u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q6u8 = vld1q_u8(src);
- src += src_pixels_per_line;
- q7u8 = vld1q_u8(src);
- q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
- q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
- q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
- q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
- q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1);
- q8u16 = vmlsl_u8(q8u16, d27u8, d1u8);
- q9u16 = vmlsl_u8(q9u16, d28u8, d1u8);
- q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
- q11u16 = vmlsl_u8(q11u16, d30u8, d1u8);
- q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4);
- q8u16 = vmlsl_u8(q8u16, d27u8, d4u8);
- q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
- q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
- q11u16 = vmlsl_u8(q11u16, d30u8, d4u8);
- q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2);
- q8u16 = vmlal_u8(q8u16, d27u8, d2u8);
- q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
- q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
- q11u16 = vmlal_u8(q11u16, d30u8, d2u8);
- q12u16 = vmlal_u8(q12u16, d31u8, d2u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5);
- q8u16 = vmlal_u8(q8u16, d27u8, d5u8);
- q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
- q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
- q11u16 = vmlal_u8(q11u16, d30u8, d5u8);
- q12u16 = vmlal_u8(q12u16, d31u8, d5u8);
- d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
- d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
- d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
- d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
- d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3);
- q3u16 = vmull_u8(d27u8, d3u8);
- q4u16 = vmull_u8(d28u8, d3u8);
- q5u16 = vmull_u8(d29u8, d3u8);
- q6u16 = vmull_u8(d30u8, d3u8);
- q7u16 = vmull_u8(d31u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q11s16 = vreinterpretq_s16_u16(q11u16);
- q12s16 = vreinterpretq_s16_u16(q12u16);
- q8s16 = vqaddq_s16(q8s16, q3s16);
- q9s16 = vqaddq_s16(q9s16, q4s16);
- q10s16 = vqaddq_s16(q10s16, q5s16);
- q11s16 = vqaddq_s16(q11s16, q6s16);
- q12s16 = vqaddq_s16(q12s16, q7s16);
- d26u8 = vqrshrun_n_s16(q8s16, 7);
- d27u8 = vqrshrun_n_s16(q9s16, 7);
- d28u8 = vqrshrun_n_s16(q10s16, 7);
- d29u8 = vqrshrun_n_s16(q11s16, 7);
- d30u8 = vqrshrun_n_s16(q12s16, 7);
- // Second pass: 8x8
- dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
- tmpp = tmp;
- q9u8 = vld1q_u8(tmpp);
- tmpp += 16;
- q10u8 = vld1q_u8(tmpp);
- tmpp += 16;
- q11u8 = vld1q_u8(tmpp);
- tmpp += 16;
- q12u8 = vld1q_u8(tmpp);
- d18u8 = vget_low_u8(q9u8);
- d19u8 = vget_high_u8(q9u8);
- d20u8 = vget_low_u8(q10u8);
- d21u8 = vget_high_u8(q10u8);
- d22u8 = vget_low_u8(q11u8);
- d23u8 = vget_high_u8(q11u8);
- d24u8 = vget_low_u8(q12u8);
- d25u8 = vget_high_u8(q12u8);
- for (i = 2; i > 0; i--) {
- q3u16 = vmull_u8(d18u8, d0u8);
- q4u16 = vmull_u8(d19u8, d0u8);
- q5u16 = vmull_u8(d20u8, d0u8);
- q6u16 = vmull_u8(d21u8, d0u8);
- q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
- q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
- q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
- q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
- q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
- q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
- q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
- q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
- q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
- q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
- q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
- q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
- q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
- q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
- q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
- q7u16 = vmull_u8(d21u8, d3u8);
- q8u16 = vmull_u8(d22u8, d3u8);
- q9u16 = vmull_u8(d23u8, d3u8);
- q10u16 = vmull_u8(d24u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q3s16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q9s16 = vqaddq_s16(q9s16, q5s16);
- q10s16 = vqaddq_s16(q10s16, q6s16);
- d6u8 = vqrshrun_n_s16(q7s16, 7);
- d7u8 = vqrshrun_n_s16(q8s16, 7);
- d8u8 = vqrshrun_n_s16(q9s16, 7);
- d9u8 = vqrshrun_n_s16(q10s16, 7);
- d18u8 = d22u8;
- d19u8 = d23u8;
- d20u8 = d24u8;
- d21u8 = d25u8;
- d22u8 = d26u8;
- d23u8 = d27u8;
- d24u8 = d28u8;
- d25u8 = d29u8;
- d26u8 = d30u8;
- vst1_u8(dst_ptr, d6u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d7u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d8u8);
- dst_ptr += dst_pitch;
- vst1_u8(dst_ptr, d9u8);
- dst_ptr += dst_pitch;
- }
- return;
- }
- void vp8_sixtap_predict16x16_neon(unsigned char *src_ptr,
- int src_pixels_per_line, int xoffset,
- int yoffset, unsigned char *dst_ptr,
- int dst_pitch) {
- unsigned char *src, *src_tmp, *dst, *tmpp;
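- // Scratch for the first-pass output: (16 + 5 context) rows x 16 columns
- // = 336 bytes.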
- unsigned char tmp[336];
- int i, j;
- uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
- uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d18u8, d19u8;
- uint8x8_t d20u8, d21u8, d22u8, d23u8, d24u8, d25u8, d26u8, d27u8;
- uint8x8_t d28u8, d29u8, d30u8, d31u8;
- int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
- uint8x16_t q3u8, q4u8;
- uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16, q8u16, q9u16, q10u16;
- uint16x8_t q11u16, q12u16, q13u16, q15u16;
- int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16, q8s16, q9s16, q10s16;
- int16x8_t q11s16, q12s16, q13s16, q15s16;
- if (xoffset == 0) { // secondpass_filter16x16_only
- // load second_pass filter
- dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
- // load src data
- src_tmp = src_ptr - src_pixels_per_line * 2;
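- // Filter the 16-wide block as two 8-wide halves; within each half, every
- // j iteration produces 4 output rows from a sliding 9-row window.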
- for (i = 0; i < 2; ++i) {
- src = src_tmp + i * 8;
- dst = dst_ptr + i * 8;
- d18u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d19u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d20u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d21u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d22u8 = vld1_u8(src);
- src += src_pixels_per_line;
- for (j = 0; j < 4; ++j) {
- d23u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d24u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d25u8 = vld1_u8(src);
- src += src_pixels_per_line;
- d26u8 = vld1_u8(src);
- src += src_pixels_per_line;
- q3u16 = vmull_u8(d18u8, d0u8);
- q4u16 = vmull_u8(d19u8, d0u8);
- q5u16 = vmull_u8(d20u8, d0u8);
- q6u16 = vmull_u8(d21u8, d0u8);
- q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
- q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
- q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
- q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
- q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
- q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
- q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
- q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
- q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
- q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
- q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
- q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
- q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
- q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
- q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
- q7u16 = vmull_u8(d21u8, d3u8);
- q8u16 = vmull_u8(d22u8, d3u8);
- q9u16 = vmull_u8(d23u8, d3u8);
- q10u16 = vmull_u8(d24u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q3s16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q9s16 = vqaddq_s16(q9s16, q5s16);
- q10s16 = vqaddq_s16(q10s16, q6s16);
- d6u8 = vqrshrun_n_s16(q7s16, 7);
- d7u8 = vqrshrun_n_s16(q8s16, 7);
- d8u8 = vqrshrun_n_s16(q9s16, 7);
- d9u8 = vqrshrun_n_s16(q10s16, 7);
- d18u8 = d22u8;
- d19u8 = d23u8;
- d20u8 = d24u8;
- d21u8 = d25u8;
- d22u8 = d26u8;
- vst1_u8(dst, d6u8);
- dst += dst_pitch;
- vst1_u8(dst, d7u8);
- dst += dst_pitch;
- vst1_u8(dst, d8u8);
- dst += dst_pitch;
- vst1_u8(dst, d9u8);
- dst += dst_pitch;
- }
- }
- return;
- }
- // load first_pass filter
- dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
- // First pass: output_height lines x output_width columns (21x16)
- if (yoffset == 0) { // firstpass_filter16x16_only
- src = src_ptr - 2;
- dst = dst_ptr;
- for (i = 0; i < 8; ++i) {
- d6u8 = vld1_u8(src);
- d7u8 = vld1_u8(src + 8);
- d8u8 = vld1_u8(src + 16);
- src += src_pixels_per_line;
- d9u8 = vld1_u8(src);
- d10u8 = vld1_u8(src + 8);
- d11u8 = vld1_u8(src + 16);
- src += src_pixels_per_line;
- __builtin_prefetch(src);
- __builtin_prefetch(src + src_pixels_per_line);
- q6u16 = vmull_u8(d6u8, d0u8);
- q7u16 = vmull_u8(d7u8, d0u8);
- q8u16 = vmull_u8(d9u8, d0u8);
- q9u16 = vmull_u8(d10u8, d0u8);
- d20u8 = vext_u8(d6u8, d7u8, 1);
- d21u8 = vext_u8(d9u8, d10u8, 1);
- d22u8 = vext_u8(d7u8, d8u8, 1);
- d23u8 = vext_u8(d10u8, d11u8, 1);
- d24u8 = vext_u8(d6u8, d7u8, 4);
- d25u8 = vext_u8(d9u8, d10u8, 4);
- d26u8 = vext_u8(d7u8, d8u8, 4);
- d27u8 = vext_u8(d10u8, d11u8, 4);
- d28u8 = vext_u8(d6u8, d7u8, 5);
- d29u8 = vext_u8(d9u8, d10u8, 5);
- q6u16 = vmlsl_u8(q6u16, d20u8, d1u8);
- q8u16 = vmlsl_u8(q8u16, d21u8, d1u8);
- q7u16 = vmlsl_u8(q7u16, d22u8, d1u8);
- q9u16 = vmlsl_u8(q9u16, d23u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d24u8, d4u8);
- q8u16 = vmlsl_u8(q8u16, d25u8, d4u8);
- q7u16 = vmlsl_u8(q7u16, d26u8, d4u8);
- q9u16 = vmlsl_u8(q9u16, d27u8, d4u8);
- q6u16 = vmlal_u8(q6u16, d28u8, d5u8);
- q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
- d20u8 = vext_u8(d7u8, d8u8, 5);
- d21u8 = vext_u8(d10u8, d11u8, 5);
- d22u8 = vext_u8(d6u8, d7u8, 2);
- d23u8 = vext_u8(d9u8, d10u8, 2);
- d24u8 = vext_u8(d7u8, d8u8, 2);
- d25u8 = vext_u8(d10u8, d11u8, 2);
- d26u8 = vext_u8(d6u8, d7u8, 3);
- d27u8 = vext_u8(d9u8, d10u8, 3);
- d28u8 = vext_u8(d7u8, d8u8, 3);
- d29u8 = vext_u8(d10u8, d11u8, 3);
- q7u16 = vmlal_u8(q7u16, d20u8, d5u8);
- q9u16 = vmlal_u8(q9u16, d21u8, d5u8);
- q6u16 = vmlal_u8(q6u16, d22u8, d2u8);
- q8u16 = vmlal_u8(q8u16, d23u8, d2u8);
- q7u16 = vmlal_u8(q7u16, d24u8, d2u8);
- q9u16 = vmlal_u8(q9u16, d25u8, d2u8);
- q10u16 = vmull_u8(d26u8, d3u8);
- q11u16 = vmull_u8(d27u8, d3u8);
- q12u16 = vmull_u8(d28u8, d3u8);
- q15u16 = vmull_u8(d29u8, d3u8);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q11s16 = vreinterpretq_s16_u16(q11u16);
- q12s16 = vreinterpretq_s16_u16(q12u16);
- q15s16 = vreinterpretq_s16_u16(q15u16);
- q6s16 = vqaddq_s16(q6s16, q10s16);
- q8s16 = vqaddq_s16(q8s16, q11s16);
- q7s16 = vqaddq_s16(q7s16, q12s16);
- q9s16 = vqaddq_s16(q9s16, q15s16);
- d6u8 = vqrshrun_n_s16(q6s16, 7);
- d7u8 = vqrshrun_n_s16(q7s16, 7);
- d8u8 = vqrshrun_n_s16(q8s16, 7);
- d9u8 = vqrshrun_n_s16(q9s16, 7);
- q3u8 = vcombine_u8(d6u8, d7u8);
- q4u8 = vcombine_u8(d8u8, d9u8);
- vst1q_u8(dst, q3u8);
- dst += dst_pitch;
- vst1q_u8(dst, q4u8);
- dst += dst_pitch;
- }
- return;
- }
- src = src_ptr - 2 - src_pixels_per_line * 2;
- tmpp = tmp;
- for (i = 0; i < 7; ++i) {
- d6u8 = vld1_u8(src);
- d7u8 = vld1_u8(src + 8);
- d8u8 = vld1_u8(src + 16);
- src += src_pixels_per_line;
- d9u8 = vld1_u8(src);
- d10u8 = vld1_u8(src + 8);
- d11u8 = vld1_u8(src + 16);
- src += src_pixels_per_line;
- d12u8 = vld1_u8(src);
- d13u8 = vld1_u8(src + 8);
- d14u8 = vld1_u8(src + 16);
- src += src_pixels_per_line;
- __builtin_prefetch(src);
- __builtin_prefetch(src + src_pixels_per_line);
- __builtin_prefetch(src + src_pixels_per_line * 2);
- q8u16 = vmull_u8(d6u8, d0u8);
- q9u16 = vmull_u8(d7u8, d0u8);
- q10u16 = vmull_u8(d9u8, d0u8);
- q11u16 = vmull_u8(d10u8, d0u8);
- q12u16 = vmull_u8(d12u8, d0u8);
- q13u16 = vmull_u8(d13u8, d0u8);
- d28u8 = vext_u8(d6u8, d7u8, 1);
- d29u8 = vext_u8(d9u8, d10u8, 1);
- d30u8 = vext_u8(d12u8, d13u8, 1);
- q8u16 = vmlsl_u8(q8u16, d28u8, d1u8);
- q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
- q12u16 = vmlsl_u8(q12u16, d30u8, d1u8);
- d28u8 = vext_u8(d7u8, d8u8, 1);
- d29u8 = vext_u8(d10u8, d11u8, 1);
- d30u8 = vext_u8(d13u8, d14u8, 1);
- q9u16 = vmlsl_u8(q9u16, d28u8, d1u8);
- q11u16 = vmlsl_u8(q11u16, d29u8, d1u8);
- q13u16 = vmlsl_u8(q13u16, d30u8, d1u8);
- d28u8 = vext_u8(d6u8, d7u8, 4);
- d29u8 = vext_u8(d9u8, d10u8, 4);
- d30u8 = vext_u8(d12u8, d13u8, 4);
- q8u16 = vmlsl_u8(q8u16, d28u8, d4u8);
- q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
- q12u16 = vmlsl_u8(q12u16, d30u8, d4u8);
- d28u8 = vext_u8(d7u8, d8u8, 4);
- d29u8 = vext_u8(d10u8, d11u8, 4);
- d30u8 = vext_u8(d13u8, d14u8, 4);
- q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
- q11u16 = vmlsl_u8(q11u16, d29u8, d4u8);
- q13u16 = vmlsl_u8(q13u16, d30u8, d4u8);
- d28u8 = vext_u8(d6u8, d7u8, 5);
- d29u8 = vext_u8(d9u8, d10u8, 5);
- d30u8 = vext_u8(d12u8, d13u8, 5);
- q8u16 = vmlal_u8(q8u16, d28u8, d5u8);
- q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
- q12u16 = vmlal_u8(q12u16, d30u8, d5u8);
- d28u8 = vext_u8(d7u8, d8u8, 5);
- d29u8 = vext_u8(d10u8, d11u8, 5);
- d30u8 = vext_u8(d13u8, d14u8, 5);
- q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
- q11u16 = vmlal_u8(q11u16, d29u8, d5u8);
- q13u16 = vmlal_u8(q13u16, d30u8, d5u8);
- d28u8 = vext_u8(d6u8, d7u8, 2);
- d29u8 = vext_u8(d9u8, d10u8, 2);
- d30u8 = vext_u8(d12u8, d13u8, 2);
- q8u16 = vmlal_u8(q8u16, d28u8, d2u8);
- q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
- q12u16 = vmlal_u8(q12u16, d30u8, d2u8);
- d28u8 = vext_u8(d7u8, d8u8, 2);
- d29u8 = vext_u8(d10u8, d11u8, 2);
- d30u8 = vext_u8(d13u8, d14u8, 2);
- q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
- q11u16 = vmlal_u8(q11u16, d29u8, d2u8);
- q13u16 = vmlal_u8(q13u16, d30u8, d2u8);
- d28u8 = vext_u8(d6u8, d7u8, 3);
- d29u8 = vext_u8(d9u8, d10u8, 3);
- d30u8 = vext_u8(d12u8, d13u8, 3);
- d15u8 = vext_u8(d7u8, d8u8, 3);
- d31u8 = vext_u8(d10u8, d11u8, 3);
- d6u8 = vext_u8(d13u8, d14u8, 3);
- q4u16 = vmull_u8(d28u8, d3u8);
- q5u16 = vmull_u8(d29u8, d3u8);
- q6u16 = vmull_u8(d30u8, d3u8);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q12s16 = vreinterpretq_s16_u16(q12u16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q10s16 = vqaddq_s16(q10s16, q5s16);
- q12s16 = vqaddq_s16(q12s16, q6s16);
- q6u16 = vmull_u8(d15u8, d3u8);
- q7u16 = vmull_u8(d31u8, d3u8);
- q3u16 = vmull_u8(d6u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q11s16 = vreinterpretq_s16_u16(q11u16);
- q13s16 = vreinterpretq_s16_u16(q13u16);
- q9s16 = vqaddq_s16(q9s16, q6s16);
- q11s16 = vqaddq_s16(q11s16, q7s16);
- q13s16 = vqaddq_s16(q13s16, q3s16);
- d6u8 = vqrshrun_n_s16(q8s16, 7);
- d7u8 = vqrshrun_n_s16(q9s16, 7);
- d8u8 = vqrshrun_n_s16(q10s16, 7);
- d9u8 = vqrshrun_n_s16(q11s16, 7);
- d10u8 = vqrshrun_n_s16(q12s16, 7);
- d11u8 = vqrshrun_n_s16(q13s16, 7);
- vst1_u8(tmpp, d6u8);
- tmpp += 8;
- vst1_u8(tmpp, d7u8);
- tmpp += 8;
- vst1_u8(tmpp, d8u8);
- tmpp += 8;
- vst1_u8(tmpp, d9u8);
- tmpp += 8;
- vst1_u8(tmpp, d10u8);
- tmpp += 8;
- vst1_u8(tmpp, d11u8);
- tmpp += 8;
- }
- // Second pass: 16x16
- dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
- d0s8 = vdup_lane_s8(dtmps8, 0);
- d1s8 = vdup_lane_s8(dtmps8, 1);
- d2s8 = vdup_lane_s8(dtmps8, 2);
- d3s8 = vdup_lane_s8(dtmps8, 3);
- d4s8 = vdup_lane_s8(dtmps8, 4);
- d5s8 = vdup_lane_s8(dtmps8, 5);
- d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
- d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
- d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
- d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
- d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
- d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
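- // Work down each 8-wide half of tmp (the rows are 16 bytes apart),
- // producing 4 output rows per j iteration as in the second-pass-only path.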
- for (i = 0; i < 2; ++i) {
- dst = dst_ptr + 8 * i;
- tmpp = tmp + 8 * i;
- d18u8 = vld1_u8(tmpp);
- tmpp += 16;
- d19u8 = vld1_u8(tmpp);
- tmpp += 16;
- d20u8 = vld1_u8(tmpp);
- tmpp += 16;
- d21u8 = vld1_u8(tmpp);
- tmpp += 16;
- d22u8 = vld1_u8(tmpp);
- tmpp += 16;
- for (j = 0; j < 4; ++j) {
- d23u8 = vld1_u8(tmpp);
- tmpp += 16;
- d24u8 = vld1_u8(tmpp);
- tmpp += 16;
- d25u8 = vld1_u8(tmpp);
- tmpp += 16;
- d26u8 = vld1_u8(tmpp);
- tmpp += 16;
- q3u16 = vmull_u8(d18u8, d0u8);
- q4u16 = vmull_u8(d19u8, d0u8);
- q5u16 = vmull_u8(d20u8, d0u8);
- q6u16 = vmull_u8(d21u8, d0u8);
- q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
- q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
- q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
- q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
- q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
- q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
- q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
- q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
- q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
- q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
- q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
- q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
- q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
- q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
- q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
- q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
- q7u16 = vmull_u8(d21u8, d3u8);
- q8u16 = vmull_u8(d22u8, d3u8);
- q9u16 = vmull_u8(d23u8, d3u8);
- q10u16 = vmull_u8(d24u8, d3u8);
- q3s16 = vreinterpretq_s16_u16(q3u16);
- q4s16 = vreinterpretq_s16_u16(q4u16);
- q5s16 = vreinterpretq_s16_u16(q5u16);
- q6s16 = vreinterpretq_s16_u16(q6u16);
- q7s16 = vreinterpretq_s16_u16(q7u16);
- q8s16 = vreinterpretq_s16_u16(q8u16);
- q9s16 = vreinterpretq_s16_u16(q9u16);
- q10s16 = vreinterpretq_s16_u16(q10u16);
- q7s16 = vqaddq_s16(q7s16, q3s16);
- q8s16 = vqaddq_s16(q8s16, q4s16);
- q9s16 = vqaddq_s16(q9s16, q5s16);
- q10s16 = vqaddq_s16(q10s16, q6s16);
- d6u8 = vqrshrun_n_s16(q7s16, 7);
- d7u8 = vqrshrun_n_s16(q8s16, 7);
- d8u8 = vqrshrun_n_s16(q9s16, 7);
- d9u8 = vqrshrun_n_s16(q10s16, 7);
- d18u8 = d22u8;
- d19u8 = d23u8;
- d20u8 = d24u8;
- d21u8 = d25u8;
- d22u8 = d26u8;
- vst1_u8(dst, d6u8);
- dst += dst_pitch;
- vst1_u8(dst, d7u8);
- dst += dst_pitch;
- vst1_u8(dst, d8u8);
- dst += dst_pitch;
- vst1_u8(dst, d9u8);
- dst += dst_pitch;
- }
- }
- return;
- }
|