/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"
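
// Gathers 4 bytes from each of two consecutive rows of a into one vector so
// that both rows can be widened and processed together.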
static inline uint8x16_t read4x2(const uint8_t *a, int stride) {
  const uint32x4_t a0 = (uint32x4_t)vec_vsx_ld(0, a);
  const uint32x4_t a1 = (uint32x4_t)vec_vsx_ld(0, a + stride);

  return (uint8x16_t)vec_mergeh(a0, a1);
}
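
// Sum of squared error over a 4x4 block: each read4x2() covers two rows, so
// two vec_msum() accumulations square and sum all 16 differences, and
// vec_sums() reduces the four partial sums to a single scalar.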
uint32_t vpx_get4x4sse_cs_vsx(const uint8_t *a, int a_stride, const uint8_t *b,
                              int b_stride) {
  int distortion;

  const int16x8_t a0 = unpack_to_s16_h(read4x2(a, a_stride));
  const int16x8_t a1 = unpack_to_s16_h(read4x2(a + a_stride * 2, a_stride));
  const int16x8_t b0 = unpack_to_s16_h(read4x2(b, b_stride));
  const int16x8_t b1 = unpack_to_s16_h(read4x2(b + b_stride * 2, b_stride));
  const int16x8_t d0 = vec_sub(a0, b0);
  const int16x8_t d1 = vec_sub(a1, b1);
  const int32x4_t ds = vec_msum(d1, d1, vec_msum(d0, d0, vec_splat_s32(0)));
  const int32x4_t d = vec_splat(vec_sums(ds, vec_splat_s32(0)), 3);

  vec_ste(d, 0, &distortion);

  return distortion;
}
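
// Sum of squares of a 16x16 macroblock of 16-bit samples (256 values),
// accumulated eight at a time with vec_msum().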
// TODO(lu_zero): Unroll
uint32_t vpx_get_mb_ss_vsx(const int16_t *a) {
  unsigned int i, sum = 0;
  int32x4_t s = vec_splat_s32(0);

  for (i = 0; i < 256; i += 8) {
    const int16x8_t v = vec_vsx_ld(0, a + i);
    s = vec_msum(v, v, s);
  }

  s = vec_splat(vec_sums(s, vec_splat_s32(0)), 3);

  vec_ste((uint32x4_t)s, 0, &sum);

  return sum;
}
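
// Compound averaging: comp_pred[] = rounding average of pred[] and ref[]
// (vec_avg computes (a + b + 1) >> 1 per lane). Narrow widths pack several
// rows into one 16-byte vector so every load and store stays full width.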
void vpx_comp_avg_pred_vsx(uint8_t *comp_pred, const uint8_t *pred, int width,
                           int height, const uint8_t *ref, int ref_stride) {
  int i, j;
  /* comp_pred and pred must be 16-byte aligned. */
  assert(((intptr_t)comp_pred & 0xf) == 0);
  assert(((intptr_t)pred & 0xf) == 0);
  if (width >= 16) {
    for (i = 0; i < height; ++i) {
      for (j = 0; j < width; j += 16) {
        const uint8x16_t v = vec_avg(vec_vsx_ld(j, pred), vec_vsx_ld(j, ref));
        vec_vsx_st(v, j, comp_pred);
      }
      comp_pred += width;
      pred += width;
      ref += ref_stride;
    }
  } else if (width == 8) {
    // Process 2 lines at a time.
    for (i = 0; i < height / 2; ++i) {
      const uint8x16_t r0 = vec_vsx_ld(0, ref);
      const uint8x16_t r1 = vec_vsx_ld(0, ref + ref_stride);
      const uint8x16_t r = xxpermdi(r0, r1, 0);
      const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r);
      vec_vsx_st(v, 0, comp_pred);
      comp_pred += 16;  // width * 2
      pred += 16;       // width * 2
      ref += ref_stride * 2;
    }
  } else {
    assert(width == 4);
    // Process 4 lines at a time.
    for (i = 0; i < height / 4; ++i) {
      const uint32x4_t r0 = (uint32x4_t)vec_vsx_ld(0, ref);
      const uint32x4_t r1 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride);
      const uint32x4_t r2 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 2);
      const uint32x4_t r3 = (uint32x4_t)vec_vsx_ld(0, ref + ref_stride * 3);
      const uint8x16_t r =
          (uint8x16_t)xxpermdi(vec_mergeh(r0, r1), vec_mergeh(r2, r3), 0);
      const uint8x16_t v = vec_avg(vec_vsx_ld(0, pred), r);
      vec_vsx_st(v, 0, comp_pred);
      comp_pred += 16;  // width * 4
      pred += 16;       // width * 4
      ref += ref_stride * 4;
    }
  }
}
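
#if 0
/* A minimal scalar sketch of the behaviour the kernel above is expected to
 * match, assuming vec_avg's per-lane rounding average (a + b + 1) >> 1. The
 * helper name comp_avg_pred_ref is hypothetical and not part of the libvpx
 * build; it is kept under #if 0 purely as illustration. */
static void comp_avg_pred_ref(uint8_t *comp_pred, const uint8_t *pred,
                              int width, int height, const uint8_t *ref,
                              int ref_stride) {
  int i, j;
  for (i = 0; i < height; ++i) {
    for (j = 0; j < width; ++j) {
      // Rounding average of the two predictions, as vec_avg does per lane.
      comp_pred[j] = (uint8_t)((pred[j] + ref[j] + 1) >> 1);
    }
    comp_pred += width;
    pred += width;
    ref += ref_stride;
  }
}
#endif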