/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2

#include "./vp9_rtcd.h"
#include "./vpx_config.h"

#include "vp9/encoder/vp9_variance.h"
#include "vpx_ports/mem.h"

// Forward declarations for the optimized sub-pixel variance kernels
// (implemented in assembly elsewhere in the project).  Each kernel handles a
// fixed width (4, 8 or 16 pixels) and a caller-supplied height, returning the
// sum of pixel differences and writing the sum of squared differences to
// *sse.  The 2 unused parameters are place holders for PIC enabled build.
#define DECL(w, opt) \
int vp9_sub_pixel_variance##w##xh_##opt(const uint8_t *src, \
                                        ptrdiff_t src_stride, \
                                        int x_offset, int y_offset, \
                                        const uint8_t *dst, \
                                        ptrdiff_t dst_stride, \
                                        int height, unsigned int *sse, \
                                        void *unused0, void *unused)
// The 4-wide kernel uses a different ISA level (opt2) than the 8/16-wide
// kernels (opt1) — see the DECLS invocations below.
#define DECLS(opt1, opt2) \
DECL(4, opt2); \
DECL(8, opt1); \
DECL(16, opt1)

DECLS(sse2, sse);
DECLS(ssse3, ssse3);
#undef DECLS
#undef DECL

// FN(w, h, wf, wlog2, hlog2, opt, cast) emits the public w x h sub-pixel
// variance function.  It tiles the wf-wide kernel across the block width
// (at column offsets 0, 16, 32, 48 for up to 4 tiles of 16), accumulating
// the per-tile sum of differences (se) and sum of squared differences (sse).
// The returned variance is sse - se^2 / (w*h), with the division done as a
// shift by wlog2 + hlog2.  `cast` widens the se*se product: (int64_t) for
// block sizes where se^2 can exceed 32 bits, (unsigned int) otherwise.
#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
unsigned int vp9_sub_pixel_variance##w##x##h##_##opt(const uint8_t *src, \
                                                     int src_stride, \
                                                     int x_offset, \
                                                     int y_offset, \
                                                     const uint8_t *dst, \
                                                     int dst_stride, \
                                                     unsigned int *sse_ptr) { \
  unsigned int sse; \
  int se = vp9_sub_pixel_variance##wf##xh_##opt(src, src_stride, x_offset, \
                                                y_offset, dst, dst_stride, \
                                                h, &sse, NULL, NULL); \
  if (w > wf) { \
    unsigned int sse2; \
    int se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 16, src_stride, \
                                                   x_offset, y_offset, \
                                                   dst + 16, dst_stride, \
                                                   h, &sse2, NULL, NULL); \
    se += se2; \
    sse += sse2; \
    if (w > wf * 2) { \
      se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 32, src_stride, \
                                                 x_offset, y_offset, \
                                                 dst + 32, dst_stride, \
                                                 h, &sse2, NULL, NULL); \
      se += se2; \
      sse += sse2; \
      se2 = vp9_sub_pixel_variance##wf##xh_##opt(src + 48, src_stride, \
                                                 x_offset, y_offset, \
                                                 dst + 48, dst_stride, \
                                                 h, &sse2, NULL, NULL); \
      se += se2; \
      sse += sse2; \
    } \
  } \
  *sse_ptr = sse; \
  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
}

// Instantiate wrappers for every VP9 block size.  opt1 names the ISA suffix
// for the 8/16-wide kernels, opt2 for the 4-wide kernel.  Blocks of 32x32
// pixels and larger use (int64_t) because se^2 may overflow 32 bits there.
#define FNS(opt1, opt2) \
FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
FN(16, 16, 16, 4, 4, opt1, (unsigned int)); \
FN(16, 8, 16, 4, 3, opt1, (unsigned int)); \
FN(8, 16, 8, 3, 4, opt1, (unsigned int)); \
FN(8, 8, 8, 3, 3, opt1, (unsigned int)); \
FN(8, 4, 8, 3, 2, opt1, (unsigned int)); \
FN(4, 8, 4, 2, 3, opt2, (unsigned int)); \
FN(4, 4, 4, 2, 2, opt2, (unsigned int))

FNS(sse2, sse);
FNS(ssse3, ssse3);

#undef FNS
#undef FN

// The 2 unused parameters are place holders for PIC enabled build.
// Forward declarations for the compound-prediction ("avg") sub-pixel
// variance kernels.  These take an additional second-prediction buffer `sec`
// (with its own stride) that the kernel combines with the sub-pixel filtered
// source before computing the difference against dst.  As above, the 2
// unused parameters are place holders for PIC enabled build.
#define DECL(w, opt) \
int vp9_sub_pixel_avg_variance##w##xh_##opt(const uint8_t *src, \
                                            ptrdiff_t src_stride, \
                                            int x_offset, int y_offset, \
                                            const uint8_t *dst, \
                                            ptrdiff_t dst_stride, \
                                            const uint8_t *sec, \
                                            ptrdiff_t sec_stride, \
                                            int height, unsigned int *sse, \
                                            void *unused0, void *unused)
#define DECLS(opt1, opt2) \
DECL(4, opt2); \
DECL(8, opt1); \
DECL(16, opt1)

DECLS(sse2, sse);
DECLS(ssse3, ssse3);
#undef DECL
#undef DECLS

// FN(w, h, wf, wlog2, hlog2, opt, cast) emits the public w x h avg-variance
// wrapper.  Same tiling scheme as the non-avg version above: the wf-wide
// kernel is applied at column offsets 0/16/32/48, and se/sse accumulate the
// per-tile sums.  `sec` is advanced by the same column offsets and `w` is
// passed as its stride, i.e. the second prediction is a contiguous w-wide
// block.  The return value is the variance sse - se^2 / (w*h); `cast`
// widens se*se to 64 bits for the sizes where it can overflow 32 bits.
#define FN(w, h, wf, wlog2, hlog2, opt, cast) \
unsigned int vp9_sub_pixel_avg_variance##w##x##h##_##opt(const uint8_t *src, \
                                                         int src_stride, \
                                                         int x_offset, \
                                                         int y_offset, \
                                                         const uint8_t *dst, \
                                                         int dst_stride, \
                                                         unsigned int *sseptr, \
                                                         const uint8_t *sec) { \
  unsigned int sse; \
  int se = vp9_sub_pixel_avg_variance##wf##xh_##opt(src, src_stride, x_offset, \
                                                    y_offset, dst, dst_stride, \
                                                    sec, w, h, &sse, NULL, \
                                                    NULL); \
  if (w > wf) { \
    unsigned int sse2; \
    int se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 16, src_stride, \
                                                       x_offset, y_offset, \
                                                       dst + 16, dst_stride, \
                                                       sec + 16, w, h, &sse2, \
                                                       NULL, NULL); \
    se += se2; \
    sse += sse2; \
    if (w > wf * 2) { \
      se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 32, src_stride, \
                                                     x_offset, y_offset, \
                                                     dst + 32, dst_stride, \
                                                     sec + 32, w, h, &sse2, \
                                                     NULL, NULL); \
      se += se2; \
      sse += sse2; \
      se2 = vp9_sub_pixel_avg_variance##wf##xh_##opt(src + 48, src_stride, \
                                                     x_offset, y_offset, \
                                                     dst + 48, dst_stride, \
                                                     sec + 48, w, h, &sse2, \
                                                     NULL, NULL); \
      se += se2; \
      sse += sse2; \
    } \
  } \
  *sseptr = sse; \
  return sse - ((cast se * se) >> (wlog2 + hlog2)); \
}

// Instantiation table — identical block-size/shift/cast layout to the
// non-avg FNS above; kept in sync so both wrapper families agree.
#define FNS(opt1, opt2) \
FN(64, 64, 16, 6, 6, opt1, (int64_t)); \
FN(64, 32, 16, 6, 5, opt1, (int64_t)); \
FN(32, 64, 16, 5, 6, opt1, (int64_t)); \
FN(32, 32, 16, 5, 5, opt1, (int64_t)); \
FN(32, 16, 16, 5, 4, opt1, (int64_t)); \
FN(16, 32, 16, 4, 5, opt1, (int64_t)); \
FN(16, 16, 16, 4, 4, opt1, (unsigned int)); \
FN(16, 8, 16, 4, 3, opt1, (unsigned int)); \
FN(8, 16, 8, 3, 4, opt1, (unsigned int)); \
FN(8, 8, 8, 3, 3, opt1, (unsigned int)); \
FN(8, 4, 8, 3, 2, opt1, (unsigned int)); \
FN(4, 8, 4, 2, 3, opt2, (unsigned int)); \
FN(4, 4, 4, 2, 2, opt2, (unsigned int))

FNS(sse2, sse);
FNS(ssse3, ssse3);

#undef FNS
#undef FN