/*
 *
 * Copyright (c) 2018, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>

#include "aom/aom_integer.h"
#include "aom_dsp/blend.h"
#include "aom_ports/mem.h"
#include "av1/common/arm/mem_neon.h"
#include "aom_dsp/aom_dsp_common.h"
#include "config/aom_dsp_rtcd.h"

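// Blend two 8-bit source blocks into dst using a one-dimensional horizontal
// mask that is reused for every row:
//   dst[x] = ROUND_POWER_OF_TWO(mask[x] * src0[x] +
//                               (AOM_BLEND_A64_MAX_ALPHA - mask[x]) * src1[x],
//                               AOM_BLEND_A64_ROUND_BITS)
// with AOM_BLEND_A64_MAX_ALPHA == 64 and AOM_BLEND_A64_ROUND_BITS == 6.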
void aom_blend_a64_hmask_neon(uint8_t *dst, uint32_t dst_stride,
                              const uint8_t *src0, uint32_t src0_stride,
                              const uint8_t *src1, uint32_t src1_stride,
                              const uint8_t *mask, int w, int h) {
  assert(IMPLIES(src0 == dst, src0_stride == dst_stride));
  assert(IMPLIES(src1 == dst, src1_stride == dst_stride));

  assert(h >= 2);
  assert(w >= 2);
  assert(IS_POWER_OF_TWO(h));
  assert(IS_POWER_OF_TWO(w));
  uint8x8_t tmp0, tmp1;
  uint8x16_t res_q;
  uint16x8_t res, res_low, res_high;
  uint32x2_t tmp0_32 = vdup_n_u32(0), tmp1_32 = vdup_n_u32(0);
  uint16x4_t tmp0_16 = vdup_n_u16(0), tmp1_16 = vdup_n_u16(0);
  const uint8x8_t vdup_64 = vdup_n_u8((uint8_t)64);

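  // Wide blocks: blend each row in 16-pixel chunks. vmull_u8/vmlal_u8 widen
  // the weighted sums to 16 bits, and vrshrn_n_u16 applies the rounding
  // shift while narrowing back to 8 bits.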
  if (w >= 16) {
    const uint8x16_t vdup_64_q = vdupq_n_u8((uint8_t)64);
    for (int i = 0; i < h; ++i) {
      for (int j = 0; j < w; j += 16) {
        __builtin_prefetch(src0);
        __builtin_prefetch(src1);
        const uint8x16_t tmp0_q = vld1q_u8(src0);
        const uint8x16_t tmp1_q = vld1q_u8(src1);
        const uint8x16_t m_q = vld1q_u8(mask);
        const uint8x16_t max_minus_m_q = vsubq_u8(vdup_64_q, m_q);
        res_low = vmull_u8(vget_low_u8(m_q), vget_low_u8(tmp0_q));
        res_low =
            vmlal_u8(res_low, vget_low_u8(max_minus_m_q), vget_low_u8(tmp1_q));
        res_high = vmull_u8(vget_high_u8(m_q), vget_high_u8(tmp0_q));
        res_high = vmlal_u8(res_high, vget_high_u8(max_minus_m_q),
                            vget_high_u8(tmp1_q));
        res_q = vcombine_u8(vrshrn_n_u16(res_low, AOM_BLEND_A64_ROUND_BITS),
                            vrshrn_n_u16(res_high, AOM_BLEND_A64_ROUND_BITS));
        vst1q_u8(dst, res_q);
        src0 += 16;
        src1 += 16;
        dst += 16;
        mask += 16;
      }
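      // Step the row pointers past any stride padding and rewind the mask,
      // since the same mask row applies to every image row.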
      src0 += src0_stride - w;
      src1 += src1_stride - w;
      dst += dst_stride - w;
      mask -= w;
    }
  } else if (w == 8) {
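    // The horizontal mask is constant down the column, so the mask and its
    // complement (64 - m) are loaded once and reused for all h rows.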
    const uint8x8_t m = vld1_u8(mask);
    const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);
    for (int i = 0; i < h; ++i) {
      __builtin_prefetch(src0);
      __builtin_prefetch(src1);
      tmp0 = vld1_u8(src0);
      tmp1 = vld1_u8(src1);
      res = vmull_u8(m, tmp0);
      res = vmlal_u8(res, max_minus_m, tmp1);
      vst1_u8(dst, vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS));
      src0 += src0_stride;
      src1 += src1_stride;
      dst += dst_stride;
    }
  } else if (w == 4) {
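    // vld1_dup_u32 replicates the 4-byte mask row into both halves of the
    // vector, so a single 8-lane multiply blends two image rows at once.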
    const uint8x8_t m =
        vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)mask));
    const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);
    for (int i = 0; i < h; i += 2) {
      __builtin_prefetch(src0 + 0 * src0_stride);
      __builtin_prefetch(src0 + 1 * src0_stride);
      __builtin_prefetch(src1 + 0 * src1_stride);
      __builtin_prefetch(src1 + 1 * src1_stride);
      load_unaligned_u8_4x2(src0, src0_stride, &tmp0_32);
      tmp0 = vreinterpret_u8_u32(tmp0_32);
      load_unaligned_u8_4x2(src1, src1_stride, &tmp1_32);
      tmp1 = vreinterpret_u8_u32(tmp1_32);
      res = vmull_u8(m, tmp0);
      res = vmlal_u8(res, max_minus_m, tmp1);
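      // After narrowing, lane 0 of the u32 view holds row i and lane 1 holds
      // row i + 1.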
      vst1_lane_u32(
          (uint32_t *)(dst + (0 * dst_stride)),
          vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 0);
      vst1_lane_u32(
          (uint32_t *)(dst + (1 * dst_stride)),
          vreinterpret_u32_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 1);
      src0 += (2 * src0_stride);
      src1 += (2 * src1_stride);
      dst += (2 * dst_stride);
    }
  } else if (w == 2) {
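    // Same two-rows-per-iteration scheme as the w == 4 case, but with 16-bit
    // loads and stores: vld1_dup_u16 replicates the 2-byte mask row across
    // all four 16-bit lanes.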
    const uint8x8_t m =
        vreinterpret_u8_u16(vld1_dup_u16((const uint16_t *)mask));
    const uint8x8_t max_minus_m = vsub_u8(vdup_64, m);
    for (int i = 0; i < h; i += 2) {
      __builtin_prefetch(src0 + 0 * src0_stride);
      __builtin_prefetch(src0 + 1 * src0_stride);
      __builtin_prefetch(src1 + 0 * src1_stride);
      __builtin_prefetch(src1 + 1 * src1_stride);
      load_unaligned_u8_2x2(src0, src0_stride, &tmp0_16);
      tmp0 = vreinterpret_u8_u16(tmp0_16);
      load_unaligned_u8_2x2(src1, src1_stride, &tmp1_16);
      tmp1 = vreinterpret_u8_u16(tmp1_16);
      res = vmull_u8(m, tmp0);
      res = vmlal_u8(res, max_minus_m, tmp1);
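      // Lane 0 of the u16 view holds row i; lane 1 holds row i + 1.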
      vst1_lane_u16(
          (uint16_t *)(dst + (0 * dst_stride)),
          vreinterpret_u16_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 0);
      vst1_lane_u16(
          (uint16_t *)(dst + (1 * dst_stride)),
          vreinterpret_u16_u8(vrshrn_n_u16(res, AOM_BLEND_A64_ROUND_BITS)), 1);
      src0 += (2 * src0_stride);
      src1 += (2 * src1_stride);
      dst += (2 * dst_stride);
    }
  }
}