1 /*
2 * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "libyuv/rotate.h"
12
13 #include "libyuv/convert.h"
14 #include "libyuv/cpu_id.h"
15 #include "libyuv/planar_functions.h"
16 #include "libyuv/row.h"
17 #include "libyuv/scale_row.h" /* for ScaleARGBRowDownEven_C */
18
19 #ifdef __cplusplus
20 namespace libyuv {
21 extern "C" {
22 #endif
23
ARGBTranspose(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_argb,int dst_stride_argb,int width,int height)24 static int ARGBTranspose(const uint8_t* src_argb,
25 int src_stride_argb,
26 uint8_t* dst_argb,
27 int dst_stride_argb,
28 int width,
29 int height) {
30 int i;
31 int src_pixel_step = src_stride_argb >> 2;
32 void (*ScaleARGBRowDownEven)(
33 const uint8_t* src_argb, ptrdiff_t src_stride_argb, int src_step,
34 uint8_t* dst_argb, int dst_width) = ScaleARGBRowDownEven_C;
35 // Check stride is a multiple of 4.
36 if (src_stride_argb & 3) {
37 return -1;
38 }
39 #if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
40 if (TestCpuFlag(kCpuHasSSE2)) {
41 ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_SSE2;
42 if (IS_ALIGNED(height, 4)) { // Width of dest.
43 ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
44 }
45 }
46 #endif
47 #if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
48 if (TestCpuFlag(kCpuHasNEON)) {
49 ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_NEON;
50 if (IS_ALIGNED(height, 4)) { // Width of dest.
51 ScaleARGBRowDownEven = ScaleARGBRowDownEven_NEON;
52 }
53 }
54 #endif
55 #if defined(HAS_SCALEARGBROWDOWNEVEN_MMI)
56 if (TestCpuFlag(kCpuHasMMI)) {
57 ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MMI;
58 if (IS_ALIGNED(height, 4)) { // Width of dest.
59 ScaleARGBRowDownEven = ScaleARGBRowDownEven_MMI;
60 }
61 }
62 #endif
63 #if defined(HAS_SCALEARGBROWDOWNEVEN_MSA)
64 if (TestCpuFlag(kCpuHasMSA)) {
65 ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MSA;
66 if (IS_ALIGNED(height, 4)) { // Width of dest.
67 ScaleARGBRowDownEven = ScaleARGBRowDownEven_MSA;
68 }
69 }
70 #endif
71
72 for (i = 0; i < width; ++i) { // column of source to row of dest.
73 ScaleARGBRowDownEven(src_argb, 0, src_pixel_step, dst_argb, height);
74 dst_argb += dst_stride_argb;
75 src_argb += 4;
76 }
77 return 0;
78 }
79
ARGBRotate90(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_argb,int dst_stride_argb,int width,int height)80 static int ARGBRotate90(const uint8_t* src_argb,
81 int src_stride_argb,
82 uint8_t* dst_argb,
83 int dst_stride_argb,
84 int width,
85 int height) {
86 // Rotate by 90 is a ARGBTranspose with the source read
87 // from bottom to top. So set the source pointer to the end
88 // of the buffer and flip the sign of the source stride.
89 src_argb += src_stride_argb * (height - 1);
90 src_stride_argb = -src_stride_argb;
91 return ARGBTranspose(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
92 width, height);
93 }
94
ARGBRotate270(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_argb,int dst_stride_argb,int width,int height)95 static int ARGBRotate270(const uint8_t* src_argb,
96 int src_stride_argb,
97 uint8_t* dst_argb,
98 int dst_stride_argb,
99 int width,
100 int height) {
101 // Rotate by 270 is a ARGBTranspose with the destination written
102 // from bottom to top. So set the destination pointer to the end
103 // of the buffer and flip the sign of the destination stride.
104 dst_argb += dst_stride_argb * (width - 1);
105 dst_stride_argb = -dst_stride_argb;
106 return ARGBTranspose(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
107 width, height);
108 }
109
ARGBRotate180(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_argb,int dst_stride_argb,int width,int height)110 static int ARGBRotate180(const uint8_t* src_argb,
111 int src_stride_argb,
112 uint8_t* dst_argb,
113 int dst_stride_argb,
114 int width,
115 int height) {
116 // Swap first and last row and mirror the content. Uses a temporary row.
117 align_buffer_64(row, width * 4);
118 const uint8_t* src_bot = src_argb + src_stride_argb * (height - 1);
119 uint8_t* dst_bot = dst_argb + dst_stride_argb * (height - 1);
120 int half_height = (height + 1) >> 1;
121 int y;
122 void (*ARGBMirrorRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
123 ARGBMirrorRow_C;
124 void (*CopyRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
125 CopyRow_C;
126 #if defined(HAS_ARGBMIRRORROW_NEON)
127 if (TestCpuFlag(kCpuHasNEON)) {
128 ARGBMirrorRow = ARGBMirrorRow_Any_NEON;
129 if (IS_ALIGNED(width, 8)) {
130 ARGBMirrorRow = ARGBMirrorRow_NEON;
131 }
132 }
133 #endif
134 #if defined(HAS_ARGBMIRRORROW_SSE2)
135 if (TestCpuFlag(kCpuHasSSE2)) {
136 ARGBMirrorRow = ARGBMirrorRow_Any_SSE2;
137 if (IS_ALIGNED(width, 4)) {
138 ARGBMirrorRow = ARGBMirrorRow_SSE2;
139 }
140 }
141 #endif
142 #if defined(HAS_ARGBMIRRORROW_AVX2)
143 if (TestCpuFlag(kCpuHasAVX2)) {
144 ARGBMirrorRow = ARGBMirrorRow_Any_AVX2;
145 if (IS_ALIGNED(width, 8)) {
146 ARGBMirrorRow = ARGBMirrorRow_AVX2;
147 }
148 }
149 #endif
150 #if defined(HAS_ARGBMIRRORROW_MMI)
151 if (TestCpuFlag(kCpuHasMMI)) {
152 ARGBMirrorRow = ARGBMirrorRow_Any_MMI;
153 if (IS_ALIGNED(width, 2)) {
154 ARGBMirrorRow = ARGBMirrorRow_MMI;
155 }
156 }
157 #endif
158 #if defined(HAS_ARGBMIRRORROW_MSA)
159 if (TestCpuFlag(kCpuHasMSA)) {
160 ARGBMirrorRow = ARGBMirrorRow_Any_MSA;
161 if (IS_ALIGNED(width, 16)) {
162 ARGBMirrorRow = ARGBMirrorRow_MSA;
163 }
164 }
165 #endif
166 #if defined(HAS_COPYROW_SSE2)
167 if (TestCpuFlag(kCpuHasSSE2)) {
168 CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
169 }
170 #endif
171 #if defined(HAS_COPYROW_AVX)
172 if (TestCpuFlag(kCpuHasAVX)) {
173 CopyRow = IS_ALIGNED(width * 4, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
174 }
175 #endif
176 #if defined(HAS_COPYROW_ERMS)
177 if (TestCpuFlag(kCpuHasERMS)) {
178 CopyRow = CopyRow_ERMS;
179 }
180 #endif
181 #if defined(HAS_COPYROW_NEON)
182 if (TestCpuFlag(kCpuHasNEON)) {
183 CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
184 }
185 #endif
186
187 // Odd height will harmlessly mirror the middle row twice.
188 for (y = 0; y < half_height; ++y) {
189 ARGBMirrorRow(src_argb, row, width); // Mirror first row into a buffer
190 ARGBMirrorRow(src_bot, dst_argb, width); // Mirror last row into first row
191 CopyRow(row, dst_bot, width * 4); // Copy first mirrored row into last
192 src_argb += src_stride_argb;
193 dst_argb += dst_stride_argb;
194 src_bot -= src_stride_argb;
195 dst_bot -= dst_stride_argb;
196 }
197 free_aligned_buffer_64(row);
198 return 0;
199 }
200
201 LIBYUV_API
ARGBRotate(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_argb,int dst_stride_argb,int width,int height,enum RotationMode mode)202 int ARGBRotate(const uint8_t* src_argb,
203 int src_stride_argb,
204 uint8_t* dst_argb,
205 int dst_stride_argb,
206 int width,
207 int height,
208 enum RotationMode mode) {
209 if (!src_argb || width <= 0 || height == 0 || !dst_argb) {
210 return -1;
211 }
212
213 // Negative height means invert the image.
214 if (height < 0) {
215 height = -height;
216 src_argb = src_argb + (height - 1) * src_stride_argb;
217 src_stride_argb = -src_stride_argb;
218 }
219
220 switch (mode) {
221 case kRotate0:
222 // copy frame
223 return ARGBCopy(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
224 width, height);
225 case kRotate90:
226 return ARGBRotate90(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
227 width, height);
228 case kRotate270:
229 return ARGBRotate270(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
230 width, height);
231 case kRotate180:
232 return ARGBRotate180(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
233 width, height);
234 default:
235 break;
236 }
237 return -1;
238 }
239
240 #ifdef __cplusplus
241 } // extern "C"
242 } // namespace libyuv
243 #endif
244