1 /*
2 * Copyright 2012 The LibYuv Project Authors. All rights reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #include "libyuv/rotate.h"
12
13 #include "libyuv/convert.h"
14 #include "libyuv/cpu_id.h"
15 #include "libyuv/planar_functions.h"
16 #include "libyuv/row.h"
17 #include "libyuv/scale_row.h" /* for ScaleARGBRowDownEven_ */
18
19 #ifdef __cplusplus
20 namespace libyuv {
21 extern "C" {
22 #endif
23
ARGBTranspose(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_argb,int dst_stride_argb,int width,int height)24 static void ARGBTranspose(const uint8_t* src_argb,
25 int src_stride_argb,
26 uint8_t* dst_argb,
27 int dst_stride_argb,
28 int width,
29 int height) {
30 int i;
31 int src_pixel_step = src_stride_argb >> 2;
32 void (*ScaleARGBRowDownEven)(
33 const uint8_t* src_argb, ptrdiff_t src_stride_argb, int src_step,
34 uint8_t* dst_argb, int dst_width) = ScaleARGBRowDownEven_C;
35 #if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
36 if (TestCpuFlag(kCpuHasSSE2)) {
37 ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_SSE2;
38 if (IS_ALIGNED(height, 4)) { // Width of dest.
39 ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
40 }
41 }
42 #endif
43 #if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
44 if (TestCpuFlag(kCpuHasNEON)) {
45 ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_NEON;
46 if (IS_ALIGNED(height, 4)) { // Width of dest.
47 ScaleARGBRowDownEven = ScaleARGBRowDownEven_NEON;
48 }
49 }
50 #endif
51 #if defined(HAS_SCALEARGBROWDOWNEVEN_MSA)
52 if (TestCpuFlag(kCpuHasMSA)) {
53 ScaleARGBRowDownEven = ScaleARGBRowDownEven_Any_MSA;
54 if (IS_ALIGNED(height, 4)) { // Width of dest.
55 ScaleARGBRowDownEven = ScaleARGBRowDownEven_MSA;
56 }
57 }
58 #endif
59
60 for (i = 0; i < width; ++i) { // column of source to row of dest.
61 ScaleARGBRowDownEven(src_argb, 0, src_pixel_step, dst_argb, height);
62 dst_argb += dst_stride_argb;
63 src_argb += 4;
64 }
65 }
66
// Rotate an ARGB image by 90 degrees.
void ARGBRotate90(const uint8_t* src_argb,
                  int src_stride_argb,
                  uint8_t* dst_argb,
                  int dst_stride_argb,
                  int width,
                  int height) {
  // A 90 degree rotation is a transpose with the source scanned bottom-up:
  // start at the last source row and negate the source stride.
  const uint8_t* src_bottom = src_argb + src_stride_argb * (height - 1);
  ARGBTranspose(src_bottom, -src_stride_argb, dst_argb, dst_stride_argb,
                width, height);
}
81
// Rotate an ARGB image by 270 degrees.
void ARGBRotate270(const uint8_t* src_argb,
                   int src_stride_argb,
                   uint8_t* dst_argb,
                   int dst_stride_argb,
                   int width,
                   int height) {
  // A 270 degree rotation is a transpose with the destination written
  // bottom-up: start at the last destination row and negate the
  // destination stride.
  uint8_t* dst_bottom = dst_argb + dst_stride_argb * (width - 1);
  ARGBTranspose(src_argb, src_stride_argb, dst_bottom, -dst_stride_argb,
                width, height);
}
96
ARGBRotate180(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_argb,int dst_stride_argb,int width,int height)97 void ARGBRotate180(const uint8_t* src_argb,
98 int src_stride_argb,
99 uint8_t* dst_argb,
100 int dst_stride_argb,
101 int width,
102 int height) {
103 // Swap first and last row and mirror the content. Uses a temporary row.
104 align_buffer_64(row, width * 4);
105 const uint8_t* src_bot = src_argb + src_stride_argb * (height - 1);
106 uint8_t* dst_bot = dst_argb + dst_stride_argb * (height - 1);
107 int half_height = (height + 1) >> 1;
108 int y;
109 void (*ARGBMirrorRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
110 ARGBMirrorRow_C;
111 void (*CopyRow)(const uint8_t* src_argb, uint8_t* dst_argb, int width) =
112 CopyRow_C;
113 #if defined(HAS_ARGBMIRRORROW_NEON)
114 if (TestCpuFlag(kCpuHasNEON)) {
115 ARGBMirrorRow = ARGBMirrorRow_Any_NEON;
116 if (IS_ALIGNED(width, 4)) {
117 ARGBMirrorRow = ARGBMirrorRow_NEON;
118 }
119 }
120 #endif
121 #if defined(HAS_ARGBMIRRORROW_SSE2)
122 if (TestCpuFlag(kCpuHasSSE2)) {
123 ARGBMirrorRow = ARGBMirrorRow_Any_SSE2;
124 if (IS_ALIGNED(width, 4)) {
125 ARGBMirrorRow = ARGBMirrorRow_SSE2;
126 }
127 }
128 #endif
129 #if defined(HAS_ARGBMIRRORROW_AVX2)
130 if (TestCpuFlag(kCpuHasAVX2)) {
131 ARGBMirrorRow = ARGBMirrorRow_Any_AVX2;
132 if (IS_ALIGNED(width, 8)) {
133 ARGBMirrorRow = ARGBMirrorRow_AVX2;
134 }
135 }
136 #endif
137 #if defined(HAS_ARGBMIRRORROW_MSA)
138 if (TestCpuFlag(kCpuHasMSA)) {
139 ARGBMirrorRow = ARGBMirrorRow_Any_MSA;
140 if (IS_ALIGNED(width, 16)) {
141 ARGBMirrorRow = ARGBMirrorRow_MSA;
142 }
143 }
144 #endif
145 #if defined(HAS_COPYROW_SSE2)
146 if (TestCpuFlag(kCpuHasSSE2)) {
147 CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
148 }
149 #endif
150 #if defined(HAS_COPYROW_AVX)
151 if (TestCpuFlag(kCpuHasAVX)) {
152 CopyRow = IS_ALIGNED(width * 4, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
153 }
154 #endif
155 #if defined(HAS_COPYROW_ERMS)
156 if (TestCpuFlag(kCpuHasERMS)) {
157 CopyRow = CopyRow_ERMS;
158 }
159 #endif
160 #if defined(HAS_COPYROW_NEON)
161 if (TestCpuFlag(kCpuHasNEON)) {
162 CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
163 }
164 #endif
165
166 // Odd height will harmlessly mirror the middle row twice.
167 for (y = 0; y < half_height; ++y) {
168 ARGBMirrorRow(src_argb, row, width); // Mirror first row into a buffer
169 ARGBMirrorRow(src_bot, dst_argb, width); // Mirror last row into first row
170 CopyRow(row, dst_bot, width * 4); // Copy first mirrored row into last
171 src_argb += src_stride_argb;
172 dst_argb += dst_stride_argb;
173 src_bot -= src_stride_argb;
174 dst_bot -= dst_stride_argb;
175 }
176 free_aligned_buffer_64(row);
177 }
178
179 LIBYUV_API
ARGBRotate(const uint8_t * src_argb,int src_stride_argb,uint8_t * dst_argb,int dst_stride_argb,int width,int height,enum RotationMode mode)180 int ARGBRotate(const uint8_t* src_argb,
181 int src_stride_argb,
182 uint8_t* dst_argb,
183 int dst_stride_argb,
184 int width,
185 int height,
186 enum RotationMode mode) {
187 if (!src_argb || width <= 0 || height == 0 || !dst_argb) {
188 return -1;
189 }
190
191 // Negative height means invert the image.
192 if (height < 0) {
193 height = -height;
194 src_argb = src_argb + (height - 1) * src_stride_argb;
195 src_stride_argb = -src_stride_argb;
196 }
197
198 switch (mode) {
199 case kRotate0:
200 // copy frame
201 return ARGBCopy(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
202 width, height);
203 case kRotate90:
204 ARGBRotate90(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
205 height);
206 return 0;
207 case kRotate270:
208 ARGBRotate270(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
209 height);
210 return 0;
211 case kRotate180:
212 ARGBRotate180(src_argb, src_stride_argb, dst_argb, dst_stride_argb, width,
213 height);
214 return 0;
215 default:
216 break;
217 }
218 return -1;
219 }
220
221 #ifdef __cplusplus
222 } // extern "C"
223 } // namespace libyuv
224 #endif
225