/* ------------------------------------------------------------------
 * Copyright (C) 1998-2010 PacketVideo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * -------------------------------------------------------------------
 */
/****************************************************************************************
Portions of this file are derived from the following 3GPP standard:

    3GPP TS 26.173
    ANSI-C code for the Adaptive Multi-Rate - Wideband (AMR-WB) speech codec
    Available from http://www.3gpp.org

(C) 2007, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TTA, TTC)
Permission to distribute, modify and use this file under the standard license
terms listed above has been obtained from the copyright holder.
****************************************************************************************/
/*
------------------------------------------------------------------------------


 Pathname: ./src/pvamrwbdecoder_basic_op_gcc_armv5.h

------------------------------------------------------------------------------
 INCLUDE DESCRIPTION

 Basic fixed-point arithmetic operators (saturating adds/subtracts, fractional
 multiplies and multiply-accumulates) implemented with GCC inline assembly
 using the ARMv5TE DSP instructions (qadd, qsub, qdadd, qdsub, smulbb,
 smlabb, smulwb).

------------------------------------------------------------------------------
*/

#ifndef PVAMRWBDECODER_BASIC_OP_GCC_ARMV5_H
#define PVAMRWBDECODER_BASIC_OP_GCC_ARMV5_H


#ifdef __cplusplus
extern "C"
{
#endif

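    /* Saturating 16-bit subtraction: var1 and var2 are shifted into the upper
       halfword, subtracted with QSUB (32-bit saturation), and shifted back,
       so the result is clamped to the int16 range. */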
    static inline int16 sub_int16(int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 L_var_aux;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;

        asm volatile(
            "mov  %0, %2, lsl #16\n"
            "mov  %1, %3, lsl #16\n"
            "qsub %0, %0, %1\n"
            "mov  %0, %0, asr #16"
            : "=&r*i"(L_var_out),
              "=&r*i"(L_var_aux)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

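    /* Saturating 16-bit addition: same scheme as sub_int16, using QADD. */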
    static inline int16 add_int16(int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 L_var_aux;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;

        asm volatile(
            "mov  %0, %2, lsl #16\n"
            "mov  %1, %3, lsl #16\n"
            "qadd %0, %0, %1\n"
            "mov  %0, %0, asr #16"
            : "=&r*i"(L_var_out),
              "=&r*i"(L_var_aux)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

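    /* Fractional multiply of a 32-bit value, passed as its hi/lo 16-bit parts,
       by the 16-bit value n: returns 2 * (hi * n + ((lo * n) >> 15)), with the
       final doubling done by QADD so it saturates. */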
    static inline int32 mul_32by16(int16 hi, int16 lo, int16 n)
    {
        register int32 H_32;
        register int32 L_32;
        register int32 ra = (int32)hi;
        register int32 rb = (int32)lo;
        register int32 rc = (int32)n;

        asm volatile(
            "smulbb %0, %2, %4\n"
            "smulbb %1, %3, %4\n"
            "add    %0, %0, %1, asr #15\n"
            "qadd   %0, %0, %0"
            : "=&r*i"(H_32),
              "=&r*i"(L_32)
            : "r"(ra),
              "r"(rb),
              "r"(rc));

        return H_32;
    }


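    /* Saturating 32-bit subtraction (L_var1 - L_var2) via QSUB. */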
    static inline int32 sub_int32(int32 L_var1, int32 L_var2)
    {
        register int32 L_var_out;
        register int32 ra = L_var1;
        register int32 rb = L_var2;

        asm volatile(
            "qsub %0, %1, %2"
            : "=&r*i"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return L_var_out;
    }

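    /* Saturating 32-bit addition (L_var1 + L_var2) via QADD. */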
    static inline int32 add_int32(int32 L_var1, int32 L_var2)
    {
        register int32 L_var_out;
        register int32 ra = L_var1;
        register int32 rb = L_var2;

        asm volatile(
            "qadd %0, %1, %2"
            : "=&r*i"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return L_var_out;
    }

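    /* Multiply-subtract: returns L_var3 - 2 * var1 * var2; QDSUB doubles the
       16x16 product and subtracts it with saturation. */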
    static inline int32 msu_16by16_from_int32(int32 L_var3, int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;
        register int32 rc = L_var3;

        asm volatile(
            "smulbb %0, %1, %2\n"
            "qdsub %0, %3, %0"
            : "=&r*i"(L_var_out)
            : "r"(ra),
              "r"(rb),
              "r"(rc));

        return L_var_out;
    }


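    /* Multiply-accumulate: returns L_var3 + 2 * var1 * var2; QDADD doubles the
       16x16 product and adds it with saturation. */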
    static inline int32 mac_16by16_to_int32(int32 L_var3, int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;
        register int32 rc = L_var3;

        asm volatile(
            "smulbb %0, %1, %2\n"
            "qdadd %0, %3, %0"
            : "=&r*i"(L_var_out)
            : "r"(ra),
              "r"(rb),
              "r"(rc));

        return L_var_out;
    }


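    /* Fractional 16x16 multiply to 32 bits: returns 2 * var1 * var2, with the
       doubling done by QADD so it saturates. */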
    static inline int32 mul_16by16_to_int32(int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;

        asm volatile(
            "smulbb %0, %1, %2\n"
            "qadd %0, %0, %0"
            : "=&r*i"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return L_var_out;
    }


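    /* Fractional 16x16 multiply: returns (var1 * var2) >> 15; no saturation is
       applied to the shifted product. */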
    static inline int16 mult_int16(int16 var1, int16 var2)
    {
        register int32 L_var_out;
        register int32 ra = (int32)var1;
        register int32 rb = (int32)var2;

        asm volatile(
            "smulbb %0, %1, %2\n"
            "mov %0, %0, asr #15"
            : "=&r*i"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

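    /* Rounds a 32-bit value to 16 bits: adds 0x00008000 with saturation (QADD)
       and returns the upper halfword. */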
    static inline int16 amr_wb_round(int32 L_var1)
    {
        register int32 L_var_out;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)0x00008000L;

        asm volatile(
            "qadd %0, %1, %2\n"
            "mov %0, %0, asr #16"
            : "=&r*i"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }

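    /* Saturating left shift by 1 (QADD of the input with itself), then
       rounding to 16 bits as in amr_wb_round. */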
    static inline int16 amr_wb_shl1_round(int32 L_var1)
    {
        register int32 L_var_out;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)0x00008000L;

        asm volatile(
            "qadd %0, %1, %1\n"
            "qadd %0, %0, %2\n"
            "mov %0, %0, asr #16"
            : "=&r*i"(L_var_out)
            : "r"(ra),
              "r"(rb));

        return (int16)L_var_out;
    }


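    /* Plain 16x16 multiply-accumulate via SMLABB: returns
       L_var1 * L_var2 + L_add, without saturation. */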
    static inline int32 fxp_mac_16by16(const int16 L_var1, const int16 L_var2, int32 L_add)
    {
        register int32 tmp;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)L_var2;
        register int32 rc = (int32)L_add;

        asm volatile(
            "smlabb %0, %1, %2, %3"
            : "=&r*i"(tmp)
            : "r"(ra),
              "r"(rb),
              "r"(rc));

        return (tmp);
    }

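    /* Plain 16x16 multiply via SMULBB: full 32-bit product of the bottom
       halfwords of the operands. */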
    static inline int32 fxp_mul_16by16bb(int16 L_var1, const int16 L_var2)
    {
        register int32 tmp;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)L_var2;

        asm volatile(
            "smulbb %0, %1, %2"
            : "=&r*i"(tmp)
            : "r"(ra),
              "r"(rb));

        return (tmp);
    }


#define fxp_mul_16by16(a, b)  fxp_mul_16by16bb(a, b)


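    /* 32x16 multiply via SMULWB: returns the top 32 bits of the 48-bit product
       of L_var1 and the bottom halfword of L_var2, i.e.
       (L_var1 * (int16)L_var2) >> 16. */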
    static inline int32 fxp_mul32_by_16(int32 L_var1, const int32 L_var2)
    {
        register int32 tmp;
        register int32 ra = (int32)L_var1;
        register int32 rb = (int32)L_var2;

        asm volatile(
            "smulwb %0, %1, %2"
            : "=&r*i"(tmp)
            : "r"(ra),
              "r"(rb));

        return (tmp);
    }

#define fxp_mul32_by_16b(a, b)   fxp_mul32_by_16(a, b)



#ifdef __cplusplus
}
#endif


#endif   /*  PVAMRWBDECODER_BASIC_OP_GCC_ARMV5_H  */