/* ----------------------------------------------------------------------
 * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
 *
 * $Date:        12. March 2014
 * $Revision:    V1.4.4
 *
 * Project:      CMSIS DSP Library
 * Title:        arm_shift_q15.c
 *
 * Description:  Shifts the elements of a Q15 vector by a specified number of bits.
 *
 * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   - Neither the name of ARM LIMITED nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * -------------------------------------------------------------------- */

#include "arm_math.h"

/**
 * @ingroup groupMath
 */

/**
 * @addtogroup shift
 * @{
 */

/**
 * @brief  Shifts the elements of a Q15 vector by a specified number of bits.
 * @param[in]  *pSrc points to the input vector
 * @param[in]  shiftBits number of bits to shift.  A positive value shifts left; a negative value shifts right.
 * @param[out]  *pDst points to the output vector
 * @param[in]  blockSize number of samples in the vector
 * @return none.
 *
 * <b>Scaling and Overflow Behavior:</b>
 * \par
 * The function uses saturating arithmetic.
 * Results outside of the allowable Q15 range [0x8000, 0x7FFF] are saturated.
 */

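/*
 * Usage sketch (illustrative only; the buffer names, sizes, and shift amounts
 * below are assumptions, not taken from the CMSIS sources):
 *
 *   #define BLOCK_SIZE 32
 *   q15_t srcBuf[BLOCK_SIZE];
 *   q15_t dstBuf[BLOCK_SIZE];
 *
 *   arm_shift_q15(srcBuf,  3, dstBuf, BLOCK_SIZE);   scales each sample up by 2^3, saturating to Q15
 *   arm_shift_q15(srcBuf, -2, dstBuf, BLOCK_SIZE);   scales each sample down by 2^2 (arithmetic right shift)
 */
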
void arm_shift_q15(
  q15_t * pSrc,
  int8_t shiftBits,
  q15_t * pDst,
  uint32_t blockSize)
{
  uint32_t blkCnt;                               /* loop counter */
  uint8_t sign;                                  /* Sign of shiftBits */

#ifndef ARM_MATH_CM0_FAMILY

/* Run the below code for Cortex-M4 and Cortex-M3 */

  q15_t in1, in2;                                /* Temporary variables */


  /* Loop unrolling */
  blkCnt = blockSize >> 2u;

  /* Getting the sign of shiftBits */
  sign = (shiftBits & 0x80);

  /* If the shift value is positive then do left shift else right shift */
  if(sign == 0u)
  {
    /* First part of the processing with loop unrolling.  Compute 4 outputs at a time.
     ** A second loop below computes the remaining 1 to 3 samples. */
    while(blkCnt > 0u)
    {
      /* Read 2 inputs */
      in1 = *pSrc++;
      in2 = *pSrc++;
      /* C = A << shiftBits */
      /* Shift the inputs and then store the results in the destination buffer. */
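      /* __SSAT saturates each shifted value to 16 bits; __PKHBT then packs the two
       * saturated halfwords into a single 32-bit word so both results are written
       * with one SIMD store. */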
#ifndef  ARM_MATH_BIG_ENDIAN

      *__SIMD32(pDst)++ = __PKHBT(__SSAT((in1 << shiftBits), 16),
                                  __SSAT((in2 << shiftBits), 16), 16);

#else

      *__SIMD32(pDst)++ = __PKHBT(__SSAT((in2 << shiftBits), 16),
                                  __SSAT((in1 << shiftBits), 16), 16);

#endif /* #ifndef  ARM_MATH_BIG_ENDIAN    */

      in1 = *pSrc++;
      in2 = *pSrc++;

#ifndef  ARM_MATH_BIG_ENDIAN

      *__SIMD32(pDst)++ = __PKHBT(__SSAT((in1 << shiftBits), 16),
                                  __SSAT((in2 << shiftBits), 16), 16);

#else

      *__SIMD32(pDst)++ = __PKHBT(__SSAT((in2 << shiftBits), 16),
                                  __SSAT((in1 << shiftBits), 16), 16);

#endif /* #ifndef  ARM_MATH_BIG_ENDIAN    */

      /* Decrement the loop counter */
      blkCnt--;
    }

    /* If the blockSize is not a multiple of 4, compute any remaining output samples here.
     ** No loop unrolling is used. */
    blkCnt = blockSize % 0x4u;

    while(blkCnt > 0u)
    {
      /* C = A << shiftBits */
      /* Shift and then store the results in the destination buffer. */
      *pDst++ = __SSAT((*pSrc++ << shiftBits), 16);

      /* Decrement the loop counter */
      blkCnt--;
    }
  }
  else
  {
    /* First part of the processing with loop unrolling.  Compute 4 outputs at a time.
     ** A second loop below computes the remaining 1 to 3 samples. */
    while(blkCnt > 0u)
    {
      /* Read 2 inputs */
      in1 = *pSrc++;
      in2 = *pSrc++;

      /* C = A >> shiftBits */
      /* Shift the inputs and then store the results in the destination buffer. */
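      /* shiftBits is negative in this branch, so -shiftBits gives the right-shift
       * amount.  An arithmetic right shift cannot overflow, so no saturation is
       * needed here. */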
#ifndef  ARM_MATH_BIG_ENDIAN

      *__SIMD32(pDst)++ = __PKHBT((in1 >> -shiftBits),
                                  (in2 >> -shiftBits), 16);

#else

      *__SIMD32(pDst)++ = __PKHBT((in2 >> -shiftBits),
                                  (in1 >> -shiftBits), 16);

#endif /* #ifndef  ARM_MATH_BIG_ENDIAN    */

      in1 = *pSrc++;
      in2 = *pSrc++;

#ifndef  ARM_MATH_BIG_ENDIAN

      *__SIMD32(pDst)++ = __PKHBT((in1 >> -shiftBits),
                                  (in2 >> -shiftBits), 16);

#else

      *__SIMD32(pDst)++ = __PKHBT((in2 >> -shiftBits),
                                  (in1 >> -shiftBits), 16);

#endif /* #ifndef  ARM_MATH_BIG_ENDIAN    */

      /* Decrement the loop counter */
      blkCnt--;
    }

    /* If the blockSize is not a multiple of 4, compute any remaining output samples here.
     ** No loop unrolling is used. */
    blkCnt = blockSize % 0x4u;

    while(blkCnt > 0u)
    {
      /* C = A >> shiftBits */
      /* Shift the inputs and then store the results in the destination buffer. */
      *pDst++ = (*pSrc++ >> -shiftBits);

      /* Decrement the loop counter */
      blkCnt--;
    }
  }

#else

  /* Run the below code for Cortex-M0 */

  /* Getting the sign of shiftBits */
  sign = (shiftBits & 0x80);

  /* If the shift value is positive then do left shift else right shift */
  if(sign == 0u)
  {
    /* Initialize blkCnt with number of samples */
    blkCnt = blockSize;

    while(blkCnt > 0u)
    {
      /* C = A << shiftBits */
      /* Shift and then store the results in the destination buffer. */
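      /* The cast to q31_t widens the sample before the left shift so the
       * intermediate result cannot wrap before __SSAT saturates it back to 16 bits. */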
      *pDst++ = __SSAT(((q31_t) *pSrc++ << shiftBits), 16);

      /* Decrement the loop counter */
      blkCnt--;
    }
  }
  else
  {
    /* Initialize blkCnt with number of samples */
    blkCnt = blockSize;

    while(blkCnt > 0u)
    {
      /* C = A >> shiftBits */
      /* Shift the inputs and then store the results in the destination buffer. */
      *pDst++ = (*pSrc++ >> -shiftBits);

      /* Decrement the loop counter */
      blkCnt--;
    }
  }

#endif /* #ifndef ARM_MATH_CM0_FAMILY */

}

/**
 * @} end of shift group
 */