1 /* Copyright (C) 2002 Jean-Marc Valin
2 File: filters.c
3 Various analysis/synthesis filters
4
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions
7 are met:
8
9 - Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
11
12 - Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 - Neither the name of the Xiph.org Foundation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
24 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
27 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
28 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
29 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #ifdef HAVE_CONFIG_H
34 #include "config.h"
35 #endif
36
37 #include "filters.h"
38 #include "stack_alloc.h"
39 #include "misc.h"
40 #include "math_approx.h"
41 #include "ltp.h"
42 #include <math.h>
43
44 #ifdef _USE_SSE
45 #include "filters_sse.h"
46 #elif defined (ARM4_ASM) || defined(ARM5E_ASM)
47 #include "filters_arm4.h"
48 #elif defined (BFIN_ASM)
49 #include "filters_bfin.h"
50 #endif
51
52
53
/* Bandwidth-expands an LPC filter: lpc_out[i] = gamma^(i+1) * lpc_in[i].
   gamma < 1 moves the poles toward the origin, widening formant bandwidths. */
void bw_lpc(spx_word16_t gamma, const spx_coef_t *lpc_in, spx_coef_t *lpc_out, int order)
{
   int k;
   spx_word16_t gamma_pow = gamma;   /* running power: gamma^(k+1) */
   for (k = 0; k < order; k++)
   {
      lpc_out[k] = MULT16_16_P15(gamma_pow, lpc_in[k]);
      gamma_pow  = MULT16_16_P15(gamma_pow, gamma);
   }
}
64
65
66 #ifdef FIXED_POINT
67
68 /* FIXME: These functions are ugly and probably introduce too much error */
/* Scales a signal: y[i] ~= scale*x[i] (scale in the Q-format implied by
   MULT16_32_Q14).  Seven LSBs of x[i] are dropped before the 16x32
   multiply and restored afterwards to keep the product in range. */
void signal_mul(const spx_sig_t *x, spx_sig_t *y, spx_word32_t scale, int len)
{
   int i;
   for (i=0;i<len;i++)
   {
      /* SHR by 7 to fit 16 bits, multiply, SHL by 7 to restore magnitude */
      y[i] = SHL32(MULT16_32_Q14(EXTRACT16(SHR32(x[i],7)),scale),7);
   }
}
77
/* Divides a signal by scale: y[i] ~= x[i]/scale.  Two code paths pick
   the pre-shifts so that the 16-bit reciprocal keeps useful precision
   for both large and small divisors. */
void signal_div(const spx_sig_t *x, spx_sig_t *y, spx_word32_t scale, int len)
{
   int i;
   if (scale > SHL32(EXTEND32(SIG_SCALING), 8))
   {
      /* Large divisor: compute a coarse reciprocal and shift the
         product back up by 7 afterwards */
      spx_word16_t scale_1;
      scale = PSHR32(scale, SIG_SHIFT);
      scale_1 = EXTRACT16(DIV32_16(SHL32(EXTEND32(SIG_SCALING),7),scale));
      for (i=0;i<len;i++)
      {
         y[i] = SHR32(MULT16_16(scale_1, EXTRACT16(SHR32(x[i],SIG_SHIFT))),7);
      }
   } else {
      /* Small divisor: keep 5 extra bits in the shifted divisor and
         2 extra bits in the samples to limit quantization error */
      spx_word16_t scale_1;
      scale = PSHR32(scale, SIG_SHIFT-5);
      scale_1 = DIV32_16(SHL32(EXTEND32(SIG_SCALING),3),scale);
      for (i=0;i<len;i++)
      {
         y[i] = MULT16_16(scale_1, EXTRACT16(SHR32(x[i],SIG_SHIFT-2)));
      }
   }
}
100
101 #else
102
/* Multiplies every sample of x by scale, writing the result to y
   (float build; x and y may be the same buffer). */
void signal_mul(const spx_sig_t *x, spx_sig_t *y, spx_word32_t scale, int len)
{
   int k;
   for (k = 0; k < len; k++)
   {
      y[k] = scale * x[k];
   }
}
109
/* Divides every sample of x by scale, implemented as multiplication
   by the precomputed reciprocal (float build). */
void signal_div(const spx_sig_t *x, spx_sig_t *y, spx_word32_t scale, int len)
{
   int k;
   const float inv_scale = 1 / scale;
   for (k = 0; k < len; k++)
   {
      y[k] = inv_scale * x[k];
   }
}
117 #endif
118
119
120
121 #ifdef FIXED_POINT
122
123
124
/* Root-mean-square amplitude of x (fixed-point).  The signal is first
   normalized by a power of two so that squared samples fit in the
   32-bit accumulator; the shift is undone on the result. */
spx_word16_t compute_rms(const spx_sig_t *x, int len)
{
   int i;
   spx_word32_t sum=0;
   spx_sig_t max_val=1;   /* start at 1 so the shift loop terminates */
   int sig_shift;

   /* Find the peak magnitude */
   for (i=0;i<len;i++)
   {
      spx_sig_t tmp = x[i];
      if (tmp<0)
         tmp = -tmp;
      if (tmp > max_val)
         max_val = tmp;
   }

   /* Smallest shift that brings the peak below 2^14, so tmp*tmp
      cannot overflow the 32-bit sub-accumulator */
   sig_shift=0;
   while (max_val>16383)
   {
      sig_shift++;
      max_val >>= 1;
   }

   /* Accumulate energy four samples at a time, dropping 6 bits per
      group of four to protect the main accumulator.
      NOTE(review): assumes len is a multiple of 4 — confirm callers. */
   for (i=0;i<len;i+=4)
   {
      spx_word32_t sum2=0;
      spx_word16_t tmp;
      tmp = EXTRACT16(SHR32(x[i],sig_shift));
      sum2 = MAC16_16(sum2,tmp,tmp);
      tmp = EXTRACT16(SHR32(x[i+1],sig_shift));
      sum2 = MAC16_16(sum2,tmp,tmp);
      tmp = EXTRACT16(SHR32(x[i+2],sig_shift));
      sum2 = MAC16_16(sum2,tmp,tmp);
      tmp = EXTRACT16(SHR32(x[i+3],sig_shift));
      sum2 = MAC16_16(sum2,tmp,tmp);
      sum = ADD32(sum,SHR32(sum2,6));
   }

   /* sqrt of the mean, then undo the normalization shift (+3 restores
      the 6 bits dropped above) and convert out of SIG_SHIFT scaling */
   return EXTRACT16(SHR32(SHL32(EXTEND32(spx_sqrt(1+DIV32(sum,len))),(sig_shift+3)),SIG_SHIFT));
}
165
166
167 #ifndef OVERRIDE_NORMALIZE16
/* Scales the 32-bit signal x down into the 16-bit buffer y so that the
   largest magnitude does not exceed max_scale; returns the right-shift
   that was applied (0 when the signal already fits). */
int normalize16(const spx_sig_t *x, spx_word16_t *y, spx_sig_t max_scale, int len)
{
   int k;
   int shift = 0;
   spx_sig_t peak = 1;   /* >= 1 so the shift loop below terminates */

   /* Locate the peak magnitude */
   for (k = 0; k < len; k++)
   {
      spx_sig_t mag = x[k];
      if (mag < 0)
         mag = NEG32(mag);
      if (mag >= peak)
         peak = mag;
   }

   /* Smallest shift that brings the peak within max_scale */
   while (peak > max_scale)
   {
      peak >>= 1;
      shift++;
   }

   /* Apply the shift to every sample */
   for (k = 0; k < len; k++)
      y[k] = EXTRACT16(SHR32(x[k], shift));

   return shift;
}
195 #endif
196
197 #else
198
compute_rms(const spx_sig_t * x,int len)199 spx_word16_t compute_rms(const spx_sig_t *x, int len)
200 {
201 int i;
202 float sum=0;
203 for (i=0;i<len;i++)
204 {
205 sum += x[i]*x[i];
206 }
207 return sqrt(.1+sum/len);
208 }
209 #endif
210
211
212
213 #ifndef OVERRIDE_FILTER_MEM2
214 #ifdef PRECISION16
/* Pole-zero filter (transposed direct-form II): num[] taps feed the
   input forward, den[] taps feed the negated output back.  16-bit
   variant: samples are saturated and shifted down to 16 bits before
   the MACs.  mem[] is the ord-element filter state, updated in place. */
void filter_mem2(const spx_sig_t *x, const spx_coef_t *num, const spx_coef_t *den, spx_sig_t *y, int N, int ord, spx_mem_t *mem)
{
   int i,j;
   spx_word16_t xi,yi,nyi;

   for (i=0;i<N;i++)
   {
      xi= EXTRACT16(PSHR32(SATURATE(x[i],536870911),SIG_SHIFT));
      /* Output = input plus the feedback term held in mem[0] */
      yi = EXTRACT16(PSHR32(SATURATE(ADD32(x[i], SHL32(mem[0],1)),536870911),SIG_SHIFT));
      nyi = NEG16(yi);
      for (j=0;j<ord-1;j++)
      {
         /* Shift the state line while accumulating both tap sets */
         mem[j] = MAC16_16(MAC16_16(mem[j+1], num[j],xi), den[j],nyi);
      }
      /* Last state element has no previous state to shift in */
      mem[ord-1] = ADD32(MULT16_16(num[ord-1],xi), MULT16_16(den[ord-1],nyi));
      y[i] = SHL32(EXTEND32(yi),SIG_SHIFT);
   }
}
233 #else
/* Pole-zero filter (transposed direct-form II), 32-bit sample variant:
   num[] taps feed the input forward, den[] taps feed the negated
   output back.  mem[] is the ord-element filter state. */
void filter_mem2(const spx_sig_t *x, const spx_coef_t *num, const spx_coef_t *den, spx_sig_t *y, int N, int ord, spx_mem_t *mem)
{
   int i,j;
   spx_sig_t xi,yi,nyi;

   for (i=0;i<N;i++)
   {
      xi=SATURATE(x[i],805306368);
      /* Output = saturated input plus the feedback term in mem[0] */
      yi = SATURATE(ADD32(xi, SHL32(mem[0],2)),805306368);
      nyi = NEG32(yi);
      for (j=0;j<ord-1;j++)
      {
         /* Shift the state line while accumulating both tap sets */
         mem[j] = MAC16_32_Q15(MAC16_32_Q15(mem[j+1], num[j],xi), den[j],nyi);
      }
      /* Last state element (written as num*xi - den*yi, same as
         num*xi + den*nyi) */
      mem[ord-1] = SUB32(MULT16_32_Q15(num[ord-1],xi), MULT16_32_Q15(den[ord-1],yi));
      y[i] = yi;
   }
}
252 #endif
253 #endif
254
255 #ifndef OVERRIDE_IIR_MEM2
256 #ifdef PRECISION16
/* All-pole (IIR) filter, transposed direct-form II: den[] taps feed
   the negated output back.  16-bit variant: output is saturated and
   shifted down to 16 bits before the MACs.  mem[] is the state. */
void iir_mem2(const spx_sig_t *x, const spx_coef_t *den, spx_sig_t *y, int N, int ord, spx_mem_t *mem)
{
   int i,j;
   spx_word16_t yi,nyi;

   for (i=0;i<N;i++)
   {
      /* Output = input plus the feedback term held in mem[0] */
      yi = EXTRACT16(PSHR32(SATURATE(x[i] + SHL32(mem[0],1),536870911),SIG_SHIFT));
      nyi = NEG16(yi);
      for (j=0;j<ord-1;j++)
      {
         /* Shift the state line, accumulating the feedback taps */
         mem[j] = MAC16_16(mem[j+1],den[j],nyi);
      }
      mem[ord-1] = MULT16_16(den[ord-1],nyi);
      y[i] = SHL32(EXTEND32(yi),SIG_SHIFT);
   }
}
274 #else
/* All-pole (IIR) filter, transposed direct-form II, 32-bit sample
   variant: den[] taps feed the negated output back.  mem[] is the
   ord-element filter state, updated in place. */
void iir_mem2(const spx_sig_t *x, const spx_coef_t *den, spx_sig_t *y, int N, int ord, spx_mem_t *mem)
{
   int i,j;
   spx_word32_t xi,yi,nyi;

   for (i=0;i<N;i++)
   {
      xi=SATURATE(x[i],805306368);
      /* Output = saturated input plus the feedback term in mem[0] */
      yi = SATURATE(xi + SHL32(mem[0],2),805306368);
      nyi = NEG32(yi);
      for (j=0;j<ord-1;j++)
      {
         /* Shift the state line, accumulating the feedback taps */
         mem[j] = MAC16_32_Q15(mem[j+1],den[j],nyi);
      }
      mem[ord-1] = MULT16_32_Q15(den[ord-1],nyi);
      y[i] = yi;
   }
}
293 #endif
294 #endif
295
296 #ifndef OVERRIDE_FIR_MEM2
297 #ifdef PRECISION16
/* All-zero (FIR) filter, transposed direct-form II: num[] taps act on
   the input only (mem[] holds contributions from past inputs).
   16-bit variant: samples are saturated/shifted to 16 bits first. */
void fir_mem2(const spx_sig_t *x, const spx_coef_t *num, spx_sig_t *y, int N, int ord, spx_mem_t *mem)
{
   int i,j;
   spx_word16_t xi,yi;

   for (i=0;i<N;i++)
   {
      xi= EXTRACT16(PSHR32(SATURATE(x[i],536870911),SIG_SHIFT));
      /* Output = input plus accumulated past-input taps in mem[0] */
      yi = EXTRACT16(PSHR32(SATURATE(x[i] + SHL32(mem[0],1),536870911),SIG_SHIFT));
      for (j=0;j<ord-1;j++)
      {
         /* Shift the state line while adding this input's taps */
         mem[j] = MAC16_16(mem[j+1], num[j],xi);
      }
      mem[ord-1] = MULT16_16(num[ord-1],xi);
      y[i] = SHL32(EXTEND32(yi),SIG_SHIFT);
   }
}
315 #else
/* All-zero (FIR) filter, transposed direct-form II, 32-bit sample
   variant: num[] taps act on the input only; mem[] carries the
   contributions from past inputs. */
void fir_mem2(const spx_sig_t *x, const spx_coef_t *num, spx_sig_t *y, int N, int ord, spx_mem_t *mem)
{
   int i,j;
   spx_word32_t xi,yi;

   for (i=0;i<N;i++)
   {
      xi=SATURATE(x[i],805306368);
      /* Output = input plus accumulated past-input taps in mem[0] */
      yi = xi + SHL32(mem[0],2);
      for (j=0;j<ord-1;j++)
      {
         /* Shift the state line while adding this input's taps */
         mem[j] = MAC16_32_Q15(mem[j+1], num[j],xi);
      }
      mem[ord-1] = MULT16_32_Q15(num[ord-1],xi);
      y[i] = SATURATE(yi,805306368);
   }
}
333 #endif
334 #endif
335
336
337
338
339
340
341
342
/* Zero-state synthesis chain: filters xx through the all-pole filter
   given by ak, then through the pole-zero filter given by awk1/awk2.
   Each stage starts from a cleared memory; the second stage runs
   in-place on y. */
void syn_percep_zero(const spx_sig_t *xx, const spx_coef_t *ak, const spx_coef_t *awk1, const spx_coef_t *awk2, spx_sig_t *y, int N, int ord, char *stack)
{
   int k;
   VARDECL(spx_mem_t *mem);
   ALLOC(mem, ord, spx_mem_t);

   /* Stage 1: all-pole filtering of xx into y, zero initial state */
   for (k = 0; k < ord; k++)
      mem[k] = 0;
   iir_mem2(xx, ak, y, N, ord, mem);

   /* Stage 2: pole-zero filtering of y in place, zero initial state */
   for (k = 0; k < ord; k++)
      mem[k] = 0;
   filter_mem2(y, awk1, awk2, y, N, ord, mem);
}
355
/* Zero-state residue chain: filters xx through the pole-zero filter
   given by ak/awk1, then through the all-zero filter given by awk2.
   Each stage starts from a cleared memory; the second stage runs
   in-place on y. */
void residue_percep_zero(const spx_sig_t *xx, const spx_coef_t *ak, const spx_coef_t *awk1, const spx_coef_t *awk2, spx_sig_t *y, int N, int ord, char *stack)
{
   int k;
   VARDECL(spx_mem_t *mem);
   ALLOC(mem, ord, spx_mem_t);

   /* Stage 1: pole-zero filtering of xx into y, zero initial state */
   for (k = 0; k < ord; k++)
      mem[k] = 0;
   filter_mem2(xx, ak, awk1, y, N, ord, mem);

   /* Stage 2: FIR filtering of y in place, zero initial state */
   for (k = 0; k < ord; k++)
      mem[k] = 0;
   fir_mem2(y, awk2, y, N, ord, mem);
}
368
369 #ifndef OVERRIDE_COMPUTE_IMPULSE_RESPONSE
/* Builds the truncated (N-sample) impulse response of the combined
   filter with numerator taps awk1 and two cascaded all-pole stages
   driven by awk2 and ak.  The response is computed in place in y. */
void compute_impulse_response(const spx_coef_t *ak, const spx_coef_t *awk1, const spx_coef_t *awk2, spx_word16_t *y, int N, int ord, char *stack)
{
   int i,j;
   spx_word16_t y1, ny1i, ny2i;
   VARDECL(spx_mem_t *mem1);
   VARDECL(spx_mem_t *mem2);
   ALLOC(mem1, ord, spx_mem_t);
   ALLOC(mem2, ord, spx_mem_t);

   /* Seed y with the impulse convolved with the numerator:
      {LPC_SCALING, awk1[0..ord-1], ~0, ...}.
      NOTE(review): writes y[0..ord] before padding — requires
      N > ord+1; confirm callers. */
   y[0] = LPC_SCALING;
   for (i=0;i<ord;i++)
      y[i+1] = awk1[i];
   i++;
   for (;i<N;i++)
      y[i] = VERY_SMALL;

   /* Clear both filter states */
   for (i=0;i<ord;i++)
      mem1[i] = mem2[i] = 0;
   /* Run the two all-pole stages in cascade, in place over y */
   for (i=0;i<N;i++)
   {
      /* Stage 1: feedback through awk2 (state in mem1) */
      y1 = ADD16(y[i], EXTRACT16(PSHR32(mem1[0],LPC_SHIFT)));
      ny1i = NEG16(y1);
      /* Stage 2: feedback through ak (state in mem2).
         NOTE(review): the SHL16(y1,1) doubling looks like coefficient
         Q-format compensation — confirm against ak/awk2 scaling. */
      y[i] = ADD16(SHL16(y1,1), EXTRACT16(PSHR32(mem2[0],LPC_SHIFT)));
      ny2i = NEG16(y[i]);
      for (j=0;j<ord-1;j++)
      {
         /* Shift both state lines, accumulating the feedback taps */
         mem1[j] = MAC16_16(mem1[j+1], awk2[j],ny1i);
         mem2[j] = MAC16_16(mem2[j+1], ak[j],ny2i);
      }
      mem1[ord-1] = MULT16_16(awk2[ord-1],ny1i);
      mem2[ord-1] = MULT16_16(ak[ord-1],ny2i);
   }
}
403 #endif
404
/* QMF analysis: splits signal xx (N samples) into decimated-by-2 low
   (y1) and high (y2) bands using the M-tap filter aa.  mem carries the
   M-1 previous (halved) input samples between calls. */
void qmf_decomp(const spx_word16_t *xx, const spx_word16_t *aa, spx_sig_t *y1, spx_sig_t *y2, int N, int M, spx_word16_t *mem, char *stack)
{
   int i,j,k,M2;
   VARDECL(spx_word16_t *a);
   VARDECL(spx_word16_t *x);
   spx_word16_t *x2;

   ALLOC(a, M, spx_word16_t);
   ALLOC(x, N+M-1, spx_word16_t);
   x2=x+M-1;          /* alias to the start of the new-input region */
   M2=M>>1;           /* inner loop processes taps in pairs */
   /* Time-reverse the filter so the inner loop indexes both sample
      directions with the same j */
   for (i=0;i<M;i++)
      a[M-i-1]= aa[i];

   /* Prepend the saved history, then the halved, saturated new input */
   for (i=0;i<M-1;i++)
      x[i]=mem[M-i-2];
   for (i=0;i<N;i++)
      x[i+M-1]=SATURATE(PSHR(xx[i],1),16383);
   /* One low/high output pair per two input samples (decimation by 2).
      Taps are handled two at a time, combining the symmetric sample
      pair x[i+j] and x2[i-j]; the sign of the y2 update alternates
      between the even and odd tap (high-pass mirror).
      NOTE(review): assumes M is even — confirm filter lengths. */
   for (i=0,k=0;i<N;i+=2,k++)
   {
      y1[k]=0;
      y2[k]=0;
      for (j=0;j<M2;j++)
      {
         y1[k]=ADD32(y1[k],SHR(MULT16_16(a[j],ADD16(x[i+j],x2[i-j])),1));
         y2[k]=SUB32(y2[k],SHR(MULT16_16(a[j],SUB16(x[i+j],x2[i-j])),1));
         j++;
         y1[k]=ADD32(y1[k],SHR(MULT16_16(a[j],ADD16(x[i+j],x2[i-j])),1));
         y2[k]=ADD32(y2[k],SHR(MULT16_16(a[j],SUB16(x[i+j],x2[i-j])),1));
      }
   }
   /* Save the last M-1 (halved) inputs for the next call */
   for (i=0;i<M-1;i++)
      mem[i]=SATURATE(PSHR(xx[N-i-1],1),16383);
}
439
440
441 /* By segher */
/* 2x upsampling FIR (interpolation) by segher: x holds only the even
   (non-zero) input samples; the M-tap filter a produces N interleaved
   output samples in y.  mem carries the inter-call filter history. */
void fir_mem_up(const spx_sig_t *x, const spx_word16_t *a, spx_sig_t *y, int N, int M, spx_word32_t *mem, char *stack)
/* assumptions:
   all odd x[i] are zero -- well, actually they are left out of the array now
   N and M are multiples of 4 */
{
   int i, j;
   VARDECL(spx_word16_t *xx);

   ALLOC(xx, M+N-1, spx_word16_t);

   /* Build a time-reversed 16-bit work buffer: the new input (scaled
      down) in the even slots of the front, the previous call's history
      after position N */
   for (i = 0; i < N/2; i++)
      xx[2*i] = SHR(x[N/2-1-i],SIG_SHIFT+1);
   for (i = 0; i < M - 1; i += 2)
      xx[N+i] = mem[i+1];

   /* Four outputs per iteration; each loaded sample (x0/x1) and each
      pair of taps (a0/a1) is reused across the even and odd output
      phases to halve the memory traffic */
   for (i = 0; i < N; i += 4) {
      spx_sig_t y0, y1, y2, y3;
      spx_word16_t x0;

      y0 = y1 = y2 = y3 = 0;
      x0 = xx[N-4-i];

      for (j = 0; j < M; j += 4) {
         spx_word16_t x1;
         spx_word16_t a0, a1;

         a0 = a[j];
         a1 = a[j+1];
         x1 = xx[N-2+j-i];

         y0 = ADD32(y0,SHR(MULT16_16(a0, x1),1));
         y1 = ADD32(y1,SHR(MULT16_16(a1, x1),1));
         y2 = ADD32(y2,SHR(MULT16_16(a0, x0),1));
         y3 = ADD32(y3,SHR(MULT16_16(a1, x0),1));

         a0 = a[j+2];
         a1 = a[j+3];
         x0 = xx[N+j-i];

         y0 = ADD32(y0,SHR(MULT16_16(a0, x0),1));
         y1 = ADD32(y1,SHR(MULT16_16(a1, x0),1));
         y2 = ADD32(y2,SHR(MULT16_16(a0, x1),1));
         y3 = ADD32(y3,SHR(MULT16_16(a1, x1),1));
      }
      y[i] = y0;
      y[i+1] = y1;
      y[i+2] = y2;
      y[i+3] = y3;
   }

   /* The front of xx (the oldest samples) becomes next call's history */
   for (i = 0; i < M - 1; i += 2)
      mem[i+1] = xx[i];
}
495
/* Second-order DC notch filter applied in transposed direct-form II.
   Numerator is fixed at 1 - 2z^-1 + z^-2 (double zero at DC); the pole
   pair sits near radius.  mem[0..1] holds the filter state.
   NOTE(review): uses float literals — appears intended for the float
   build; confirm before relying on it under FIXED_POINT. */
void filter_dc_notch(spx_sig_t *in, spx_word16_t radius, spx_sig_t *out, int len, spx_mem_t *mem)
{
   int k;
   spx_word16_t den1, den2;
   den1 = -2*radius;
   den2 = radius*radius + .7*(1-radius)*(1-radius);
   for (k = 0; k < len; k++)
   {
      spx_sig_t xk = in[k];
      out[k] = mem[0] + xk;                    /* num[0] = 1 */
      mem[0] = mem[1] + (-2)*xk - den1*out[k]; /* num[1] = -2 */
      mem[1] = xk - den2*out[k];               /* num[2] = 1 */
   }
}
512
/* Resets the comb-filter state: no previous pitch, zeroed 3-tap
   pitch gains, and unity smoothed gain. */
void comb_filter_mem_init (CombFilterMem *mem)
{
   int k;
   mem->last_pitch = 0;
   for (k = 0; k < 3; k++)
      mem->last_pitch_gain[k] = 0;
   mem->smooth_gain = 1;
}
519
520 #ifdef FIXED_POINT
521 #define COMB_STEP 32767
522 #else
523 #define COMB_STEP 1.0
524 #endif
525
/* Comb-filter post-processing of the decoded excitation: reinforces
   the pitch harmonics via a 3-tap long-term predictor (crossfading
   from the previous frame's pitch parameters to the current ones),
   then rescales so the enhanced excitation never gains energy.
   NOTE(review): parameters ak and p are not referenced in this body. */
void comb_filter(
   spx_sig_t *exc,          /*decoded excitation*/
   spx_sig_t *new_exc,      /*enhanced excitation*/
   spx_coef_t *ak,          /*LPC filter coefs*/
   int p,                   /*LPC order*/
   int nsf,                 /*sub-frame size*/
   int pitch,               /*pitch period*/
   spx_word16_t *pitch_gain,/*pitch gain (3-tap)*/
   spx_word16_t comb_gain,  /*gain of comb filter*/
   CombFilterMem *mem
)
{
   int i;
   spx_word16_t exc_energy=0, new_exc_energy=0;
   spx_word16_t gain;
   spx_word16_t step;
   spx_word16_t fact;

   /*Compute excitation amplitude prior to enhancement*/
   exc_energy = compute_rms(exc, nsf);
   /*for (i=0;i<nsf;i++)
      exc_energy+=((float)exc[i])*exc[i];*/

   /*Some gain adjustment if pitch is too high or if unvoiced*/
#ifdef FIXED_POINT
   {
      /* g = current + previous 1-tap equivalent pitch gain; scale
         comb_gain down when pitch is too strong, and toward zero
         when nearly unvoiced */
      spx_word16_t g = gain_3tap_to_1tap(pitch_gain)+gain_3tap_to_1tap(mem->last_pitch_gain);
      if (g > 166)
         comb_gain = MULT16_16_Q15(DIV32_16(SHL32(EXTEND32(165),15),g), comb_gain);
      if (g < 64)
         comb_gain = MULT16_16_Q15(SHL16(g, 9), comb_gain);
   }
#else
   {
      float g=0;
      g = GAIN_SCALING_1*.5*(gain_3tap_to_1tap(pitch_gain)+gain_3tap_to_1tap(mem->last_pitch_gain));
      if (g>1.3)
         comb_gain*=1.3/g;
      if (g<.5)
         comb_gain*=2.*g;
   }
#endif
   /* fact ramps from ~0 to COMB_STEP across the sub-frame, crossfading
      from the previous pitch parameters (exc2) to the current (exc1) */
   step = DIV32(COMB_STEP, nsf);
   fact=0;

   /*Apply pitch comb-filter (filter out noise between pitch harmonics)*/
   for (i=0;i<nsf;i++)
   {
      spx_word32_t exc1, exc2;

      fact = ADD16(fact,step);

      /* 3-tap pitch prediction with the current pitch/gains */
      exc1 = SHL32(MULT16_32_Q15(SHL16(pitch_gain[0],7),exc[i-pitch+1]) +
                   MULT16_32_Q15(SHL16(pitch_gain[1],7),exc[i-pitch]) +
                   MULT16_32_Q15(SHL16(pitch_gain[2],7),exc[i-pitch-1]) , 2);
      /* Same prediction with the previous frame's pitch/gains */
      exc2 = SHL32(MULT16_32_Q15(SHL16(mem->last_pitch_gain[0],7),exc[i-mem->last_pitch+1]) +
                   MULT16_32_Q15(SHL16(mem->last_pitch_gain[1],7),exc[i-mem->last_pitch]) +
                   MULT16_32_Q15(SHL16(mem->last_pitch_gain[2],7),exc[i-mem->last_pitch-1]),2);

      new_exc[i] = exc[i] + MULT16_32_Q15(comb_gain, ADD32(MULT16_32_Q15(fact,exc1), MULT16_32_Q15(SUB16(COMB_STEP,fact), exc2)));
   }

   /* Remember pitch parameters for the next sub-frame's crossfade */
   mem->last_pitch_gain[0] = pitch_gain[0];
   mem->last_pitch_gain[1] = pitch_gain[1];
   mem->last_pitch_gain[2] = pitch_gain[2];
   mem->last_pitch = pitch;

   /*Amplitude after enhancement*/
   new_exc_energy = compute_rms(new_exc, nsf);

   /* Gain restores at most the original energy (never boosts) ... */
   if (exc_energy > new_exc_energy)
      exc_energy = new_exc_energy;

   gain = DIV32_16(SHL32(EXTEND32(exc_energy),15),ADD16(1,new_exc_energy));

   /* ... and is floored at one half */
#ifdef FIXED_POINT
   if (gain < 16384)
      gain = 16384;
#else
   if (gain < .5)
      gain=.5;
#endif

   /* Apply the gain through a one-pole smoother (~.96/.04 mix;
      31457/32768 and 1311/32768 in the fixed-point branch) */
#ifdef FIXED_POINT
   for (i=0;i<nsf;i++)
   {
      mem->smooth_gain = ADD16(MULT16_16_Q15(31457,mem->smooth_gain), MULT16_16_Q15(1311,gain));
      new_exc[i] = MULT16_32_Q15(mem->smooth_gain, new_exc[i]);
   }
#else
   for (i=0;i<nsf;i++)
   {
      mem->smooth_gain = .96*mem->smooth_gain + .04*gain;
      new_exc[i] *= mem->smooth_gain;
   }
#endif
}
623