1 /* _______ ____ __ ___ ___
2 * \ _ \ \ / \ / \ \ / / ' ' '
3 * | | \ \ | | || | \/ | . .
4 * | | | | | | || ||\ /| |
5 * | | | | | | || || \/ | | ' ' '
6 * | | | | | | || || | | . .
7 * | |_/ / \ \__// || | |
8 * /_______/ynamic \____/niversal /__\ /____\usic /| . . ibliotheque
9 * / \
10 * / . \
11 * resample.c - Resampling helpers. / / \ \
12 * | < / \_
13 * By Bob and entheh. | \/ /\ /
14 * \_ / > /
15 * In order to find a good trade-off between | \ / /
16 * speed and accuracy in this code, some tests | ' /
17 * were carried out regarding the behaviour of \__/
18 * long long ints with gcc. The following code
19 * was tested:
20 *
21 * int a, b, c;
22 * c = ((long long)a * b) >> 16;
23 *
24 * DJGPP GCC Version 3.0.3 generated the following assembly language code for
25 * the multiplication and scaling, leaving the 32-bit result in EAX.
26 *
27 * movl -8(%ebp), %eax ; read one int into EAX
28 * imull -4(%ebp) ; multiply by the other; result goes in EDX:EAX
29 * shrdl $16, %edx, %eax ; shift EAX right 16, shifting bits in from EDX
30 *
31 * Note that a 32*32->64 multiplication is performed, allowing for high
32 * accuracy. On the Pentium 2 and above, shrdl takes two cycles (generally),
33 * so it is a minor concern when four multiplications are being performed
34 * (the cubic resampler). On the Pentium MMX and earlier, it takes four or
35 * more cycles, so this method is unsuitable for use in the low-quality
36 * resamplers.
37 *
38 * Since "long long" is a gcc-specific extension, we use LONG_LONG instead,
39 * defined in dumb.h. We may investigate later what code MSVC generates, but
40 * if it seems too slow then we suggest you use a good compiler.
41 *
42 * FIXME: these comments are somewhat out of date now.
43 */
44
45 #include <math.h>
46 #include "dumb.h"
47
48
49
/* Compile with -DHEAVYDEBUG if you want to make sure the pick-up function is
 * called when it should be. There will be a considerable performance hit,
 * since at least one condition has to be tested for every sample generated.
 */
#ifdef HEAVYDEBUG
#define HEAVYASSERT(cond) ASSERT(cond)
#else
/* Expands to nothing in normal builds, so the per-sample checks cost nothing. */
#define HEAVYASSERT(cond)
#endif
59
60
61
/* A global variable for controlling resampling quality wherever a local
 * specification doesn't override it. The following values are valid:
 *
 *  0 - DUMB_RQ_ALIASING - fastest
 *  1 - DUMB_RQ_LINEAR
 *  2 - DUMB_RQ_CUBIC    - nicest
 *
 * Values outside the range 0-2 will behave the same as the nearest
 * value within the range.
 */
int dumb_resampling_quality = DUMB_RQ_CUBIC; /* DUMB_RQ_* constants are declared in dumb.h */
73
74
75
/* Fixed-point multiply helpers.
 *
 * MULSC(a, b) computes the equivalent of ((LONG_LONG)a * b) >> 16: the
 * operands are pre-shifted ((a) << 4, (b) << 12) so the final shift is
 * exactly 32 bits, letting the compiler take the high half of the 64-bit
 * product directly instead of emitting a 64-bit shift (see the code-gen
 * discussion at the top of this file). The two commented-out versions are
 * the earlier, slower/less accurate alternatives.
 *
 * MULSC16 pre-shifts both operands by 12, i.e. a*b >> 8 — used for 16-bit
 * source samples (see the LINEAR/CUBIC macros further down).
 *
 * NOTE(review): the left shifts assume the operands have enough headroom
 * that (a) << 4 / (b) << 12 do not overflow a 32-bit int — confirm against
 * the callers in resample.inc.
 */
//#define MULSC(a, b) ((int)((LONG_LONG)(a) * (b) >> 16))
//#define MULSC(a, b) ((a) * ((b) >> 2) >> 14)
#define MULSC(a, b) ((int)((LONG_LONG)((a) << 4) * ((b) << 12) >> 32))
#define MULSC16(a, b) ((int)((LONG_LONG)((a) << 12) * ((b) << 12) >> 32))
80
81
82
/* Executes the content 'iterator' times.
 * Clobbers the 'iterator' variable.
 * The loop is unrolled by four: the two low bits of the count are peeled
 * off first (&2 runs CONTENT twice, &1 once), then the remaining multiple
 * of four is executed four CONTENTs per pass, so CONTENT runs exactly
 * 'iterator' times in total.
 */
#define LOOP4(iterator, CONTENT) \
{ \
	if ((iterator) & 2) { \
		CONTENT; \
		CONTENT; \
	} \
	if ((iterator) & 1) { \
		CONTENT; \
	} \
	(iterator) >>= 2; \
	while (iterator) { \
		CONTENT; \
		CONTENT; \
		CONTENT; \
		CONTENT; \
		(iterator)--; \
	} \
}
105
106
107
#define PASTERAW(a, b) a ## b /* This does not expand macros in b ... */
#define PASTE(a, b) PASTERAW(a, b) /* ... but b is expanded during this substitution. */

/* X names the source-sample member for the current width, e.g. x.x24 when
 * SRCBITS is 24. NOTE(review): the member layout lives in dumb.h /
 * resample.inc — confirm there. */
#define X PASTE(x.x, SRCBITS)
112
113
114
115 /* Cubic resampler: look-up tables
116 *
117 * a = 1.5*x1 - 1.5*x2 + 0.5*x3 - 0.5*x0
118 * b = 2*x2 + x0 - 2.5*x1 - 0.5*x3
119 * c = 0.5*x2 - 0.5*x0
120 * d = x1
121 *
122 * x = a*t*t*t + b*t*t + c*t + d
123 * = (-0.5*x0 + 1.5*x1 - 1.5*x2 + 0.5*x3) * t*t*t +
124 * ( 1*x0 - 2.5*x1 + 2 *x2 - 0.5*x3) * t*t +
125 * (-0.5*x0 + 0.5*x2 ) * t +
126 * ( 1*x1 )
127 * = (-0.5*t*t*t + 1 *t*t - 0.5*t ) * x0 +
128 * ( 1.5*t*t*t - 2.5*t*t + 1) * x1 +
129 * (-1.5*t*t*t + 2 *t*t + 0.5*t ) * x2 +
130 * ( 0.5*t*t*t - 0.5*t*t ) * x3
131 * = A0(t) * x0 + A1(t) * x1 + A2(t) * x2 + A3(t) * x3
132 *
133 * A0, A1, A2 and A3 stay within the range [-1,1].
134 * In the tables, they are scaled with 14 fractional bits.
135 *
136 * Turns out we don't need to store A2 and A3; they are symmetrical to A1 and A0.
137 *
138 * TODO: A0 and A3 stay very small indeed. Consider different scale/resolution?
139 */
140
/* Coefficient look-up tables for the cubic resampler, scaled with 14
 * fractional bits; A2/A3 are read as mirrored A1/A0 (see the derivation
 * above). Filled lazily by init_cubic(). */
static short cubicA0[1025], cubicA1[1025];

/* Populate the cubic coefficient tables. The first call does the work;
 * every later call returns immediately. NOTE(review): the 'done' latch is
 * not thread-safe on first use — confirm callers initialise single-threaded. */
static void init_cubic(void)
{
	unsigned int t; /* 3*1024*1024*1024 is within range if it's unsigned */
	static int done = 0;
	if (done) return;
	done = 1;
	for (t = 0; t < 1025; t++) {
		unsigned int t2 = t * t;  /* t squared */
		unsigned int t3 = t2 * t; /* t cubed; 3*t3 still fits in unsigned */
		/* int casts to pacify warnings about negating unsigned values */
		cubicA0[t] = (int)(t2 >> 6) - (int)(t3 >> 17) - (int)(t << 3);
		cubicA1[t] = (int)(3 * t3 >> 17) - (int)(5 * t2 >> 7) + (1 << 14);
	}
}
155
156
157
/* Create resamplers for 24-in-32-bit source samples. */

/* #define SUFFIX
 * MSVC warns if we try to paste a null SUFFIX, so instead we define
 * special macros for the function names that don't bother doing the
 * corresponding paste. The more generic definitions are further down.
 */
#define process_pickup PASTE(process_pickup, SUFFIX2)
#define dumb_resample PASTE(PASTE(dumb_resample, SUFFIX2), SUFFIX3)
#define dumb_resample_get_current_sample PASTE(PASTE(dumb_resample_get_current_sample, SUFFIX2), SUFFIX3)

#define SRCTYPE sample_t
#define SRCBITS 24
#define ALIAS(x, vol) MULSC(x, vol)
#define LINEAR(x0, x1) (x0 + MULSC(x1 - x0, subpos))
/*
#define SET_CUBIC_COEFFICIENTS(x0, x1, x2, x3) { \
	a = (3 * (x1 - x2) + (x3 - x0)) >> 1; \
	b = ((x2 << 2) + (x0 << 1) - (5 * x1 + x3)) >> 1; \
	c = (x2 - x0) >> 1; \
}
#define CUBIC(d) MULSC(MULSC(MULSC(MULSC(a, subpos) + b, subpos) + c, subpos) + d, vol)
*/
/* subpos >> 6 indexes the 1025-entry coefficient tables;
 * '1 + (subpos >> 6 ^ 1023)' is the mirrored index, exploiting the
 * A2/A3 <-> A1/A0 symmetry noted in the table derivation above. The << 2
 * widens the 14-fractional-bit table entries before the 16-bit MULSC scale. */
#define CUBIC(x0, x1, x2, x3) ( \
	MULSC(x0, cubicA0[subpos >> 6] << 2) + \
	MULSC(x1, cubicA1[subpos >> 6] << 2) + \
	MULSC(x2, cubicA1[1 + (subpos >> 6 ^ 1023)] << 2) + \
	MULSC(x3, cubicA0[1 + (subpos >> 6 ^ 1023)] << 2))
#define CUBICVOL(x, vol) MULSC(x, vol)
#include "resample.inc"

/* Undefine the simplified macros. */
#undef dumb_resample_get_current_sample
#undef dumb_resample
#undef process_pickup
193
194
/* Now define the proper ones that use SUFFIX. */
#define dumb_reset_resampler PASTE(dumb_reset_resampler, SUFFIX)
#define dumb_start_resampler PASTE(dumb_start_resampler, SUFFIX)
#define process_pickup PASTE(PASTE(process_pickup, SUFFIX), SUFFIX2)
#define dumb_resample PASTE(PASTE(PASTE(dumb_resample, SUFFIX), SUFFIX2), SUFFIX3)
#define dumb_resample_get_current_sample PASTE(PASTE(PASTE(dumb_resample_get_current_sample, SUFFIX), SUFFIX2), SUFFIX3)
#define dumb_end_resampler PASTE(dumb_end_resampler, SUFFIX)

/* Create resamplers for 16-bit source samples. */
#define SUFFIX _16
#define SRCTYPE short
#define SRCBITS 16
/* Source is 8 bits narrower than the 24-bit samples above, so the volume
 * scale shifts right by 8 fewer bits than ALIAS's MULSC (>> 16) there. */
#define ALIAS(x, vol) (x * vol >> 8)
#define LINEAR(x0, x1) ((x0 << 8) + MULSC16(x1 - x0, subpos))
/*
#define SET_CUBIC_COEFFICIENTS(x0, x1, x2, x3) { \
	a = (3 * (x1 - x2) + (x3 - x0)) << 7; \
	b = ((x2 << 2) + (x0 << 1) - (5 * x1 + x3)) << 7; \
	c = (x2 - x0) << 7; \
}
#define CUBIC(d) MULSC(MULSC(MULSC(MULSC(a, subpos) + b, subpos) + c, subpos) + (d << 8), vol)
*/
/* Same mirrored table lookup as the 24-bit CUBIC; here the samples are
 * multiplied by the raw 14-fractional-bit coefficients and CUBICVOL folds
 * the remaining scaling into the volume multiply (vol << 10, >> 32). */
#define CUBIC(x0, x1, x2, x3) ( \
	x0 * cubicA0[subpos >> 6] + \
	x1 * cubicA1[subpos >> 6] + \
	x2 * cubicA1[1 + (subpos >> 6 ^ 1023)] + \
	x3 * cubicA0[1 + (subpos >> 6 ^ 1023)])
#define CUBICVOL(x, vol) (int)((LONG_LONG)(x) * (vol << 10) >> 32)
#include "resample.inc"
224
/* Create resamplers for 8-bit source samples. */
#define SUFFIX _8
#define SRCTYPE signed char
#define SRCBITS 8
/* 8-bit source: no post-multiply shift at all (cf. >> 8 for 16-bit and
 * >> 16 for 24-bit above). */
#define ALIAS(x, vol) (x * vol)
#define LINEAR(x0, x1) ((x0 << 16) + (x1 - x0) * subpos)
/*
#define SET_CUBIC_COEFFICIENTS(x0, x1, x2, x3) { \
	a = 3 * (x1 - x2) + (x3 - x0); \
	b = ((x2 << 2) + (x0 << 1) - (5 * x1 + x3)) << 15; \
	c = (x2 - x0) << 15; \
}
#define CUBIC(d) MULSC(MULSC(MULSC((a * subpos >> 1) + b, subpos) + c, subpos) + (d << 16), vol)
*/
/* Same mirrored lookup as the other widths, shifted up 6 bits; CUBICVOL's
 * vol << 12 supplies the rest of the scaling. NOTE(review): verify the
 * combined scale against the 16-bit variant's vol << 10. */
#define CUBIC(x0, x1, x2, x3) (( \
	x0 * cubicA0[subpos >> 6] + \
	x1 * cubicA1[subpos >> 6] + \
	x2 * cubicA1[1 + (subpos >> 6 ^ 1023)] + \
	x3 * cubicA0[1 + (subpos >> 6 ^ 1023)]) << 6)
#define CUBICVOL(x, vol) (int)((LONG_LONG)(x) * (vol << 12) >> 32)
#include "resample.inc"


/* All three variants are built; remove the name-pasting macros. */
#undef dumb_reset_resampler
#undef dumb_start_resampler
#undef process_pickup
#undef dumb_resample
#undef dumb_resample_get_current_sample
#undef dumb_end_resampler
254
255
256
/* Width-dispatching wrapper: resets the resampler via the 8-bit, 16-bit or
 * (default) 24-in-32-bit implementation according to 'n'. */
void dumb_reset_resampler_n(int n, DUMB_RESAMPLER *resampler, void *src, int src_channels, long pos, long start, long end)
{
	switch (n) {
		case 8:
			dumb_reset_resampler_8(resampler, src, src_channels, pos, start, end);
			break;
		case 16:
			dumb_reset_resampler_16(resampler, src, src_channels, pos, start, end);
			break;
		default:
			dumb_reset_resampler(resampler, src, src_channels, pos, start, end);
			break;
	}
}
266
267
268
dumb_start_resampler_n(int n,void * src,int src_channels,long pos,long start,long end)269 DUMB_RESAMPLER *dumb_start_resampler_n(int n, void *src, int src_channels, long pos, long start, long end)
270 {
271 if (n == 8)
272 return dumb_start_resampler_8(src, src_channels, pos, start, end);
273 else if (n == 16)
274 return dumb_start_resampler_16(src, src_channels, pos, start, end);
275 else
276 return dumb_start_resampler(src, src_channels, pos, start, end);
277 }
278
279
280
/* Width-dispatching wrapper for mono-to-mono resampling; forwards to the
 * variant selected by 'n' (8, 16, or default 24-in-32-bit). */
long dumb_resample_n_1_1(int n, DUMB_RESAMPLER *resampler, sample_t *dst, long dst_size, float volume, float delta)
{
	switch (n) {
		case 8:
			return dumb_resample_8_1_1(resampler, dst, dst_size, volume, delta);
		case 16:
			return dumb_resample_16_1_1(resampler, dst, dst_size, volume, delta);
		default:
			return dumb_resample_1_1(resampler, dst, dst_size, volume, delta);
	}
}
290
291
292
/* Width-dispatching wrapper for mono-to-stereo resampling; forwards to the
 * variant selected by 'n' (8, 16, or default 24-in-32-bit). */
long dumb_resample_n_1_2(int n, DUMB_RESAMPLER *resampler, sample_t *dst, long dst_size, float volume_left, float volume_right, float delta)
{
	switch (n) {
		case 8:
			return dumb_resample_8_1_2(resampler, dst, dst_size, volume_left, volume_right, delta);
		case 16:
			return dumb_resample_16_1_2(resampler, dst, dst_size, volume_left, volume_right, delta);
		default:
			return dumb_resample_1_2(resampler, dst, dst_size, volume_left, volume_right, delta);
	}
}
302
303
304
/* Width-dispatching wrapper for stereo-to-mono resampling; forwards to the
 * variant selected by 'n' (8, 16, or default 24-in-32-bit). */
long dumb_resample_n_2_1(int n, DUMB_RESAMPLER *resampler, sample_t *dst, long dst_size, float volume_left, float volume_right, float delta)
{
	switch (n) {
		case 8:
			return dumb_resample_8_2_1(resampler, dst, dst_size, volume_left, volume_right, delta);
		case 16:
			return dumb_resample_16_2_1(resampler, dst, dst_size, volume_left, volume_right, delta);
		default:
			return dumb_resample_2_1(resampler, dst, dst_size, volume_left, volume_right, delta);
	}
}
314
315
316
/* Width-dispatching wrapper for stereo-to-stereo resampling; forwards to
 * the variant selected by 'n' (8, 16, or default 24-in-32-bit). */
long dumb_resample_n_2_2(int n, DUMB_RESAMPLER *resampler, sample_t *dst, long dst_size, float volume_left, float volume_right, float delta)
{
	switch (n) {
		case 8:
			return dumb_resample_8_2_2(resampler, dst, dst_size, volume_left, volume_right, delta);
		case 16:
			return dumb_resample_16_2_2(resampler, dst, dst_size, volume_left, volume_right, delta);
		default:
			return dumb_resample_2_2(resampler, dst, dst_size, volume_left, volume_right, delta);
	}
}
326
327
328
/* Width-dispatching wrapper: fetches the current mono sample via the
 * variant selected by 'n' (8, 16, or default 24-in-32-bit). */
void dumb_resample_get_current_sample_n_1_1(int n, DUMB_RESAMPLER *resampler, float volume, sample_t *dst)
{
	switch (n) {
		case 8:
			dumb_resample_get_current_sample_8_1_1(resampler, volume, dst);
			break;
		case 16:
			dumb_resample_get_current_sample_16_1_1(resampler, volume, dst);
			break;
		default:
			dumb_resample_get_current_sample_1_1(resampler, volume, dst);
			break;
	}
}
338
339
340
/* Width-dispatching wrapper: fetches the current mono-to-stereo sample via
 * the variant selected by 'n' (8, 16, or default 24-in-32-bit). */
void dumb_resample_get_current_sample_n_1_2(int n, DUMB_RESAMPLER *resampler, float volume_left, float volume_right, sample_t *dst)
{
	switch (n) {
		case 8:
			dumb_resample_get_current_sample_8_1_2(resampler, volume_left, volume_right, dst);
			break;
		case 16:
			dumb_resample_get_current_sample_16_1_2(resampler, volume_left, volume_right, dst);
			break;
		default:
			dumb_resample_get_current_sample_1_2(resampler, volume_left, volume_right, dst);
			break;
	}
}
350
351
352
/* Width-dispatching wrapper: fetches the current stereo-to-mono sample via
 * the variant selected by 'n' (8, 16, or default 24-in-32-bit). */
void dumb_resample_get_current_sample_n_2_1(int n, DUMB_RESAMPLER *resampler, float volume_left, float volume_right, sample_t *dst)
{
	switch (n) {
		case 8:
			dumb_resample_get_current_sample_8_2_1(resampler, volume_left, volume_right, dst);
			break;
		case 16:
			dumb_resample_get_current_sample_16_2_1(resampler, volume_left, volume_right, dst);
			break;
		default:
			dumb_resample_get_current_sample_2_1(resampler, volume_left, volume_right, dst);
			break;
	}
}
362
363
364
/* Width-dispatching wrapper: fetches the current stereo sample pair via
 * the variant selected by 'n' (8, 16, or default 24-in-32-bit). */
void dumb_resample_get_current_sample_n_2_2(int n, DUMB_RESAMPLER *resampler, float volume_left, float volume_right, sample_t *dst)
{
	switch (n) {
		case 8:
			dumb_resample_get_current_sample_8_2_2(resampler, volume_left, volume_right, dst);
			break;
		case 16:
			dumb_resample_get_current_sample_16_2_2(resampler, volume_left, volume_right, dst);
			break;
		default:
			dumb_resample_get_current_sample_2_2(resampler, volume_left, volume_right, dst);
			break;
	}
}
374
375
376
/* Width-dispatching wrapper: tears down the resampler via the variant
 * selected by 'n' (8, 16, or default 24-in-32-bit). */
void dumb_end_resampler_n(int n, DUMB_RESAMPLER *resampler)
{
	switch (n) {
		case 8:
			dumb_end_resampler_8(resampler);
			break;
		case 16:
			dumb_end_resampler_16(resampler);
			break;
		default:
			dumb_end_resampler(resampler);
			break;
	}
}
386