1 //============================================================================
2 //
3 //   SSSS    tt          lll  lll
4 //  SS  SS   tt           ll   ll
5 //  SS     tttttt  eeee   ll   ll   aaaa
6 //   SSSS    tt   ee  ee  ll   ll      aa
7 //      SS   tt   eeeeee  ll   ll   aaaaa  --  "An Atari 2600 VCS Emulator"
8 //  SS  SS   tt   ee      ll   ll  aa  aa
9 //   SSSS     ttt  eeeee llll llll  aaaaa
10 //
11 // Copyright (c) 1995-2021 by Bradford W. Mott, Stephen Anthony
12 // and the Stella Team
13 //
14 // See the file "License.txt" for information on usage and redistribution of
15 // this file, and for a DISCLAIMER OF ALL WARRANTIES.
16 //============================================================================
17 
#include <cmath>
#include <numeric>

#include "LanczosResampler.hxx"
21 
22 namespace {
23 
24   constexpr float CLIPPING_FACTOR = 0.75;
25   constexpr float HIGH_PASS_CUT_OFF = 10;
26 
reducedDenominator(uInt32 n,uInt32 d)27   uInt32 reducedDenominator(uInt32 n, uInt32 d)
28   {
29     for (uInt32 i = std::min(n ,d); i > 1; --i) {
30       if ((n % i == 0) && (d % i == 0)) {
31         n /= i;
32         d /= i;
33         i = std::min(n ,d);
34       }
35     }
36 
37     return d;
38   }
39 
sinc(float x)40   float sinc(float x)
41   {
42     // We calculate the sinc with double precision in order to compensate for precision loss
43     // around zero
44     return x == 0.F ? 1 : static_cast<float>(
45         sin(BSPF::PI_d * static_cast<double>(x)) / BSPF::PI_d / static_cast<double>(x)
46     );
47   }
48 
lanczosKernel(float x,uInt32 a)49   float lanczosKernel(float x, uInt32 a) {
50     return sinc(x) * sinc(x / static_cast<float>(a));
51   }
52 
53 }
54 
55 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
LanczosResampler(Resampler::Format formatFrom,Resampler::Format formatTo,const Resampler::NextFragmentCallback & nextFragmentCallback,uInt32 kernelParameter)56 LanczosResampler::LanczosResampler(
57   Resampler::Format formatFrom,
58   Resampler::Format formatTo,
59   const Resampler::NextFragmentCallback& nextFragmentCallback,
60   uInt32 kernelParameter)
61 :
62   Resampler(formatFrom, formatTo, nextFragmentCallback),
63   // In order to find the number of kernels we need to precompute, we need to find N minimal such that
64   //
65   // N / formatTo.sampleRate = M / formatFrom.sampleRate
66   //
67   // with integral N and M. Equivalently, we have
68   //
69   // formatFrom.sampleRate / formatTo.sampleRate = M / N
70   //
71   // -> we find N from fully reducing the fraction.
72   myPrecomputedKernelCount{reducedDenominator(formatFrom.sampleRate, formatTo.sampleRate)},
73   myKernelSize{2 * kernelParameter},
74   myKernelParameter{kernelParameter},
75   myHighPassL{HIGH_PASS_CUT_OFF, float(formatFrom.sampleRate)},
76   myHighPassR{HIGH_PASS_CUT_OFF, float(formatFrom.sampleRate)},
77   myHighPass{HIGH_PASS_CUT_OFF, float(formatFrom.sampleRate)}
78 {
79   myPrecomputedKernels = make_unique<float[]>(myPrecomputedKernelCount * myKernelSize);
80 
81   if (myFormatFrom.stereo)
82   {
83     myBufferL = make_unique<ConvolutionBuffer>(myKernelSize);
84     myBufferR = make_unique<ConvolutionBuffer>(myKernelSize);
85   }
86   else
87     myBuffer = make_unique<ConvolutionBuffer>(myKernelSize);
88 
89   precomputeKernels();
90 }
91 
92 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
precomputeKernels()93 void LanczosResampler::precomputeKernels()
94 {
95   // timeIndex = time * formatFrom.sampleRate * formatTo.sampleRAte
96   uInt32 timeIndex = 0;
97 
98   for (uInt32 i = 0; i < myPrecomputedKernelCount; ++i) {
99     float* kernel = myPrecomputedKernels.get() + myKernelSize * i;
100     // The kernel is normalized such to be evaluate on time * formatFrom.sampleRate
101     float center =
102       static_cast<float>(timeIndex) / static_cast<float>(myFormatTo.sampleRate);
103 
104     for (uInt32 j = 0; j < 2 * myKernelParameter; ++j) {
105       kernel[j] = lanczosKernel(
106           center - static_cast<float>(j) + static_cast<float>(myKernelParameter) - 1.F, myKernelParameter
107         ) * CLIPPING_FACTOR;
108     }
109 
110     // Next step: time += 1 / formatTo.sampleRate
111     //
112     // By construction, we limit the argument during kernel evaluation to 0 .. 1, which
113     // corresponds to 0 .. 1 / formatFrom.sampleRate for time. To implement this, we decompose
114     // time as follows:
115     //
116     // time = N / formatFrom.sampleRate + delta
117     // timeIndex = N * formatTo.sampleRate + delta * formatTo.sampleRate * formatFrom.sampleRate
118     //
119     // with N integral and delta < 0. From this, it follows that we replace
120     // time with delta, i.e. take the modulus of timeIndex.
121     timeIndex = (timeIndex + myFormatFrom.sampleRate) % myFormatTo.sampleRate;
122   }
123 }
124 
125 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fillFragment(float * fragment,uInt32 length)126 void LanczosResampler::fillFragment(float* fragment, uInt32 length)
127 {
128   if (myIsUnderrun) {
129     Int16* nextFragment = myNextFragmentCallback();
130 
131     if (nextFragment) {
132       myCurrentFragment = nextFragment;
133       myFragmentIndex = 0;
134       myIsUnderrun = false;
135     }
136   }
137 
138   if (!myCurrentFragment) {
139     std::fill_n(fragment, length, 0.F);
140     return;
141   }
142 
143   const uInt32 outputSamples = myFormatTo.stereo ? (length >> 1) : length;
144 
145   for (uInt32 i = 0; i < outputSamples; ++i) {
146     float* kernel = myPrecomputedKernels.get() + (myCurrentKernelIndex * myKernelSize);
147     myCurrentKernelIndex = (myCurrentKernelIndex + 1) % myPrecomputedKernelCount;
148 
149     if (myFormatFrom.stereo) {
150       float sampleL = myBufferL->convoluteWith(kernel);
151       float sampleR = myBufferR->convoluteWith(kernel);
152 
153       if (myFormatTo.stereo) {
154         fragment[2*i] = sampleL;
155         fragment[2*i + 1] = sampleR;
156       }
157       else
158         fragment[i] = (sampleL + sampleR) / 2.F;
159     } else {
160       float sample = myBuffer->convoluteWith(kernel);
161 
162       if (myFormatTo.stereo)
163         fragment[2*i] = fragment[2*i + 1] = sample;
164       else
165         fragment[i] = sample;
166     }
167 
168     myTimeIndex += myFormatFrom.sampleRate;
169 
170     uInt32 samplesToShift = myTimeIndex / myFormatTo.sampleRate;
171     if (samplesToShift == 0) continue;
172 
173     myTimeIndex %= myFormatTo.sampleRate;
174     shiftSamples(samplesToShift);
175   }
176 }
177 
178 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
shiftSamples(uInt32 samplesToShift)179 inline void LanczosResampler::shiftSamples(uInt32 samplesToShift)
180 {
181   while (samplesToShift-- > 0) {
182     if (myFormatFrom.stereo) {
183       myBufferL->shift(myHighPassL.apply(myCurrentFragment[2*myFragmentIndex] / static_cast<float>(0x7fff)));
184       myBufferR->shift(myHighPassR.apply(myCurrentFragment[2*myFragmentIndex + 1] / static_cast<float>(0x7fff)));
185     }
186     else
187       myBuffer->shift(myHighPass.apply(myCurrentFragment[myFragmentIndex] / static_cast<float>(0x7fff)));
188 
189     ++myFragmentIndex;
190 
191     if (myFragmentIndex >= myFormatFrom.fragmentSize) {
192       myFragmentIndex %= myFormatFrom.fragmentSize;
193 
194       Int16* nextFragment = myNextFragmentCallback();
195       if (nextFragment) {
196         myCurrentFragment = nextFragment;
197         myIsUnderrun = false;
198       } else {
199         myUnderrunLogger.log();
200         myIsUnderrun = true;
201       }
202     }
203   }
204 }
205