/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "HRTFPanner.h"

#include "AudioBus.h"
#include "FFTConvolver.h"
#include "HRTFDatabase.h"
#include "HRTFDatabaseLoader.h"
#include <algorithm>
#include <wtf/MathExtras.h>
#include <wtf/RefPtr.h>

using namespace std;

namespace WebCore {

// The value of 2 milliseconds is larger than the largest delay which exists in any HRTFKernel from the default HRTFDatabase (about 0.00136 seconds).
// We ASSERT that the delay values used in process() stay below this value.
const double MaxDelayTimeSeconds = 0.002;

HRTFPanner::HRTFPanner(double sampleRate)
    : Panner(PanningModelHRTF)
    , m_sampleRate(sampleRate)
    , m_isFirstRender(true)
    , m_azimuthIndex(0)
    , m_convolverL(fftSizeForSampleRate(sampleRate))
    , m_convolverR(fftSizeForSampleRate(sampleRate))
    , m_delayLineL(MaxDelayTimeSeconds, sampleRate)
    , m_delayLineR(MaxDelayTimeSeconds, sampleRate)
{
}

HRTFPanner::~HRTFPanner()
{
}

size_t HRTFPanner::fftSizeForSampleRate(double sampleRate)
{
    // The HRTF impulse responses (loaded as audio resources) are 512 sample-frames at 44.1 kHz.
    // Currently, we truncate the impulse responses to half this size, but an FFT size of twice the impulse response size is needed (for convolution).
    // So for sample rates around 44.1 kHz an FFT size of 512 is good. We double that size for higher sample rates.
    ASSERT(sampleRate >= 44100.0 && sampleRate <= 96000.0);
    return (sampleRate <= 48000.0) ? 512 : 1024;
}

void HRTFPanner::reset()
{
    m_isFirstRender = true;
    m_convolverL.reset();
    m_convolverR.reset();
    m_delayLineL.reset();
    m_delayLineR.reset();
}

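// Returns true if the shorter path between azimuth indices i and j (treating the indices as positions
// on a circle of the given length) wraps around the ends rather than going directly between them.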
static bool wrapDistance(int i, int j, int length)
{
    int directDistance = abs(i - j);
    int indirectDistance = length - directDistance;

    return indirectDistance < directDistance;
}

int HRTFPanner::calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend)
{
    // Convert the azimuth angle from the range -180 -> +180 into the range 0 -> 360.
    // The azimuth index may then be calculated from this positive value.
    if (azimuth < 0)
        azimuth += 360.0;

    HRTFDatabase* database = HRTFDatabaseLoader::defaultHRTFDatabase();
    ASSERT(database);

    int numberOfAzimuths = database->numberOfAzimuths();
    const double angleBetweenAzimuths = 360.0 / numberOfAzimuths;

    // Calculate the azimuth index and the blend (0 -> 1) for interpolation.
    double desiredAzimuthIndexFloat = azimuth / angleBetweenAzimuths;
    int desiredAzimuthIndex = static_cast<int>(desiredAzimuthIndexFloat);
    azimuthBlend = desiredAzimuthIndexFloat - static_cast<double>(desiredAzimuthIndex);
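    // For example, if the database had 24 azimuths (15 degrees apart), an azimuth of 20 degrees would
    // give desiredAzimuthIndexFloat = 1.333..., so index 1 with a blend of 0.333... toward index 2.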

    // We don't immediately start using this azimuth index, but instead approach this index from the last index we rendered at.
    // This minimizes the clicks and graininess which otherwise occur with moving sources.
    desiredAzimuthIndex = max(0, desiredAzimuthIndex);
    desiredAzimuthIndex = min(numberOfAzimuths - 1, desiredAzimuthIndex);
    return desiredAzimuthIndex;
}

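// Renders framesToProcess sample-frames of inputBus (mono or stereo) to the stereo outputBus,
// spatializing with the HRTF kernel pair for the given azimuth (in degrees, -180 -> +180) and elevation.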
void HRTFPanner::pan(double desiredAzimuth, double elevation, AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
{
    unsigned numInputChannels = inputBus ? inputBus->numberOfChannels() : 0;

    bool isInputGood = inputBus && numInputChannels >= 1 && numInputChannels <= 2;
    ASSERT(isInputGood);

    bool isOutputGood = outputBus && outputBus->numberOfChannels() == 2 && framesToProcess <= outputBus->length();
    ASSERT(isOutputGood);

    if (!isInputGood || !isOutputGood) {
        if (outputBus)
            outputBus->zero();
        return;
    }

    // This code only runs as long as the context is alive and after the database has been loaded.
    HRTFDatabase* database = HRTFDatabaseLoader::defaultHRTFDatabase();
    ASSERT(database);
    if (!database) {
        outputBus->zero();
        return;
    }

    // The IRCAM HRTF azimuth values in the loaded database are reversed from the panner's notion of azimuth.
    double azimuth = -desiredAzimuth;

    bool isAzimuthGood = azimuth >= -180.0 && azimuth <= 180.0;
    ASSERT(isAzimuthGood);
    if (!isAzimuthGood) {
        outputBus->zero();
        return;
    }

    // Normally, we'll just be dealing with mono sources.
    // If we have a stereo input, implement stereo panning with the left source processed by the left HRTF and the right source by the right HRTF.
    AudioChannel* inputChannelL = inputBus->channelByType(AudioBus::ChannelLeft);
    AudioChannel* inputChannelR = numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) : 0;

    // Get source and destination pointers.
    float* sourceL = inputChannelL->data();
    float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL;
    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->data();
    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->data();

    double azimuthBlend;
    int desiredAzimuthIndex = calculateDesiredAzimuthIndexAndBlend(azimuth, azimuthBlend);

    // This algorithm currently requires that we process in power-of-two size chunks of at least 128 frames.
    ASSERT(1UL << static_cast<int>(log2(framesToProcess)) == framesToProcess);
    ASSERT(framesToProcess >= 128);

    const unsigned framesPerSegment = 128;
    const unsigned numberOfSegments = framesToProcess / framesPerSegment;

    for (unsigned segment = 0; segment < numberOfSegments; ++segment) {
        if (m_isFirstRender) {
            // Snap exactly to the desired position (first time and after reset()).
            m_azimuthIndex = desiredAzimuthIndex;
            m_isFirstRender = false;
        } else {
            // Each segment renders with an azimuth index closer by one to the desired azimuth index.
            // Because the inter-aural time delay is mostly a function of azimuth, and the delay is where the clicks and graininess come from,
            // we don't bother smoothing the elevations.
            int numberOfAzimuths = database->numberOfAzimuths();
            bool wrap = wrapDistance(m_azimuthIndex, desiredAzimuthIndex, numberOfAzimuths);
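            // If the wrapped path is shorter, step away from the desired index and let the modulo
            // arithmetic carry us around the 0/numberOfAzimuths boundary; otherwise step directly toward it.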
            if (wrap) {
                if (m_azimuthIndex < desiredAzimuthIndex)
                    m_azimuthIndex = (m_azimuthIndex - 1 + numberOfAzimuths) % numberOfAzimuths;
                else if (m_azimuthIndex > desiredAzimuthIndex)
                    m_azimuthIndex = (m_azimuthIndex + 1) % numberOfAzimuths;
            } else {
                if (m_azimuthIndex < desiredAzimuthIndex)
                    m_azimuthIndex = (m_azimuthIndex + 1) % numberOfAzimuths;
                else if (m_azimuthIndex > desiredAzimuthIndex)
                    m_azimuthIndex = (m_azimuthIndex - 1 + numberOfAzimuths) % numberOfAzimuths;
            }
        }

        // Get the HRTFKernels and interpolated delays.
        HRTFKernel* kernelL;
        HRTFKernel* kernelR;
        double frameDelayL;
        double frameDelayR;
        database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex, elevation, kernelL, kernelR, frameDelayL, frameDelayR);

        ASSERT(kernelL && kernelR);
        if (!kernelL || !kernelR) {
            outputBus->zero();
            return;
        }

        ASSERT(frameDelayL / sampleRate() < MaxDelayTimeSeconds && frameDelayR / sampleRate() < MaxDelayTimeSeconds);

        // Calculate the source and destination pointers for the current segment.
        unsigned offset = segment * framesPerSegment;
        float* segmentSourceL = sourceL + offset;
        float* segmentSourceR = sourceR + offset;
        float* segmentDestinationL = destinationL + offset;
        float* segmentDestinationR = destinationR + offset;

        // First run through the delay lines for the inter-aural time difference.
        m_delayLineL.setDelayFrames(frameDelayL);
        m_delayLineR.setDelayFrames(frameDelayR);
        m_delayLineL.process(segmentSourceL, segmentDestinationL, framesPerSegment);
        m_delayLineR.process(segmentSourceR, segmentDestinationR, framesPerSegment);

        // Now do the convolutions in-place.
        m_convolverL.process(kernelL->fftFrame(), segmentDestinationL, segmentDestinationL, framesPerSegment);
        m_convolverR.process(kernelR->fftFrame(), segmentDestinationR, segmentDestinationR, framesPerSegment);
    }
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)