/*
Copyright (C) 2016 Apple Inc. All Rights Reserved.
See LICENSE.txt for this sample's licensing information

Abstract:
Part of Core Audio AUBase Classes
*/
8 
9 #include "AUEffectBase.h"
10 
/*
	This class does not deal as well as it should with N-M effects...

	The problem areas (if the input and output channel counts don't match) are:
		ProcessInPlace - there will be problems if InputChan != OutputChan
		Bypass - it's just passing the buffers through when not processing them

	This will be fixed in a future update...
*/
20 
21 //_____________________________________________________________________________
22 //
AUEffectBase(AudioComponentInstance audioUnit,bool inProcessesInPlace)23 AUEffectBase::AUEffectBase(	AudioComponentInstance	audioUnit,
24 							bool					inProcessesInPlace ) :
25 	AUBase(audioUnit, 1, 1),		// 1 in bus, 1 out bus
26 	mBypassEffect(false),
27 	mParamSRDep (false),
28 	mProcessesInPlace(inProcessesInPlace),
29 	mMainOutput(NULL), mMainInput(NULL)
30 #if TARGET_OS_IPHONE
31 	, mOnlyOneKernel(false)
32 #endif
33 {
34 }
35 
36 //_____________________________________________________________________________
37 //
~AUEffectBase()38 AUEffectBase::~AUEffectBase()
39 {
40 	Cleanup();
41 }
42 
43 //_____________________________________________________________________________
44 //
Cleanup()45 void AUEffectBase::Cleanup()
46 {
47 	for (KernelList::iterator it = mKernelList.begin(); it != mKernelList.end(); ++it)
48 		delete *it;
49 
50 	mKernelList.clear();
51 	mMainOutput = NULL;
52 	mMainInput = NULL;
53 }
54 
55 
56 //_____________________________________________________________________________
57 //
Initialize()58 OSStatus AUEffectBase::Initialize()
59 {
60 		// get our current numChannels for input and output
61 	SInt16 auNumInputs = (SInt16) GetInput(0)->GetStreamFormat().mChannelsPerFrame;
62 	SInt16 auNumOutputs = (SInt16) GetOutput(0)->GetStreamFormat().mChannelsPerFrame;
63 
64 		// does the unit publish specific information about channel configurations?
65     const AUChannelInfo *auChannelConfigs = NULL;
66     UInt32 numIOconfigs = SupportedNumChannels(&auChannelConfigs);
67 
68     if ((numIOconfigs > 0) && (auChannelConfigs != NULL))
69     {
70         bool foundMatch = false;
71         for (UInt32 i = 0; (i < numIOconfigs) && !foundMatch; ++i)
72         {
73             SInt16 configNumInputs = auChannelConfigs[i].inChannels;
74             SInt16 configNumOutputs = auChannelConfigs[i].outChannels;
75             if ((configNumInputs < 0) && (configNumOutputs < 0))
76             {
77 					// unit accepts any number of channels on input and output
78                 if (((configNumInputs == -1) && (configNumOutputs == -2))
79 					|| ((configNumInputs == -2) && (configNumOutputs == -1)))
80                 {
81 				    foundMatch = true;
82 					// unit accepts any number of channels on input and output IFF they are the same number on both scopes
83                 }
84 				else if (((configNumInputs == -1) && (configNumOutputs == -1)) && (auNumInputs == auNumOutputs))
85                 {
86 				    foundMatch = true;
87 					// unit has specified a particular number of channels on both scopes
88                 }
89 				else
90                     continue;
91             }
92             else
93             {
94 					// the -1 case on either scope is saying that the unit doesn't care about the
95 					// number of channels on that scope
96                 bool inputMatch = (auNumInputs == configNumInputs) || (configNumInputs == -1);
97                 bool outputMatch = (auNumOutputs == configNumOutputs) || (configNumOutputs == -1);
98                 if (inputMatch && outputMatch)
99                     foundMatch = true;
100             }
101         }
102         if (!foundMatch)
103             return kAudioUnitErr_FormatNotSupported;
104     }
105     else
106     {
107 			// there is no specifically published channel info
108 			// so for those kinds of effects, the assumption is that the channels (whatever their number)
109 			// should match on both scopes
110         if ((auNumOutputs != auNumInputs) || (auNumOutputs == 0))
111 		{
112 		    return kAudioUnitErr_FormatNotSupported;
113 		}
114     }
115 
116     MaintainKernels();
117 
118 	mMainOutput = GetOutput(0);
119 	mMainInput = GetInput(0);
120 
121 	const CAStreamBasicDescription& format = GetStreamFormat(kAudioUnitScope_Output, 0);
122 	format.IdentifyCommonPCMFormat(mCommonPCMFormat, NULL);
123 	mBytesPerFrame = format.mBytesPerFrame;
124 
125     return noErr;
126 }
127 
Reset(AudioUnitScope inScope,AudioUnitElement inElement)128 OSStatus			AUEffectBase::Reset(		AudioUnitScope 		inScope,
129 								 				AudioUnitElement 	inElement)
130 {
131 	for (KernelList::iterator it = mKernelList.begin(); it != mKernelList.end(); ++it) {
132 		AUKernelBase *kernel = *it;
133 		if (kernel != NULL)
134 			kernel->Reset();
135 	}
136 
137 	return AUBase::Reset(inScope, inElement);
138 }
139 
GetPropertyInfo(AudioUnitPropertyID inID,AudioUnitScope inScope,AudioUnitElement inElement,UInt32 & outDataSize,Boolean & outWritable)140 OSStatus			AUEffectBase::GetPropertyInfo (AudioUnitPropertyID	inID,
141 												AudioUnitScope					inScope,
142 												AudioUnitElement				inElement,
143 												UInt32 &						outDataSize,
144 												Boolean &						outWritable)
145 {
146 	if (inScope == kAudioUnitScope_Global) {
147 		switch (inID) {
148 			case kAudioUnitProperty_BypassEffect:
149 				outWritable = true;
150 				outDataSize = sizeof (UInt32);
151 				return noErr;
152 			case kAudioUnitProperty_InPlaceProcessing:
153 				outWritable = true;
154 				outDataSize = sizeof (UInt32);
155 				return noErr;
156 		}
157 	}
158 	return AUBase::GetPropertyInfo (inID, inScope, inElement, outDataSize, outWritable);
159 }
160 
161 
GetProperty(AudioUnitPropertyID inID,AudioUnitScope inScope,AudioUnitElement inElement,void * outData)162 OSStatus			AUEffectBase::GetProperty (AudioUnitPropertyID 		inID,
163 									  AudioUnitScope 					inScope,
164 									  AudioUnitElement			 		inElement,
165 									  void *							outData)
166 {
167 	if (inScope == kAudioUnitScope_Global) {
168 		switch (inID) {
169 			case kAudioUnitProperty_BypassEffect:
170 				*((UInt32*)outData) = (IsBypassEffect() ? 1 : 0);
171 				return noErr;
172 			case kAudioUnitProperty_InPlaceProcessing:
173 				*((UInt32*)outData) = (mProcessesInPlace ? 1 : 0);
174 				return noErr;
175 		}
176 	}
177 	return AUBase::GetProperty (inID, inScope, inElement, outData);
178 }
179 
180 
SetProperty(AudioUnitPropertyID inID,AudioUnitScope inScope,AudioUnitElement inElement,const void * inData,UInt32 inDataSize)181 OSStatus			AUEffectBase::SetProperty(		AudioUnitPropertyID inID,
182 									   AudioUnitScope 		inScope,
183 									   AudioUnitElement 	inElement,
184 									   const void *			inData,
185 									   UInt32 				inDataSize)
186 {
187 	if (inScope == kAudioUnitScope_Global) {
188 		switch (inID) {
189 			case kAudioUnitProperty_BypassEffect:
190 			{
191 				if (inDataSize < sizeof(UInt32))
192 					return kAudioUnitErr_InvalidPropertyValue;
193 
194 				bool tempNewSetting = *((UInt32*)inData) != 0;
195 					// we're changing the state of bypass
196 				if (tempNewSetting != IsBypassEffect())
197 				{
198 					if (!tempNewSetting && IsBypassEffect() && IsInitialized()) // turning bypass off and we're initialized
199 						Reset(0, 0);
200 					SetBypassEffect (tempNewSetting);
201 				}
202 				return noErr;
203 			}
204 			case kAudioUnitProperty_InPlaceProcessing:
205 				mProcessesInPlace = (*((UInt32*)inData) != 0);
206 				return noErr;
207 		}
208 	}
209 	return AUBase::SetProperty (inID, inScope, inElement, inData, inDataSize);
210 }
211 
212 
MaintainKernels()213 void	AUEffectBase::MaintainKernels()
214 {
215 #if TARGET_OS_IPHONE
216 	UInt32 nKernels = mOnlyOneKernel ? 1 : GetNumberOfChannels();
217 #else
218 	UInt32 nKernels = GetNumberOfChannels();
219 #endif
220 
221 	if (mKernelList.size() < nKernels) {
222 		mKernelList.reserve(nKernels);
223 		for (UInt32 i = (UInt32)mKernelList.size(); i < nKernels; ++i)
224 			mKernelList.push_back(NewKernel());
225 	} else {
226 		while (mKernelList.size() > nKernels) {
227 			AUKernelBase *kernel = mKernelList.back();
228 			delete kernel;
229 			mKernelList.pop_back();
230 		}
231 	}
232 
233 	for(unsigned int i = 0; i < nKernels; i++ )
234 	{
235 		if(mKernelList[i]) {
236 			mKernelList[i]->SetChannelNum (i);
237 		}
238 	}
239 }
240 
StreamFormatWritable(AudioUnitScope scope,AudioUnitElement element)241 bool		AUEffectBase::StreamFormatWritable(	AudioUnitScope					scope,
242 												AudioUnitElement				element)
243 {
244 	return IsInitialized() ? false : true;
245 }
246 
ChangeStreamFormat(AudioUnitScope inScope,AudioUnitElement inElement,const CAStreamBasicDescription & inPrevFormat,const CAStreamBasicDescription & inNewFormat)247 OSStatus			AUEffectBase::ChangeStreamFormat(	AudioUnitScope				inScope,
248 														AudioUnitElement			inElement,
249 														const CAStreamBasicDescription & inPrevFormat,
250 														const CAStreamBasicDescription & inNewFormat)
251 {
252 	OSStatus result = AUBase::ChangeStreamFormat(inScope, inElement, inPrevFormat, inNewFormat);
253 	if (result == noErr)
254 	{
255 		// for the moment this only dependency we know about
256 		// where a parameter's range may change is with the sample rate
257 		// and effects are only publishing parameters in the global scope!
258 		if (GetParamHasSampleRateDependency() && fnotequal(inPrevFormat.mSampleRate, inNewFormat.mSampleRate))
259 			PropertyChanged(kAudioUnitProperty_ParameterList, kAudioUnitScope_Global, 0);
260 	}
261 
262 	return result;
263 }
264 
265 
266 // ____________________________________________________________________________
267 //
268 //	This method is called (potentially repeatedly) by ProcessForScheduledParams()
269 //	in order to perform the actual DSP required for this portion of the entire buffer
270 //	being processed.  The entire buffer can be divided up into smaller "slices"
271 //	according to the timestamps on the scheduled parameters...
272 //
ProcessScheduledSlice(void * inUserData,UInt32 inStartFrameInBuffer,UInt32 inSliceFramesToProcess,UInt32 inTotalBufferFrames)273 OSStatus		AUEffectBase::ProcessScheduledSlice(	void				*inUserData,
274 														UInt32				inStartFrameInBuffer,
275 														UInt32				inSliceFramesToProcess,
276 														UInt32				inTotalBufferFrames )
277 {
278 	ScheduledProcessParams	&sliceParams = *((ScheduledProcessParams*)inUserData);
279 
280 	AudioUnitRenderActionFlags 	&actionFlags = *sliceParams.actionFlags;
281 	AudioBufferList 			&inputBufferList = *sliceParams.inputBufferList;
282 	AudioBufferList 			&outputBufferList = *sliceParams.outputBufferList;
283 
284 	UInt32 channelSize = inSliceFramesToProcess * mBytesPerFrame;
285 		// fix the size of the buffer we're operating on before we render this slice of time
286 	for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
287 		inputBufferList.mBuffers[i].mDataByteSize = inputBufferList.mBuffers[i].mNumberChannels * channelSize;
288 	}
289 
290 	for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
291 		outputBufferList.mBuffers[i].mDataByteSize = outputBufferList.mBuffers[i].mNumberChannels * channelSize;
292 	}
293 		// process the buffer
294 	OSStatus result = ProcessBufferLists(actionFlags, inputBufferList, outputBufferList, inSliceFramesToProcess );
295 
296 		// we just partially processed the buffers, so increment the data pointers to the next part of the buffer to process
297 	for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
298 		inputBufferList.mBuffers[i].mData =
299 			(char *)inputBufferList.mBuffers[i].mData + inputBufferList.mBuffers[i].mNumberChannels * channelSize;
300 	}
301 
302 	for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
303 		outputBufferList.mBuffers[i].mData =
304 			(char *)outputBufferList.mBuffers[i].mData + outputBufferList.mBuffers[i].mNumberChannels * channelSize;
305 	}
306 
307 	return result;
308 }
309 
310 // ____________________________________________________________________________
311 //
312 
Render(AudioUnitRenderActionFlags & ioActionFlags,const AudioTimeStamp & inTimeStamp,UInt32 nFrames)313 OSStatus 	AUEffectBase::Render(	AudioUnitRenderActionFlags &ioActionFlags,
314 											const AudioTimeStamp &		inTimeStamp,
315 											UInt32						nFrames)
316 {
317 	if (!HasInput(0))
318 		return kAudioUnitErr_NoConnection;
319 
320 	OSStatus result = noErr;
321 
322 	result = mMainInput->PullInput(ioActionFlags, inTimeStamp, 0 /* element */, nFrames);
323 
324 	if (result == noErr)
325 	{
326 		if(ProcessesInPlace() && mMainOutput->WillAllocateBuffer())
327 		{
328 			mMainOutput->SetBufferList(mMainInput->GetBufferList() );
329 		}
330 
331 		if (ShouldBypassEffect())
332 		{
333 			// leave silence bit alone
334 
335 			if(!ProcessesInPlace() )
336 			{
337 				mMainInput->CopyBufferContentsTo (mMainOutput->GetBufferList());
338 			}
339 		}
340 		else
341 		{
342 			if(mParamList.size() == 0 )
343 			{
344 				// this will read/write silence bit
345 				result = ProcessBufferLists(ioActionFlags, mMainInput->GetBufferList(), mMainOutput->GetBufferList(), nFrames);
346 			}
347 			else
348 			{
349 				// deal with scheduled parameters...
350 
351 				AudioBufferList &inputBufferList = mMainInput->GetBufferList();
352 				AudioBufferList &outputBufferList = mMainOutput->GetBufferList();
353 
354 				ScheduledProcessParams processParams;
355 				processParams.actionFlags = &ioActionFlags;
356 				processParams.inputBufferList = &inputBufferList;
357 				processParams.outputBufferList = &outputBufferList;
358 
359 				// divide up the buffer into slices according to scheduled params then
360 				// do the DSP for each slice (ProcessScheduledSlice() called for each slice)
361 				result = ProcessForScheduledParams(	mParamList,
362 													nFrames,
363 													&processParams );
364 
365 
366 				// fixup the buffer pointers to how they were before we started
367 				UInt32 channelSize = nFrames * mBytesPerFrame;
368 				for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
369 					UInt32 size = inputBufferList.mBuffers[i].mNumberChannels * channelSize;
370 					inputBufferList.mBuffers[i].mData = (char *)inputBufferList.mBuffers[i].mData - size;
371 					inputBufferList.mBuffers[i].mDataByteSize = size;
372 				}
373 
374 				for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
375 					UInt32 size = outputBufferList.mBuffers[i].mNumberChannels * channelSize;
376 					outputBufferList.mBuffers[i].mData = (char *)outputBufferList.mBuffers[i].mData - size;
377 					outputBufferList.mBuffers[i].mDataByteSize = size;
378 				}
379 			}
380 		}
381 
382 		if ( (ioActionFlags & kAudioUnitRenderAction_OutputIsSilence) && !ProcessesInPlace() )
383 		{
384 			AUBufferList::ZeroBuffer(mMainOutput->GetBufferList() );
385 		}
386 	}
387 
388 	return result;
389 }
390 
391 
ProcessBufferLists(AudioUnitRenderActionFlags & ioActionFlags,const AudioBufferList & inBuffer,AudioBufferList & outBuffer,UInt32 inFramesToProcess)392 OSStatus	AUEffectBase::ProcessBufferLists(
393 									AudioUnitRenderActionFlags &	ioActionFlags,
394 									const AudioBufferList &			inBuffer,
395 									AudioBufferList &				outBuffer,
396 									UInt32							inFramesToProcess )
397 {
398 	if (ShouldBypassEffect())
399 		return noErr;
400 
401 	// interleaved (or mono)
402 	switch (mCommonPCMFormat) {
403 		case CAStreamBasicDescription::kPCMFormatFloat32 :
404 			ProcessBufferListsT<Float32>(ioActionFlags, inBuffer, outBuffer, inFramesToProcess);
405 			break;
406 		case CAStreamBasicDescription::kPCMFormatFixed824 :
407 			ProcessBufferListsT<SInt32>(ioActionFlags, inBuffer, outBuffer, inFramesToProcess);
408 			break;
409 		case CAStreamBasicDescription::kPCMFormatInt16 :
410 			ProcessBufferListsT<SInt16>(ioActionFlags, inBuffer, outBuffer, inFramesToProcess);
411 			break;
412 		default :
413 			throw CAException(kAudio_UnimplementedError);
414 	}
415 
416 	return noErr;
417 }
418 
GetSampleRate()419 Float64		AUEffectBase::GetSampleRate()
420 {
421 	return GetOutput(0)->GetStreamFormat().mSampleRate;
422 }
423 
GetNumberOfChannels()424 UInt32		AUEffectBase::GetNumberOfChannels()
425 {
426 	return GetOutput(0)->GetStreamFormat().mChannelsPerFrame;
427 }
428 
429