// MixerHostAudio.m
// (removed extraction artifacts: file-size banner and concatenated line-number index)
  1. #import "MixerHostAudio.h"
  2. #import "TPCircularBuffer.h" // ring buffer
  3. #import "SNFCoreAudioUtils.h" // Chris Adamson's debug print util
  4. //#import "CAStreamBasicDescription.h"
  5. //#import "CAComponentDescription.h"
  6. //#import "CAXException.h"
  7. #import "api.h"
  8. #define FILE_PLAYER_FILE @"Sounds/dmxbeat"
  9. #define FILE_PLAYER_FILE_TYPE @"aiff"
  10. // preset and supporting audio files for MIDI sampler
  11. // preset file should be in resources
  12. // base aiff file should be in resources/sounds folder
  13. #define AU_SAMPLER_FILE @"lead"
  14. #define AU_SAMPLER_FILE_TYPE @"aif"
  15. #define AU_SAMPLER_PRESET_FILE @"lead"
  16. // function defs for fft code
  17. float MagnitudeSquared(float x, float y);
  18. void ConvertInt16ToFloat(MixerHostAudio *THIS, void *buf, float *outputBuf, size_t capacity);
  19. // function defs for smb fft code
  20. void smbPitchShift(float pitchShift, long numSampsToProcess, long fftFrameSize, long osamp, float sampleRate, float *indata, float *outdata);
  21. void smb2PitchShift(float pitchShift, long numSampsToProcess, long fftFrameSize,
  22. long osamp, float sampleRate, float *indata, float *outdata,
  23. FFTSetup fftSetup, float * frequency);
  24. // function defs for mic fx dsp methods used in callbacks
  25. void ringMod( void *inRefCon, UInt32 inNumberFrames, SInt16 *sampleBuffer );
  26. OSStatus fftPassThrough ( void *inRefCon, UInt32 inNumberFrames, SInt16 *sampleBuffer);
  27. OSStatus fftPitchShift ( void *inRefCon, UInt32 inNumberFrames, SInt16 *sampleBuffer);
  28. OSStatus simpleDelay ( void *inRefCon, UInt32 inNumberFrames, SInt16 *sampleBuffer);
  29. OSStatus movingAverageFilterFloat ( void *inRefCon, UInt32 inNumberFrames, SInt16 *sampleBuffer);
  30. OSStatus logFilter ( void *inRefCon, UInt32 inNumberFrames, SInt16 *sampleBuffer);
  31. OSStatus convolutionFilter ( void *inRefCon, UInt32 inNumberFrames, SInt16 *sampleBuffer);
  32. // function defs for audio processing to support callbacks
  33. void lowPassWindowedSincFilter( float *buf , float fc );
  34. float xslide(int sval, float x );
  35. float getSynthEnvelope( void * inRefCon );
  36. void fixedPointToSInt16( SInt32 * source, SInt16 * target, int length );
  37. void SInt16ToFixedPoint( SInt16 * source, SInt32 * target, int length );
  38. void SInt16To32( SInt16 * source, SInt32 * target);
  39. void SInt16To32JX( SInt16 * source, SInt32 * target, int length );
  40. void SInt32To16( SInt32 * source, SInt16 * target);
  41. float getMeanVolumeSint16( SInt16 * vector , int length );
  42. SInt32 get2To1Sample(SInt32 n1,SInt32 n2,float volume,float volRecord);
  43. // audio callbacks
  44. static OSStatus inputRenderCallback (void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData );
  45. static OSStatus synthRenderCallback (void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData );
  46. static OSStatus micLineInRenderCallback (void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData );
  47. void audioRouteChangeListenerCallback ( void *inUserData, AudioSessionPropertyID inPropertyID, UInt32 inPropertyValueSize, const void *inPropertyValue );
  48. // midi callbacks
  49. void MyMIDINotifyProc (const MIDINotification *message, void *refCon);
  50. static void MyMIDIReadProc(const MIDIPacketList *pktlist, void *refCon, void *connRefCon);
  51. // ring buffer buffer declarations
  52. // SInt16 circular delay buffer (used for echo effect)
  53. SInt16 *delayBuffer;
  54. TPCircularBufferRecord delayBufferRecord;
  55. NSLock *delayBufferRecordLock;
  56. SInt16 *tempDelayBuffer;
  57. Reverb* g_Reverb;
  58. // float circular filter buffer declarations (used for filters)
  59. float *circularFilterBuffer;
  60. TPCircularBufferRecord circularFilterBufferRecord;
  61. NSLock *circularFilterBufferRecordLock;
  62. float *tempCircularFilterBuffer;
  63. // end of declarations //
  64. // callback functions
  65. static OSStatus saveToFileCallback (void *inRefCon,
  66. AudioUnitRenderActionFlags *ioActionFlags,
  67. const AudioTimeStamp *inTimeStamp,
  68. UInt32 inBusNumber,
  69. UInt32 inNumberFrames,
  70. AudioBufferList *ioData){
  71. MixerHostAudio* THIS = (__bridge MixerHostAudio *)inRefCon; // scope reference that allows access to everything in MixerHostAudio class
  72. if(THIS.isPaused)
  73. return noErr;
  74. OSStatus status;// = AudioUnitRender(THIS.ioUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData);
  75. status = ExtAudioFileWriteAsync(THIS.audioFile, inNumberFrames, ioData);
  76. }
  77. //#pragma mark Mixer input bus 0 & 1 render callback (loops buffers)
// Mixer input bus render callback: supplies playback audio for the bus given by
// inBusNumber, either by reading directly from the sound's ExtAudioFile or by
// copying from samples previously loaded into memory.
//
// inRefCon        - the MixerHostAudio instance (bridged)
// inNumberFrames  - frames requested; ioData buffers 0/1 are left/right output
//
// Always returns noErr; on pause or end-of-sound the buffers stay zeroed (silence).
static OSStatus inputRenderCallback (void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
MixerHostAudio* THIS = (__bridge MixerHostAudio *)inRefCon; // scope reference that allows access to everything in MixerHostAudio class
// Convenience pointers into the output buffer list (buffer 0 = left, 1 = right).
AudioUnitSampleType *dataInLeft = ioData->mBuffers[0].mData;
AudioUnitSampleType *dataInRight = ioData->mBuffers[1].mData;
// Pre-zero both channels so any early exit (pause, past end of sound) emits silence.
memset(dataInRight,0, ioData->mBuffers[1].mDataByteSize);
memset(dataInLeft, 0, ioData->mBuffers[0].mDataByteSize);
if(THIS.isPaused){
// Paused: clear local references and return the already-silenced buffers.
THIS = nil;
dataInRight = NULL;
dataInLeft = NULL;
return noErr;
}
// Fetch the per-bus sound structure (frame count, sample position, data pointers).
soundStructPtr soundPtr = [THIS getSoundArray:inBusNumber];
UInt32 frameTotalForSound = soundPtr->frameCount;
BOOL isStereo = soundPtr->isStereo;
// Publish the current playback position in seconds for the UI.
THIS.currentTime = (double)soundPtr->sampleNumber/soundPtr->sampleRate;
if (soundPtr->sampleNumber < frameTotalForSound){
if(!THIS.isReadFileToMemory){
// Streaming path: read the next packets straight from the audio file into ioData.
UInt32 numberOfPacketsToRead = inNumberFrames;
OSStatus result;
result = ExtAudioFileRead (soundPtr->audioFile,&numberOfPacketsToRead,ioData);
if(result != noErr){
// Read errors are currently ignored; buffers were pre-zeroed so output is silence.
// NSLog(@"ExtAudioFileRead=%d",result);
// NSLog(@"mNumberBuffers=%d,%d",ioData->mNumberBuffers,ioData->mBuffers[0].mDataByteSize);
}
soundPtr->sampleNumber += numberOfPacketsToRead;
// Cache the freshly read samples back into the sound struct (args are dest, src).
memcpy((char*)soundPtr->audioDataLeft, dataInLeft, ioData->mBuffers[0].mDataByteSize);
if(isStereo)
memcpy((char*)soundPtr->audioDataRight, dataInRight, ioData->mBuffers[1].mDataByteSize);
}else{
// In-memory path: copy samples from the preloaded buffers, advancing sampleNumber.
AudioUnitSampleType *dataInLeft1;
AudioUnitSampleType *dataInRight1;
dataInLeft1 = soundPtr->audioDataLeft;
if (isStereo) dataInRight1 = soundPtr->audioDataRight;
UInt32 sampleNumber = soundPtr->sampleNumber;
for (UInt32 i = 0; i < inNumberFrames; i++) {
dataInLeft[i] = dataInLeft1[sampleNumber];
if (isStereo)
dataInRight[i] = dataInRight1[sampleNumber];
else
// Mono source: duplicate the left channel into the right output.
dataInRight[i] = dataInLeft1[sampleNumber];
sampleNumber++;
// Stop at end of sound; remaining frames keep their pre-zeroed silence.
if (sampleNumber >= frameTotalForSound)
break;
}
soundPtr->sampleNumber = sampleNumber;
// if(sampleNumber >= soundPtr->frameCount)
// [THIS.delegate playerItemDidReachEnd:nil];
}
// Output-channel routing for stereo sounds:
// outputChanelIndex == -1 -> play left channel on both outputs,
// any other nonzero value -> play right channel on both outputs.
if (isStereo){
if(THIS.outputChanelIndex!=0){
if(THIS.outputChanelIndex==-1)
memcpy(dataInRight,dataInLeft,ioData->mBuffers[0].mDataByteSize);
else
memcpy(dataInLeft,dataInRight,ioData->mBuffers[1].mDataByteSize);
}
}
}
// Clear local references (no-ops for correctness; kept from original).
THIS = nil;
dataInRight = NULL;
dataInLeft = NULL;
soundPtr = NULL;
return noErr;
}
  147. // synth callback - generates a sine wave with
  148. // freq = MixerHost.sinFreq
  149. // phase = MixerHost.sinPhase
  150. // note on = MixerHost.synthNoteOn
  151. // its a simple example of a synthesizer sound generator
  152. static OSStatus synthRenderCallback (void *inRefCon,
  153. AudioUnitRenderActionFlags *ioActionFlags,
  154. const AudioTimeStamp *inTimeStamp,
  155. UInt32 inBusNumber,
  156. UInt32 inNumberFrames,
  157. AudioBufferList *ioData){
  158. MixerHostAudio* THIS = (__bridge MixerHostAudio *)inRefCon; // scope reference that allows access to everything in MixerHostAudio class
  159. if(THIS.isPaused)
  160. return noErr;
  161. float freq = THIS.sinFreq; // get frequency data from instance variables
  162. float phase = THIS.sinPhase;
  163. float sinSignal; //
  164. float envelope; // scaling factor from envelope generator 0->1
  165. // NSLog(@"inside callback - freq: %f phase: %f", freq, phase );
  166. double phaseIncrement = 2 * M_PI * freq / THIS.graphSampleRate; // phase change per sample
  167. AudioSampleType *outSamples;
  168. outSamples = (AudioSampleType *) ioData->mBuffers[0].mData;
  169. // if a note isn't being triggered just fill the frames with zeroes and bail.
  170. // interesting note: when we didn't zero out the buffer, the microphone was
  171. // somehow activated on the synth channel... weird???
  172. //
  173. // synth note triggering is handled by envelope generator now but I left above comment - to illustrate
  174. // what can happen if your callback doesn't fill its output data buffers
  175. /*
  176. if( noteOn == NO ) {
  177. memset(outSamples, 0, inNumberFrames * sizeof(SInt16));
  178. return noErr;
  179. }
  180. */
  181. // build a sine wave (not a teddy bear)
  182. for (UInt32 i = 0; i < inNumberFrames; ++i) {
  183. sinSignal = sin(phase); // if we were using float samples this would be the value
  184. // scale to half of maximum volume level for integer samples
  185. // and use envelope value to determine instantaneous level
  186. // envelope = 1.0;
  187. envelope = getSynthEnvelope( inRefCon ); // envelope ranges from 0->1
  188. outSamples[i] = (SInt16) (((sinSignal * 32767.0f) / 2) * envelope);
  189. phase = phase + phaseIncrement; // increment phase
  190. if(phase >= (2 * M_PI * freq)) { // phase wraps around every cycle
  191. phase = phase - (2 * M_PI * freq);
  192. }
  193. }
  194. THIS.sinPhase = phase; // save for next time this callback is invoked
  195. return noErr;
  196. }
  197. #pragma mark -
  198. #pragma mark mic, line in Audio Rendering
  199. // callback for mic/lineIn input
  200. // this callback is now the clearinghouse for
  201. // DSP fx processing
  202. OSStatus micLineInCallback (void *inRefCon,
  203. AudioUnitRenderActionFlags *ioActionFlags,
  204. const AudioTimeStamp *inTimeStamp,
  205. UInt32 inBusNumber,
  206. UInt32 inNumberFrames,
  207. AudioBufferList *ioData){
  208. // set params & local variables
  209. // scope reference that allows access to everything in MixerHostAudio class
  210. MixerHostAudio *THIS = (__bridge MixerHostAudio *)inRefCon;
  211. if(THIS.isPaused){
  212. memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize);
  213. return noErr;
  214. }
  215. AudioUnit rioUnit = THIS.ioUnit; // io unit which has the input data from mic/lineIn
  216. int i; // loop counter
  217. OSStatus err; // error returns
  218. OSStatus renderErr;
  219. OSStatus status;
  220. UInt32 bus1 = 1; // input bus
  221. AudioSampleType *inSamplesLeft; // convenience pointers to sample data
  222. AudioSampleType *inSamplesRight;
  223. int isStereo; // c boolean - for deciding how many channels to process.
  224. int numberOfChannels; // 1 = mono, 2= stereo
  225. // Sint16 buffers to hold sample data after conversion
  226. SInt16 *out16SamplesLeft = THIS.conversion16BufferLeft;
  227. SInt16 *out16SamplesRight = THIS.conversion16BufferRight;
  228. SInt16 *sampleBuffer;
  229. SInt32* out32SamplesLeft = THIS.conversion32BufferLeft; //合成之后的32位数据
  230. SInt32* out32SamplesRight = THIS.conversion32BufferRight;//合成之前的32位数据
  231. // start the actual processing
  232. numberOfChannels = THIS.displayNumberOfInputChannels;
  233. isStereo = numberOfChannels > 1 ? 1 : 0; // decide stereo or mono
  234. // printf("isStereo: %d\n", isStereo);
  235. // NSLog(@"frames: %lu, bus: %lu",inNumberFrames, inBusNumber );
  236. // copy all the input samples to the callback buffer - after this point we could bail and have a pass through
  237. renderErr = AudioUnitRender(rioUnit, ioActionFlags,
  238. inTimeStamp, bus1, inNumberFrames, ioData);
  239. if (renderErr < 0) {
  240. return renderErr;
  241. }
  242. // but you get format errors if you set Sint16 samples in an ASBD with 2 channels
  243. // So... now to simplify things, we're going to get all input as 8.24 and just
  244. // convert it to SInt16 or float for processing
  245. //
  246. // There may be some 3 stage conversions here, ie., 8.24->Sint16->float
  247. // that could probably obviously be replaced by direct 8.24->float conversion
  248. //
  249. // convert to SInt16
  250. inSamplesLeft = (AudioSampleType *) ioData->mBuffers[0].mData; // left channel
  251. inSamplesRight = (AudioSampleType *) ioData->mBuffers[1].mData; // right channel
  252. out16SamplesLeft = inSamplesLeft;
  253. out16SamplesRight = inSamplesRight;
  254. /* fixedPointToSInt16(inSamplesLeft, out16SamplesLeft, inNumberFrames);
  255. if(isStereo) {
  256. fixedPointToSInt16(inSamplesRight, out16SamplesRight, inNumberFrames);
  257. }*/
  258. // get average input volume level for meter display
  259. //
  260. // (note: there's a vdsp function to do this but it works on float samples
  261. THIS.displayInputLevelLeft = getMeanVolumeSint16( out16SamplesLeft, inNumberFrames); // assign to instance variable for display
  262. if(isStereo) {
  263. THIS.displayInputLevelRight = getMeanVolumeSint16(out16SamplesRight, inNumberFrames); // assign to instance variable for display
  264. }
  265. //
  266. // get user mic/line FX selection
  267. //
  268. // so... none of these effects except fftPassthrough and delay (echo) are fast enough to
  269. // render in stereo at the default sample rate and buffer sizes - on the ipad2
  270. // This is kind of sad but I didn't really do any optimization
  271. // and there's a lot of wasteful conversion and duplication going on... so there is hope
  272. // for now, run the effects in mono
  273. // NSLog(@"inNumberFrames=:%d",inNumberFrames);
  274. if(THIS.isEffecter) { // if user toggled on mic fx
  275. if(isStereo) { // if stereo, combine left and right channels into left
  276. for( i = 0; i < inNumberFrames; i++ ) {
  277. out16SamplesLeft[i] = (SInt16) ((.5 * (float) out16SamplesLeft[i]) + (.5 * (float) out16SamplesRight[i]));
  278. }
  279. }
  280. sampleBuffer = out16SamplesLeft;
  281. // do effect based on user selection
  282. switch (THIS.micFxType) {
  283. case 0:
  284. //ringMod( inRefCon, inNumberFrames, sampleBuffer );
  285. break;
  286. case 1:
  287. err = fftPitchShift ( inRefCon, inNumberFrames, sampleBuffer);
  288. //setReverbParem(g_Reverb,0.4,0.6,0.45,0.6,0.6);
  289. //err = simpleDelay1(g_Reverb, inRefCon, inNumberFrames, sampleBuffer,THIS.graphSampleRate,isStereo+1);
  290. break;
  291. case 2:
  292. //err = fftPassThrough ( inRefCon, inNumberFrames, sampleBuffer);
  293. setReverbParem(g_Reverb,0.5,0.3,0.45,0.3,0.3);
  294. err = simpleDelay1(g_Reverb, inRefCon, inNumberFrames, sampleBuffer,THIS.graphSampleRate,isStereo+1);
  295. break;
  296. case 3:
  297. //err = simpleDelay ( inRefCon, inNumberFrames, sampleBuffer);
  298. // setReverbParem(g_Reverb,0.5,0.4,0.45,0.6,0.6);
  299. setReverbParem(g_Reverb,0.5,0.4,0.45,0.5,0.5);
  300. err = simpleDelay1(g_Reverb, inRefCon, inNumberFrames, sampleBuffer,THIS.graphSampleRate,isStereo+1);
  301. break;
  302. case 4:
  303. //err = movingAverageFilterFloat ( inRefCon, inNumberFrames, sampleBuffer);
  304. // setReverbParem(g_Reverb,0.4,0.5,0.45,0.7,0.7);
  305. setReverbParem(g_Reverb,0.5,0.4,0.45,0.7,0.7);
  306. err = simpleDelay1(g_Reverb, inRefCon, inNumberFrames, sampleBuffer,THIS.graphSampleRate,isStereo+1);
  307. break;
  308. case 5:
  309. //err = convolutionFilter ( inRefCon, inNumberFrames, sampleBuffer);
  310. break;
  311. default:
  312. break;
  313. }
  314. // If stereo, copy left channel (mono) results to right channel
  315. if(isStereo) {
  316. for(i = 0; i < inNumberFrames; i++ ) {
  317. out16SamplesRight[i] = out16SamplesLeft[i];
  318. }
  319. }
  320. }
  321. // convert back to 8.24 fixed point
  322. /*SInt16ToFixedPoint(out16SamplesLeft, inSamplesLeft, inNumberFrames);
  323. if(isStereo) {
  324. SInt16ToFixedPoint(out16SamplesRight, inSamplesRight, inNumberFrames);
  325. }*/
  326. //合成音轨:
  327. // Declare variables to point to the audio buffers. Their data type must match the buffer data type.
  328. //录音保存:
  329. BOOL b=NO;
  330. if(THIS.isOutputer){
  331. if(THIS.isMixSave && THIS.isPlayer){
  332. out16SamplesLeft = THIS.conversion16BufferLeft;
  333. soundStructPtr soundStructPointerArray = [THIS getSoundArray:0];
  334. UInt32 frameTotalForSound = soundStructPointerArray->frameCount;
  335. AudioUnitSampleType *dataInLeft;
  336. AudioUnitSampleType *dataInRight;
  337. dataInLeft = soundStructPointerArray->audioDataLeft;
  338. if (soundStructPointerArray->isStereo)
  339. dataInRight = soundStructPointerArray->audioDataRight;
  340. float volume = THIS.volumePlayer;
  341. // NSLog(@"%d,%d",THIS.isHeadset,THIS.isHeadsetTrue);
  342. if( THIS.isHeadset && THIS.isHeadsetTrue)
  343. volume = volume*2;
  344. else
  345. volume = volume;
  346. float volRecord;
  347. if( THIS.isIPad1 )
  348. volRecord = 2;
  349. else
  350. volRecord = 1;
  351. //NSLog(@"volRecord=%f",volRecord);
  352. UInt32 writeNumber = soundStructPointerArray->writeNumber;
  353. writeNumber = 0;//为测试
  354. for (UInt32 i = 0; i < inNumberFrames; ++i) {//总是和录音的左声道相加,忽略右声道
  355. if (writeNumber <= frameTotalForSound)//超过时长时不再相加,直接用录音
  356. {
  357. SInt16To32(&inSamplesLeft[i],&out32SamplesLeft[i]);
  358. SInt16To32(&inSamplesLeft[i],&out32SamplesRight[i]);
  359. //NSLog(@"out32SamplesLeft=%d,dataInLeft=%d",out32SamplesLeft[i],dataInRight[writeNumber]);
  360. if(THIS.isReadFileToMemory){
  361. out32SamplesLeft[i] = get2To1Sample(out32SamplesLeft[i],dataInLeft[writeNumber+soundStructPointerArray->sampleNumber],volume,volRecord);
  362. }else{
  363. if (soundStructPointerArray->isStereo){
  364. if(THIS.outputChanelIndex == -1)
  365. out32SamplesLeft[i] = get2To1Sample(out32SamplesLeft[i],dataInLeft[writeNumber],volume,volRecord);//录音,伴奏,伴奏音量,录音音量
  366. else
  367. out32SamplesLeft[i] = get2To1Sample(out32SamplesLeft[i],dataInRight[writeNumber],volume,volRecord);
  368. }else
  369. out32SamplesLeft[i] = get2To1Sample(out32SamplesLeft[i],dataInLeft[writeNumber],volume,volRecord);
  370. }
  371. //NSLog(@"out32SamplesLeft=%d,%d",out32SamplesLeft[i],n);
  372. SInt32To16(&out32SamplesLeft[i], &out16SamplesLeft[i]);
  373. }
  374. writeNumber++;
  375. }
  376. soundStructPointerArray->writeNumber = writeNumber;
  377. dataInLeft = NULL;
  378. dataInRight = NULL;
  379. soundStructPointerArray = NULL;
  380. b = YES;
  381. }
  382. AudioBufferList changeBufList;
  383. changeBufList.mNumberBuffers = 1;
  384. changeBufList.mBuffers[0].mNumberChannels = 1;
  385. changeBufList.mBuffers[0].mDataByteSize = ioData->mBuffers[0].mDataByteSize;
  386. changeBufList.mBuffers[0].mData = out16SamplesLeft;
  387. if(THIS.isOutputMp3)
  388. status = [THIS writeMp3Buffer:out16SamplesLeft nSamples:inNumberFrames];
  389. else
  390. status = ExtAudioFileWriteAsync(THIS.audioFile, inNumberFrames, &changeBufList);
  391. THIS.recordSamples += inNumberFrames;
  392. THIS.timeLenRecord = (double)THIS.recordSamples/THIS.graphSampleRate;
  393. if(status != noErr){
  394. THIS.isErroring = YES;
  395. // NSLog(@"ExtAudioFileWriteAsync=%d",status);
  396. }
  397. }
  398. ioData->mBuffers[0].mDataByteSize *= 2;
  399. if(THIS.isPlayMic){//录音时,回放
  400. if(!b){//未转换时
  401. SInt16ToFixedPoint(inSamplesLeft, out32SamplesRight, inNumberFrames);
  402. /*SInt16 max=0;
  403. for (UInt32 i = 0; i < inNumberFrames; ++i) {//总是和录音的左声道相加,忽略右声道
  404. SInt16 n=&inSamplesLeft[i];
  405. if (n <100 && n>-100)
  406. n = 0;
  407. if (n > max)
  408. max = n;
  409. SInt16To32(&n,&out32SamplesRight[i]);
  410. }*/
  411. //NSLog(@"max=%d",max);
  412. }
  413. }else
  414. memset(out32SamplesRight, 0, ioData->mBuffers[0].mDataByteSize);
  415. ioData->mBuffers[0].mData = out32SamplesRight;
  416. THIS = nil;
  417. inSamplesLeft = NULL;
  418. inSamplesRight = NULL;
  419. sampleBuffer = NULL;
  420. out16SamplesLeft = NULL;
  421. out16SamplesRight = NULL;
  422. out32SamplesLeft = NULL;
  423. return noErr; // return with samples in iOdata
  424. }
  425. SInt32 get2To1Sample(SInt32 n1,SInt32 n2,float volume,float volRecord){
  426. return (n1*volRecord+volume*n2)/2; //录音放大2倍,伴奏跟随音量调节
  427. }
  428. #pragma mark -
  429. #pragma mark Audio route change listener callback
  430. // Audio session callback function for responding to audio route changes. If playing back audio and
  431. // the user unplugs a headset or headphones, or removes the device from a dock connector for hardware
  432. // that supports audio playback, this callback detects that and stops playback.
  433. //
  434. // Refer to AudioSessionPropertyListener in Audio Session Services Reference.
  435. void audioRouteChangeListenerCallback (
  436. void *inUserData,
  437. AudioSessionPropertyID inPropertyID,
  438. UInt32 inPropertyValueSize,
  439. const void *inPropertyValue
  440. ) {
  441. // Ensure that this callback was invoked because of an audio route change
  442. if (inPropertyID != kAudioSessionProperty_AudioRouteChange) return;
  443. // This callback, being outside the implementation block, needs a reference to the MixerHostAudio
  444. // object, which it receives in the inUserData parameter. You provide this reference when
  445. // registering this callback (see the call to AudioSessionAddPropertyListener).
  446. MixerHostAudio *audioObject = (__bridge MixerHostAudio *) inUserData;
  447. // if application sound is not playing, there's nothing to do, so return.
  448. if (NO == audioObject.isPlaying) {
  449. // NSLog (@"Audio route change while application audio is stopped.");
  450. return;
  451. } else {
  452. // Determine the specific type of audio route change that occurred.
  453. CFDictionaryRef routeChangeDictionary = inPropertyValue;
  454. CFNumberRef routeChangeReasonRef =
  455. CFDictionaryGetValue (
  456. routeChangeDictionary,
  457. CFSTR (kAudioSession_AudioRouteChangeKey_Reason)
  458. );
  459. SInt32 routeChangeReason;
  460. CFNumberGetValue (
  461. routeChangeReasonRef,
  462. kCFNumberSInt32Type,
  463. &routeChangeReason
  464. );
  465. // "Old device unavailable" indicates that a headset or headphones were unplugged, or that
  466. // the device was removed from a dock connector that supports audio output. In such a case,
  467. // pause or stop audio (as advised by the iOS Human Interface Guidelines).
  468. if (routeChangeReason == kAudioSessionRouteChangeReason_OldDeviceUnavailable) {
  469. // NSLog (@"Audio output device was removed; stopping audio playback.");
  470. //NSString *MixerHostAudioObjectPlaybackStateDidChangeNotification = @"MixerHostAudioObjectPlaybackStateDidChangeNotification";
  471. //[g_notify postNotificationName: MixerHostAudioObjectPlaybackStateDidChangeNotification object: audioObject];
  472. } else {
  473. // NSLog (@"A route change occurred that does not require stopping application audio.");
  474. }
  475. }
  476. }
  477. // recursive logarithmic smoothing (low pass filter)
  478. // based on algorithm in Max/MSP slide object
  479. // http://cycling74.com
  480. //
  481. float xslide(int sval, float x ) {
  482. static int firstTime = TRUE;
  483. static float yP;
  484. float y;
  485. if(sval <= 0) {
  486. sval = 1;
  487. }
  488. if(firstTime) {
  489. firstTime = FALSE;
  490. yP = x;
  491. }
  492. y = yP + ((x - yP) / sval);
  493. yP = y;
  494. return(y);
  495. }
// pitch shifter using stft - based on dsp dimension articles and source
// http://www.dspdimension.com/admin/pitch-shifting-using-the-ft/
//
// Called by the audio callback with one slice of SInt16 frames, which it
// pitch-shifts IN PLACE (SInt16 -> float, smb2PitchShift, float -> SInt16).
// Shift amount comes from THIS.micFxControl; the pitch detected during
// analysis is published to THIS.displayInputFrequency for the UI.
OSStatus fftPitchShift (
    void *inRefCon,          // scope (MixerHostAudio)
    UInt32 inNumberFrames,   // number of frames in this slice
    SInt16 *sampleBuffer) {  // frames (sample data)

    // scope reference that allows access to everything in MixerHostAudio class
    MixerHostAudio *THIS = (__bridge MixerHostAudio *)inRefCon;

    float *outputBuffer = THIS.outputBuffer;      // sample buffers
    float *analysisBuffer = THIS.analysisBuffer;
    FFTSetup fftSetup = THIS.fftSetup;            // fft setup structure needed by vdsp functions
    uint32_t stride = 1;                          // interleaving factor for vdsp functions
    int bufferCapacity = THIS.fftBufferCapacity;  // maximum size of fft buffers
    float pitchShift = 1.0;                       // pitch shift factor 1=normal, range is .5->2.0
    long osamp = 4;                               // oversampling factor
    long fftSize = 1024;                          // fft size
    float frequency;                              // analysis frequency result

    // ConvertInt16ToFloat
    // NOTE(review): converts bufferCapacity samples, not inNumberFrames —
    // this assumes bufferCapacity <= inNumberFrames (see the comment block
    // in fftPassThrough); confirm the buffer sizing before changing either.
    vDSP_vflt16((SInt16 *) sampleBuffer, stride, (float *) analysisBuffer, stride, bufferCapacity );

    // run the pitch shift
    // scale the fx control 0->1 to range of pitchShift .5->2.0
    pitchShift = (THIS.micFxControl * 1.5) + .5;

    // osamp should be at least 4, but at this time my ipod touch gets very unhappy with
    // anything greater than 2
    osamp = 4;
    fftSize = 1024; // this seems to work in real time since we are actually doing the fft on smaller windows

    smb2PitchShift( pitchShift , (long) inNumberFrames,
        fftSize, osamp, (float) THIS.graphSampleRate,
        (float *) analysisBuffer , (float *) outputBuffer,
        fftSetup, &frequency);

    // display detected pitch
    THIS.displayInputFrequency = (int) frequency;

    // very very cool effect but lets skip it temporarily
    // THIS.sinFreq = THIS.frequency; // set synth frequency to the pitch detected by microphone

    // now convert from float to Sint16, writing the result back over the input slice
    vDSP_vfixr16((float *) outputBuffer, stride, (SInt16 *) sampleBuffer, stride, bufferCapacity );

    return noErr;
}
  534. #pragma mark -
  535. #pragma mark fft passthrough function
  536. // called by audio callback function with a slice of sample frames
  537. //
  538. // note this is nearly identical to the code example in apple developer lib at
  539. // http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/vDSP_Programming_Guide/SampleCode/SampleCode.html%23//apple_ref/doc/uid/TP40005147-CH205-CIAEJIGF
  540. //
  541. // this code does a passthrough from mic input to mixer bus using forward and inverse fft
  542. // it also analyzes frequency with the freq domain data
  543. //-------------------------------------------------------------
// FFT passthrough: accumulates incoming SInt16 slices into a working buffer
// and, once the buffer is full, runs a forward FFT, estimates the dominant
// frequency for display, then inverse-FFTs back and overwrites the slice.
//
// Nearly identical to the vDSP sample code at
// http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/vDSP_Programming_Guide/SampleCode/SampleCode.html%23//apple_ref/doc/uid/TP40005147-CH205-CIAEJIGF
//
// note: the fx control slider does nothing during fft passthrough
OSStatus fftPassThrough ( void *inRefCon,        // scope reference for external data
                         UInt32 inNumberFrames,  // number of frames to process
                         SInt16 *sampleBuffer)   // frame buffer (processed in place)
{
    // scope reference that allows access to everything in MixerHostAudio class
    MixerHostAudio *THIS = (__bridge MixerHostAudio *)inRefCon;

    COMPLEX_SPLIT A = THIS.fftA;          // split-complex working buffers
    void *dataBuffer = THIS.dataBuffer;   // accumulation buffer for incoming samples
    float *outputBuffer = THIS.outputBuffer;
    float *analysisBuffer = THIS.analysisBuffer;
    FFTSetup fftSetup = THIS.fftSetup;    // fft structure to support vdsp functions

    // fft params
    uint32_t log2n = THIS.fftLog2n;
    uint32_t n = THIS.fftN;
    uint32_t nOver2 = THIS.fftNOver2;
    uint32_t stride = 1;
    int bufferCapacity = THIS.fftBufferCapacity;
    SInt16 index = THIS.fftIndex;         // current fill position in dataBuffer

    // This logic assumes bufferCapacity (maxFrames from the fft setup) is less
    // than or equal to inNumberFrames (set by the AV session IO buffer duration).
    // If the fft buffer size equals inNumberFrames, this buffer-filling step is
    // unnecessary; keeping the two equal avoids overlapping-buffer handling.
    //
    // Fill the buffer with sampled data; when it fills, run the fft.
    int read = bufferCapacity - index;    // space left in the accumulation buffer
    if (read > inNumberFrames) {
        // still filling: append this slice and wait for more
        memcpy((SInt16 *)dataBuffer + index, sampleBuffer, inNumberFrames * sizeof(SInt16));
        THIS.fftIndex += inNumberFrames;
    } else {
        // buffer will be full after this copy — perform the FFT
        memcpy((SInt16 *)dataBuffer + index, sampleBuffer, read * sizeof(SInt16));
        // Reset the index.
        THIS.fftIndex = 0;

        // *************** FFT ***************
        // convert Sint16 to floating point
        vDSP_vflt16((SInt16 *) dataBuffer, stride, (float *) outputBuffer, stride, bufferCapacity );

        // Look at the real signal as an interleaved complex vector by casting it,
        // then call vDSP_ctoz to get a split complex vector (even-odd split for
        // a real signal).
        vDSP_ctoz((COMPLEX*)outputBuffer, 2, &A, 1, nOver2);

        // Carry out a Forward FFT transform.
        vDSP_fft_zrip(fftSetup, &A, stride, log2n, FFT_FORWARD);

        // The output is in split real form; vDSP_ztoc re-interleaves it for analysis.
        vDSP_ztoc(&A, 1, (COMPLEX *)analysisBuffer, 2, nOver2);

        // For display purposes: find the dominant frequency by taking the
        // magnitude squared per bin and keeping the loudest. This isn't precise
        // and doesn't necessarily find the fundamental, but it's quick and
        // sort of works. (There are vDSP functions for the amplitude calcs.)
        float dominantFrequency = 0;
        int bin = -1;
        for (int i=0; i<n; i+=2) {
            float curFreq = MagnitudeSquared(analysisBuffer[i], analysisBuffer[i+1]);
            if (curFreq > dominantFrequency) {
                dominantFrequency = curFreq;
                bin = (i+1)/2;
            }
        }
        // convert the winning bin index to Hz
        dominantFrequency = bin*(THIS.graphSampleRate/bufferCapacity);
        // printf("Dominant frequency: %f \n" , dominantFrequency);
        THIS.displayInputFrequency = (int) dominantFrequency; // publish detected frequency

        // Carry out an inverse FFT transform.
        vDSP_fft_zrip(fftSetup, &A, stride, log2n, FFT_INVERSE );

        // scale it — vDSP's real FFT round trip gains a factor of 2n
        float scale = (float) 1.0 / (2 * n);
        vDSP_vsmul(A.realp, 1, &scale, A.realp, 1, nOver2 );
        vDSP_vsmul(A.imagp, 1, &scale, A.imagp, 1, nOver2 );

        // convert from split complex back to interleaved form
        vDSP_ztoc(&A, 1, (COMPLEX *) outputBuffer, 2, nOver2);

        // now convert from float to Sint16, overwriting the caller's slice
        vDSP_vfixr16((float *) outputBuffer, stride, (SInt16 *) sampleBuffer, stride, bufferCapacity );
    }
    return noErr;
}
  633. // ring modulator effect - for SInt16 samples
  634. //
  635. // called from callback function that passes in a slice of frames
  636. //
  637. void ringMod(
  638. void *inRefCon, // scope (MixerHostAudio)
  639. UInt32 inNumberFrames, // number of frames in this slice
  640. SInt16 *sampleBuffer) { // frames (sample data)
  641. // scope reference that allows access to everything in MixerHostAudio class
  642. MixerHostAudio* THIS = (__bridge MixerHostAudio *)inRefCon;
  643. UInt32 frameNumber; // current frame number for looping
  644. float theta; // for frequency calculation
  645. static float phase = 0; // for frequency calculation
  646. float freq; // etc.,
  647. AudioSampleType *outSamples; // convenience pointer to result samples
  648. outSamples = (AudioSampleType *) sampleBuffer; // pointer to samples
  649. freq = (THIS.micFxControl * 4000) + .00001; // get freq from fx control slider
  650. // .00001 prevents divide by 0
  651. // loop through the samples
  652. for (frameNumber = 0; frameNumber < inNumberFrames; ++frameNumber) {
  653. theta = phase * M_PI * 2; // convert to radians
  654. outSamples[frameNumber] = (AudioSampleType) (sin(theta) * outSamples[frameNumber]);
  655. phase += 1.0 / (THIS.graphSampleRate / freq); // increment phase
  656. if (phase > 1.0) { // phase goes from 0 -> 1
  657. phase -= 1.0;
  658. }
  659. }
  660. }
  661. // simple AR envelope generator for synth note
  662. //
  663. // for now, attack and release value params hardcoded in this function
  664. //
  665. #define ENV_OFF 0
  666. #define ENV_ATTACK 1
  667. #define ENV_RELEASE 2
  668. float getSynthEnvelope( void * inRefCon ) {
  669. MixerHostAudio* THIS = (__bridge MixerHostAudio *)inRefCon; // access to mixerHostAudio scope
  670. static int state = ENV_OFF; // current state
  671. static int keyPressed = 0; // current(previous) state of key
  672. static float envelope = 0.0; // current envelope value 0->1
  673. float attack = 1000.0; // attack time in samples
  674. float release = 40000.0; // release time in samples
  675. float attackStep; // amount to increment each sample during attack phase
  676. float releaseStep; // amount to decrement each sample during release phase
  677. int newKeyState; // new on/off state of key
  678. // start
  679. attackStep = 1.0 / attack; // calculate attack and release steps
  680. releaseStep = 1.0 / release;
  681. newKeyState = THIS.synthNoteOn == YES ? 1 : 0;
  682. // printf("envelope: %f, state: %d, keyPressed: %d, newKeyState: %d\n", envelope, state, keyPressed, newKeyState);
  683. if(keyPressed == 0) { // key has been up
  684. if(newKeyState == 0) { // if key is still up
  685. switch(state)
  686. {
  687. case ENV_RELEASE:
  688. // printf("dec: env: %f, rs: %f\n", envelope, releaseStep );
  689. envelope -= releaseStep;
  690. if(envelope <= 0.) {
  691. envelope = 0.0;
  692. state = ENV_OFF;
  693. }
  694. break;
  695. default:
  696. state = ENV_OFF; // this should already be the case
  697. envelope = 0.0;
  698. break;
  699. }
  700. }
  701. else { // key was just pressed
  702. keyPressed = 1; // save new key state
  703. state = ENV_ATTACK; // change state to attack
  704. }
  705. }
  706. else { // key has been down
  707. if(newKeyState == 0) { // if key was just released
  708. keyPressed = 0; // save new key state
  709. state = ENV_RELEASE;
  710. }
  711. else { // key is still down
  712. switch(state)
  713. {
  714. case ENV_ATTACK:
  715. // printf("inc: env: %f, as: %f\n", envelope, attackStep );
  716. envelope += attackStep;
  717. if (envelope >= 1.0) {
  718. envelope = 1.0;
  719. }
  720. break;
  721. default:
  722. state = ENV_ATTACK; // this should already be the case
  723. break;
  724. }
  725. }
  726. }
  727. return (envelope);
  728. }
// simple (one tap) delay using a ring buffer
//
// Called by the audio callback with a slice of SInt16 frames. Writes the
// incoming slice into a circular delay buffer, positions the read tail a
// control-dependent number of slices behind the head, copies the delayed
// samples into a temp buffer, and mixes them 60/40 with the dry input,
// in place. Uses the file-scope delayBufferRecord / delayBuffer /
// tempDelayBuffer and the "Anywhere" TPCircularBuffer variants.
OSStatus simpleDelay (
                      void *inRefCon,         // scope reference
                      UInt32 inNumberFrames,  // number of frames to process
                      SInt16 *sampleBuffer)   // frame data (processed in place)
{
    // set all the params
    MixerHostAudio *THIS = (__bridge MixerHostAudio *)inRefCon; // scope reference that allows access to everything in MixerHostAudio class
    UInt32 i;                              // loop counter
    // UInt32 averageVolume = 0;           // for tracking microphone level
    int32_t tail;                          // tail of ring buffer (read pointer)
    // int32_t head;                       // head of ring buffer (write pointer)
    SInt16 *targetBuffer, *sourceBuffer;   // convenience pointers to sample data
    SInt16 *buffer;                        // working pointer into the ring
    int sampleCount = 0;                   // number of samples processed in ring buffer
    int samplesToCopy = inNumberFrames;    // total number of samples to process
    int32_t length;                        // length of ring buffer
    int32_t delayLength;                   // size of delay in samples
    int delaySlices;                       // number of slices to delay by

    // Put audio into the circular delay buffer: write incoming samples at the
    // current head position; head is incremented by inNumberFrames.
    // The logic differs from the usual circular buffer because we don't care
    // whether the head catches up to the tail — the tail position is set
    // manually from the delay length on every call.
    samplesToCopy = inNumberFrames;
    sourceBuffer = sampleBuffer;
    length = TPCircularBufferLength(&delayBufferRecord);
    // printf("length: %d\n", length );
    // [delayBufferRecordLock lock]; // skip locks
    while(samplesToCopy > 0) {
        // copy in contiguous chunks, splitting at the physical end of the ring
        sampleCount = MIN(samplesToCopy, length - TPCircularBufferHead(&delayBufferRecord));
        if(sampleCount == 0) {
            break;
        }
        buffer = delayBuffer + TPCircularBufferHead(&delayBufferRecord);
        memcpy( buffer, sourceBuffer, sampleCount*sizeof(SInt16)); // actual copy
        sourceBuffer += sampleCount;
        samplesToCopy -= sampleCount;
        TPCircularBufferProduceAnywhere(&delayBufferRecord, sampleCount); // this increments head
    }
    // head = TPCircularBufferHead(&delayBufferRecord);
    // printf("new head is %d\n", head );
    // [THIS.delayBufferRecordLock unlock]; // skip lock because processing is local

    // Now calculate where to put the tail — note this will probably blow up
    // if the circular buffer isn't big enough for the delay.
    delaySlices = (int) (THIS.micFxControl * 80);   // fx control 0..1 -> 0..80 slices
    delayLength = delaySlices * inNumberFrames;     // delay expressed in samples
    // printf("delayLength: %d\n", delayLength);
    tail = TPCircularBufferHead(&delayBufferRecord) - delayLength;
    if(tail < 0) {
        tail = length + tail;  // wrap a negative tail back into the ring
    }
    // printf("new tail is %d", tail );
    TPCircularBufferSetTailAnywhere(&delayBufferRecord, tail);

    targetBuffer = tempDelayBuffer; // tail data will get copied into temporary buffer
    samplesToCopy = inNumberFrames;

    // Pull audio from the delay buffer, in contiguous chunks.
    // [delayBufferRecordLock lock]; // skip locks
    // This is the tricky part of the ring buffer where we break the circular
    // illusion and do linear housekeeping: near the physical end of the
    // buffer the samples are copied out in 2 steps.
    while ( samplesToCopy > 0 ) {
        sampleCount = MIN(samplesToCopy, length - TPCircularBufferTail(&delayBufferRecord));
        if ( sampleCount == 0 ) {
            break;
        }
        // set pointer based on location of the tail
        buffer = delayBuffer + TPCircularBufferTail(&delayBufferRecord);
        // printf("\ncopying %d to temp, head: %d, tail %d", sampleCount, head, tail );
        memcpy(targetBuffer, buffer, sampleCount*sizeof(SInt16)); // actual copy
        targetBuffer += sampleCount;    // move up target pointer
        samplesToCopy -= sampleCount;   // keep track of what's already written
        TPCircularBufferConsumeAnywhere(&delayBufferRecord, sampleCount); // this increments tail
    }
    // [THIS.delayBufferRecordLock unlock]; // skip locks

    // convenience pointers for looping
    AudioSampleType *outSamples;
    outSamples = (AudioSampleType *) sampleBuffer;

    // debug test to see if anything is in the delay buffer, by calculating
    // the mean volume of the buffer and displaying it:
    // for ( i = 0; i < inNumberFrames ; i++ ) {
    //     averageVolume += abs((int) tempDelayBuffer[i]);
    // }
    // THIS.micLevel = averageVolume / inNumberFrames;
    // printf("\naverageVolume = %lu", averageVolume);

    // mix the delay buffer with the input buffer:
    // .4 * input signal + .6 * delayed signal
    for ( i = 0; i < inNumberFrames ; i++ ) {
        outSamples[i] = (.4 * outSamples[i]) + (.6 * tempDelayBuffer[i]);
    }
    return noErr;
}
  828. // logarithmic smoothing (low pass) filter
  829. // based on algorithm in Max/MSP slide object
  830. // http://cycling74.com
  831. //
  832. // called by callback with sample data in ioData
  833. //
  834. OSStatus logFilter (
  835. void *inRefCon, // scope reference
  836. UInt32 inNumberFrames, // number of frames to process
  837. SInt16 *sampleBuffer) // frame data
  838. {
  839. // set params
  840. // scope reference that allows access to everything in MixerHostAudio class
  841. MixerHostAudio *THIS = (__bridge MixerHostAudio *)inRefCon;
  842. int i; // loop counter
  843. SInt16 *buffer;
  844. int slide; // smoothing factor (1 = no smoothing)
  845. // map fx control slider 0->1 to 1->15 for slide range
  846. slide = (int) (THIS.micFxControl * 14) + 1;
  847. buffer = sampleBuffer;
  848. // logarihmic filter
  849. for(i = 0 ; i < inNumberFrames; i++ ) {
  850. sampleBuffer[i] = (SInt16) xslide( slide, (float) buffer[i]);
  851. }
  852. return noErr;
  853. }
// recursive Moving Average filter (float)
// from http://www.dspguide.com/ — table 15-2
//
// Called by the audio callback with a slice of SInt16 frames, which it
// low-passes in place. The slice is converted to float, pushed through a
// circular buffer (so the previous slice's tail provides the zero-phase
// padding the filter window needs), then averaged with the O(1)-per-sample
// recursive update and converted back to SInt16.
//
// note - the integer version didn't work, but this float version works
// fine; the integer version causes clipping regardless of length.
OSStatus movingAverageFilterFloat (
                                   void *inRefCon,         // scope reference
                                   UInt32 inNumberFrames,  // number of frames to process
                                   SInt16 *sampleBuffer)   // frame data (processed in place)
{
    // set all the params
    MixerHostAudio *THIS = (__bridge MixerHostAudio *)inRefCon; // scope reference that allows access to everything in MixerHostAudio class
    int i;                                 // loop counter
    // UInt32 averageVolume = 0;           // for tracking microphone level
    float *analysisBuffer = THIS.analysisBuffer; // working sample data buffers
    size_t bufferCapacity = THIS.fftBufferCapacity;
    int32_t tail;                          // tail of ring buffer (read pointer)
    float *targetBuffer, *sourceBuffer;    // convenience pointers for sample data
    float *buffer;                         // working pointer into the ring
    int sampleCount = 0;                   // number of samples read while processing ring buffer
    int samplesToCopy = inNumberFrames;    // total number of samples to process in ring buffer
    int32_t length;                        // length of ring buffer
    int32_t delayLength;                   // samples to pull back out (slice + filter padding)
    int filterLength;                      // size of filter (in samples)
    int middle;                            // middle of filter
    float acc;                             // accumulator for moving average calculation
    float *resultBuffer;                   // output
    int stride = 1;                        // interleaving factor for vdsp functions
    // convenience pointers for looping
    float *signalBuffer;

    // on first pass, the head must be far enough into the ring buffer that
    // there is enough zero padding to process the incoming signal data

    // set filter size from the mic fx control; forced odd so the window has
    // a well-defined center sample
    filterLength = (int) (THIS.micFxControl * 30) + 3;
    if((filterLength % 2) == 0) { // if even
        filterLength += 1;        // make it odd
    }
    // printf("filterLength %d\n", filterLength );
    // filterLength = 51;
    middle = (filterLength - 1) / 2;

    // convert vector to float (ConvertInt16ToFloat)
    // NOTE(review): converts bufferCapacity samples, not inNumberFrames —
    // assumes bufferCapacity <= inNumberFrames; confirm buffer sizing.
    vDSP_vflt16((SInt16 *) sampleBuffer, stride, (float *) analysisBuffer, stride, bufferCapacity );

    // Put audio into the circular buffer: write incoming samples at the
    // current head position; head is incremented by inNumberFrames.
    // The logic differs from the usual circular buffer because we don't care
    // whether the head catches up to the tail — all processing happens within
    // this function, so the tail position gets reset manually each time.
    samplesToCopy = inNumberFrames;
    sourceBuffer = analysisBuffer;
    length = TPCircularBufferLength(&circularFilterBufferRecord);
    // printf("length: %d\n", length );
    // [delayBufferRecordLock lock]; // skip locks
    while(samplesToCopy > 0) {
        // copy in contiguous chunks, splitting at the physical end of the ring
        sampleCount = MIN(samplesToCopy, length - TPCircularBufferHead(&circularFilterBufferRecord));
        if(sampleCount == 0) {
            break;
        }
        buffer = circularFilterBuffer + TPCircularBufferHead(&circularFilterBufferRecord);
        memcpy( buffer, sourceBuffer, sampleCount*sizeof(float)); // actual copy
        sourceBuffer += sampleCount;
        samplesToCopy -= sampleCount;
        TPCircularBufferProduceAnywhere(&circularFilterBufferRecord, sampleCount); // this increments head
    }
    // head = TPCircularBufferHead(&delayBufferRecord);
    // printf("new head is %d\n", head );
    // [THIS.delayBufferRecordLock unlock]; // skip lock because processing is local

    // Now calculate where to put the tail — note this will probably blow up
    // if the circular buffer isn't big enough for the delay.
    // delaySlices = (int) (THIS.micFxControl * 80);
    // pull back the slice plus (filterLength - 1) samples of history so the
    // window can slide over the slice boundaries
    delayLength = (inNumberFrames + filterLength) - 1;
    // printf("delayLength: %d\n", delayLength);
    tail = TPCircularBufferHead(&circularFilterBufferRecord) - delayLength;
    if(tail < 0) {
        tail = length + tail;  // wrap a negative tail back into the ring
    }
    // printf("new tail is %d", tail );
    TPCircularBufferSetTailAnywhere(&circularFilterBufferRecord, tail);

    targetBuffer = tempCircularFilterBuffer; // tail data will get copied into temporary buffer
    samplesToCopy = delayLength;

    // Pull audio from the ring, in contiguous chunks.
    // [delayBufferRecordLock lock]; // skip locks
    // This is the tricky part of the ring buffer where we break the circular
    // illusion and do linear housekeeping: near the physical end of the
    // buffer the samples are copied out in 2 steps.
    while ( samplesToCopy > 0 ) {
        sampleCount = MIN(samplesToCopy, length - TPCircularBufferTail(&circularFilterBufferRecord));
        if ( sampleCount == 0 ) {
            break;
        }
        // set pointer based on location of the tail
        buffer = circularFilterBuffer + TPCircularBufferTail(&circularFilterBufferRecord);
        // printf("\ncopying %d to temp, head: %d, tail %d", sampleCount, head, tail );
        memcpy(targetBuffer, buffer, sampleCount*sizeof(float)); // actual copy
        targetBuffer += sampleCount;    // move up target pointer
        samplesToCopy -= sampleCount;   // keep track of what's already written
        TPCircularBufferConsumeAnywhere(&circularFilterBufferRecord, sampleCount); // this increments tail
    }
    // [THIS.delayBufferRecordLock unlock]; // skip locks

    // Now there are enough samples in the temp buffer to actually run the
    // filter: e.g. with a 1024-sample slice and filterLength 101 there are
    // 1124 samples available.
    signalBuffer = tempCircularFilterBuffer;
    resultBuffer = THIS.outputBuffer;

    // seed the accumulator: e.g. find y[50] by averaging points x[0] to x[100]
    acc = 0;
    for(i = 0; i < filterLength; i++ ) {
        acc += signalBuffer[i];
    }
    resultBuffer[0] = (float) acc / filterLength;

    // recursive moving average filter: each step adds the sample entering the
    // window and subtracts the one leaving it (table 15-2 of dspguide)
    middle = (filterLength - 1) / 2;
    for ( i = middle + 1; i < (inNumberFrames + middle) ; i++ ) {
        acc = acc + signalBuffer[i + middle] - signalBuffer[i - (middle + 1)];
        resultBuffer[i - middle] = (float) acc / filterLength;
    }
    // printf("last i-middle is: %d\n", i - middle);

    // now convert from float to Sint16, overwriting the caller's slice
    vDSP_vfixr16((float *) resultBuffer, stride, (SInt16 *) sampleBuffer, stride, bufferCapacity );
    return noErr;
}
  980. // 101 point windowed sinc lowpass filter from http://www.dspguide.com/
  981. // table 16-1
  982. void lowPassWindowedSincFilter( float *buf , float fc ) {
  983. // re-calculate 101 point lowpass filter kernel
  984. int i;
  985. int m = 100;
  986. float sum = 0;
  987. for( i = 0; i < 101 ; i++ ) {
  988. if((i - m / 2) == 0 ) {
  989. buf[i] = 2 * M_PI * fc;
  990. }
  991. else {
  992. buf[i] = sin(2 * M_PI * fc * (i - m / 2)) / (i - m / 2);
  993. }
  994. buf[i] = buf[i] * (.54 - .46 * cos(2 * M_PI * i / m ));
  995. }
  996. // normalize for unity gain at dc
  997. for ( i = 0 ; i < 101 ; i++ ) {
  998. sum = sum + buf[i];
  999. }
  1000. for ( i = 0 ; i < 101 ; i++ ) {
  1001. buf[i] = buf[i] / sum;
  1002. }
  1003. }
// Convolution Filter example (float)
// called by callback with a slice of sample data in ioData
//
// Applies a 101-tap windowed-sinc lowpass FIR (cutoff driven by the mic fx
// slider) to one slice of SInt16 samples, in place. The slice is converted
// to float, pushed through a circular buffer so the previous slice's last
// (filterLength - 1) samples are available for the convolution overlap,
// convolved with vDSP_conv, then converted back to SInt16.
//
// NOTE(review): locks around the ring buffer are deliberately skipped
// because all reads/writes happen inside this one call.
OSStatus convolutionFilter (
void *inRefCon, // scope reference
UInt32 inNumberFrames, // number of frames to process
SInt16 *sampleBuffer) // frame data
{
// set all the params
MixerHostAudio *THIS = (__bridge MixerHostAudio *)inRefCon; // scope reference that allows access to everything in MixerHostAudio class
// int i; // loop counter
// UInt32 averageVolume = 0; // for tracking microphone level
float *analysisBuffer = THIS.analysisBuffer; // working data buffers
size_t bufferCapacity = THIS.fftBufferCapacity;
int32_t tail; // tail of ring buffer (read pointer)
// int32_t head; // head of ring buffer (write pointer)
float *targetBuffer, *sourceBuffer;
// static BOOL firstTime = YES; // flag for some buffer initialization
float *buffer;
int sampleCount = 0;
int samplesToCopy = inNumberFrames;
int32_t length;
int32_t delayLength;
// int delaySlices; // number of slices to delay by
// int filterLength;
// int middle;
// float acc; // accumulator for moving average calculation
// float *resultBuffer; // output
int stride = 1;
// convolution stuff
float *filterBuffer = THIS.filterBuffer; // impulse response buffer
int filterLength = THIS.filterLength; // length of filterBuffer
float *signalBuffer = THIS.signalBuffer; // signal buffer
// int signalLength = THIS.signalLength; // signal length
float *resultBuffer = THIS.resultBuffer; // result buffer
int resultLength = THIS.resultLength; // result length
int filterStride = -1; // -1 = convolution, 1 = correlation
float fc; // cutoff frequency
// hard-coded sizes override the property values read above
// (assumes a 1024-frame slice - TODO confirm against buffer duration setup)
resultLength = 1024;
filterLength = 101;
// get mix fx control for cutoff freq (fc)
// maps slider 0..1 onto roughly 0.001..0.181 (normalized frequency)
fc = (THIS.micFxControl * .18) + .001;
// make filter with this fc - kernel is recomputed every slice
lowPassWindowedSincFilter( filterBuffer, fc);
// Convert input signal from Int16ToFloat
vDSP_vflt16((SInt16 *) sampleBuffer, stride, (float *) analysisBuffer, stride, bufferCapacity );
// Put audio into circular delay buffer
// write incoming samples into the ring at the current head position
// head is incremented by inNumberFrames
// The logic is a bit different than usual circular buffer because we don't care
// whether the head catches up to the tail - because we're doing all the processing
// within this function. So tail position gets reset manually each time.
samplesToCopy = inNumberFrames;
sourceBuffer = analysisBuffer;
length = TPCircularBufferLength(&circularFilterBufferRecord);
// printf("length: %d\n", length );
// [delayBufferRecordLock lock]; // skip locks
while(samplesToCopy > 0) {
sampleCount = MIN(samplesToCopy, length - TPCircularBufferHead(&circularFilterBufferRecord));
if(sampleCount == 0) {
break;
}
buffer = circularFilterBuffer + TPCircularBufferHead(&circularFilterBufferRecord);
memcpy( buffer, sourceBuffer, sampleCount*sizeof(float)); // actual copy
sourceBuffer += sampleCount;
samplesToCopy -= sampleCount;
TPCircularBufferProduceAnywhere(&circularFilterBufferRecord, sampleCount); // this increments head
}
// head = TPCircularBufferHead(&delayBufferRecord);
// printf("new head is %d\n", head );
// [THIS.delayBufferRecordLock unlock]; // skip lock because processing is local
// Now we need to calculate where to put the tail - note this will probably blow
// up if you don't make the circular buffer big enough for the delay
// delaySlices = (int) (THIS.micFxControl * 80);
// read back this slice PLUS the filterLength-1 samples before it, so the
// convolution has its full overlap from the previous slice
delayLength = (inNumberFrames + filterLength) - 1;
// printf("delayLength: %d\n", delayLength);
tail = TPCircularBufferHead(&circularFilterBufferRecord) - delayLength;
if(tail < 0) {
tail = length + tail; // wrap the read pointer around the ring
}
// printf("new tail is %d", tail );
TPCircularBufferSetTailAnywhere(&circularFilterBufferRecord, tail);
// targetBuffer = tempCircularFilterBuffer; // tail data will get copied into temporary buffer
targetBuffer = signalBuffer; // tail data will get copied into temporary buffer
samplesToCopy = delayLength;
// Pull audio from playthrough buffer, in contiguous chunks
// [delayBufferRecordLock lock]; // skip locks
// this is the tricky part of the ring buffer where we need to break the circular
// illusion and do linear housekeeping. If we're within 1024 of the physical
// end of buffer, then copy out the samples in 2 steps.
while ( samplesToCopy > 0 ) {
sampleCount = MIN(samplesToCopy, length - TPCircularBufferTail(&circularFilterBufferRecord));
if ( sampleCount == 0 ) {
break;
}
// set pointer based on location of the tail
buffer = circularFilterBuffer + TPCircularBufferTail(&circularFilterBufferRecord);
// printf("\ncopying %d to temp, head: %d, tail %d", sampleCount, head, tail );
memcpy(targetBuffer, buffer, sampleCount*sizeof(float)); // actual copy
targetBuffer += sampleCount; // move up target pointer
samplesToCopy -= sampleCount; // keep track of what's already written
TPCircularBufferConsumeAnywhere(&circularFilterBufferRecord, sampleCount); // this increments tail
}
// [THIS.delayBufferRecordLock unlock]; // skip locks
// ok now we have enough samples in the temp delay buffer to actually run the
// filter. For example, if slice size is 1024 and filterLength is 101 - then we
// should have 1124 samples in the tempDelayBuffer
// do convolution
filterStride = -1; // convolution: vDSP_conv walks the kernel backwards from its last tap
vDSP_conv( signalBuffer, stride, filterBuffer + filterLength - 1, filterStride, resultBuffer, stride, resultLength, filterLength );
// now convert from float to Sint16
vDSP_vfixr16((float *) resultBuffer, stride, (SInt16 *) sampleBuffer, stride, bufferCapacity );
return noErr;
}
  1118. // convert sample vector from fixed point 8.24 to SInt16
  1119. void fixedPointToSInt16( SInt32 * source, SInt16 * target, int length ) {
  1120. int i;
  1121. for(i = 0;i < length; i++ ) {
  1122. target[i] = (SInt16) (source[i] >> 9);
  1123. //printf("%d=%d\n",source[i],target[i]);
  1124. }
  1125. }
  1126. // convert sample vector from SInt16 to fixed point 8.24
  1127. void SInt16ToFixedPoint( SInt16 * source, SInt32 * target, int length ) {
  1128. int i;
  1129. for(i = 0;i < length; i++ ) {
  1130. target[i] = (SInt32) (source[i] << 9);
  1131. if(source[i] < 0) {
  1132. target[i] |= 0xFF000000;
  1133. }
  1134. else {
  1135. target[i] &= 0x00FFFFFF;
  1136. }
  1137. //printf("%d=%d\n",source[i],target[i]);
  1138. }
  1139. }
  1140. void SInt16To32JX( SInt16 * source, SInt32 * target, int length ) {
  1141. int i;
  1142. for(i = 0;i < length; i++ ) {
  1143. target[i] = (SInt32) (source[i] << 8);
  1144. if(source[0] < 0) {
  1145. target[0] |= 0xFF000000;
  1146. }
  1147. else {
  1148. target[0] &= 0x00FFFFFF;
  1149. }
  1150. }
  1151. }
  1152. void SInt32To16( SInt32 * source, SInt16 * target) {
  1153. target[0] = (SInt16) (source[0] >> 9);
  1154. }
  1155. // convert sample vector from SInt16 to fixed point 8.24
  1156. void SInt16To32( SInt16 * source, SInt32 * target) {
  1157. target[0] = (SInt32) (source[0] << 9);
  1158. if(source[0] < 0) {
  1159. target[0] |= 0xFF000000;
  1160. }
  1161. else {
  1162. target[0] &= 0x00FFFFFF;
  1163. }
  1164. }
  1165. float getMeanVolumeSint16( SInt16 * vector , int length ) {
  1166. // get average input volume level for meter display
  1167. // by calculating log of mean volume of the buffer
  1168. // and displaying it to the screen
  1169. // (note: there's a vdsp function to do this but it works on float samples
  1170. int sum;
  1171. int i;
  1172. int averageVolume;
  1173. float logVolume;
  1174. sum = 0;
  1175. for ( i = 0; i < length ; i++ ) {
  1176. sum += abs((int) vector[i]);
  1177. }
  1178. averageVolume = sum / length;
  1179. // printf("\naverageVolume before scale = %lu", averageVolume );
  1180. // now convert to logarithm and scale log10(0->32768) into 0->1 for display
  1181. logVolume = log10f( (float) averageVolume );
  1182. logVolume = logVolume / log10(32768);
  1183. return (logVolume);
  1184. }
  1185. // calculate magnitude
  1186. #pragma mark -
  1187. #pragma mark fft
  1188. // for some calculation in the fft callback
  1189. // check to see if there is a vDsp library version
  1190. float MagnitudeSquared(float x, float y) {
  1191. return ((x*x) + (y*y));
  1192. }
// end of audio functions supporting callbacks
// mixerHostAudio class
#pragma mark -
@implementation MixerHostAudio
// properties (see header file for definitions and comments)
@synthesize audioFile;
@synthesize stereoStreamFormat; // stereo format for use in buffer and mixer input for "guitar" sound
@synthesize monoStreamFormat; // mono format for use in buffer and mixer input for "beats" sound
@synthesize SInt16StreamFormat;
@synthesize floatStreamFormat;
@synthesize auEffectStreamFormat;
@synthesize graphSampleRate; // sample rate to use throughout audio processing chain
@synthesize mixerUnit; // the Multichannel Mixer unit
@synthesize ioUnit; // the io unit
@synthesize mixerNode;
@synthesize iONode;
@synthesize playing; // Boolean flag to indicate whether audio is playing or not
@synthesize interruptedDuringPlayback; // Boolean flag to indicate whether audio was playing when an interruption arrived
@synthesize fftSetup; // this is required by fft methods in the callback
@synthesize fftA;
@synthesize fftLog2n;
@synthesize fftN;
@synthesize fftNOver2; // params for fft setup
@synthesize dataBuffer; // input buffer from mic
@synthesize outputBuffer; // for fft conversion
@synthesize analysisBuffer; // for fft frequency analysis
@synthesize conversion16BufferLeft;
@synthesize conversion16BufferRight;
@synthesize conversion32BufferLeft;
@synthesize conversion32BufferRight;
@synthesize filterBuffer; // convolution: impulse response kernel
@synthesize filterLength;
@synthesize signalBuffer; // convolution: input signal (with overlap)
@synthesize signalLength;
@synthesize resultBuffer; // convolution: output
@synthesize resultLength;
@synthesize fftBufferCapacity; // In samples
@synthesize fftIndex; // In samples - this is a horrible variable name
@synthesize displayInputFrequency;
@synthesize displayInputLevelLeft;
@synthesize displayInputLevelRight;
@synthesize displayNumberOfInputChannels;
@synthesize sinFreq;
@synthesize sinPhase;
@synthesize synthNoteOn;
@synthesize micFxType;
@synthesize micFxOn;
@synthesize micFxControl;
@synthesize inputDeviceIsAvailable;
@synthesize isPlayer; // enables the player
@synthesize isRecorder; // enables the recorder
@synthesize isGenerator;// enables the generator
@synthesize isOutputer; // enables saving the recording
@synthesize isEffecter; // enables the effects unit
@synthesize isMixSave; // enables saving the mix
@synthesize isPlayMic;
@synthesize isPaused;
@synthesize isHeadset;
@synthesize isHeadsetTrue;
@synthesize isIPad1;
@synthesize isIOS5,isIOS6;
@synthesize isErroring;
@synthesize isReadFileToMemory;
@synthesize importAudioFile; // file to play back
@synthesize outputAudioFile; // output file
@synthesize outputChanelIndex;// output channel
@synthesize volumeRecorder; // input volume
@synthesize volumePlayer; // output volume
@synthesize currentTime;
@synthesize timeLenRecord;
@synthesize recordSamples;
@synthesize delegate;
@synthesize isOutputMp3;
// end of properties
  1267. #pragma mark -
  1268. #pragma mark Initialize
  1269. // Get the app ready for playback.
  1270. - (id) init {
  1271. self = [super init];
  1272. if (!self) return nil;
  1273. isPlayer = NO;
  1274. isRecorder = NO;
  1275. isGenerator = NO;
  1276. isOutputer = NO;
  1277. isEffecter = NO;
  1278. isMixSave = NO;
  1279. isPaused = YES;
  1280. isPlayMic = NO;
  1281. isErroring = NO;
  1282. outputChanelIndex = 0;
  1283. volumeRecorder = 1;
  1284. volumePlayer = 1;
  1285. importAudioFile = nil;
  1286. outputAudioFile = nil;
  1287. interruptedDuringPlayback = NO;
  1288. micFxOn = NO;
  1289. micFxControl = .5;
  1290. micFxType = 0;
  1291. conversion16BufferLeft = NULL;
  1292. conversion16BufferRight = NULL;
  1293. conversion32BufferLeft = NULL;
  1294. conversion32BufferRight = NULL;
  1295. dataBuffer = NULL;
  1296. outputBuffer = NULL;
  1297. analysisBuffer = NULL;
  1298. delayBufferRecordLock = nil;
  1299. delayBuffer = NULL;
  1300. tempDelayBuffer = NULL;
  1301. circularFilterBuffer = NULL;
  1302. circularFilterBufferRecordLock = NULL;
  1303. tempCircularFilterBuffer = NULL;
  1304. filterBuffer = NULL;
  1305. signalBuffer = NULL;
  1306. resultBuffer = NULL;
  1307. g_Reverb = createReverb();
  1308. setReverbParem(g_Reverb,0.2,0.2,0.45,0.99,0.99);// dry*2 + wet*3 = 100
  1309. // isIPad1 = [[[UIDevice currentDevice] platformString] isEqualToString:@"iPad 1G"];
  1310. isIPad1 = 0;
  1311. return self;
  1312. }
  1313. #pragma mark -
  1314. #pragma mark Deallocate
  1315. - (void) dealloc {
  1316. // NSLog(@"audioRecorder.dealloc");
  1317. //AudioSessionRemovePropertyListener(kAudioSessionProperty_AudioRouteChange);
  1318. //AUGraphUninitialize(processingGraph);
  1319. //DisposeAUGraph(processingGraph);
  1320. for (int i = 0; i < NUM_FILES; i++) {
  1321. if (sourceURLArray[i] != NULL){
  1322. CFRelease (sourceURLArray[i]);
  1323. sourceURLArray[i] = NULL;
  1324. }
  1325. if (soundStructArray[i].audioDataLeft != NULL) {
  1326. free (soundStructArray[i].audioDataLeft);
  1327. soundStructArray[i].audioDataLeft = 0;
  1328. }
  1329. if (soundStructArray[i].audioDataRight != NULL) {
  1330. free (soundStructArray[i].audioDataRight);
  1331. soundStructArray[i].audioDataRight = 0;
  1332. }
  1333. }
  1334. if(conversion16BufferLeft != NULL)
  1335. free(conversion16BufferLeft);
  1336. if(conversion16BufferRight != NULL)
  1337. free(conversion16BufferRight);
  1338. if(conversion32BufferLeft != NULL)
  1339. free(conversion32BufferLeft);
  1340. if(conversion32BufferRight != NULL)
  1341. free(conversion32BufferRight);
  1342. if(dataBuffer != NULL)
  1343. free(dataBuffer);
  1344. if(outputBuffer != NULL)
  1345. free(outputBuffer);
  1346. if(analysisBuffer != NULL)
  1347. free(analysisBuffer);
  1348. if(delayBufferRecordLock){
  1349. // [delayBufferRecordLock release];
  1350. delayBufferRecordLock = nil;
  1351. }
  1352. if(delayBuffer != NULL)
  1353. free(delayBuffer);
  1354. if(tempDelayBuffer != NULL)
  1355. free(tempDelayBuffer);
  1356. if(circularFilterBuffer != NULL)
  1357. free(circularFilterBuffer);
  1358. if(circularFilterBufferRecordLock != NULL)
  1359. free((__bridge void *)(circularFilterBufferRecordLock));
  1360. if(tempCircularFilterBuffer != NULL)
  1361. free(tempCircularFilterBuffer);
  1362. if(filterBuffer != NULL)
  1363. free(filterBuffer);
  1364. if(signalBuffer != NULL)
  1365. free(signalBuffer);
  1366. if(resultBuffer != NULL)
  1367. free(resultBuffer);
  1368. vDSP_destroy_fftsetup(fftSetup);
  1369. free(fftA.realp);
  1370. free(fftA.imagp);
  1371. self.importAudioFile = nil;
  1372. self.outputAudioFile = nil;
  1373. deleteReverb(g_Reverb);
  1374. // [super dealloc];
  1375. }
// Allocate the DSP working state (convolution, FFT, and delay buffers)
// and fill in the ASBD stream/file formats used when wiring the graph.
// Called from setup, after the audio session is configured (the formats
// read graphSampleRate).
-(void) initBuffer{
[self convolutionSetup];
[self FFTSetup];
[self initDelayBuffer];
//[self obtainSoundFileURLs];
[self setupStereoStreamFormat];
[self setupMonoStreamFormat];
[self setupSInt16StreamFormat];
[self setupStereoFileFormat];
[self setupMonoFileFormat];
//if(isPlayer)
// [self readAudioFilesIntoMemory];
}
// One-shot bring-up: configure the audio session, allocate buffers and
// stream formats, build and initialize the processing graph, then open
// the mixer output at full gain. Order matters: the session must be
// active before the formats/graph read the hardware sample rate.
-(void) setup{
[self setupAudioSession];
[self initBuffer];
[self configureAndInitializeAudioProcessingGraph];
[self setMixerOutputGain:1];
}
  1395. #pragma mark -
  1396. #pragma mark Audio set up
  1397. //
  1398. // AVAudioSession setup
  1399. // This is all the external housekeeping needed in any ios coreaudio app
  1400. //
// Configure the shared AVAudioSession for simultaneous play-and-record:
// set the category (falling back to playback-only when no input device
// exists), request a 44.1 kHz sample rate and a ~1024-frame IO buffer,
// route output to the speaker, enable Bluetooth input, activate the
// session, and read back the actual sample rate / buffer duration the
// hardware granted. Uses the deprecated AudioSessionSetProperty C API
// alongside AVAudioSession - both target pre-iOS-7 session management.
- (void) setupAudioSession {
// some debugging to find out about ourselves
OSStatus error;
#if !CA_PREFER_FIXED_POINT
// NSLog(@"not fixed point");
#else
// NSLog(@"fixed point");
#endif
#if TARGET_IPHONE_SIMULATOR
// #warning *** Simulator mode: beware ***
// NSLog(@"simulator is running");
#else
// NSLog(@"device is running");
#endif
if(UI_USER_INTERFACE_IDIOM() == UIUserInterfaceIdiomPhone) {
// NSLog(@"running iphone or ipod touch...\n");
}
AVAudioSession *mySession = [AVAudioSession sharedInstance];
[mySession setActive: NO error: nil];
// Specify that this object is the delegate of the audio session, so that
// this object's endInterruption method will be invoked when needed.
//[mySession setDelegate: self];
// tz change to play and record
// Assign the Playback category to the audio session.
NSError *audioSessionError = nil;
[mySession setCategory: AVAudioSessionCategoryPlayAndRecord
error: &audioSessionError];
if (audioSessionError != nil) {
// NSLog (@"Error setting audio session category.");
}
// no mic available (e.g. ipod touch without earbuds): fall back to playback only
inputDeviceIsAvailable = [mySession inputIsAvailable];
if(inputDeviceIsAvailable) {
// NSLog(@"input device is available");
}
else {
// NSLog(@"input device not available...");
[mySession setCategory: AVAudioSessionCategoryPlayback
error: &audioSessionError];
}
// Request the desired hardware sample rate.
if(self.graphSampleRate<=0)
self.graphSampleRate = 44100.0; // Hertz
[mySession setPreferredHardwareSampleRate: graphSampleRate
error: &audioSessionError];
if (audioSessionError != nil) {
// NSLog (@"Error setting preferred hardware sample rate.");
}
// refer to IOS developer library : Audio Session Programming Guide
// set preferred buffer duration to 1024 using
// try ((buffer size + 1) / sample rate) - due to little arm6 floating point bug?
// doesn't seem to help - the duration seems to get set to whatever the system wants...
Float32 currentBufferDuration;
// if(isIOS5)
currentBufferDuration = (Float32) (1024 / self.graphSampleRate);
// else
// currentBufferDuration = (Float32) (102400 / self.graphSampleRate);
UInt32 sss = sizeof(currentBufferDuration);
AudioSessionSetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, sizeof(currentBufferDuration), &currentBufferDuration);
// NSLog(@"setting buffer duration to: %f", currentBufferDuration);
UInt32 doChangeDefaultRoute = 1;
error = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,sizeof(doChangeDefaultRoute), &doChangeDefaultRoute);
if (error) printf("couldn't set audio speaker!");
doChangeDefaultRoute = 1;
error = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryEnableBluetoothInput,sizeof(doChangeDefaultRoute), &doChangeDefaultRoute);
if (error) printf("couldn't set blue input!");
// find out how many input channels are available
//UInt32 allowMixing = true;
//error = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryMixWithOthers, sizeof(allowMixing), &allowMixing);
//UInt32 value = kAudioSessionMode_Measurement;
//error = AudioSessionSetProperty(kAudioSessionProperty_Mode,sizeof(value), &value);
//if (error) printf("couldn't set audio mode!");
// note: this is where ipod touch (w/o mic) erred out when mic (ie earbud thing) was not plugged - before we added
// the code above to check for mic available
// Activate the audio session
[mySession setActive: YES error: &audioSessionError];
if (audioSessionError != nil) {
// NSLog (@"Error activating audio session during initial setup.");
}
// Obtain the actual hardware sample rate and store it for later use in the audio processing graph.
self.graphSampleRate = [mySession currentHardwareSampleRate];
// NSLog(@"Actual sample rate is: %f", self.graphSampleRate );
// find out the current buffer duration
// to calculate duration use: buffersize / sample rate, eg., 512 / 44100 = .012
// Obtain the actual buffer duration - this may be necessary to get fft stuff working properly in passthru
AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration, &sss, &currentBufferDuration);
// NSLog(@"Actual current hardware io buffer duration: %f ", currentBufferDuration );
// Register the audio route change listener callback function with the audio session.
//AudioSessionAddPropertyListener (kAudioSessionProperty_AudioRouteChange,audioRouteChangeListenerCallback,self);
// force-override the system audio output route (disabled):
//UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_Speaker;
//error = AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof(audioRouteOverride), &audioRouteOverride);
NSInteger numberOfChannels = [mySession currentHardwareInputNumberOfChannels];
// NSLog(@"number of channels: %d", numberOfChannels );
displayNumberOfInputChannels = numberOfChannels; // set instance variable for display
mySession = nil;
audioSessionError = nil;
return ; // everything ok
}
  1499. // this converts the samples in the input buffer into floats
  1500. //
  1501. // there is an accelerate framework vdsp function
  1502. // that does this conversion, so we're not using this function now
  1503. // but its good to know how to do it this way, although I would split it up into a setup and execute module
  1504. // I left this code to show how its done with an audio converter
  1505. //
  1506. void ConvertInt16ToFloat(MixerHostAudio *THIS, void *buf, float *outputBuf, size_t capacity) {
  1507. AudioConverterRef converter;
  1508. OSStatus err;
  1509. size_t bytesPerSample = sizeof(float);
  1510. AudioStreamBasicDescription outFormat = {0};
  1511. outFormat.mFormatID = kAudioFormatLinearPCM;
  1512. outFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
  1513. outFormat.mBitsPerChannel = 8 * bytesPerSample;
  1514. outFormat.mFramesPerPacket = 1;
  1515. outFormat.mChannelsPerFrame = 1;
  1516. outFormat.mBytesPerPacket = bytesPerSample * outFormat.mFramesPerPacket;
  1517. outFormat.mBytesPerFrame = bytesPerSample * outFormat.mChannelsPerFrame;
  1518. outFormat.mSampleRate = THIS->graphSampleRate;
  1519. const AudioStreamBasicDescription inFormat = THIS->SInt16StreamFormat;
  1520. UInt32 inSize = capacity*sizeof(SInt16);
  1521. UInt32 outSize = capacity*sizeof(float);
  1522. // this is the famed audio converter
  1523. err = AudioConverterNew(&inFormat, &outFormat, &converter);
  1524. if(noErr != err) {
  1525. // NSLog(@"error in audioConverterNew: %ld", err);
  1526. }
  1527. err = AudioConverterConvertBuffer(converter, inSize, buf, &outSize, outputBuf);
  1528. if(noErr != err) {
  1529. // NSLog(@"error in audioConverterConvertBuffer: %ld", err);
  1530. }
  1531. }
  1532. - (void) setupStereoFileFormat {
  1533. // The AudioSampleType data type is the recommended type for sample data in audio
  1534. // units. This obtains the byte size of the type for use in filling in the ASBD.
  1535. size_t bytesPerSample = sizeof (AudioSampleType);
  1536. // NSLog (@"size of AudioSampleType: %lu", bytesPerSample);
  1537. // Fill the application audio format struct's fields to define a linear PCM,
  1538. // stereo, noninterleaved stream at the hardware sample rate.
  1539. stereoFileFormat.mFormatID = kAudioFormatLinearPCM;
  1540. stereoFileFormat.mFormatFlags = kAudioFormatFlagsAudioUnitCanonical;
  1541. stereoFileFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
  1542. stereoFileFormat.mChannelsPerFrame = 2; // 2 indicates stereo
  1543. stereoFileFormat.mBytesPerPacket = bytesPerSample * stereoFileFormat.mChannelsPerFrame;
  1544. stereoFileFormat.mFramesPerPacket = 1;
  1545. stereoFileFormat.mBytesPerFrame = bytesPerSample * stereoFileFormat.mChannelsPerFrame;
  1546. stereoFileFormat.mBitsPerChannel = 8 * bytesPerSample;
  1547. stereoFileFormat.mSampleRate = graphSampleRate;
  1548. // NSLog (@"The stereo file format:");
  1549. [self printASBD: stereoFileFormat];
  1550. }
  1551. - (void) setupMonoFileFormat {
  1552. // The AudioSampleType data type is the recommended type for sample data in audio
  1553. // units. This obtains the byte size of the type for use in filling in the ASBD.
  1554. size_t bytesPerSample = sizeof (AudioSampleType);
  1555. // Fill the application audio format struct's fields to define a linear PCM,
  1556. // stereo, noninterleaved stream at the hardware sample rate.
  1557. monoFileFormat.mFormatID = kAudioFormatLinearPCM;
  1558. monoFileFormat.mFormatFlags = kAudioFormatFlagsAudioUnitCanonical;
  1559. monoFileFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
  1560. monoFileFormat.mBytesPerPacket = bytesPerSample;
  1561. monoFileFormat.mFramesPerPacket = 1;
  1562. monoFileFormat.mBytesPerFrame = bytesPerSample;
  1563. monoFileFormat.mChannelsPerFrame = 1; // 1 indicates mono
  1564. monoFileFormat.mBitsPerChannel = 8 * bytesPerSample;
  1565. monoFileFormat.mSampleRate = graphSampleRate;
  1566. // NSLog (@"The mono file format:");
  1567. [self printASBD: monoFileFormat];
  1568. }
  1569. // setup asbd stream formats
  1570. - (void) setupStereoStreamFormat {
  1571. // The AudioSampleType data type is the recommended type for sample data in audio
  1572. // units. This obtains the byte size of the type for use in filling in the ASBD.
  1573. size_t bytesPerSample = sizeof (AudioUnitSampleType);
  1574. // NSLog (@"size of AudioSampleType: %lu", bytesPerSample);
  1575. // Fill the application audio format struct's fields to define a linear PCM,
  1576. // stereo, noninterleaved stream at the hardware sample rate.
  1577. stereoStreamFormat.mFormatID = kAudioFormatLinearPCM;
  1578. stereoStreamFormat.mFormatFlags = kAudioFormatFlagsAudioUnitCanonical;
  1579. stereoStreamFormat.mBytesPerPacket = bytesPerSample;
  1580. stereoStreamFormat.mFramesPerPacket = 1;
  1581. stereoStreamFormat.mBytesPerFrame = bytesPerSample;
  1582. stereoStreamFormat.mChannelsPerFrame = 2; // 2 indicates stereo
  1583. stereoStreamFormat.mBitsPerChannel = 8 * bytesPerSample;
  1584. stereoStreamFormat.mSampleRate = graphSampleRate;
  1585. // NSLog (@"The stereo stream format:");
  1586. [self printASBD: stereoStreamFormat];
  1587. }
  1588. - (void) setupMonoStreamFormat {
  1589. // The AudioSampleType data type is the recommended type for sample data in audio
  1590. // units. This obtains the byte size of the type for use in filling in the ASBD.
  1591. size_t bytesPerSample = sizeof (AudioUnitSampleType);
  1592. // Fill the application audio format struct's fields to define a linear PCM,
  1593. // stereo, noninterleaved stream at the hardware sample rate.
  1594. monoStreamFormat.mFormatID = kAudioFormatLinearPCM;
  1595. //monoStreamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
  1596. monoStreamFormat.mFormatFlags = kAudioFormatFlagsAudioUnitCanonical;
  1597. monoStreamFormat.mBytesPerPacket = bytesPerSample;
  1598. monoStreamFormat.mFramesPerPacket = 1;
  1599. monoStreamFormat.mBytesPerFrame = bytesPerSample;
  1600. monoStreamFormat.mChannelsPerFrame = 1; // 1 indicates mono
  1601. monoStreamFormat.mBitsPerChannel = 8 * bytesPerSample;
  1602. monoStreamFormat.mSampleRate = graphSampleRate;
  1603. // NSLog (@"The mono stream format:");
  1604. [self printASBD: monoStreamFormat];
  1605. }
// this will be the stream format for anything that gets seriously processed by a render callback function
// it uses 16-bit signed int for sample data, assuming that this callback is probably on the input bus of a mixer
// or the input scope of the rio Output bus; in either case, we're assuming that the AU will do the necessary format
// conversion to satisfy the output hardware - tz
//
// important distinction here with asbd's:
//
// note the difference between AudioUnitSampleType and AudioSampleType
//
// the former is an 8.24 (32 bit) fixed point sample format
// the latter is a signed 16 bit (SInt16) integer sample format
//
// a subtle name difference for a huge programming difference
  1619. - (void) setupSInt16StreamFormat {
  1620. // Stream format for Signed 16 bit integers
  1621. //
  1622. // note: as of ios5 this works for signal channel mic/line input (not stereo)
  1623. // and for mono audio generators (like synths) which pull no device data
  1624. // This obtains the byte size of the type for use in filling in the ASBD.
  1625. size_t bytesPerSample = sizeof (AudioSampleType); // Sint16
  1626. // NSLog (@"size of AudioSampleType: %lu", bytesPerSample);
  1627. // Fill the application audio format struct's fields to define a linear PCM,
  1628. // stereo, noninterleaved stream at the hardware sample rate.
  1629. SInt16StreamFormat.mFormatID = kAudioFormatLinearPCM;
  1630. SInt16StreamFormat.mFormatFlags = kAudioFormatFlagsCanonical;
  1631. SInt16StreamFormat.mBytesPerPacket = bytesPerSample;
  1632. SInt16StreamFormat.mFramesPerPacket = 1;
  1633. SInt16StreamFormat.mBytesPerFrame = bytesPerSample;
  1634. SInt16StreamFormat.mChannelsPerFrame = 1; // 1 indicates mono
  1635. SInt16StreamFormat.mBitsPerChannel = 8 * bytesPerSample;
  1636. SInt16StreamFormat.mSampleRate = graphSampleRate;
  1637. // NSLog (@"The SInt16 (mono) stream format:");
  1638. [self printASBD: SInt16StreamFormat];
  1639. }
  1640. // this is a test of using a float stream for the output scope of rio input bus
  1641. // and the input bus of a mixer channel
  1642. // the reason for this is that it would allow float algorithms to run without extra conversion
  1643. // that is, if it actually works
  1644. //
  1645. // so - apparently this doesn't work - at least in the context just described - there was no error in setting it
  1646. //
  1647. - (void) setupFloatStreamFormat {
  1648. // This obtains the byte size of the type for use in filling in the ASBD.
  1649. size_t bytesPerSample = sizeof(float);
  1650. // Fill the application audio format struct's fields to define a linear PCM,
  1651. // stereo, noninterleaved stream at the hardware sample rate.
  1652. floatStreamFormat.mFormatID = kAudioFormatLinearPCM;
  1653. floatStreamFormat.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
  1654. floatStreamFormat.mBytesPerPacket = bytesPerSample;
  1655. floatStreamFormat.mFramesPerPacket = 1;
  1656. floatStreamFormat.mBytesPerFrame = bytesPerSample;
  1657. floatStreamFormat.mChannelsPerFrame = 1; // 1 indicates mono
  1658. floatStreamFormat.mBitsPerChannel = 8 * bytesPerSample;
  1659. floatStreamFormat.mSampleRate = graphSampleRate;
  1660. // NSLog (@"The float stream format:");
  1661. [self printASBD: floatStreamFormat];
  1662. }
  1663. // initialize the circular delay buffer
  1664. //
  1665. - (void) initDelayBuffer {
  1666. // Allocate buffer
  1667. delayBuffer = (SInt16*)malloc(sizeof(SInt16) * kDelayBufferLength);
  1668. memset(delayBuffer,0, kDelayBufferLength ); // set to zero
  1669. // Initialise record
  1670. TPCircularBufferInit(&delayBufferRecord, kDelayBufferLength);
  1671. delayBufferRecordLock = [[NSLock alloc] init];
  1672. // this should be set with a constant equal to frame buffer size
  1673. // so we're using this for other big stuff, so...
  1674. tempDelayBuffer = (SInt16*)malloc(sizeof(SInt16) * 4096);
  1675. // now do the same thing for the float filter buffer
  1676. // Allocate buffer
  1677. circularFilterBuffer = (float *)malloc(sizeof(float) * kDelayBufferLength);
  1678. memset(circularFilterBuffer,0, kDelayBufferLength ); // set to zero
  1679. // Initialise record
  1680. TPCircularBufferInit(&circularFilterBufferRecord, kDelayBufferLength);
  1681. circularFilterBufferRecordLock = [[NSLock alloc] init];
  1682. // this should be set with a constant equal to frame buffer size
  1683. // so we're using this for other big stuff, so...
  1684. tempCircularFilterBuffer = (float *)malloc(sizeof(float) * 4096);
  1685. }
  1686. // Setup FFT - structures needed by vdsp functions
  1687. - (void) FFTSetup {
  1688. // I'm going to just convert everything to 1024
  1689. // on the simulator the callback gets 512 frames even if you set the buffer to 1024, so this is a temp workaround in our efforts
  1690. // to make the fft buffer = the callback buffer,
  1691. // for smb it doesn't matter if frame size is bigger than callback buffer
  1692. UInt32 maxFrames = 1024; // fft size
  1693. // setup input and output buffers to equal max frame size
  1694. dataBuffer = (void*)malloc(maxFrames * sizeof(SInt16));
  1695. outputBuffer = (float*)malloc(maxFrames *sizeof(float));
  1696. analysisBuffer = (float*)malloc(maxFrames *sizeof(float));
  1697. // set the init stuff for fft based on number of frames
  1698. fftLog2n = log2f(maxFrames); // log base2 of max number of frames, eg., 10 for 1024
  1699. fftN = 1 << fftLog2n; // actual max number of frames, eg., 1024 - what a silly way to compute it
  1700. fftNOver2 = maxFrames/2; // half fft size
  1701. fftBufferCapacity = maxFrames; // yet another way of expressing fft size
  1702. fftIndex = 0; // index for reading frame data in callback
  1703. // split complex number buffer
  1704. fftA.realp = (float *)malloc(fftNOver2 * sizeof(float)); //
  1705. fftA.imagp = (float *)malloc(fftNOver2 * sizeof(float)); //
  1706. // zero return indicates an error setting up internal buffers
  1707. fftSetup = vDSP_create_fftsetup(fftLog2n, FFT_RADIX2);
  1708. if( fftSetup == (FFTSetup) 0) {
  1709. // NSLog(@"Error - unable to allocate FFT setup buffers" );
  1710. }
  1711. }
  1712. // Setup stuff for convolution testing
  1713. - (void)convolutionSetup {
  1714. int i;
  1715. // just throwing this in here for testing
  1716. // these are the callback data conversion buffers
  1717. conversion16BufferLeft = (void *) malloc(2048 * sizeof(SInt16));
  1718. conversion16BufferRight = (void *) malloc(2048 * sizeof(SInt16));
  1719. conversion32BufferLeft = (void *) malloc(2048 * sizeof(SInt32));
  1720. conversion32BufferRight = (void *) malloc(2048 * sizeof(SInt32));
  1721. filterLength = 101;
  1722. // signal length is actually 1024 but we're padding it
  1723. // with convolution the result length is signal + filter - 1
  1724. signalLength = 2048;
  1725. resultLength = 2048;
  1726. filterBuffer = (void*)malloc(filterLength * sizeof(float));
  1727. signalBuffer = (void*)malloc(signalLength * sizeof(float));
  1728. resultBuffer = (void*)malloc(resultLength * sizeof(float));
  1729. // paddingBuffer = (void*)malloc(paddingLength * sizeof(float));
  1730. // build a filter
  1731. // 101 point windowed sinc lowpass filter from http://www.dspguide.com/
  1732. // table 16-1
  1733. // note - now the filter gets rebuilt on the fly according to UI value for cutoff frequency
  1734. //
  1735. // calculate lowpass filter kernel
  1736. int m = 100;
  1737. float fc = .14;
  1738. for( i = 0; i < 101 ; i++ ) {
  1739. if((i - m / 2) == 0 ) {
  1740. filterBuffer[i] = 2 * M_PI * fc;
  1741. }
  1742. else {
  1743. filterBuffer[i] = sin(2 * M_PI * fc * (i - m / 2)) / (i - m / 2);
  1744. }
  1745. filterBuffer[i] = filterBuffer[i] * (.54 - .46 * cos(2 * M_PI * i / m ));
  1746. }
  1747. // normalize for unity gain at dc
  1748. float sum = 0;
  1749. for ( i = 0 ; i < 101 ; i++ ) {
  1750. sum = sum + filterBuffer[i];
  1751. }
  1752. for ( i = 0 ; i < 101 ; i++ ) {
  1753. filterBuffer[i] = filterBuffer[i] / sum;
  1754. }
  1755. }
  1756. #pragma mark -
  1757. #pragma mark Read audio files into memory
// Open each source audio file, record its format and length in
// soundStructArray, and — when isReadFileToMemory is set — read the entire
// file into the per-channel sample buffers consumed by inputRenderCallback.
// In streaming mode (isReadFileToMemory == NO) the ExtAudioFileRef is kept
// open in soundStructArray[i].audioFile for incremental reads.
- (void) readAudioFilesIntoMemory {
    for (int i = 0; i < NUM_FILES; ++i) {
        // Instantiate an extended audio file object and open the file.
        ExtAudioFileRef audioFileObject = 0;
        OSStatus result = ExtAudioFileOpenURL (sourceURLArray[i], &audioFileObject);
        if (noErr != result || NULL == audioFileObject) {[self printErrorMessage: @"ExtAudioFileOpenURL" withStatus: result]; return;}
        // Forcing the hardware codec misbehaved on iPad, so it is disabled:
        /*
        UInt32 codec = kAppleHardwareAudioCodecManufacturer;
        result = ExtAudioFileSetProperty(audioFileObject,
        kExtAudioFileProperty_CodecManufacturer,
        sizeof(codec),
        &codec);
        if(result)
        printf("ExtAudioFileSetProperty %ld \n", result);*/
        // Get the audio file's length in frames.
        UInt64 totalFramesInFile = 0;
        UInt32 frameLengthPropertySize = sizeof (totalFramesInFile);
        result = ExtAudioFileGetProperty (
            audioFileObject,
            kExtAudioFileProperty_FileLengthFrames,
            &frameLengthPropertySize,
            &totalFramesInFile
        );
        if (noErr != result) {[self printErrorMessage: @"ExtAudioFileGetProperty (audio file length in frames)" withStatus: result]; return;}
        // Get the audio file's native data format (for channel count and
        // sample rate).
        AudioStreamBasicDescription fileAudioFormat = {0};
        UInt32 formatPropertySize = sizeof (fileAudioFormat);
        result = ExtAudioFileGetProperty (
            audioFileObject,
            kExtAudioFileProperty_FileDataFormat,
            &formatPropertySize,
            &fileAudioFormat
        );
        if (noErr != result) {[self printErrorMessage: @"ExtAudioFileGetProperty (file audio format)" withStatus: result]; return;}
        UInt32 channelCount = fileAudioFormat.mChannelsPerFrame;
        // Buffer size: the whole file when reading into memory, otherwise a
        // single 4096-frame streaming buffer.
        UInt32 bufferLen;
        if(isReadFileToMemory)
            bufferLen = totalFramesInFile*sizeof(AudioUnitSampleType);
        else
            bufferLen = 4096*sizeof(AudioUnitSampleType);
        // On iOS 6 the graph runs at the file's own rate; otherwise at a
        // fixed 44.1 kHz.
        if(isIOS6)
            self.graphSampleRate= fileAudioFormat.mSampleRate;
        else
            self.graphSampleRate = 44100.0; // Hertz
        // Allocate (and zero) the left/mono channel buffer.
        soundStructArray[i].audioDataLeft = malloc(bufferLen);
        memset(soundStructArray[i].audioDataLeft,0,bufferLen);
        // Choose the client stream format matching the file's channel count.
        AudioStreamBasicDescription streamFormat = {0};
        if (2 == channelCount) {
            soundStructArray[i].isStereo = YES;
            // Stereo: also allocate the right-channel buffer.
            soundStructArray[i].audioDataRight = malloc(bufferLen);
            memset(soundStructArray[i].audioDataRight,0,bufferLen);
            [self setupStereoStreamFormat];
            streamFormat = stereoStreamFormat;
        } else if (1 == channelCount) {
            soundStructArray[i].isStereo = NO;
            [self setupMonoStreamFormat];
            streamFormat = monoStreamFormat;
        } else {
            // Unsupported channel count — abort this method entirely.
            ExtAudioFileDispose (audioFileObject);
            return;
        }
        // Assign the chosen mixer-input stream format as the extended audio
        // file's client data format; this is the format the samples are
        // converted to when read, and the format inputRenderCallback sees.
        result = ExtAudioFileSetProperty (
            audioFileObject,
            kExtAudioFileProperty_ClientDataFormat,
            sizeof (streamFormat),
            &streamFormat
        );
        if (noErr != result) {[self printErrorMessage: @"ExtAudioFileSetProperty (client data format)" withStatus: result]; return;}
        soundStructArray[i].sampleNumber = 0;
        soundStructArray[i].frameCount = totalFramesInFile;
        soundStructArray[i].audioFile = audioFileObject;
        soundStructArray[i].sampleRate = fileAudioFormat.mSampleRate;
        // soundStructArray[i].sampleRate = self.graphSampleRate;
        // Streaming mode: keep the file open and stop here.
        // NOTE(review): this `return` exits the whole method, so in streaming
        // mode only file 0 is ever prepared; `continue` may have been
        // intended — confirm against callers before changing.
        if(!isReadFileToMemory)
            return;
        // Build an AudioBufferList with one mono buffer per channel so the
        // whole file can be read in a single ExtAudioFileRead call.
        AudioBufferList *bufferList;
        bufferList = (AudioBufferList *) malloc (
            sizeof (AudioBufferList) + sizeof (AudioBuffer) * (channelCount - 1)
        );
        if (NULL == bufferList) {
            // malloc failure for bufferList memory
            return;
        }
        bufferList->mNumberBuffers = channelCount;
        // Zero every AudioBuffer entry before filling it in.
        AudioBuffer emptyBuffer = {0};
        size_t arrayIndex;
        for (arrayIndex = 0; arrayIndex < channelCount; arrayIndex++) {
            bufferList->mBuffers[arrayIndex] = emptyBuffer;
        }
        // Point the buffers at the per-channel sample arrays.
        bufferList->mBuffers[0].mNumberChannels = 1;
        bufferList->mBuffers[0].mDataByteSize = totalFramesInFile * sizeof (AudioUnitSampleType);
        bufferList->mBuffers[0].mData = soundStructArray[i].audioDataLeft;
        if (2 == channelCount) {
            bufferList->mBuffers[1].mNumberChannels = 1;
            bufferList->mBuffers[1].mDataByteSize = totalFramesInFile * sizeof (AudioUnitSampleType);
            bufferList->mBuffers[1].mData = soundStructArray[i].audioDataRight;
        }
        // Synchronous, sequential read of the audio data into the
        // audioDataLeft / audioDataRight buffers.
        UInt32 numberOfPacketsToRead = (UInt32) totalFramesInFile;
        result = ExtAudioFileRead (
            audioFileObject,
            &numberOfPacketsToRead,
            bufferList
        );
        free (bufferList);
        // NOTE(review): `result` from ExtAudioFileRead is not checked; a
        // failed read leaves silent/partial buffers.
        soundStructArray[i].sampleNumber = 0;
        soundStructArray[i].frameCount = totalFramesInFile;
        soundStructArray[i].sampleRate = fileAudioFormat.mSampleRate;
        // Dispose of the extended audio file object, which also closes the
        // associated file (memory mode only — streaming mode returned above).
        ExtAudioFileDispose (audioFileObject);
    }
}
  1890. // create and setup audio processing graph by setting component descriptions and adding nodes
  1891. - (void) setupAudioProcessingGraph {
  1892. OSStatus result = noErr;
  1893. // Create a new audio processing graph.
  1894. result = NewAUGraph (&processingGraph);
  1895. if (noErr != result) {[self printErrorMessage: @"NewAUGraph" withStatus: result]; return;}
  1896. //............................................................................
  1897. // Specify the audio unit component descriptions for the audio units to be
  1898. // added to the graph.
  1899. // remote I/O unit connects both to mic/lineIn and to speaker
  1900. AudioComponentDescription iOUnitDescription;
  1901. iOUnitDescription.componentType = kAudioUnitType_Output;
  1902. iOUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
  1903. // iOUnitDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
  1904. iOUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
  1905. iOUnitDescription.componentFlags = 0;
  1906. iOUnitDescription.componentFlagsMask = 0;
  1907. // Multichannel mixer unit
  1908. AudioComponentDescription MixerUnitDescription;
  1909. MixerUnitDescription.componentType = kAudioUnitType_Mixer;
  1910. MixerUnitDescription.componentSubType = kAudioUnitSubType_MultiChannelMixer;
  1911. MixerUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
  1912. MixerUnitDescription.componentFlags = 0;
  1913. MixerUnitDescription.componentFlagsMask = 0;
  1914. // NSLog (@"Adding nodes to audio processing graph");
  1915. // io unit
  1916. result = AUGraphAddNode (processingGraph,&iOUnitDescription,&iONode);
  1917. if (noErr != result) {[self printErrorMessage: @"AUGraphNewNode failed for I/O unit" withStatus: result]; return;}
  1918. // mixer unit
  1919. result = AUGraphAddNode (processingGraph,&MixerUnitDescription,&mixerNode);
  1920. if (noErr != result) {[self printErrorMessage: @"AUGraphNewNode failed for Mixer unit" withStatus: result]; return;}
  1921. }
  1922. - (void) connectAudioProcessingGraph {
  1923. OSStatus result = noErr;
  1924. result = AUGraphConnectNodeInput (processingGraph,
  1925. mixerNode, // source node
  1926. 0, // source node output bus number
  1927. iONode, // destination node
  1928. 0 // desintation node input bus number
  1929. );
  1930. if (noErr != result) {[self printErrorMessage: @"AUGraphConnectNodeInput" withStatus: result]; return;}
  1931. }
  1932. #pragma mark -
  1933. #pragma mark Audio processing graph setup
// Build, configure and initialize the whole audio processing graph:
// opens the graph, configures the Remote I/O unit's input side (when a
// recorder input exists), sets up the 4-bus mixer, attaches the render
// callbacks (file players, mic/line-in, synth), assigns stream formats to
// each bus, connects mixer -> I/O, and finally initializes the graph.
- (void) configureAndInitializeAudioProcessingGraph {
    OSStatus result = noErr;
    UInt16 busNumber; // mixer input bus number (starts with 0)
    [self setupAudioProcessingGraph];
    result = AUGraphOpen (processingGraph);
    if (noErr != result) {[self printErrorMessage: @"AUGraphOpen" withStatus: result]; return;}
    // Obtain the I/O unit instance from the corresponding node.
    result = AUGraphNodeInfo (
        processingGraph,
        iONode,
        NULL,
        &ioUnit
    );
    if (result) {[self printErrorMessage: @"AUGraphNodeInfo - I/O unit" withStatus: result]; return;}
    // ---- I/O unit setup (input bus) ----
    // Skipped entirely when there is no input device or recording is off.
    if(inputDeviceIsAvailable && isRecorder) {
        const AudioUnitElement ioUnitInputBus = 1;
        // Enable input on the I/O unit (disabled by default; output is
        // enabled by default so it needs no explicit enable here).
        UInt32 enableInput = 1;
        result = AudioUnitSetProperty (
            ioUnit,
            kAudioOutputUnitProperty_EnableIO,
            kAudioUnitScope_Input,
            ioUnitInputBus,
            &enableInput,
            sizeof (enableInput)
        );
        // NOTE(review): `result` is not checked after the EnableIO call.
        //
        // Set the stream format for the mic/line-in processing callback on
        // the OUTPUT scope of the I/O unit's input bus. 8.24 fixed point is
        // used because SInt16 does not work in stereo. Mono vs stereo is
        // chosen from the number of available input channels rather than
        // rendering an unused channel.
        if( displayNumberOfInputChannels == 1) {
            // NOTE(review): this passes &monoFileFormat but
            // sizeof(monoStreamFormat), and the error text below says
            // monoStreamFormat. Both are AudioStreamBasicDescription so the
            // sizes match, but confirm which ASBD was actually intended.
            result = AudioUnitSetProperty (
                ioUnit,
                kAudioUnitProperty_StreamFormat,
                kAudioUnitScope_Output,
                ioUnitInputBus,
                &monoFileFormat,
                sizeof (monoStreamFormat)
            );
            if (result) {[self printErrorMessage: @"AudioUnitSetProperty (set I/O unit input stream format output scope) monoStreamFormat" withStatus: result]; return;}
            // Disabled voice-processing experiments, kept for reference:
            /*const UInt32 one = 1;
            const UInt32 zero = 0;
            const UInt32 quality = 127;
            result = AudioUnitSetProperty(ioUnit, kAUVoiceIOProperty_BypassVoiceProcessing,
            kAudioUnitScope_Global, ioUnitInputBus, &one, sizeof(one));
            result = AudioUnitSetProperty(ioUnit, kAUVoiceIOProperty_VoiceProcessingEnableAGC,
            kAudioUnitScope_Global, ioUnitInputBus, &zero, sizeof(zero));
            result = AudioUnitSetProperty(ioUnit, kAUVoiceIOProperty_DuckNonVoiceAudio,
            kAudioUnitScope_Global, ioUnitInputBus, &zero, sizeof(zero));
            result = AudioUnitSetProperty(ioUnit, kAUVoiceIOProperty_VoiceProcessingQuality,
            kAudioUnitScope_Global, ioUnitInputBus, &quality, sizeof(zero));*/
        }
        else {
            // Two or more input channels: use the stereo 8.24 format.
            result = AudioUnitSetProperty (
                ioUnit,
                kAudioUnitProperty_StreamFormat,
                kAudioUnitScope_Output,
                ioUnitInputBus,
                &stereoStreamFormat,
                sizeof (stereoStreamFormat)
            );
            if (result) {[self printErrorMessage: @"AudioUnitSetProperty (set I/O unit input stream format output scope) stereoStreamFormat" withStatus: result]; return;}
        }
    }
    // RIO unit setup is complete except for its callback, which is attached
    // to the mixer input bus below.
    // Obtain the mixer unit instance from its corresponding node.
    result = AUGraphNodeInfo (
        processingGraph,
        mixerNode,
        NULL,
        &mixerUnit
    );
    if (noErr != result) {[self printErrorMessage: @"AUGraphNodeInfo" withStatus: result]; return;}
    // ---- Multichannel mixer unit setup ----
    // Bus assignments. NOTE(review): guitarBus and beatsBus are declared but
    // never used below; the player callbacks are attached by loop index.
    UInt32 busCount = 4; // bus count for mixer unit input
    UInt32 guitarBus = 0; // mixer unit bus 0: stereo, guitar sound
    UInt32 beatsBus = 1; // mixer unit bus 1: mono, beats sound
    UInt32 micBus = 2; // mixer unit bus 2: microphone input
    UInt32 synthBus = 3; // mixer unit bus 3: synth
    result = AudioUnitSetProperty (
        mixerUnit,
        kAudioUnitProperty_ElementCount,
        kAudioUnitScope_Input,
        0,
        &busCount,
        sizeof (busCount)
    );
    if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty (set mixer unit bus count)" withStatus: result]; return;}
    // Increasing maximum frames per slice would let the mixer handle the
    // larger slices used when the screen is locked (4096); currently 1024.
    // UInt32 maximumFramesPerSlice = 4096;
    UInt32 maximumFramesPerSlice = 1024;
    result = AudioUnitSetProperty (
        mixerUnit,
        kAudioUnitProperty_MaximumFramesPerSlice,
        kAudioUnitScope_Global,
        0,
        &maximumFramesPerSlice,
        sizeof (maximumFramesPerSlice)
    );
    if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty (set mixer unit input stream format)" withStatus: result]; return;}
    // ---- Render callbacks ----
    // File players: one callback per file bus (mic and synth buses are
    // handled separately below).
    if(isPlayer){
        for (UInt16 busNumber = 0; busNumber < NUM_FILES; ++busNumber) {
            // Structure holding the input render callback and its context.
            AURenderCallbackStruct inputCallbackStruct;
            inputCallbackStruct.inputProc = &inputRenderCallback;
            inputCallbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
            // Register the callback on this mixer input bus.
            result = AUGraphSetNodeInputCallback (
                processingGraph,
                mixerNode,
                busNumber,
                &inputCallbackStruct
            );
            if (noErr != result) {[self printErrorMessage: @"AUGraphSetNodeInputCallback" withStatus: result]; return;}
        }
    }
    // Mic/line-in channel callback.
    // NOTE(review): this uses bitwise `&` on two BOOLs. It behaves like `&&`
    // for 0/1 values, but `&&` was presumably intended — confirm before
    // changing.
    if(inputDeviceIsAvailable & isRecorder) {
        UInt16 busNumber = 2; // mic channel on mixer
        AURenderCallbackStruct inputCallbackStruct;
        inputCallbackStruct.inputProc = micLineInCallback; // 8.24 version
        inputCallbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
        result = AUGraphSetNodeInputCallback (
            processingGraph,
            mixerNode,
            busNumber,
            &inputCallbackStruct
        );
        if (noErr != result) {[self printErrorMessage: @"AUGraphSetNodeInputCallback mic/lineIn" withStatus: result]; return;}
    }
    // Synth channel callback.
    if(isGenerator){
        busNumber = 3; // synth channel on mixer
        AURenderCallbackStruct synthCallbackStruct; // render callback structure
        synthCallbackStruct.inputProc = synthRenderCallback; // sound generation
        synthCallbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self); // gives the callback access to this object
        result = AUGraphSetNodeInputCallback (
            processingGraph,
            mixerNode,
            busNumber,
            &synthCallbackStruct
        );
        if (noErr != result) {[self printErrorMessage: @"AUGraphSetNodeInputCallback" withStatus: result]; return;}
    }
    // Explicitly enable output on the I/O unit's output bus (bus 0).
    // NOTE(review): `result` is not checked after this call.
    busNumber = 0;
    UInt32 enableOutput = 1;
    result = AudioUnitSetProperty (
        ioUnit,
        kAudioOutputUnitProperty_EnableIO,
        kAudioUnitScope_Output,
        busNumber,
        &enableOutput,
        sizeof (enableOutput)
    );
    // Disabled save-to-file callback, kept for reference:
    /*
    AURenderCallbackStruct saveCallbackStruct; // Setup structure that contains the render callback function
    saveCallbackStruct.inputProc = saveToFileCallback; // for sound generation
    saveCallbackStruct.inputProcRefCon = self; // this pointer allows callback to access scope of this class
    NSLog (@"Registering the save callback - synth - with mixer unit input bus %u", busNumber);
    result = AUGraphSetNodeInputCallback (
    processingGraph,
    iONode,
    busNumber,
    &saveCallbackStruct
    );
    if (noErr != result) {[self printErrorMessage: @"AUGraphSetNodeInputCallback" withStatus: result]; return;}*/
    // ---- Stream formats for the mixer input buses ----
    // Mic/line-in bus: mono or stereo 8.24 depending on available channels.
    // (SInt16 would give ASBD format errors with more than one channel.)
    if(isRecorder){
        if(displayNumberOfInputChannels == 1) {
            result = AudioUnitSetProperty (
                mixerUnit,
                kAudioUnitProperty_StreamFormat,
                kAudioUnitScope_Input,
                micBus,
                &monoStreamFormat,
                sizeof (monoStreamFormat)
            );
            if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty (set mixer unit bus 2 mic/line stream format mono)" withStatus: result];}
        }
        else if(displayNumberOfInputChannels > 1) { // stereo ASBD
            result = AudioUnitSetProperty (
                mixerUnit,
                kAudioUnitProperty_StreamFormat,
                kAudioUnitScope_Input,
                micBus,
                &stereoStreamFormat,
                sizeof (stereoStreamFormat)
            );
            if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty (set mixer unit bus 2 mic/line stream format stereo)" withStatus: result];return;}
        }
    }
    // Synth bus: SInt16 mono ASBD.
    if(isGenerator){
        result = AudioUnitSetProperty (
            mixerUnit,
            kAudioUnitProperty_StreamFormat,
            kAudioUnitScope_Input,
            synthBus,
            &SInt16StreamFormat,
            sizeof (SInt16StreamFormat)
        );
        if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty (set mixer unit synth input bus 3 mono stream format)" withStatus: result];return;}
    }
    // Mixer output: the sample rate is the only part of the output stream
    // format that must be set explicitly.
    result = AudioUnitSetProperty (
        mixerUnit,
        kAudioUnitProperty_SampleRate,
        kAudioUnitScope_Output,
        0,
        &graphSampleRate,
        sizeof (graphSampleRate)
    );
    if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty (set mixer unit output stream format)" withStatus: result]; return;}
    // Set the input format for mixer bus 0 according to the source file.
    // NOTE(review): passes &stereoStreamFormat with sizeof(monoStreamFormat).
    // Harmless since both are AudioStreamBasicDescription, but the sizeof
    // operand should match the value being passed.
    result = AudioUnitSetProperty (
        mixerUnit,
        kAudioUnitProperty_StreamFormat,
        kAudioUnitScope_Input,
        0,
        &stereoStreamFormat,
        sizeof (monoStreamFormat)
    );
    if (noErr != result) {[self printErrorMessage: @"AudioUnitSetProperty (Setting stream format for mixer unit 0 input bus 0,error))" withStatus: result];return;}
    [self connectAudioProcessingGraph];
    // Dump the graph state, then initialize: this configures stream formats
    // for each connection and validates the links between units.
    CAShow (processingGraph);
    result = AUGraphInitialize (processingGraph);
    if (noErr != result) {[self printErrorMessage: @"AUGraphInitialize" withStatus: result]; return;}
    // Initial synth voice state.
    synthNoteOn = NO;
    sinFreq = 200.0;
    sinPhase = 0;
}
  2204. #pragma mark -
  2205. #pragma mark Playback control
  2206. // Start playback
  2207. // This is the master on/off switch that starts the processing graph
// Master on/off switch: start the processing graph from sample 0.
- (void) start {
    [self start:0];
}
  2211. - (void) start :(SInt64)n {
  2212. for(int i=0;i<NUM_FILES;i++){
  2213. soundStructArray[i].writeNumber = 0;
  2214. soundStructArray[i].sampleNumber = n;
  2215. }
  2216. recordSamples = 0;
  2217. // NSLog(@"recorder.start");
  2218. // NSLog (@"Starting audio processing graph");
  2219. OSStatus result = AUGraphStart (processingGraph);
  2220. //usleep(50);
  2221. //result = AUGraphStop (processingGraph);
  2222. //result = AUGraphStart (processingGraph);
  2223. if (noErr != result) {[self printErrorMessage: @"AUGraphStart" withStatus: result]; return;}
  2224. self.playing = YES;
  2225. }
  2226. // Stop playback
// Stop playback.
// Returns 1 on success (or when the graph was not running), 0 when a graph
// call fails. If output capture is active, the capture file (mp3 or
// ExtAudioFile) is closed as well.
- (BOOL) stop{
    Boolean isRunning = false;
    OSStatus result = AUGraphIsRunning (processingGraph, &isRunning);
    if (noErr != result) {[self printErrorMessage: @"AUGraphIsRunning" withStatus: result]; return 0;}
    if (isRunning) {
        result = AUGraphStop (processingGraph);
        if (noErr != result) {[self printErrorMessage: @"AUGraphStop" withStatus: result]; return 0;}
        self.playing = NO;
        if(isOutputer){
            // Close whichever capture target is active.
            if(isOutputMp3)
                [self closeMp3File];
            else{
                result = ExtAudioFileDispose(soundStructArray[0].audioFile);
                // NOTE(review): this spin-wait repeatedly disposes the
                // separate `audioFile` ivar until it reports noErr, right
                // after disposing soundStructArray[0].audioFile above.
                // Looks like a workaround for an in-flight write; confirm
                // the two refs are distinct and that re-disposing is safe.
                while (ExtAudioFileDispose(audioFile)!=noErr){
                    usleep(50);
                }
            }
        }
    }
    return 1;
}
// Pause: silence the output via mute while leaving the graph running, then
// mark the paused state. The commented-out alternative saved and zeroed the
// player/recorder volumes instead of muting.
- (void) pause{
    //if(!playing)
    // return;
    [self mute];
    /* _oldVolPlayer = volumePlayer;
    _oldVolRecorder = volumeRecorder;
    self.volumePlayer = 0;
    self.volumeRecorder = 0;*/
    isPaused = YES;
}
// Resume after pause. Re-assigning each volume property to its own backing
// ivar deliberately re-runs the property setters, restoring the mixer gains
// that pause/mute silenced.
- (void) play{
    if(!playing)
        return;
    self.volumePlayer = volumePlayer;
    self.volumeRecorder = volumeRecorder;
    isPaused = NO;
}
// Seek playback to time n (in seconds), converted to a frame position using
// file 0's own sample rate. Playback is briefly flagged paused around the
// position update — presumably to keep the render callback from consuming a
// half-updated sample number — and the prior pause state is then restored.
- (void) seek:(NSTimeInterval)n{
    if(!playing)
        return;
    OSStatus result;
    BOOL b = isPaused;
    //result = AUGraphStop (processingGraph);
    isPaused = YES;
    float rate = soundStructArray[0].sampleRate;
    soundStructArray[0].sampleNumber = rate*n;
    isPaused = b;
    // In streaming mode the file object itself must be repositioned too.
    // NOTE(review): `result` is assigned here but never checked.
    if(!isReadFileToMemory)
        result = ExtAudioFileSeek(soundStructArray[0].audioFile, soundStructArray[0].sampleNumber);
    // SInt64 pos;
    // result = ExtAudioFileTell(soundStructArray[0].audioFile,&pos);
}
  2289. #pragma mark -
  2290. #pragma mark Mixer unit control
  2291. // mixer handler methods
  2292. // Enable or disable a specified bus
  2293. - (void) enableMixerInput: (UInt32) inputBus isOn: (AudioUnitParameterValue) isOnValue {
  2294. // NSLog (@"Bus %d now %@", (int) inputBus, isOnValue ? @"on" : @"off");
  2295. OSStatus result = AudioUnitSetParameter (
  2296. mixerUnit,
  2297. kMultiChannelMixerParam_Enable,
  2298. kAudioUnitScope_Input,
  2299. inputBus,
  2300. isOnValue,
  2301. 0
  2302. );
  2303. if (noErr != result) {[self printErrorMessage: @"AudioUnitSetParameter (enable the mixer unit)" withStatus: result]; return;}
  2304. /*
  2305. // Ensure that the sound loops stay in sync when reenabling an input bus
  2306. if (0 == inputBus && 1 == isOnValue) {
  2307. soundStructArray[0].sampleNumber = soundStructArray[1].sampleNumber;
  2308. }
  2309. if (1 == inputBus && 1 == isOnValue) {
  2310. soundStructArray[1].sampleNumber = soundStructArray[0].sampleNumber;
  2311. }*/
  2312. // We removed the UI switch for bus 1 (beats) and merged it with bus 0 (guitar)
  2313. // So if bus 0 switch is pressed call this method again with '1'
  2314. // A wimpy form of recursion.
  2315. /*if( inputBus == 0 ) {
  2316. inputBus = 1;
  2317. [self enableMixerInput: inputBus isOn: isOnValue ];
  2318. }*/
  2319. }
  2320. // Set the mixer unit input volume for a specified bus
  2321. - (void) setMixerInput: (UInt32) inputBus gain: (AudioUnitParameterValue) newGain {
  2322. /*
  2323. This method does *not* ensure that sound loops stay in sync if the user has
  2324. moved the volume of an input channel to zero. When a channel's input
  2325. level goes to zero, the corresponding input render callback is no longer
  2326. invoked. Consequently, the sample number for that channel remains constant
  2327. while the sample number for the other channel continues to increment. As a
  2328. workaround, the view controller Nib file specifies that the minimum input
  2329. level is 0.01, not zero. (tz: changed this to .00001)
  2330. The enableMixerInput:isOn: method in this class, however, does ensure that the
  2331. loops stay in sync when a user disables and then reenables an input bus.
  2332. */
  2333. OSStatus result;
  2334. if(inputBus == 2){
  2335. newGain *= 5;
  2336. if(newGain<=0)
  2337. newGain = 0.00001;
  2338. }
  2339. result = AudioUnitSetParameter (mixerUnit,kMultiChannelMixerParam_Volume,kAudioUnitScope_Input,inputBus,newGain,0);
  2340. if(result)printf("AudioUnitSetParameter1 error\n");
  2341. //result = AudioUnitSetParameter(mixerUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Global, 0xFFFFFFFF, 1.0, 0));
  2342. //if(result)printf("AudioUnitSetParameter2 error\n");
  2343. /*inputBus = 1;
  2344. result = AudioUnitSetParameter (ioUnit,kHALOutputParam_Volume,kAudioUnitScope_Output,inputBus,1,0);
  2345. if(result)printf("AudioUnitSetParameter8 error\n");*/
  2346. }
  2347. // Set the mixer unit output volume
  2348. - (void) setMixerOutputGain: (AudioUnitParameterValue) newGain {
  2349. OSStatus result = AudioUnitSetParameter (
  2350. mixerUnit,
  2351. kMultiChannelMixerParam_Volume,
  2352. kAudioUnitScope_Output,
  2353. 0,
  2354. newGain,
  2355. 0
  2356. );
  2357. if (noErr != result) {[self printErrorMessage: @"AudioUnitSetParameter (set mixer unit output volume)" withStatus: result]; return;}
  2358. // for testing - get the current output level
  2359. // This didn't work. I'd like to know how to do this without a callback
  2360. // outputLevel = [self getMixerOutputLevel];
  2361. }
  2362. // Get the mxer unit output level (post)
  2363. - (Float32) getMixerOutputLevel {
  2364. // this does not work in any shape or form on any bus of the mixer
  2365. // input or output scope....
  2366. Float32 outputLevel;
  2367. CheckError( AudioUnitGetParameter ( mixerUnit,
  2368. kMultiChannelMixerParam_PostAveragePower,
  2369. kAudioUnitScope_Output,
  2370. 0,
  2371. &outputLevel
  2372. ) ,"AudioUnitGetParameter (get mixer unit level") ;
  2373. // printf("mixer level is: %f\n", outputLevel);
  2374. return outputLevel;
  2375. }
// Start the test-tone synth: the render callback reads these two ivars.
// Statement order is kept as-is because the audio render thread may observe
// them between the two writes.
- (void) playSynthNote {
// NSLog( @"play synth note");
synthNoteOn = YES;
sinFreq = 391.0; // G
}
// Stop the test-tone synth; the render callback stops generating the sine
// wave once this flag is cleared.
- (void) stopSynthNote {
// NSLog(@"stop synth note");
synthNoteOn = NO;
}
  2385. #pragma mark -
  2386. #pragma mark Audio Session Delegate Methods
  2387. // Respond to having been interrupted. This method sends a notification to the
  2388. // controller object, which in turn invokes the playOrStop: toggle method. The
  2389. // interruptedDuringPlayback flag lets the endInterruptionWithFlags: method know
  2390. // whether playback was in progress at the time of the interruption.
  2391. - (void) beginInterruption {
  2392. // NSLog (@"Audio session was interrupted.");
  2393. if (playing) {
  2394. self.interruptedDuringPlayback = YES;
  2395. NSString *MixerHostAudioObjectPlaybackStateDidChangeNotification = @"MixerHostAudioObjectPlaybackStateDidChangeNotification";
  2396. [g_notify postNotificationName: MixerHostAudioObjectPlaybackStateDidChangeNotification object: self];
  2397. }
  2398. }
  2399. // Respond to the end of an interruption. This method gets invoked, for example,
  2400. // after the user dismisses a clock alarm.
  2401. - (void) endInterruptionWithFlags: (NSUInteger) flags {
  2402. // Test if the interruption that has just ended was one from which this app
  2403. // should resume playback.
  2404. if (flags & AVAudioSessionInterruptionFlags_ShouldResume) {
  2405. NSError *endInterruptionError = nil;
  2406. [[AVAudioSession sharedInstance] setActive: YES
  2407. error: &endInterruptionError];
  2408. if (endInterruptionError != nil) {
  2409. // NSLog (@"Unable to reactivate the audio session after the interruption ended.");
  2410. return;
  2411. } else {
  2412. // NSLog (@"Audio session reactivated after interruption.");
  2413. if (interruptedDuringPlayback) {
  2414. self.interruptedDuringPlayback = NO;
  2415. // Resume playback by sending a notification to the controller object, which
  2416. // in turn invokes the playOrStop: toggle method.
  2417. NSString *MixerHostAudioObjectPlaybackStateDidChangeNotification = @"MixerHostAudioObjectPlaybackStateDidChangeNotification";
  2418. [g_notify postNotificationName: MixerHostAudioObjectPlaybackStateDidChangeNotification object: self];
  2419. }
  2420. }
  2421. }
  2422. }
  2423. #pragma mark -
  2424. #pragma mark Utility methods
  2425. // You can use this method during development and debugging to look at the
  2426. // fields of an AudioStreamBasicDescription struct.
  2427. - (void) printASBD: (AudioStreamBasicDescription) asbd {
  2428. char formatIDString[5];
  2429. UInt32 formatID = CFSwapInt32HostToBig (asbd.mFormatID);
  2430. bcopy (&formatID, formatIDString, 4);
  2431. formatIDString[4] = '\0';
  2432. // NSLog (@" Sample Rate: %10.0f", asbd.mSampleRate);
  2433. // NSLog (@" Format ID: %10s", formatIDString);
  2434. // NSLog (@" Format Flags: %10lu", asbd.mFormatFlags);
  2435. // NSLog (@" Bytes per Packet: %10lu", asbd.mBytesPerPacket);
  2436. // NSLog (@" Frames per Packet: %10lu", asbd.mFramesPerPacket);
  2437. // NSLog (@" Bytes per Frame: %10lu", asbd.mBytesPerFrame);
  2438. // NSLog (@" Channels per Frame: %10lu", asbd.mChannelsPerFrame);
  2439. // NSLog (@" Bits per Channel: %10lu", asbd.mBitsPerChannel);
  2440. }
  2441. - (void) printErrorMessage: (NSString *) errorString withStatus: (OSStatus) result {
  2442. char str[20];
  2443. // see if it appears to be a 4-char-code
  2444. *(UInt32 *)(str + 1) = CFSwapInt32HostToBig(result);
  2445. if (isprint(str[1]) && isprint(str[2]) && isprint(str[3]) && isprint(str[4])) {
  2446. str[0] = str[5] = '\'';
  2447. str[6] = '\0';
  2448. } else
  2449. // no, format it as an integer
  2450. sprintf(str, "%d", (int)result);
  2451. // fprintf(stderr, "Error: %s (%s)\n", operation, str);
  2452. // NSLog (
  2453. // @"*** %@ error: %s\n",
  2454. // errorString,
  2455. // str
  2456. // );
  2457. }
  2458. -(BOOL)createFile{
  2459. if(!isOutputer)
  2460. return NO;
  2461. AudioStreamBasicDescription audioFormat;
  2462. audioFormat.mSampleRate = self.graphSampleRate;
  2463. audioFormat.mFormatID = kAudioFormatLinearPCM;
  2464. //audioFormat.mFormatID = kAudioFormatMPEG4AAC;
  2465. //audioFormat.mFormatFlags = kAudioFormatFlagsAudioUnitCanonical;
  2466. audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
  2467. audioFormat.mFramesPerPacket = 1;
  2468. audioFormat.mChannelsPerFrame = 1;
  2469. audioFormat.mBitsPerChannel = 16;
  2470. audioFormat.mBytesPerPacket = 2*audioFormat.mChannelsPerFrame;
  2471. audioFormat.mBytesPerFrame = 2*audioFormat.mChannelsPerFrame;
  2472. // On initialise le fichier audio
  2473. NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
  2474. NSString *documentsDirectory = [paths objectAtIndex:0];
  2475. NSString *destinationFilePath = outputAudioFile;
  2476. // NSLog(@">>> %@", destinationFilePath);
  2477. CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
  2478. OSStatus status = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &audioFile);
  2479. CFRelease(destinationURL);
  2480. status = ExtAudioFileSetProperty(audioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
  2481. status = ExtAudioFileWriteAsync(audioFile, 0, NULL);//必须要有,能初始化
  2482. // NSLog(@"ExtAudioFileWriteAsync=%d",status);
  2483. /*AudioConverterRef m_encoderConverter;
  2484. status = AudioConverterNew ( &monoStreamFormat,
  2485. &audioFormat,
  2486. &m_encoderConverter);*/
  2487. }
  2488. -(BOOL)openFile{
  2489. NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
  2490. NSString *documentsDirectory = [paths objectAtIndex:0];
  2491. NSString *source = [[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory] ;
  2492. // NSLog(@">>> %@", source);
  2493. CFURLRef sourceURL;
  2494. sourceURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)source, kCFURLPOSIXPathStyle, false);
  2495. // open the source file
  2496. ExtAudioFileOpenURL(sourceURL, &audioFile);
  2497. // get the source data format
  2498. UInt32 size = sizeof(stereoStreamFormat);
  2499. ExtAudioFileGetProperty(audioFile, kExtAudioFileProperty_FileDataFormat, &size, &stereoStreamFormat);
  2500. size = sizeof(stereoStreamFormat);
  2501. ExtAudioFileSetProperty(audioFile, kExtAudioFileProperty_ClientDataFormat, size, &stereoStreamFormat);
  2502. UInt64 numFrames = 0;
  2503. UInt32 propSize = sizeof(numFrames);
  2504. ExtAudioFileGetProperty(audioFile, kExtAudioFileProperty_FileLengthFrames, &propSize, &numFrames);
  2505. /*UInt32 n;
  2506. OSStatus status = ExtAudioFileRead(audioFile, &n, bufferList);*/
  2507. // NSLog(@"%d,%d",numFrames,propSize);
  2508. }
// Return a pointer to the sound struct at 'index' in soundStructArray.
// No bounds checking is performed; the caller must pass a valid index.
-(soundStructPtr) getSoundArray:(int)index{
return &(soundStructArray[index]);
}
// Custom setter: point the player at a new source file, release the previous
// CFURL, load the file into memory, and enable/unmute playback on bus 0.
// Passing nil clears the current import file without loading anything.
// NOTE: the CFRelease/assignment ordering here is deliberate; do not reorder.
-(void) setImportAudioFile:(NSString *)str{
// Only meaningful when this instance acts as a player.
if( !isPlayer)
return;
// Replacing an existing file: drop the old string and release the old URL.
if(str != importAudioFile && importAudioFile){
// [importAudioFile release];
importAudioFile = nil;
if(sourceURLArray[0]!=NULL){
CFRelease(sourceURLArray[0]);
sourceURLArray[0] = NULL;
}
}
// nil just clears the current file (handled above).
if(str == nil)
return;
// importAudioFile = [str retain];
importAudioFile = str;
// Build the CFURL for the new file; ownership transfers to sourceURLArray[0]
// and is released on the next replacement (Create rule).
CFURLRef sourceURL;
sourceURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)importAudioFile, kCFURLPOSIXPathStyle, false);
sourceURLArray[0] = sourceURL;
// Load samples, enable mixer bus 0, and restore full player volume.
[self readAudioFilesIntoMemory];
[self enableMixerInput:0 isOn:1];
self.volumePlayer = 1;
// NSLog(@"recorder.openfile");
}
  2535. -(void) setOutputAudioFile:(NSString *)str{
  2536. // if(str != outputAudioFile && outputAudioFile)
  2537. // [outputAudioFile release];
  2538. if(str == nil)
  2539. return;
  2540. // outputAudioFile = [str retain];
  2541. outputAudioFile = str;
  2542. isOutputMp3 = [outputAudioFile rangeOfString:@".mp3"].location != NSNotFound;
  2543. if(isOutputMp3)
  2544. [self createMp3File];
  2545. else
  2546. [self createFile];
  2547. // NSLog(@"recorder.createFile");
  2548. }
// Custom setter: apply the recorder volume to mixer input bus 2 and cache it.
-(void) setVolumeRecorder:(float)n{
[self setMixerInput:2 gain:n];
volumeRecorder = n;
// NSLog(@"recorder.volRecord=%f",n);
}
// Custom setter: apply the player volume to mixer input bus 0 and cache it.
-(void) setVolumePlayer:(float)n{
[self setMixerInput:0 gain:n];
volumePlayer = n;
// NSLog(@"recorder.volPlay=%f",n);
}
  2559. -(void) mute{
  2560. int n = 0;
  2561. [self setMixerInput:2 gain:n];
  2562. [self setMixerInput:0 gain:n];
  2563. }
  2564. -(void)writeAudioFile:(int)totalFrames{
  2565. AudioBufferList *bufferList;
  2566. int channelCount=1;
  2567. bufferList = (AudioBufferList *) malloc (
  2568. sizeof (AudioBufferList) + sizeof (AudioBuffer) * (channelCount - 1)
  2569. );
  2570. if (NULL == bufferList) {
  2571. // NSLog (@"*** malloc failure for allocating bufferList memory");
  2572. return;
  2573. }
  2574. // initialize the mNumberBuffers member
  2575. bufferList->mNumberBuffers = channelCount;
  2576. /*
  2577. AudioBuffer emptyBuffer = {0};
  2578. size_t arrayIndex;
  2579. for (arrayIndex = 0; arrayIndex < channelCount; arrayIndex++) {
  2580. bufferList->mBuffers[arrayIndex] = emptyBuffer;
  2581. }*/
  2582. UInt32 bufferLen = totalFrames*sizeof(AudioSampleType);
  2583. SInt16 *out16SamplesLeft = malloc(bufferLen);
  2584. memset(out16SamplesLeft, 0, totalFrames);
  2585. // set up the AudioBuffer structs in the buffer list
  2586. bufferList->mBuffers[0].mNumberChannels = 1;
  2587. bufferList->mBuffers[0].mDataByteSize = totalFrames * sizeof (AudioSampleType);
  2588. bufferList->mBuffers[0].mData = out16SamplesLeft;
  2589. OSStatus status;
  2590. status = ExtAudioFileWriteAsync(audioFile, totalFrames, bufferList);
  2591. }
// Open the MP3 output file and configure a LAME encoder for mono VBR at the
// graph sample rate.
// NOTE(review): plain C calls like fopen/lame_* do not raise Objective-C
// exceptions, so these @try/@catch blocks are effectively inert; also fopen's
// result is not checked for NULL before later fwrite calls — confirm and
// harden when touching this path.
- (void) createMp3File
{
// NSLog(@"createMp3File");
@try {
// Open the destination file for binary writing.
_mp3 = fopen([outputAudioFile cStringUsingEncoding:1], "wb"); //output
lame = lame_init();
@try {
// Mono input/output at the graph sample rate, VBR with a 64 kbps
// base rate, quality level 2 (high quality, slower).
lame_set_num_channels(lame,1);
lame_set_out_samplerate(lame, graphSampleRate);
lame_set_in_samplerate(lame, graphSampleRate);
lame_set_VBR(lame, vbr_default);
lame_set_brate(lame, 64);
lame_set_mode(lame, MONO);
lame_set_quality(lame, 2);
// id3tag_set_title(lame, [songName cStringUsingEncoding:1]);
// id3tag_set_artist(lame,[g_.jxServer.user_name cStringUsingEncoding:1]);
// lame_set_num_samples(lame,2^16-1);
// lame_set_bWriteVbrTag(lame, 0);
}
@catch (NSException *exception) {
// NSLog(@"%@",[exception description]);
}
// Finalize the encoder configuration; must be called before encoding.
lame_init_params(lame);
}
@catch (NSException *exception) {
// NSLog(@"%@",[exception description]);
}
@finally {
}
}
  2622. -(BOOL) writeMp3Buffer:(void*)buffer_l nSamples:(int)nSamples
  2623. {
  2624. int write = lame_encode_buffer(lame, buffer_l, NULL, nSamples,
  2625. _mp3_buffer, MP3_SIZE);
  2626. // _mp3_buffer, 1.25*nSamples + 7200);
  2627. // NSLog(@"writeMp3Buffer=%d",write);
  2628. fwrite(_mp3_buffer, write, 1, _mp3);
  2629. return noErr;
  2630. }
  2631. - (void) closeMp3File{
  2632. int write = lame_encode_flush(lame, _mp3_buffer, MP3_SIZE);
  2633. // NSLog(@"closeMp3File=%d",write);
  2634. fwrite(_mp3_buffer, write, 1, _mp3);
  2635. lame_close(lame);
  2636. fclose(_mp3);
  2637. }
  2638. -(void)delete{
  2639. if([[NSFileManager defaultManager] fileExistsAtPath:outputAudioFile])
  2640. [[NSFileManager defaultManager] removeItemAtPath:outputAudioFile error:nil];
  2641. }
  2642. @end