Format of the audio data recorded with Audio Queue Services

I am writing an iPhone application that should record the user's voice and pass the audio data to a library for processing such as changing the tempo and pitch. I started with Apple's SpeakHere example code:

http://developer.apple.com/library/ios/#samplecode/SpeakHere/Introduction/Intro.html

This project lays the foundation for recording a user's voice and playing it. It works well.

Now I am diving into the code, and I need to figure out how to feed the audio data to the SoundTouch library ( http://www.surina.net/soundtouch/ ) to change the pitch. I got acquainted with the Audio Queue framework by reading the code, and found the place where I receive the audio data from the recording.

Essentially, you call AudioQueueNewInput to create a new input queue. You pass in a callback function that is called every time a chunk of audio data is available. It is in this callback that I need to pass the chunks of data to SoundTouch.

I have everything set up, but the audio I play back after running it through SoundTouch is mostly static noise (it barely resembles the original). If I skip SoundTouch and play back the original audio, it works fine.

Basically, I am missing something about what the data I receive actually represents. I assumed I was getting a stream of shorts, one sample for each channel, which is what SoundTouch expects, so that assumption must be wrong somewhere.

Here is the code I am working with. First, setting up the audio format:

void AQRecorder::SetupAudioFormat(UInt32 inFormatID)
{
memset(&mRecordFormat, 0, sizeof(mRecordFormat));

UInt32 size = sizeof(mRecordFormat.mSampleRate);
XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
                                          &size, 
                                          &mRecordFormat.mSampleRate), "couldn't get hardware sample rate");

size = sizeof(mRecordFormat.mChannelsPerFrame);
XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels, 
                                          &size, 
                                          &mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");

mRecordFormat.mFormatID = inFormatID;
if (inFormatID == kAudioFormatLinearPCM)
{
    // if we want pcm, default to signed 16-bit little-endian
    mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    mRecordFormat.mBitsPerChannel = 16;
    mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
    mRecordFormat.mFramesPerPacket = 1;
}
}

Then, when recording starts, the format is set up and the input queue is created:

    SetupAudioFormat(kAudioFormatLinearPCM);

    // create the queue
    XThrowIfError(AudioQueueNewInput(
                                  &mRecordFormat,
                                  MyInputBufferHandler,
                                  this /* userData */,
                                  NULL /* run loop */, NULL /* run loop mode */,
                                  0 /* flags */, &mQueue), "AudioQueueNewInput failed");

And finally, the callback that is invoked with each chunk of recorded audio, where I hand the data to SoundTouch:

void AQRecorder::MyInputBufferHandler(void *inUserData,
                                  AudioQueueRef inAQ,
                                  AudioQueueBufferRef inBuffer,
                                  const AudioTimeStamp *inStartTime,
                                  UInt32 inNumPackets,
                                  const AudioStreamPacketDescription *inPacketDesc) {
AQRecorder *aqr = (AQRecorder *)inUserData;
try {
        if (inNumPackets > 0) {
            CAStreamBasicDescription queueFormat = aqr->DataFormat();
            SoundTouch *soundTouch = aqr->getSoundTouch();

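            // Feed the recorded buffer to SoundTouch. putSamples() takes the number of
            // sample frames (samples per channel), hence dividing the byte size by
            // 2 bytes per 16-bit sample and by the channel count.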
            soundTouch->putSamples((const SAMPLETYPE *)inBuffer->mAudioData,
                                   inBuffer->mAudioDataByteSize / 2 / queueFormat.NumberChannels());

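            // Pull processed samples back out of SoundTouch in blocks of up to
            // 10000 frames per channel and append them to the recording file.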
            SAMPLETYPE *samples = (SAMPLETYPE *)malloc(sizeof(SAMPLETYPE) * 10000 * queueFormat.NumberChannels());
            UInt32 numSamples;
            while((numSamples = soundTouch->receiveSamples((SAMPLETYPE *)samples, 10000))) {
                // write packets to file
                XThrowIfError(AudioFileWritePackets(aqr->mRecordFile,
                                                    FALSE,
                                                    numSamples * 2 * queueFormat.NumberChannels(),
                                                    NULL,
                                                    aqr->mRecordPacket,
                                                    &numSamples,
                                                    samples),
                              "AudioFileWritePackets failed");
                aqr->mRecordPacket += numSamples;
            }
            free(samples);
        }

        // if we're not stopping, re-enqueue the buffer so that it gets filled again
        if (aqr->IsRunning())
            XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
} catch (CAXException e) {
    char buf[256];
    fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}

As you can see, I pass inBuffer->mAudioData to SoundTouch. What I can't figure out is: what exactly is the format of the data in mAudioData?
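
A quick way to confirm what is actually in mAudioData is to dump the fields of mRecordFormat once SetupAudioFormat has run. This is only a debugging sketch (DumpRecordFormat is a hypothetical helper, not part of SpeakHere):

    // Debug sketch: print the record format so we know exactly what lands in mAudioData.
    void AQRecorder::DumpRecordFormat()
    {
        printf("sample rate:        %.0f Hz\n", mRecordFormat.mSampleRate);
        printf("channels per frame: %u\n", (unsigned)mRecordFormat.mChannelsPerFrame);
        printf("bits per channel:   %u\n", (unsigned)mRecordFormat.mBitsPerChannel);
        printf("bytes per frame:    %u\n", (unsigned)mRecordFormat.mBytesPerFrame);
        printf("signed integer:     %d\n",
               (mRecordFormat.mFormatFlags & kLinearPCMFormatFlagIsSignedInteger) != 0);
        printf("big-endian:         %d\n",
               (mRecordFormat.mFormatFlags & kAudioFormatFlagIsBigEndian) != 0);
    }

With the SetupAudioFormat above, this should report 16-bit signed, packed, interleaved linear PCM at the hardware sample rate.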

This sounds like an endianness problem, i.e. big-endian vs. little-endian samples, or possibly a signed vs. unsigned mismatch. Look at the mFormatFlags of the AudioStreamBasicDescription to find out what you are actually getting. If the samples turn out to be unsigned, you will have to convert them (e.g., newSample = sample + 0x8000).
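
A minimal sketch of that check, written to sit inside MyInputBufferHandler above; it assumes the 16-bit linear PCM set up in SetupAudioFormat and reuses queueFormat and inBuffer from the callback:

    // Sketch: inspect the format flags and, if the samples are unsigned,
    // shift them into signed range in place before handing them to SoundTouch.
    bool isSigned    = (queueFormat.mFormatFlags & kLinearPCMFormatFlagIsSignedInteger) != 0;
    bool isBigEndian = (queueFormat.mFormatFlags & kAudioFormatFlagIsBigEndian) != 0;
    // (if isBigEndian is set, each sample also needs the byte swap shown further down)

    if (!isSigned) {
        UInt16 *raw  = (UInt16 *)inBuffer->mAudioData;
        SInt16 *out  = (SInt16 *)inBuffer->mAudioData;              // convert in place
        UInt32 count = inBuffer->mAudioDataByteSize / sizeof(UInt16);
        for (UInt32 i = 0; i < count; ++i)
            out[i] = (SInt16)(raw[i] + 0x8000);                     // unsigned -> signed
    }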


For reference, this is how you can swap the byte order of a 16-bit sample:

sample_le = (0xff00 & (sample_be << 8)) | (0x00ff & (sample_be >> 8));
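
Applied to a whole recorded buffer of 16-bit samples, that swap might look like the sketch below (SwapSampleBytes is not a name from the original code):

    // Sketch: byte-swap every 16-bit sample in a buffer, in place.
    static void SwapSampleBytes(SInt16 *samples, UInt32 count)
    {
        for (UInt32 i = 0; i < count; ++i) {
            UInt16 sample_be = (UInt16)samples[i];
            samples[i] = (SInt16)((0xff00 & (sample_be << 8)) | (0x00ff & (sample_be >> 8)));
        }
    }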
