HTML 5: AudioContext AudioBuffer

I need to understand how the audio buffer works and use it. I want to implement the following chain: Microphone → Auto → Processor → Manual → Buffer → Auto → Speakers. "Auto" means automatic data transfer, and "Manual" means I move the data myself in code, inside processor.onaudioprocess. So I have the following code:

// Legacy getUserMedia shim for prefixed implementations (modern code would
// use navigator.mediaDevices.getUserMedia instead).
navigator.getUserMedia = navigator.getUserMedia ||navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
var audioContext;
var myAudioBuffer;
var microphone;
var speakers;
if (navigator.getUserMedia) {
    navigator.getUserMedia(
        {audio: true}, 
        function(stream) {
            audioContext = new AudioContext();
            //STEP 1 - we create buffer and its node
            speakers = audioContext.destination;
            // NOTE(review): 22050 frames at a declared 44100 Hz rate is only
            // half a second of audio — likely not what was intended.
            myAudioBuffer = audioContext.createBuffer(1, 22050, 44100);
            var bufferNode = audioContext.createBufferSource();
            bufferNode.buffer = myAudioBuffer;
            bufferNode.connect(speakers);
            // BUG: a buffer source is one-shot — start() plays the buffer
            // immediately, while it is still all zeros (silence).  Samples
            // written into myAudioBuffer later are never heard; answer #1
            // below creates a fresh source per block for this reason.
            bufferNode.start();

            //STEP 2- we create microphone and processor
            microphone = audioContext.createMediaStreamSource(stream);
            var processor = (microphone.context.createScriptProcessor || 
                microphone.context.createJavaScriptNode).call(microphone.context,4096, 1, 1);
            processor.onaudioprocess = function(audioProcessingEvent) {
                var inputBuffer = audioProcessingEvent.inputBuffer;
                var inputData = inputBuffer.getChannelData(0); // we have only one channel
                var nowBuffering = myAudioBuffer.getChannelData(0);
                for (var sample = 0; sample < inputBuffer.length; sample++) {
                  nowBuffering[sample] = inputData[sample];
                }
            }

            // BUG: the processor node is never connected to a destination, so
            // in some browsers the onaudioprocess callback never fires at all
            // (this is the "secret sauce" pointed out in the answers below).
            microphone.connect(processor);                    

        },
        function() {
            console.log("Error 003.")
        });
}

However, this code does not work. No mistakes, only silence. Where is my mistake?

+4
source share
2 answers

Do you get silence (i.e. the onprocess call is called, but the buffers are empty) or nothing (i.e. your onprocess is never called)?

Also, you need to connect the processor node to context.destination — otherwise its audioprocess event may never fire.

+2

The OP's code did not work for me either, so I put together a JSFiddle. It took me a while to find the "secret sauce": the processor must be connected to a destination, or its audioprocess event is never triggered.

// Shim for prefixed getUserMedia implementations.
navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

// TODO: Figure out what else we need and give the user feedback if he doesn't
// support microphone input.
if (navigator.getUserMedia) {
  captureMicrophone();
}

/**
 * First Step - Capture microphone and process the input.
 *
 * Routes: microphone -> script processor -> (muted gain) -> speakers, while
 * each processed block is copied into an AudioBuffer and replayed through a
 * fresh buffer source node.  The muted pass-through exists only because some
 * browsers do not fire 'audioprocess' unless the processor reaches the
 * destination.
 */
function captureMicrophone() {
  // process input from microphone
  const processAudio = ev =>
      processBuffer(ev.inputBuffer.getChannelData(CHANNEL));

  // setup media stream from microphone
  const microphoneStream = stream => {
    const microphone = audioContext.createMediaStreamSource(stream);
    microphone.connect(processor);
    // #1 If we don't pass through to speakers 'audioprocess' won't be triggerd
    processor.connect(mute);
  };
  // TODO: Handle error properly (see todo above - but probably more specific)
  const userMediaError = err => console.error(err);

  // Second step - Process buffer and output to speakers.  A buffer source is
  // one-shot, so a new one is created for every block.
  const processBuffer = buffer => {
    audioBuffer.getChannelData(CHANNEL).set(buffer);
    // We could move this out but that would affect audio quality
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(speakers);
    source.start();
  }

  const audioContext = new AudioContext();
  const speakers = audioContext.destination;
  // We currently only operate on this channel we might need to add a couple
  // lines of code if this fact changes
  const CHANNEL = 0;
  const CHANNELS = 1;
  const BUFFER_SIZE = 4096;
  const audioBuffer = audioContext.createBuffer(CHANNELS, BUFFER_SIZE, audioContext.sampleRate);

  const processor = audioContext.createScriptProcessor(BUFFER_SIZE, CHANNELS, CHANNELS);

  // #2 Not needed we could directly pass through to speakers since there no
  // data anyway but just to be sure that we don't output anything
  const mute = audioContext.createGain();
  mute.gain.value = 0;
  mute.connect(speakers);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}

Be warned: you will hear your own voice played back with a short delay, which can cause feedback — use headphones while testing.

If you only want to record the microphone input, take a look at Recorder.js.

If you don't strictly need createScriptProcessor and createBuffer, you can simplify this considerably. Since you asked about processing the data manually, here is a minimal version that copies the input to the output by hand.

Try it in this JSFiddle!

// Shim for prefixed getUserMedia implementations.
navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

if (navigator.getUserMedia) {
  captureMicrophone();
}

/**
 * Routes microphone audio through a ScriptProcessorNode to the speakers,
 * copying each 4096-frame mono block from the input buffer to the output
 * buffer sample by sample.
 */
function captureMicrophone() {
  const ctx = new AudioContext();
  const destination = ctx.destination;
  const scriptNode = ctx.createScriptProcessor(4096, 1, 1);

  // Per-block handler: duplicate channel 0 of the input into the output.
  function copyBlock(ev) {
    const CHANNEL = 0;
    const input = ev.inputBuffer.getChannelData(CHANNEL);
    const output = ev.outputBuffer.getChannelData(CHANNEL);

    // TODO: manually do something with the audio
    let i = 0;
    while (i < ev.inputBuffer.length) {
      output[i] = input[i];
      ++i;
    }
  }

  // Wire microphone -> processor -> speakers once the stream is available.
  function onStream(stream) {
    const microphone = ctx.createMediaStreamSource(stream);
    microphone.connect(scriptNode);
    scriptNode.connect(destination);
  }

  // TODO: handle error properly
  function onError(err) {
    console.error(err);
  }

  scriptNode.addEventListener('audioprocess', copyBlock);
  navigator.getUserMedia({audio: true}, onStream, onError);
}
+4

All Articles