I had a problem with the audio and video tracks missing from the output file; that problem is gone now and my code works fine. You can use it to merge an audio file and a video file together.
Code:
private void muxing() {
    // Needed imports: android.content.res.AssetFileDescriptor, android.media.MediaCodec,
    // android.media.MediaExtractor, android.media.MediaFormat, android.media.MediaMuxer,
    // android.os.Environment, android.util.Log, android.widget.Toast,
    // java.io.File, java.io.IOException, java.nio.ByteBuffer.
    // TAG and audioFilePath are fields defined elsewhere in the Activity.
    String outputFile = "";
    try {
        File file = new File(Environment.getExternalStorageDirectory() + File.separator + "final2.mp4");
        file.createNewFile();
        outputFile = file.getAbsolutePath();

        // Video source: an MP4 shipped in the app's assets
        MediaExtractor videoExtractor = new MediaExtractor();
        AssetFileDescriptor afdd = getAssets().openFd("Produce.MP4");
        videoExtractor.setDataSource(afdd.getFileDescriptor(), afdd.getStartOffset(), afdd.getLength());

        // Audio source: a file on disk
        MediaExtractor audioExtractor = new MediaExtractor();
        audioExtractor.setDataSource(audioFilePath);

        Log.d(TAG, "Video Extractor Track Count " + videoExtractor.getTrackCount());
        Log.d(TAG, "Audio Extractor Track Count " + audioExtractor.getTrackCount());

        MediaMuxer muxer = new MediaMuxer(outputFile, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

        // Register one video track and one audio track with the muxer
        videoExtractor.selectTrack(0);
        MediaFormat videoFormat = videoExtractor.getTrackFormat(0);
        int videoTrack = muxer.addTrack(videoFormat);

        audioExtractor.selectTrack(0);
        MediaFormat audioFormat = audioExtractor.getTrackFormat(0);
        int audioTrack = muxer.addTrack(audioFormat);

        Log.d(TAG, "Video Format " + videoFormat.toString());
        Log.d(TAG, "Audio Format " + audioFormat.toString());

        boolean sawEOS = false;
        int frameCount = 0;
        int offset = 100;
        int sampleSize = 256 * 1024;
        ByteBuffer videoBuf = ByteBuffer.allocate(sampleSize);
        ByteBuffer audioBuf = ByteBuffer.allocate(sampleSize);
        MediaCodec.BufferInfo videoBufferInfo = new MediaCodec.BufferInfo();
        MediaCodec.BufferInfo audioBufferInfo = new MediaCodec.BufferInfo();

        videoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);
        audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);

        muxer.start();

        // Copy every video sample into the muxer
        while (!sawEOS) {
            videoBufferInfo.offset = offset;
            videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, offset);
            if (videoBufferInfo.size < 0) {
                Log.d(TAG, "saw input EOS.");
                sawEOS = true;
                videoBufferInfo.size = 0;
            } else {
                videoBufferInfo.presentationTimeUs = videoExtractor.getSampleTime();
                // The extractor's sync-sample flag lines up with BUFFER_FLAG_SYNC_FRAME
                videoBufferInfo.flags = videoExtractor.getSampleFlags();
                muxer.writeSampleData(videoTrack, videoBuf, videoBufferInfo);
                videoExtractor.advance();
                frameCount++;
                Log.d(TAG, "Frame (" + frameCount + ") Video PresentationTimeUs:" + videoBufferInfo.presentationTimeUs + " Flags:" + videoBufferInfo.flags + " Size(KB) " + videoBufferInfo.size / 1024);
            }
        }

        Toast.makeText(getApplicationContext(), "frame:" + frameCount, Toast.LENGTH_SHORT).show();

        // Then copy every audio sample into the muxer
        boolean sawEOS2 = false;
        int frameCount2 = 0;
        while (!sawEOS2) {
            frameCount2++;
            audioBufferInfo.offset = offset;
            audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, offset);
            if (audioBufferInfo.size < 0) {
                Log.d(TAG, "saw input EOS.");
                sawEOS2 = true;
                audioBufferInfo.size = 0;
            } else {
                audioBufferInfo.presentationTimeUs = audioExtractor.getSampleTime();
                audioBufferInfo.flags = audioExtractor.getSampleFlags();
                muxer.writeSampleData(audioTrack, audioBuf, audioBufferInfo);
                audioExtractor.advance();
                Log.d(TAG, "Frame (" + frameCount2 + ") Audio PresentationTimeUs:" + audioBufferInfo.presentationTimeUs + " Flags:" + audioBufferInfo.flags + " Size(KB) " + audioBufferInfo.size / 1024);
            }
        }

        Toast.makeText(getApplicationContext(), "frame:" + frameCount2, Toast.LENGTH_SHORT).show();

        muxer.stop();
        muxer.release();
    } catch (IOException e) {
        Log.d(TAG, "Mixer Error 1 " + e.getMessage());
    } catch (Exception e) {
        Log.d(TAG, "Mixer Error 2 " + e.getMessage());
    }
}
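One thing the code above glosses over: both extractors hardcode track index 0, which only works when the first track happens to be the one you want. If a file carries its tracks in a different order, you can look a track up by MIME type instead. Here is a minimal sketch, assuming the same MediaExtractor objects as above (findTrack is a hypothetical helper, not an Android API):

private static int findTrack(MediaExtractor extractor, String mimePrefix) {
    // Return the first track whose MIME type starts with the given prefix
    // ("video/" or "audio/"), or -1 if no track matches.
    for (int i = 0; i < extractor.getTrackCount(); i++) {
        MediaFormat format = extractor.getTrackFormat(i);
        String mime = format.getString(MediaFormat.KEY_MIME);
        if (mime != null && mime.startsWith(mimePrefix)) {
            return i;
        }
    }
    return -1;
}

// Usage, replacing the hardcoded 0s in muxing():
// int videoIndex = findTrack(videoExtractor, "video/");
// videoExtractor.selectTrack(videoIndex);
// MediaFormat videoFormat = videoExtractor.getTrackFormat(videoIndex);

Also note that muxing() copies samples synchronously, so it is better called from a background thread; the Toast calls would then need to be posted back via runOnUiThread.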
Thanks to these code examples: the MediaMuxer code examples were really helpful.