MF SinkWriter mp4 file only plays for half the duration of the added audio samples, and the video plays back at twice the speed

Problem description:

I created a managed C++ library for my C# project to encode images and audio into an mp4 container, based on the MSDN SinkWriter tutorial. To test whether the result works, I wrote a method that supplies 600 frames. These frames represent a 10-second video at 60 frames per second.

The picture I supply changes every second, and my audio file contains a voice counting up to 10.

The problem I'm facing is that the output video is actually only about 5 seconds long. The video's metadata says it is 10 seconds, but it isn't. In addition, the voice barely counts up to 5.

If I write only the image samples, without the audio part, the video duration is the expected 10 seconds.

What am I missing here?

Here are the relevant parts of my application.

This is the C# part I use to create the 600 frames. It is also where I call the PushFrame method.

var videoFrameCount = 10 * FPS; 
SetBinaryImage(); 

for (int i = 0; i <= videoFrameCount; i++) 
{ 
    // New picture every second 
    if (i > 0 && i % FPS == 0) 
    { 
     SetBinaryImage(); 
    } 

    PushFrame(); 
} 

The PushFrame method copies the image and audio data to the pointers provided by the SinkWriter, and then calls the SinkWriter's PushFrame method.

private void PushFrame() 
{ 
    try 
    { 
     encodeStopwatch.Reset(); 
     encodeStopwatch.Start(); 

     // Video 
     var frameBufferHandler = GCHandle.Alloc(frameBuffer, GCHandleType.Pinned); 
     frameBufferPtr = frameBufferHandler.AddrOfPinnedObject(); 
     CopyImageDataToPointer(BinaryImage, ScreenWidth, ScreenHeight, frameBufferPtr); 

     // Audio 
     var audioBufferHandler = GCHandle.Alloc(audioBuffer, GCHandleType.Pinned); 
     audioBufferPtr = audioBufferHandler.AddrOfPinnedObject(); 
     var readLength = audioBuffer.Length; 

     if (BinaryAudio.Length - (audioOffset + audioBuffer.Length) < 0) 
     { 
      readLength = BinaryAudio.Length - audioOffset; 
     } 

     if (!EndOfFile) 
     { 
      Marshal.Copy(BinaryAudio, audioOffset, (IntPtr)audioBufferPtr, readLength); 
      audioOffset += audioBuffer.Length; 

     } 

     if (readLength < audioBuffer.Length && !EndOfFile) 
     { 
      EndOfFile = true; 
     } 

     unsafe 
     { 
      // Copy video data 
      var yuv = SinkWriter.VideoCapturerBuffer(); 
      SinkWriter.Encode((byte*)frameBufferPtr, ScreenWidth, ScreenHeight, (int)SWPF.SWPF_RGB, yuv); 

      // Copy audio data 
      var audioDestPtr = SinkWriter.AudioCapturerBuffer(); 
      SinkWriter.EncodeAudio((byte*)audioBufferPtr, audioDestPtr); 

      SinkWriter.PushFrame(); 
     } 

     encodeStopwatch.Stop(); 
     Console.WriteLine($"YUV frame generated in: {encodeStopwatch.TakeTotalMilliseconds()} ms"); 
    } 
    catch (Exception ex) 
    { 
    } 
} 

Here are some parts of the SinkWriter I added on the C++ side. The media types for the audio part should be fine, I think, because the audio playback works.

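Since the media-type setup is not shown, here is a minimal sketch of what an AAC output / PCM input configuration for the audio stream of a SinkWriter could look like. This is not the code from the question; the sample rate, channel count, bitrate and the function name AddAudioStream are assumptions made only for illustration.

#include <mfapi.h>
#include <mfidl.h>
#include <mfreadwrite.h>
#pragma comment(lib, "mfplat.lib")
#pragma comment(lib, "mfreadwrite.lib")
#pragma comment(lib, "mfuuid.lib")

HRESULT AddAudioStream(IMFSinkWriter *pWriter, DWORD *pAudioStreamIndex)
{
    IMFMediaType *pOut = NULL;
    IMFMediaType *pIn = NULL;

    // Output type: AAC in the mp4 container (44.1 kHz, stereo, 128 kbit/s assumed).
    HRESULT hr = MFCreateMediaType(&pOut);
    if (SUCCEEDED(hr)) hr = pOut->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio);
    if (SUCCEEDED(hr)) hr = pOut->SetGUID(MF_MT_SUBTYPE, MFAudioFormat_AAC);
    if (SUCCEEDED(hr)) hr = pOut->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, 44100);
    if (SUCCEEDED(hr)) hr = pOut->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, 2);
    if (SUCCEEDED(hr)) hr = pOut->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, 16);
    if (SUCCEEDED(hr)) hr = pOut->SetUINT32(MF_MT_AUDIO_AVG_BYTES_PER_SECOND, 16000);
    if (SUCCEEDED(hr)) hr = pWriter->AddStream(pOut, pAudioStreamIndex);

    // Input type: uncompressed PCM that matches the samples pushed in from the C# side.
    if (SUCCEEDED(hr)) hr = MFCreateMediaType(&pIn);
    if (SUCCEEDED(hr)) hr = pIn->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio);
    if (SUCCEEDED(hr)) hr = pIn->SetGUID(MF_MT_SUBTYPE, MFAudioFormat_PCM);
    if (SUCCEEDED(hr)) hr = pIn->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, 44100);
    if (SUCCEEDED(hr)) hr = pIn->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, 2);
    if (SUCCEEDED(hr)) hr = pIn->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, 16);
    if (SUCCEEDED(hr)) hr = pWriter->SetInputMediaType(*pAudioStreamIndex, pIn, NULL);

    if (pOut) pOut->Release();
    if (pIn) pIn->Release();
    return hr;
}
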
rtStart and rtDuration are defined like this:

LONGLONG rtStart = 0; 
UINT64 rtDuration; 
MFFrameRateToAverageTimePerFrame(fps, 1, &rtDuration); 

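As a side note, here is a quick standalone check (not from the question's code) of what this duration evaluates to at 60 fps:

#include <mfapi.h>
#include <cstdio>
#pragma comment(lib, "mfplat.lib")

int main()
{
    MFStartup(MF_VERSION);

    // 60 fps, as in the question.
    UINT64 rtDuration = 0;
    MFFrameRateToAverageTimePerFrame(60, 1, &rtDuration);

    // Prints roughly 166667 (units of 100 ns), i.e. about 16.7 ms per frame.
    printf("Average time per frame: %llu\n", rtDuration);

    MFShutdown();
    return 0;
}
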
The two buffers from the encoder are used like this:

int SinkWriter::Encode(Byte * rgbBuf, int w, int h, int pxFormat, Byte * yufBuf) 
{ 
    const LONG cbWidth = 4 * VIDEO_WIDTH; 
    const DWORD cbBuffer = cbWidth * VIDEO_HEIGHT; 

    // Create a new memory buffer. 
    HRESULT hr = MFCreateMemoryBuffer(cbBuffer, &pFrameBuffer); 

    // Lock the buffer and copy the video frame to the buffer. 
    if (SUCCEEDED(hr)) 
    { 
     hr = pFrameBuffer->Lock(&yufBuf, NULL, NULL); 
    } 

    if (SUCCEEDED(hr)) 
    { 
     // Calculate the stride 
     DWORD bitsPerPixel = GetBitsPerPixel(pxFormat); 
     DWORD bytesPerPixel = bitsPerPixel/8; 
     DWORD stride = w * bytesPerPixel; 

     // Copy image in yuv pointer 
     hr = MFCopyImage(
      yufBuf,      // Destination buffer. 
      stride,     // Destination stride. 
      rgbBuf,  // First row in source image. 
      stride,     // Source stride. 
      stride,     // Image width in bytes. 
      h    // Image height in pixels. 
     ); 
    } 

    if (pFrameBuffer) 
    { 
     pFrameBuffer->Unlock(); 
    } 

    // Set the data length of the buffer. 
    if (SUCCEEDED(hr)) 
    { 
     hr = pFrameBuffer->SetCurrentLength(cbBuffer); 
    } 

    if (SUCCEEDED(hr)) 
    { 
     return 0; 
    } 
    else 
    { 
     return -1; 
    } 

    return 0; 
} 

int SinkWriter::EncodeAudio(Byte * src, Byte * dest) 
{ 
    DWORD samplePerSecond = AUDIO_SAMPLES_PER_SECOND * AUDIO_BITS_PER_SAMPLE * AUDIO_NUM_CHANNELS; 
    DWORD cbBuffer = samplePerSecond/1000; 

    // Create a new memory buffer. 
    HRESULT hr = MFCreateMemoryBuffer(cbBuffer, &pAudioBuffer); 

    // Lock the buffer and copy the audio data to the buffer. 
    if (SUCCEEDED(hr)) 
    { 
     hr = pAudioBuffer->Lock(&dest, NULL, NULL); 
    } 

    CopyMemory(dest, src, cbBuffer); 

    if (pAudioBuffer) 
    { 
     pAudioBuffer->Unlock(); 
    } 

    // Set the data length of the buffer. 
    if (SUCCEEDED(hr)) 
    { 
     hr = pAudioBuffer->SetCurrentLength(cbBuffer); 
    } 

    if (SUCCEEDED(hr)) 
    { 
     return 0; 
    } 
    else 
    { 
     return -1; 
    } 

    return 0; 
} 

Here is the SinkWriter's PushFrame method, which passes streamIndex, audioStreamIndex, rtStart and rtDuration on to the WriteFrame method.

int SinkWriter::PushFrame() 
{ 
    if (initialized) 
    { 
     HRESULT hr = WriteFrame(ptrSinkWriter, stream, audio, rtStart, rtDuration); 
     if (FAILED(hr)) 
     { 
      return -1; 
     } 

     rtStart += rtDuration; 

     return 0; 
    } 

    return -1; 
} 

Here is the WriteFrame method, which combines the video and the audio sample.

HRESULT SinkWriter::WriteFrame(IMFSinkWriter *pWriter, DWORD streamIndex, DWORD audioStreamIndex, const LONGLONG& rtStart, const LONGLONG& rtDuration) 
{ 
    IMFSample *pVideoSample = NULL; 

    // Create a media sample and add the buffer to the sample. 
    HRESULT hr = MFCreateSample(&pVideoSample); 

    if (SUCCEEDED(hr)) 
    { 
     hr = pVideoSample->AddBuffer(pFrameBuffer); 
    } 
    if (SUCCEEDED(hr)) 
    { 
     pVideoSample->SetUINT32(MFSampleExtension_Discontinuity, FALSE); 
    } 
    // Set the time stamp and the duration. 
    if (SUCCEEDED(hr)) 
    { 
     hr = pVideoSample->SetSampleTime(rtStart); 
    } 
    if (SUCCEEDED(hr)) 
    { 
     hr = pVideoSample->SetSampleDuration(rtDuration); 
    } 

    // Send the sample to the Sink Writer. 
    if (SUCCEEDED(hr)) 
    { 
     hr = pWriter->WriteSample(streamIndex, pVideoSample); 
    } 

    // Audio 
    IMFSample *pAudioSample = NULL; 

    if (SUCCEEDED(hr)) 
    { 
     hr = MFCreateSample(&pAudioSample); 
    } 

    if (SUCCEEDED(hr)) 
    { 
     hr = pAudioSample->AddBuffer(pAudioBuffer); 
    } 

    // Set the time stamp and the duration. 
    if (SUCCEEDED(hr)) 
    { 
     hr = pAudioSample->SetSampleTime(rtStart); 
    } 
    if (SUCCEEDED(hr)) 
    { 
     hr = pAudioSample->SetSampleDuration(rtDuration); 
    } 
    // Send the sample to the Sink Writer. 
    if (SUCCEEDED(hr)) 
    { 
     hr = pWriter->WriteSample(audioStreamIndex, pAudioSample); 
    } 


    SafeRelease(&pVideoSample); 
    SafeRelease(&pFrameBuffer); 
    SafeRelease(&pAudioSample); 
    SafeRelease(&pAudioBuffer); 
    return hr; 
} 

The argument to 'pVideoSample->SetSampleTime' may be half of what you want. At the very least you should check this with a debugger and make sure of it. –


'SetSampleTime' should be fine, because when I remove the audio sample the duration and timing of the video are valid. But I will try doubling the time. Edit: I tried doubling it, but then the created video no longer works. After a few frames it takes a very long time to generate the remaining frames. – datoml

The problem was that the calculation of the audio buffer size was wrong. This is the correct calculation:

var avgBytesPerSecond = sampleRate * 2 * channels; 
var avgBytesPerMillisecond = avgBytesPerSecond/1000; 
var bufferSize = avgBytesPerMillisecond * (1000/60); 
audioBuffer = new byte[bufferSize]; 

In my question I had a buffer size that covered only 1 millisecond of audio. So it looks like the MF framework sped up the video so that the audio sounded right. After I corrected the buffer size, the video has the duration I expected and the audio plays without errors.
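
To make the numbers concrete, here is the corrected calculation worked through for 44.1 kHz, 16-bit stereo PCM at 60 fps (the actual audio format is not stated, so these values are only an example):

#include <cstdio>

int main()
{
    const int sampleRate = 44100;   // assumed
    const int channels   = 2;       // assumed
    const int fps        = 60;

    const int avgBytesPerSecond      = sampleRate * 2 * channels;             // 176400 (2 bytes per 16-bit sample)
    const int avgBytesPerMillisecond = avgBytesPerSecond / 1000;              // 176
    const int bufferSize             = avgBytesPerMillisecond * (1000 / fps); // 176 * 16 = 2816

    // Roughly 16 ms of audio per video frame (1000/60 is integer division,
    // so slightly less than the exact 16.67 ms), instead of only 1 ms of audio as before.
    printf("Audio bytes per video frame: %d\n", bufferSize);
    return 0;
}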