5.1 PCM Sound to 5.1 Dolby Digital in Windows C++ for SPDIF Playback - Stack Overflow

I try to write a program, that can convert 5.1 PCM Sound to 5.1 Dolby Digital Compressed Audio to use w

I am trying to write a program that can convert 5.1 PCM sound to 5.1 Dolby Digital compressed audio for use with my optical Toslink/SPDIF USB interface. For debugging purposes I use a sine-wave PCM signal on the FL and FR channels, which will later be replaced by a capture of a Windows audio render/sink. The problem: after compression with libavcodec, the buffer is too large for Windows (and the USB interface) to handle, so the memcpy in the loop causes a buffer overflow. Here is the code:

#include <iostream>
#include <vector>
#include <cmath>

#include <windows.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <ksmedia.h>
#include <propkey.h>
#include <functiondiscoverykeys_devpkey.h>
#include <sstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
}
// UUID definition by the videolan vlc player project
#define DEFINE_VLC_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
        EXTERN_C const GUID DECLSPEC_SELECTANY name \
                = { l, w1, w2, { b1, b2,  b3,  b4,  b5,  b6,  b7,  b8 } }


#define WAVE_FORMAT_DOLBY_AC3_SPDIF     0x0092 /* Sonic Foundry */
static const GUID __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF = { WAVE_FORMAT_DOLBY_AC3_SPDIF, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} };



// Function to get the default audio endpoint
// Resolve the system's default audio endpoint for the given data-flow
// direction (render/capture) and role (console/multimedia/communications).
// Returns a caller-owned IMMDevice* on success, or nullptr (after logging
// to stderr) if the enumerator cannot be created or no endpoint exists.
IMMDevice* GetDefaultAudioEndpoint(EDataFlow dataFlow, ERole role) {
    IMMDeviceEnumerator* deviceEnumerator = nullptr;
    HRESULT result = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
                                      __uuidof(IMMDeviceEnumerator), (void**)&deviceEnumerator);
    if (FAILED(result)) {
        std::cerr << "Could not create MMDeviceEnumerator." << std::endl;
        return nullptr;
    }

    IMMDevice* endpoint = nullptr;
    result = deviceEnumerator->GetDefaultAudioEndpoint(dataFlow, role, &endpoint);
    deviceEnumerator->Release(); // the enumerator is not needed beyond this call
    if (FAILED(result)) {
        std::cerr << "Could not get default audio endpoint." << std::endl;
        return nullptr;
    }

    return endpoint;
}


// Function to configure WAVEFORMATEXTENSIBLE for IEC 61937 (S/PDIF)
// Function to configure WAVEFORMATEXTENSIBLE for IEC 61937 (S/PDIF) passthrough.
// The S/PDIF transport always presents itself as 2-channel / 16-bit / 48 kHz
// PCM framing, regardless of how many channels are encoded inside the AC-3
// payload carried over it. The input_* parameters are currently unused but
// kept for interface compatibility with existing callers.
void configureSpdifWaveFormat(WAVEFORMATEXTENSIBLE* wf, AVSampleFormat input_sample_fmt, int input_sample_rate, int input_channels, GUID subtype) {
    wf->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    wf->Format.nChannels = 2; // Stereo for S/PDIF
    wf->Format.nSamplesPerSec = 48000;
    wf->Format.wBitsPerSample = 16;
    // BUG FIX: was hard-coded to 6 * 4 (= 24 bytes), which contradicts the
    // 2-channel / 16-bit format declared above. Per the WAVEFORMATEX contract,
    // nBlockAlign must equal nChannels * wBitsPerSample / 8 (= 4 here);
    // an inconsistent value makes IsFormatSupported/Initialize reject the
    // format or miscompute buffer frame counts.
    wf->Format.nBlockAlign = wf->Format.nChannels * wf->Format.wBitsPerSample / 8;
    wf->Format.nAvgBytesPerSec = wf->Format.nSamplesPerSec * wf->Format.nBlockAlign;
    wf->Format.cbSize = sizeof(*wf) - sizeof(wf->Format);
    wf->Samples.wValidBitsPerSample = wf->Format.wBitsPerSample;
    wf->dwChannelMask = KSAUDIO_SPEAKER_5POINT1;
    wf->SubFormat = subtype;
}



// Generate a 220 Hz sine on the front L/R channels of a 5.1 stream, encode it
// to AC-3 with libavcodec, and push the compressed packets to the default
// WASAPI render endpoint in exclusive mode as an S/PDIF passthrough stream.
// Returns 0 on clean shutdown, 1 on any setup failure.
int main() {
    // Sine wave parameters
    double frequency = 220.0;

    // Output AC-3 parameters
    int output_sample_rate = 44100;
    AVSampleFormat output_sample_fmt = AV_SAMPLE_FMT_FLTP; // AC-3 encoder expects planar float
    int output_channels = 6; // 5.1

    // ---- Initialize libavcodec ------------------------------------------
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AC3);
    if (!codec) {
        std::cerr << "AC-3 encoder not found." << std::endl;
        return 1;
    }
    AVCodecContext* c = avcodec_alloc_context3(codec);
    c->bit_rate = 384000;
    c->sample_rate = output_sample_rate;
    c->sample_fmt = output_sample_fmt;
    c->ch_layout = AV_CHANNEL_LAYOUT_5POINT1;

    if (avcodec_open2(c, codec, nullptr) < 0) {
        std::cerr << "avcodec_open2 failed." << std::endl;
        return 1;
    }

    AVFrame* frame = av_frame_alloc();
    frame->nb_samples = c->frame_size;
    frame->format = c->sample_fmt;
    frame->ch_layout = c->ch_layout;
    av_frame_get_buffer(frame, 0);

    // BUG FIX: keep the AVPacket behind its pointer so it can be released
    // with av_packet_free() at shutdown (the old AVPacket& alias leaked it).
    AVPacket* pkt = av_packet_alloc();

    // ---- Initialize COM and WASAPI --------------------------------------
    CoInitialize(nullptr);

    IMMDevice* renderDevice = GetDefaultAudioEndpoint(eRender, eMultimedia);
    if (!renderDevice) return 1;
    IAudioClient* audioClient = nullptr;
    renderDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
    WAVEFORMATEXTENSIBLE waveFormat;
    WAVEFORMATEX* closestMatch = nullptr;
    GUID ac3Subtype = __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF; // AC-3 GUID
    configureSpdifWaveFormat(&waveFormat, output_sample_fmt, output_sample_rate, output_channels, ac3Subtype);
    HRESULT hr = audioClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&waveFormat, &closestMatch);

    if (FAILED(hr)) {
        std::cerr << "IAudioClient_IsFormatSupported failed: " << hr << std::endl;
        return 1;
    }

    if (hr == S_FALSE) {
        std::cerr << "Closest matching format found: " << closestMatch->nChannels << std::endl;
        waveFormat = *(WAVEFORMATEXTENSIBLE*)closestMatch;
        CoTaskMemFree(closestMatch); // closestMatch IS CoTaskMem-allocated by WASAPI
    }

    hr = audioClient->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, 0, 0, 0, (WAVEFORMATEX*)&waveFormat, nullptr);
    if (FAILED(hr)) {
        std::cerr << "audioClient->Initialize failed: " << std::hex << (uint32_t) hr << std::endl;
        return 1;
    }
    IAudioRenderClient* renderClient = nullptr;
    hr = audioClient->GetService(__uuidof(IAudioRenderClient), (void**)&renderClient);
    if (FAILED(hr)) {
        std::cerr << "audioClient->GetService failed: " << hr << std::endl;
        audioClient->Release();
        return 1;
    }
    // BUG FIX: Start() must come AFTER Initialize()/GetService(); starting an
    // uninitialized client fails with AUDCLNT_E_NOT_INITIALIZED.
    audioClient->Start();

    // Bytes per S/PDIF frame (nBlockAlign); GetBuffer/ReleaseBuffer count in
    // FRAMES, not bytes, so packet sizes must be converted below.
    const UINT32 frameBytes = waveFormat.Format.nBlockAlign;

    double time = 0.0;
    while (true) {
        // The previous send may still hold a reference to the frame buffers;
        // make them writable before refilling.
        av_frame_make_writable(frame);

        // BUG FIX: the frame format is AV_SAMPLE_FMT_FLTP — planar float,
        // one plane per channel. The old code memcpy'd interleaved int16
        // into data[0], feeding the encoder garbage. Write float samples
        // into the FL/FR planes and silence the remaining four channels.
        float* frontL = (float*)frame->data[0];
        float* frontR = (float*)frame->data[1];
        for (int ch = 2; ch < output_channels; ++ch)
            memset(frame->data[ch], 0, c->frame_size * sizeof(float)); // C, LFE, RL, RR silent
        for (int i = 0; i < c->frame_size; ++i) {
            float sample = (float)sin(2.0 * M_PI * frequency * time);
            frontL[i] = sample;
            frontR[i] = sample;
            // BUG FIX: advance time at the encoder's sample rate (44100),
            // not the unrelated 48 kHz constant the old code used.
            time += 1.0 / c->sample_rate;
        }

        int ret = avcodec_send_frame(c, frame);
        if (ret < 0) break;

        while (ret >= 0) {
            ret = avcodec_receive_packet(c, pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
            if (ret < 0) break;

            // BUG FIX: GetBuffer()/ReleaseBuffer() take a count of audio
            // FRAMES; the old code passed pkt.size (bytes), over-requesting
            // by 4x and eventually hitting AUDCLNT_E_BUFFER_TOO_LARGE.
            UINT32 framesWanted = pkt->size / frameBytes;
            BYTE* renderData = nullptr;
            HRESULT bufHr;
            // The device drains its buffer in real time; if the packet does
            // not fit right now, wait briefly and retry rather than writing
            // through an invalid pointer (the old code never checked bufHr).
            while ((bufHr = renderClient->GetBuffer(framesWanted, &renderData)) == AUDCLNT_E_BUFFER_TOO_LARGE)
                Sleep(20);
            if (SUCCEEDED(bufHr)) {
                memcpy(renderData, pkt->data, pkt->size);
                renderClient->ReleaseBuffer(framesWanted, 0);
            } else {
                std::cerr << "IAudioRenderClient::GetBuffer failed: " << std::hex << (uint32_t) bufHr << std::endl;
            }

            av_packet_unref(pkt);
        }
    }

    audioClient->Stop();
    renderClient->Release();
    audioClient->Release();
    renderDevice->Release();
    // BUG FIX: removed CoTaskMemFree(&waveFormat) — waveFormat is a stack
    // variable, not CoTaskMem-allocated memory; freeing it corrupts the heap.
    CoUninitialize();

    av_packet_free(&pkt);
    av_frame_free(&frame);
    avcodec_free_context(&c);

    return 0;
}

The updated code:

#include <iostream>
#include <vector>
#include <cmath>

#include <windows.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <ksmedia.h>
#include <propkey.h>
#include <functiondiscoverykeys_devpkey.h>
#include <sstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
}

#define DEFINE_VLC_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
        EXTERN_C const GUID DECLSPEC_SELECTANY name \
                = { l, w1, w2, { b1, b2,  b3,  b4,  b5,  b6,  b7,  b8 } }
#define BIT_TO_BYTE / 8

#define WAVE_FORMAT_DOLBY_AC3_SPDIF     0x0092 /* Sonic Foundry */
static const GUID __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF = { WAVE_FORMAT_DOLBY_AC3_SPDIF, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} };



// Function to get the default audio endpoint
IMMDevice* GetDefaultAudioEndpoint(EDataFlow dataFlow, ERole role) {
    HRESULT hr;
    IMMDeviceEnumerator* enumerator = nullptr;
    IMMDevice* device = nullptr;

    hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)&enumerator);
    if (FAILED(hr)) {
        std::cerr << "Could not create MMDeviceEnumerator." << std::endl;
        return nullptr;
    }

    hr = enumerator->GetDefaultAudioEndpoint(dataFlow, role, &device);
    enumerator->Release();
    if (FAILED(hr)) {
        std::cerr << "Could not get default audio endpoint." << std::endl;
        return nullptr;
    }

    return device;
}


// Function to configure WAVEFORMATEXTENSIBLE for IEC 61937 (S/PDIF)
void configureSpdifWaveFormat(WAVEFORMATEXTENSIBLE* wf, AVSampleFormat input_sample_fmt, int input_sample_rate, int input_channels, GUID subtype) {
    wf->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    wf->Format.nChannels = 2; // Stereo for S/PDIF
    wf->Format.nSamplesPerSec = 48000;
    wf->Format.wBitsPerSample = 16;
    wf->Format.nBlockAlign = wf->Format.wBitsPerSample / 8 * wf->Format.nChannels;
    wf->Format.nAvgBytesPerSec = wf->Format.nSamplesPerSec * wf->Format.nBlockAlign;
    wf->Format.cbSize = sizeof(*wf) - sizeof(wf->Format);
    wf->Samples.wValidBitsPerSample = wf->Format.wBitsPerSample;
    wf->dwChannelMask = KSAUDIO_SPEAKER_5POINT1;
    wf->SubFormat = subtype;
}



int main() {
    // Sine wave parameters
    double frequency = 220.0;
    int sample_rate = 48000;
    AVSampleFormat input_sample_fmt = AV_SAMPLE_FMT_S16;
    int input_channels = 2;

    // Output AC-3 parameters
    int output_sample_rate = 44100;
    AVSampleFormat output_sample_fmt = AV_SAMPLE_FMT_FLTP;
    int output_channels = 6; // 5.1
    
    // Initialize COM and WASAPI
    CoInitialize(nullptr);

    IMMDevice* renderDevice = GetDefaultAudioEndpoint(eRender, eMultimedia);
    IAudioClient* audioClient;
    renderDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
    WAVEFORMATEXTENSIBLE waveFormat;
    WAVEFORMATEX* closestMatch = nullptr;
    GUID ac3Subtype = __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF; // AC-3 GUID
    configureSpdifWaveFormat(&waveFormat, output_sample_fmt, output_sample_rate, output_channels, ac3Subtype);
    HRESULT hr = audioClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&waveFormat, &closestMatch);

    if (FAILED(hr)) {
        std::cerr << "IAudioClient_IsFormatSupported failed: " << hr << std::endl;

        return 1;
    }

    if (hr == S_FALSE) {
        std::cerr << "Closest matching format found: " << closestMatch->nChannels << std::endl;
        waveFormat = *(WAVEFORMATEXTENSIBLE*)closestMatch;
        CoTaskMemFree(closestMatch);
    }
    audioClient->Start();
    IAudioRenderClient* renderClient;
    hr = audioClient->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, 0, 0, 0, (WAVEFORMATEX*)&waveFormat, nullptr);
    if (FAILED(hr)) {
        std::cerr << "audioClient->Initialize failed: " << std::hex << (uint32_t) hr << std::endl;
        return 1;
    }
    hr = audioClient->GetService(__uuidof(IAudioRenderClient), (void**)&renderClient);
    if (FAILED(hr)) {
        std::cerr << "audioClient->GetService failed: " << hr << std::endl;
        audioClient->Release();
        return 1;
    }

    

    // Initialize libavcodec
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AC3);
    AVCodecContext* c = avcodec_alloc_context3(codec);


    // Set output buffer size from IAudioClient::GetBufferSize
    UINT32 bufferFrameCount;
    audioClient->GetBufferSize(&bufferFrameCount);
    UINT32 bufferSizeInBit = bufferFrameCount * waveFormat.Format.nBlockAlign * 8;


    c->bit_rate = bufferSizeInBit;
    c->sample_rate = 44100;
    c->sample_fmt = AV_SAMPLE_FMT_FLTP;

    c->ch_layout = AV_CHANNEL_LAYOUT_5POINT1;

    avcodec_open2(c, codec, nullptr);

    AVFrame* frame = av_frame_alloc();
    frame->nb_samples = c->frame_size;
    frame->format = c->sample_fmt;
    frame->ch_layout = c->ch_layout;
    av_frame_get_buffer(frame, 0);

    
    std::vector<int16_t> output_buffer(c->frame_size * output_channels);




    AVPacket& pkt = *av_packet_alloc();


    double time = 0.0;
    while (true) {
        for (int i = 0; i < c->frame_size; ++i) {
            double sample = sin(2.0 * M_PI * frequency * time);
            int16_t sample_int = static_cast<int16_t>(sample * 32767.0);

            output_buffer[i * 6 + 0] = sample_int; // Front L
            output_buffer[i * 6 + 1] = sample_int; // Front R
            output_buffer[i * 6 + 2] = 0; // Center
            output_buffer[i * 6 + 3] = 0; // LFE
            output_buffer[i * 6 + 4] = 0; // Rear L
            output_buffer[i * 6 + 5] = 0; // Rear R

            time += 1.0 / sample_rate;
        }
        memcpy(frame->data[0], output_buffer.data(), output_buffer.size() * sizeof(int16_t));

        int ret = avcodec_send_frame(c, frame);
        if (ret < 0) break;

        

        while (ret >= 0) {
            ret = avcodec_receive_packet(c, &pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
            if (ret < 0) break;
       
                BYTE* renderData;
                HRESULT hj = renderClient->GetBuffer(pkt.size, &renderData);
                std::cout << "Result:" << hj << "\n";
                if (hj == AUDCLNT_E_BUFFER_TOO_LARGE) {
                    std::cout << "AUDCLNT_E_BUFFER_TOO_LARGE" << std::endl;
                }
                //std::cout << "Rendering " << pkt.size << " bytes" << std::endl;
                memcpy(renderData, pkt.data, pkt.size);
                renderClient->ReleaseBuffer(pkt.size, 0);
 
       
            av_packet_unref(&pkt);
        }
    }

    audioClient->Stop();
    renderClient->Release();
    audioClient->Release();
    CoTaskMemFree(&waveFormat);
    CoUninitialize();

    av_frame_free(&frame);

    avcodec_free_context(&c);

    return 0;

with the result being:

Result:0
Result:0
Result:0
Result:0
Result:0
Result:0
Result:0
Result:-2004287482
AUDCLNT_E_BUFFER_TOO_LARGE

I am trying to write a program that can convert 5.1 PCM sound to 5.1 Dolby Digital compressed audio for use with my optical Toslink/SPDIF USB interface. For debugging purposes I use a sine-wave PCM signal on the FL and FR channels, which will later be replaced by a capture of a Windows audio render/sink. The problem: after compression with libavcodec, the buffer is too large for Windows (and the USB interface) to handle, so the memcpy in the loop causes a buffer overflow. Here is the code:

#include <iostream>
#include <vector>
#include <cmath>

#include <windows.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <ksmedia.h>
#include <propkey.h>
#include <functiondiscoverykeys_devpkey.h>
#include <sstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
}
// UUID definition by the videolan vlc player project
#define DEFINE_VLC_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
        EXTERN_C const GUID DECLSPEC_SELECTANY name \
                = { l, w1, w2, { b1, b2,  b3,  b4,  b5,  b6,  b7,  b8 } }


#define WAVE_FORMAT_DOLBY_AC3_SPDIF     0x0092 /* Sonic Foundry */
static const GUID __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF = { WAVE_FORMAT_DOLBY_AC3_SPDIF, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} };



// Function to get the default audio endpoint
IMMDevice* GetDefaultAudioEndpoint(EDataFlow dataFlow, ERole role) {
    HRESULT hr;
    IMMDeviceEnumerator* enumerator = nullptr;
    IMMDevice* device = nullptr;

    hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)&enumerator);
    if (FAILED(hr)) {
        std::cerr << "Could not create MMDeviceEnumerator." << std::endl;
        return nullptr;
    }

    hr = enumerator->GetDefaultAudioEndpoint(dataFlow, role, &device);
    enumerator->Release();
    if (FAILED(hr)) {
        std::cerr << "Could not get default audio endpoint." << std::endl;
        return nullptr;
    }

    return device;
}


// Function to configure WAVEFORMATEXTENSIBLE for IEC 61937 (S/PDIF)
void configureSpdifWaveFormat(WAVEFORMATEXTENSIBLE* wf, AVSampleFormat input_sample_fmt, int input_sample_rate, int input_channels, GUID subtype) {
    wf->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    wf->Format.nChannels = 2; // Stereo for S/PDIF
    wf->Format.nSamplesPerSec = 48000;
    wf->Format.wBitsPerSample = 16;
    wf->Format.nBlockAlign = 6* 4; // wf->Format.wBitsPerSample / 8 * wf->Format.nChannels
    wf->Format.nAvgBytesPerSec = wf->Format.nSamplesPerSec * wf->Format.nBlockAlign;
    wf->Format.cbSize = sizeof(*wf) - sizeof(wf->Format);
    wf->Samples.wValidBitsPerSample = wf->Format.wBitsPerSample;
    wf->dwChannelMask = KSAUDIO_SPEAKER_5POINT1;
    wf->SubFormat = subtype;
}



int main() {
    // Sine wave parameters
    double frequency = 220.0;
    int sample_rate = 48000;
    AVSampleFormat input_sample_fmt = AV_SAMPLE_FMT_S16;
    int input_channels = 2;

    // Output AC-3 parameters
    int output_sample_rate = 44100;
    AVSampleFormat output_sample_fmt = AV_SAMPLE_FMT_FLTP;
    int output_channels = 6; // 5.1
    int output_bit_rate = 64000;




    // Initialize libavcodec
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AC3);
    AVCodecContext* c = avcodec_alloc_context3(codec);


// Set output buffer size from IAudioClient::GetBufferSize UINT32 bufferFrameCount; audioClient->GetBufferSize(&bufferFrameCount); UINT32 bufferSizeInBit = bufferFrameCount /*Number of Frames*/ * waveFormat.Format.nBlockAlign /*Size of a Frame in Byte*/ * 8 /*Bits*/;   c->bit_rate = bufferSizeInBit; c->sample_rate = 44100; c->sample_fmt = AV_SAMPLE_FMT_FLTP;  c->ch_layout = AV_CHANNEL_LAYOUT_5POINT1;  avcodec_open2(c, codec, nullptr);

    c->bit_rate = 384000;
    c->sample_rate = output_sample_rate;
    c->sample_fmt = output_sample_fmt;
    c->ch_layout = AV_CHANNEL_LAYOUT_5POINT1;



    avcodec_open2(c, codec, nullptr);

    AVFrame* frame = av_frame_alloc();
    frame->nb_samples = c->frame_size;
    frame->format = c->sample_fmt;
    frame->ch_layout = c->ch_layout;
    av_frame_get_buffer(frame, 0);

    
    AVPacket& pkt = *av_packet_alloc();

    // Initialize COM and WASAPI
    CoInitialize(nullptr);

    IMMDevice* renderDevice = GetDefaultAudioEndpoint(eRender, eMultimedia);
    IAudioClient* audioClient;
    renderDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
    WAVEFORMATEXTENSIBLE waveFormat;
    WAVEFORMATEX* closestMatch = nullptr;
    GUID ac3Subtype = __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF; // AC-3 GUID
    configureSpdifWaveFormat(&waveFormat, output_sample_fmt, output_sample_rate, output_channels, ac3Subtype);
    HRESULT hr = audioClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&waveFormat, &closestMatch);

    if (FAILED(hr)) {
        std::cerr << "IAudioClient_IsFormatSupported failed: " << hr << std::endl;

        return 1;
    }

    if (hr == S_FALSE) {
        std::cerr << "Closest matching format found: " << closestMatch->nChannels << std::endl;
        waveFormat = *(WAVEFORMATEXTENSIBLE*)closestMatch;
        CoTaskMemFree(closestMatch);
    }
    audioClient->Start();
    IAudioRenderClient* renderClient;
    hr = audioClient->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, 0, 0, 0, (WAVEFORMATEX*)&waveFormat, nullptr);
    if (FAILED(hr)) {
        std::cerr << "audioClient->Initialize failed: " << std::hex << (uint32_t) hr << std::endl;
        return 1;
    }
    hr = audioClient->GetService(__uuidof(IAudioRenderClient), (void**)&renderClient);
    if (FAILED(hr)) {
        std::cerr << "audioClient->GetService failed: " << hr << std::endl;
        audioClient->Release();
        return 1;
    }

    std::vector<int16_t> output_buffer(c->frame_size * output_channels);
    

    double time = 0.0;
    while (true) {
        for (int i = 0; i < c->frame_size; ++i) {
            double sample = sin(2.0 * M_PI * frequency * time);
            int16_t sample_int = static_cast<int16_t>(sample * 32767.0);

            output_buffer[i * 6 + 0] = sample_int; // Front L
            output_buffer[i * 6 + 1] = sample_int; // Front R
            output_buffer[i * 6 + 2] = 0; // Center
            output_buffer[i * 6 + 3] = 0; // LFE
            output_buffer[i * 6 + 4] = 0; // Rear L
            output_buffer[i * 6 + 5] = 0; // Rear R

            time += 1.0 / sample_rate;
        }
        memcpy(frame->data[0], output_buffer.data(), output_buffer.size() * sizeof(int16_t));

        int ret = avcodec_send_frame(c, frame);
        if (ret < 0) break;

        

        while (ret >= 0) {
            ret = avcodec_receive_packet(c, &pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
            if (ret < 0) break;

       

       
                BYTE* renderData;
                renderClient->GetBuffer(pkt.size, &renderData);
                std::cout << "Rendering " << pkt.size << " bytes" << std::endl;
                memcpy(renderData, pkt.data, pkt.size);
                renderClient->ReleaseBuffer(pkt.size, 0);
 
       
            av_packet_unref(&pkt);
        }
    }

    audioClient->Stop();
    renderClient->Release();
    audioClient->Release();
    CoTaskMemFree(&waveFormat);
    CoUninitialize();

    av_frame_free(&frame);

    avcodec_free_context(&c);

    return 0;
}

The updated code:

#include <iostream>
#include <vector>
#include <cmath>

#include <windows.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <ksmedia.h>
#include <propkey.h>
#include <functiondiscoverykeys_devpkey.h>
#include <sstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
}

#define DEFINE_VLC_GUID(name, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8) \
        EXTERN_C const GUID DECLSPEC_SELECTANY name \
                = { l, w1, w2, { b1, b2,  b3,  b4,  b5,  b6,  b7,  b8 } }
#define BIT_TO_BYTE / 8

#define WAVE_FORMAT_DOLBY_AC3_SPDIF     0x0092 /* Sonic Foundry */
static const GUID __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF = { WAVE_FORMAT_DOLBY_AC3_SPDIF, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} };



// Function to get the default audio endpoint
IMMDevice* GetDefaultAudioEndpoint(EDataFlow dataFlow, ERole role) {
    HRESULT hr;
    IMMDeviceEnumerator* enumerator = nullptr;
    IMMDevice* device = nullptr;

    hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)&enumerator);
    if (FAILED(hr)) {
        std::cerr << "Could not create MMDeviceEnumerator." << std::endl;
        return nullptr;
    }

    hr = enumerator->GetDefaultAudioEndpoint(dataFlow, role, &device);
    enumerator->Release();
    if (FAILED(hr)) {
        std::cerr << "Could not get default audio endpoint." << std::endl;
        return nullptr;
    }

    return device;
}


// Function to configure WAVEFORMATEXTENSIBLE for IEC 61937 (S/PDIF)
void configureSpdifWaveFormat(WAVEFORMATEXTENSIBLE* wf, AVSampleFormat input_sample_fmt, int input_sample_rate, int input_channels, GUID subtype) {
    wf->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    wf->Format.nChannels = 2; // Stereo for S/PDIF
    wf->Format.nSamplesPerSec = 48000;
    wf->Format.wBitsPerSample = 16;
    wf->Format.nBlockAlign = wf->Format.wBitsPerSample / 8 * wf->Format.nChannels;
    wf->Format.nAvgBytesPerSec = wf->Format.nSamplesPerSec * wf->Format.nBlockAlign;
    wf->Format.cbSize = sizeof(*wf) - sizeof(wf->Format);
    wf->Samples.wValidBitsPerSample = wf->Format.wBitsPerSample;
    wf->dwChannelMask = KSAUDIO_SPEAKER_5POINT1;
    wf->SubFormat = subtype;
}



int main() {
    // Sine wave parameters
    double frequency = 220.0;
    int sample_rate = 48000;
    AVSampleFormat input_sample_fmt = AV_SAMPLE_FMT_S16;
    int input_channels = 2;

    // Output AC-3 parameters
    int output_sample_rate = 44100;
    AVSampleFormat output_sample_fmt = AV_SAMPLE_FMT_FLTP;
    int output_channels = 6; // 5.1
    
    // Initialize COM and WASAPI
    CoInitialize(nullptr);

    IMMDevice* renderDevice = GetDefaultAudioEndpoint(eRender, eMultimedia);
    IAudioClient* audioClient;
    renderDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
    WAVEFORMATEXTENSIBLE waveFormat;
    WAVEFORMATEX* closestMatch = nullptr;
    GUID ac3Subtype = __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF; // AC-3 GUID
    configureSpdifWaveFormat(&waveFormat, output_sample_fmt, output_sample_rate, output_channels, ac3Subtype);
    HRESULT hr = audioClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&waveFormat, &closestMatch);

    if (FAILED(hr)) {
        std::cerr << "IAudioClient_IsFormatSupported failed: " << hr << std::endl;

        return 1;
    }

    if (hr == S_FALSE) {
        std::cerr << "Closest matching format found: " << closestMatch->nChannels << std::endl;
        waveFormat = *(WAVEFORMATEXTENSIBLE*)closestMatch;
        CoTaskMemFree(closestMatch);
    }
    audioClient->Start();
    IAudioRenderClient* renderClient;
    hr = audioClient->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, 0, 0, 0, (WAVEFORMATEX*)&waveFormat, nullptr);
    if (FAILED(hr)) {
        std::cerr << "audioClient->Initialize failed: " << std::hex << (uint32_t) hr << std::endl;
        return 1;
    }
    hr = audioClient->GetService(__uuidof(IAudioRenderClient), (void**)&renderClient);
    if (FAILED(hr)) {
        std::cerr << "audioClient->GetService failed: " << hr << std::endl;
        audioClient->Release();
        return 1;
    }

    

    // Initialize libavcodec
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AC3);
    AVCodecContext* c = avcodec_alloc_context3(codec);


    // Set output buffer size from IAudioClient::GetBufferSize
    UINT32 bufferFrameCount;
    audioClient->GetBufferSize(&bufferFrameCount);
    UINT32 bufferSizeInBit = bufferFrameCount * waveFormat.Format.nBlockAlign * 8;


    c->bit_rate = bufferSizeInBit;
    c->sample_rate = 44100;
    c->sample_fmt = AV_SAMPLE_FMT_FLTP;

    c->ch_layout = AV_CHANNEL_LAYOUT_5POINT1;

    avcodec_open2(c, codec, nullptr);

    AVFrame* frame = av_frame_alloc();
    frame->nb_samples = c->frame_size;
    frame->format = c->sample_fmt;
    frame->ch_layout = c->ch_layout;
    av_frame_get_buffer(frame, 0);

    
    std::vector<int16_t> output_buffer(c->frame_size * output_channels);




    AVPacket& pkt = *av_packet_alloc();


    double time = 0.0;
    while (true) {
        for (int i = 0; i < c->frame_size; ++i) {
            double sample = sin(2.0 * M_PI * frequency * time);
            int16_t sample_int = static_cast<int16_t>(sample * 32767.0);

            output_buffer[i * 6 + 0] = sample_int; // Front L
            output_buffer[i * 6 + 1] = sample_int; // Front R
            output_buffer[i * 6 + 2] = 0; // Center
            output_buffer[i * 6 + 3] = 0; // LFE
            output_buffer[i * 6 + 4] = 0; // Rear L
            output_buffer[i * 6 + 5] = 0; // Rear R

            time += 1.0 / sample_rate;
        }
        memcpy(frame->data[0], output_buffer.data(), output_buffer.size() * sizeof(int16_t));

        int ret = avcodec_send_frame(c, frame);
        if (ret < 0) break;

        

        while (ret >= 0) {
            ret = avcodec_receive_packet(c, &pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
            if (ret < 0) break;
       
                BYTE* renderData;
                HRESULT hj = renderClient->GetBuffer(pkt.size, &renderData);
                std::cout << "Result:" << hj << "\n";
                if (hj == AUDCLNT_E_BUFFER_TOO_LARGE) {
                    std::cout << "AUDCLNT_E_BUFFER_TOO_LARGE" << std::endl;
                }
                //std::cout << "Rendering " << pkt.size << " bytes" << std::endl;
                memcpy(renderData, pkt.data, pkt.size);
                renderClient->ReleaseBuffer(pkt.size, 0);
 
       
            av_packet_unref(&pkt);
        }
    }

    audioClient->Stop();
    renderClient->Release();
    audioClient->Release();
    CoTaskMemFree(&waveFormat);
    CoUninitialize();

    av_frame_free(&frame);

    avcodec_free_context(&c);

    return 0;

with the result being:

Result:0
Result:0
Result:0
Result:0
Result:0
Result:0
Result:0
Result:-2004287482
AUDCLNT_E_BUFFER_TOO_LARGE
Share Improve this question edited Mar 30 at 12:55 NprogramDev asked Mar 22 at 20:04 NprogramDevNprogramDev 213 bronze badges 6
  • Is GetBuffer() returning AUDCLNT_E_BUFFER_TOO_LARGE? Instead of letting libav decide the buffer size, I think you should be submitting buffers of size IAudioClient::GetBufferSize(). – Gordon Childs Commented Mar 25 at 11:21
  • Thank you for your answer @GordonChilds! GetBuffer() returns AUDCLNT_E_BUFFER_TOO_LARGE. The Idea to give libav a size to output crossed my mind too, but I didn't know how to get the windows buffer size and still don't know how to convert from frames to something libav can use and configure it for that. – NprogramDev Commented Mar 25 at 20:02
  • You can get the buffer size with audioClient-> GetBufferSize() and then supply up to that many frames and it looks like you could also request a larger buffer size in audioClient->Initialize(). – Gordon Childs Commented Mar 25 at 20:46
  • I changed the configuration for libav a little, but didn't get it to work. It still outputs to large packets. // Set output buffer size from IAudioClient::GetBufferSize UINT32 bufferFrameCount; audioClient->GetBufferSize(&bufferFrameCount); UINT32 bufferSizeInBit = bufferFrameCount /*Number of Frames*/ * waveFormat.Format.nBlockAlign /*Size of a Frame in Byte*/ * 8 /*Bits*/; c->bit_rate = bufferSizeInBit; c->sample_rate = 44100; c->sample_fmt = AV_SAMPLE_FMT_FLTP; c->ch_layout = AV_CHANNEL_LAYOUT_5POINT1; avcodec_open2(c, codec, nullptr); @GordonChilds – NprogramDev Commented Mar 29 at 11:33
  • Can you add that code to your question? Also what device are you outputting to? – Gordon Childs Commented Mar 29 at 20:42
 |  Show 1 more comment

1 Answer 1

Reset to default 0

For me the audio client buffer size (3840 bytes) is bigger than the typical output of avcodec_receive_packet (~1672 bytes) so in this case, when renderClient->GetBuffer() returns AUDCLNT_E_BUFFER_TOO_LARGE it doesn't mean the buffer will always be too big, it just means that it's too big now so you can simply wait a short amount of time and try again. I waited 20ms and that seems to work fine, although I'm short of SPDIF input devices right now, so I don't know how this sounds. If you hear dropouts, you can decrease that amount.

If the situation were reversed and avcodec_receive_packet always returned more data than GetBuffer() could take, then you would need to dole out smaller amounts and adjust your encode / render loop accordingly.

Here is the code - there's some extra error checking now, and client Start() no longer fails because I moved it after client Initialize() .

#include <iostream>
#include <vector>
#include <cmath>

#include <windows.h>
#include <mmdeviceapi.h>
#include <audioclient.h>
#include <ksmedia.h>
#include <propkey.h>
#include <functiondiscoverykeys_devpkey.h>
#include <sstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
}

#include <cassert>

// Media subtype GUID for AC-3 over S/PDIF: WAVE_FORMAT_DOLBY_AC3_SPDIF (0x0092)
// placed in the Data1 field, with the standard KSDATAFORMAT_SUBTYPE_* tail.
static const GUID __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF = { WAVE_FORMAT_DOLBY_AC3_SPDIF, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} };

// Returns the system's default audio endpoint for the given data-flow
// direction and role, or nullptr on failure (an error is logged to stderr).
// The caller owns the returned IMMDevice and must Release() it.
IMMDevice* GetDefaultAudioEndpoint(EDataFlow dataFlow, ERole role) {
    IMMDeviceEnumerator* enumerator = nullptr;
    HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
                                  __uuidof(IMMDeviceEnumerator), (void**)&enumerator);
    if (FAILED(hr)) {
        std::cerr << "Could not create MMDeviceEnumerator." << std::endl;
        return nullptr;
    }

    IMMDevice* endpoint = nullptr;
    hr = enumerator->GetDefaultAudioEndpoint(dataFlow, role, &endpoint);
    enumerator->Release(); // the enumerator is only needed for the lookup
    if (FAILED(hr)) {
        std::cerr << "Could not get default audio endpoint." << std::endl;
        return nullptr;
    }

    return endpoint;
}


// Fills a WAVEFORMATEXTENSIBLE describing an IEC 61937 (S/PDIF pass-through)
// stream so WASAPI treats the rendered buffers as pre-compressed data (the
// codec identified by `subtype`, e.g. AC-3) rather than raw PCM.
//
// NOTE(review): the input_sample_fmt / input_sample_rate / input_channels
// parameters are currently unused — the transport format is hard-coded to
// 2-channel / 48 kHz / 16-bit S/PDIF framing. Only `subtype` is consumed.
void configureSpdifWaveFormat(WAVEFORMATEXTENSIBLE* wf, AVSampleFormat input_sample_fmt, int input_sample_rate, int input_channels, GUID subtype) {
    wf->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
    wf->Format.nChannels = 2; // Stereo for S/PDIF
    wf->Format.nSamplesPerSec = 48000;
    wf->Format.wBitsPerSample = 16;
    // NOTE(review): 2 channels * 16 bits would give a block align of 4;
    // 24 looks like 6 channels * 4 bytes. GetBuffer()/GetBufferSize() frame
    // counts are derived from this value — TODO confirm the device expects 24.
    wf->Format.nBlockAlign = 6 * 4; // wf->Format.wBitsPerSample / 8 * wf->Format.nChannels

    wf->Format.nAvgBytesPerSec = wf->Format.nSamplesPerSec * wf->Format.nBlockAlign;
    wf->Format.cbSize = sizeof(*wf) - sizeof(wf->Format);
    wf->Samples.wValidBitsPerSample = wf->Format.wBitsPerSample;
    // Channel mask advertises the 5.1 layout carried inside the compressed stream.
    wf->dwChannelMask = KSAUDIO_SPEAKER_5POINT1;
    wf->SubFormat = subtype;
}

int main() {
    // Sine wave parameters
    double frequency = 220.0;
    int sample_rate = 48000;
    AVSampleFormat input_sample_fmt = AV_SAMPLE_FMT_S16;
    int input_channels = 2;

    // Output AC-3 parameters
    int output_sample_rate = 44100;
    AVSampleFormat output_sample_fmt = AV_SAMPLE_FMT_FLTP;
    int output_channels = 6; // 5.1
    int output_bit_rate = 64000;

    // Initialize libavcodec
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AC3);
    assert(codec);
    AVCodecContext* c = avcodec_alloc_context3(codec);
    assert(c);

    c->bit_rate = 384000;
    c->sample_rate = output_sample_rate;
    c->sample_fmt = output_sample_fmt;
    c->ch_layout = AV_CHANNEL_LAYOUT_5POINT1;

    HRESULT hr;
    int res;

    res = avcodec_open2(c, codec, nullptr);
    assert(res == 0);

    AVFrame* frame = av_frame_alloc();
    frame->nb_samples = c->frame_size;
    frame->format = c->sample_fmt;
    frame->ch_layout = c->ch_layout;
    av_frame_get_buffer(frame, 0);

    
    AVPacket& pkt = *av_packet_alloc();

    // Initialize COM and WASAPI
    hr = CoInitialize(nullptr);
    assert(hr == S_OK);

    IMMDevice* renderDevice = GetDefaultAudioEndpoint(eRender, eMultimedia);
    assert(renderDevice);

    IAudioClient* audioClient;
    hr = renderDevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr, (void**)&audioClient);
    assert(hr == S_OK);

    WAVEFORMATEXTENSIBLE waveFormat;
    WAVEFORMATEX* closestMatch = nullptr;
    GUID ac3Subtype = __KSDATAFORMAT_SUBTYPE_DOLBY_AC3_SPDIF; // AC-3 GUID
    configureSpdifWaveFormat(&waveFormat, output_sample_fmt, output_sample_rate, output_channels, ac3Subtype);
    // when ac3 spdif not available it's  AUDCLNT_E_UNSUPPORTED_FORMAT = 0x88890008
    // AUDCLNT_SHAREMODE_SHARED?
    hr = audioClient->IsFormatSupported(AUDCLNT_SHAREMODE_EXCLUSIVE, (WAVEFORMATEX*)&waveFormat, &closestMatch);
    assert(hr == S_OK);

    if (FAILED(hr)) {
        std::cerr << "IAudioClient_IsFormatSupported failed: " << hr << std::endl;

        return 1;
    }

    if (hr == S_FALSE) {
        std::cerr << "Closest matching format found: " << closestMatch->nChannels << std::endl;
        waveFormat = *(WAVEFORMATEXTENSIBLE*)closestMatch;
        CoTaskMemFree(closestMatch);
    }

    IAudioRenderClient* renderClient;
    hr = audioClient->Initialize(AUDCLNT_SHAREMODE_EXCLUSIVE, 0, 0, 0, (WAVEFORMATEX*)&waveFormat, nullptr);
    if (FAILED(hr)) {
        std::cerr << "audioClient->Initialize failed: " << std::hex << (uint32_t) hr << std::endl;
        return 1;
    }

    UINT32 audioClientBufferSizeInFrames;
    hr = audioClient->GetBufferSize(&audioClientBufferSizeInFrames);
    assert(hr == S_OK);
    std::cout << "audioClientBufferSizeInFrames: " << audioClientBufferSizeInFrames << std::endl;
    // if avcodec_receive_packet gives you packets larger than this then you need to rethink the encode / render loop.

    hr = audioClient->GetService(__uuidof(IAudioRenderClient), (void**)&renderClient);
    if (FAILED(hr)) {
        std::cerr << "audioClient->GetService failed: " << hr << std::endl;
        audioClient->Release();
        return 1;
    }

    std::cout << "allocating output buffer of " << c->frame_size << " * " << output_channels << " = " << c->frame_size * output_channels << " bytes" << std::endl;
    std::vector<int16_t> output_buffer(c->frame_size * output_channels);
    
    // NOTE: call this AFTER Initialize() otherwise it returns
    // 0x88890001 - AUDCLNT_E_NOT_INITIALIZED
    hr = audioClient->Start();
    assert(hr == S_OK);

    double time = 0.0;
    while (true) {
        for (int i = 0; i < c->frame_size; ++i) {
            double sample = sin(2.0 * M_PI * frequency * time);
            int16_t sample_int = static_cast<int16_t>(sample * 32767.0);

            output_buffer[i * 6 + 0] = sample_int; // Front L
            output_buffer[i * 6 + 1] = sample_int; // Front R
            output_buffer[i * 6 + 2] = 0; // Center
            output_buffer[i * 6 + 3] = 0; // LFE
            output_buffer[i * 6 + 4] = 0; // Rear L
            output_buffer[i * 6 + 5] = 0; // Rear R

            time += 1.0 / sample_rate;
        }
        memcpy(frame->data[0], output_buffer.data(), output_buffer.size() * sizeof(int16_t));

        int ret = avcodec_send_frame(c, frame);
        // can return AVERROR(EAGAIN), but this flow doesn't need to handle that.

        if (ret < 0) break;

        while (ret >= 0) {
            ret = avcodec_receive_packet(c, &pkt);

            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break;
            if (ret < 0) break;
       
            BYTE* renderData;
            while (1) {
                hr = renderClient->GetBuffer(pkt.size, &renderData);

                if (hr == AUDCLNT_E_BUFFER_TOO_LARGE) { // 0x88890006
                    Sleep(20); // sleep 20ms
                    continue;
                }
                else if (hr == S_OK) {
                    break;
                }
                exit(1);
            }
            memcpy(renderData, pkt.data, pkt.size);
            hr = renderClient->ReleaseBuffer(pkt.size, 0);
            assert(hr == S_OK);

            std::cout << "rendered " << pkt.size << " bytes" << std::endl;

            av_packet_unref(&pkt);
        }
    }

    hr = audioClient->Stop();
    assert(hr == S_OK);

    renderClient->Release();
    audioClient->Release();
    CoTaskMemFree(&waveFormat);
    CoUninitialize();

    av_frame_free(&frame);

    avcodec_free_context(&c);

    return 0;
}

发布者:admin,转转请注明出处:http://www.yc00.com/questions/1744305207a4567695.html

相关推荐

发表回复

评论列表(0条)

  • 暂无评论

联系我们

400-800-8888

在线咨询: QQ交谈

邮件:admin@example.com

工作时间:周一至周五,9:30-18:30,节假日休息

关注微信