Huge update - fixed memory leak (see description for details)

Changed the frame-loading loop to unreference frames instead of allocating and freeing them on every iteration, changed frameConvert to use vCodecCtx data, added reinitSound and fixed the memory leak it introduced, and adjusted the destructor accordingly.
This commit is contained in:
2025-10-24 02:09:21 +03:00
parent cf8575a5c4
commit c424ec2c65
2 changed files with 58 additions and 36 deletions

View File

@@ -104,6 +104,13 @@ private:
Flags videoFlags; Flags videoFlags;
uint16_t ID = 0; uint16_t ID = 0;
// Frames
_ffmpeg::AVFrame *frame = _ffmpeg::av_frame_alloc(),
*convertedFrame = _ffmpeg::av_frame_alloc(),
*audioFrame = _ffmpeg::av_frame_alloc(),
*convertedAudioFrame = _ffmpeg::av_frame_alloc();
_ffmpeg::AVPacket *packet = _ffmpeg::av_packet_alloc();
// Time specific // Time specific
uint32_t currentFrameNumber = 0; uint32_t currentFrameNumber = 0;
double timeBase = 0, clock = 0; double timeBase = 0, clock = 0;
@@ -132,8 +139,7 @@ private:
std::pair<double, Containers::Array<char>> loadNextFrame(); std::pair<double, Containers::Array<char>> loadNextFrame();
inline void frameDebug(_ffmpeg::AVFrame *frame); inline void frameDebug(_ffmpeg::AVFrame *frame);
inline void frameSetScaleSAR(_ffmpeg::AVFrame *frame); inline void frameSetScaleSAR(_ffmpeg::AVFrame *frame);
inline void frameConvert(_ffmpeg::AVFrame *sourceFrame, inline void frameConvert();
_ffmpeg::AVFrame *convertedFrame);
inline void frameFlip(_ffmpeg::AVFrame *frame); inline void frameFlip(_ffmpeg::AVFrame *frame);
inline void restartVideo(); inline void restartVideo();
@@ -142,6 +148,8 @@ private:
void loadTexture(Containers::Array<char> data); void loadTexture(Containers::Array<char> data);
void loadTexture(ImageView2D image); void loadTexture(ImageView2D image);
Image2D loadImage(Containers::Array<char> data); Image2D loadImage(Containers::Array<char> data);
void reinitSound();
}; };
inline Video::Flags operator|(Video::Flags x, Video::Flags y) { inline Video::Flags operator|(Video::Flags x, Video::Flags y) {

View File

@@ -11,10 +11,16 @@
#include <Magnum/Image.h> #include <Magnum/Image.h>
#include <Magnum/Math/Functions.h> #include <Magnum/Math/Functions.h>
#include <Magnum/PixelFormat.h> #include <Magnum/PixelFormat.h>
#include <libavcodec/avcodec.h>
#include <libavcodec/packet.h>
#include <libavformat/avformat.h>
#include <libavutil/channel_layout.h> #include <libavutil/channel_layout.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
#include <libavutil/rational.h> #include <libavutil/rational.h>
#include <libavutil/samplefmt.h> #include <libavutil/samplefmt.h>
#include <libswresample/swresample.h> #include <libswresample/swresample.h>
#include <libswscale/swscale.h>
#include <map> #include <map>
#include <utility> #include <utility>
@@ -40,6 +46,7 @@ Video::Video(std::string path, ChargeAudio::Engine *engine, Flags videoF,
if (avformat_open_input(&ctx, path.c_str(), NULL, NULL) != 0) { if (avformat_open_input(&ctx, path.c_str(), NULL, NULL) != 0) {
Utility::Error{} << "Could not open file " << path.c_str(); Utility::Error{} << "Could not open file " << path.c_str();
avformat_close_input(&ctx);
avformat_free_context(ctx); avformat_free_context(ctx);
return; return;
} }
@@ -95,13 +102,15 @@ Video::Video(std::string path, ChargeAudio::Engine *engine, Flags videoF,
outLayout = AV_CHANNEL_LAYOUT_MONO; outLayout = AV_CHANNEL_LAYOUT_MONO;
} }
// Resampling
swr_alloc_set_opts2(&swrCtx, &outLayout, sampleFormat, swr_alloc_set_opts2(&swrCtx, &outLayout, sampleFormat,
audioEngine->GetSampleRate(), &aCodecCtx->ch_layout, audioEngine->GetSampleRate(), &aCodecCtx->ch_layout,
aCodecCtx->sample_fmt, aCodecCtx->sample_rate, 0, NULL); aCodecCtx->sample_fmt, aCodecCtx->sample_rate, 0, NULL);
swr_init(swrCtx); swr_init(swrCtx);
// Creating buffered audio
Sound = audioEngine->CreateSound(10); Sound = audioEngine->CreateSound(10);
// Frame init
} }
bufferMaxFrames = av_q2d(videoStream->avg_frame_rate) * bufferLenghtInSeconds; bufferMaxFrames = av_q2d(videoStream->avg_frame_rate) * bufferLenghtInSeconds;
@@ -110,7 +119,16 @@ Video::Video(std::string path, ChargeAudio::Engine *engine, Flags videoF,
Video::~Video() { Video::~Video() {
sws_freeContext(swsCtx); sws_freeContext(swsCtx);
if (audioStreamNum != -1) {
swr_free(&swrCtx); swr_free(&swrCtx);
av_frame_free(&audioFrame);
av_frame_free(&convertedAudioFrame);
}
av_frame_free(&frame);
av_frame_free(&convertedFrame);
av_packet_free(&packet);
avformat_close_input(&ctx);
avformat_free_context(ctx); avformat_free_context(ctx);
avcodec_free_context(&vCodecCtx); avcodec_free_context(&vCodecCtx);
avcodec_free_context(&aCodecCtx); avcodec_free_context(&aCodecCtx);
@@ -124,6 +142,7 @@ void Video::Play() {
return; return;
} }
ID = Manager::hookVideo(std::bind(&Video::continueVideo, this)); ID = Manager::hookVideo(std::bind(&Video::continueVideo, this));
reinitSound();
if (audioStreamNum != -1) { if (audioStreamNum != -1) {
Sound->Play(); Sound->Play();
} }
@@ -135,9 +154,7 @@ void Video::Pause() {
return; return;
} }
Manager::unhookVideo(ID); Manager::unhookVideo(ID);
if (audioStreamNum != -1) { reinitSound();
Sound->Pause();
}
ID = 0; ID = 0;
videoState = State::Paused; videoState = State::Paused;
} }
@@ -200,10 +217,10 @@ void Video::continueVideo() {
// ======================== HELPERS ======================== // ======================== HELPERS ========================
std::pair<double, Containers::Array<char>> Video::loadNextFrame() { std::pair<double, Containers::Array<char>> Video::loadNextFrame() {
AVFrame *frame = av_frame_alloc(), *convertedFrame = av_frame_alloc(), av_frame_unref(convertedFrame);
*audioFrame = av_frame_alloc(), av_frame_unref(convertedAudioFrame);
*convertedAudioFrame = av_frame_alloc(); av_frame_unref(frame);
AVPacket *packet = av_packet_alloc(); av_frame_unref(audioFrame);
// A hard stop if we are out of frames to read // A hard stop if we are out of frames to read
while (av_read_frame(ctx, packet) >= 0) { while (av_read_frame(ctx, packet) >= 0) {
@@ -211,13 +228,14 @@ std::pair<double, Containers::Array<char>> Video::loadNextFrame() {
static_cast<int8_t>(packet->stream_index) == audioStreamNum) { static_cast<int8_t>(packet->stream_index) == audioStreamNum) {
avcodec_send_packet(aCodecCtx, packet); avcodec_send_packet(aCodecCtx, packet);
avcodec_receive_frame(aCodecCtx, audioFrame); avcodec_receive_frame(aCodecCtx, audioFrame);
if (audioFrame->format != -1 && audioEngine) { if (audioFrame->format != -1 && audioEngine) {
convertedAudioFrame->format = sampleFormat; convertedAudioFrame->format = sampleFormat;
convertedAudioFrame->sample_rate = audioEngine->GetSampleRate(); convertedAudioFrame->sample_rate = audioEngine->GetSampleRate();
convertedAudioFrame->ch_layout = outLayout; convertedAudioFrame->ch_layout = outLayout;
convertedAudioFrame->nb_samples = convertedAudioFrame->nb_samples =
swr_get_out_samples(swrCtx, audioFrame->nb_samples); swr_get_out_samples(swrCtx, audioFrame->nb_samples);
av_frame_get_buffer(convertedAudioFrame, 0); av_frame_get_buffer(convertedAudioFrame, 2);
swr_convert_frame(swrCtx, convertedAudioFrame, audioFrame); swr_convert_frame(swrCtx, convertedAudioFrame, audioFrame);
@@ -227,18 +245,19 @@ std::pair<double, Containers::Array<char>> Video::loadNextFrame() {
} }
if (static_cast<int8_t>(packet->stream_index) == videoStreamNum) { if (static_cast<int8_t>(packet->stream_index) == videoStreamNum) {
// Requests a frame from the decoder
avcodec_send_packet(vCodecCtx, packet); avcodec_send_packet(vCodecCtx, packet);
avcodec_receive_frame(vCodecCtx, frame); avcodec_receive_frame(vCodecCtx, frame);
av_packet_unref(packet); av_packet_unref(packet);
if (frame->format != -1) { if (frame->format != AV_PIX_FMT_NONE) {
frameSetScaleSAR(frame); frameSetScaleSAR(frame);
frameFlip(frame); frameFlip(frame);
frameConvert(frame, convertedFrame); frameConvert();
break; break;
} }
} }
// You don't know what you are doing, do not touch this
av_packet_unref(packet); av_packet_unref(packet);
} }
@@ -252,11 +271,6 @@ std::pair<double, Containers::Array<char>> Video::loadNextFrame() {
double ptsInSeconds = timeBase * frame->pts; double ptsInSeconds = timeBase * frame->pts;
// Cleanup time cus this is a C library yay (ironic) // Cleanup time cus this is a C library yay (ironic)
av_frame_free(&convertedFrame);
av_frame_free(&convertedAudioFrame);
av_frame_free(&frame);
av_frame_free(&audioFrame);
av_packet_free(&packet);
return {ptsInSeconds, std::move(data)}; return {ptsInSeconds, std::move(data)};
} }
@@ -313,7 +327,7 @@ void Video::frameFlip(AVFrame *frame) {
frame->linesize[2] = -frame->linesize[2]; frame->linesize[2] = -frame->linesize[2];
} }
void Video::frameConvert(AVFrame *sourceFrame, AVFrame *convertedFrame) { void Video::frameConvert() {
// Converting YUV420p to RGB24 // Converting YUV420p to RGB24
convertedFrame->format = AV_PIX_FMT_RGB24; convertedFrame->format = AV_PIX_FMT_RGB24;
convertedFrame->colorspace = AVCOL_SPC_BT709; convertedFrame->colorspace = AVCOL_SPC_BT709;
@@ -324,20 +338,16 @@ void Video::frameConvert(AVFrame *sourceFrame, AVFrame *convertedFrame) {
3); // Proper way to allocate space for data 3); // Proper way to allocate space for data
if (swsCtx == NULL) { if (swsCtx == NULL) {
swsCtx = sws_getContext(Dimensions.x(), Dimensions.y(), swsCtx = sws_getContext(Dimensions.x(), Dimensions.y(), vCodecCtx->pix_fmt,
static_cast<AVPixelFormat>(sourceFrame->format), Dimensions.x(), Dimensions.y(), AV_PIX_FMT_RGB24,
Dimensions.x(), Dimensions.y(), SWS_BILINEAR, NULL, NULL, NULL);
static_cast<AVPixelFormat>(convertedFrame->format),
SWS_BICUBIC, NULL, NULL, NULL);
} }
// TO DO: DO THIS PROPERLY sws_setColorspaceDetails(swsCtx, sws_getCoefficients(vCodecCtx->colorspace),
sws_setColorspaceDetails(swsCtx, sws_getCoefficients(SWS_CS_ITU709), frame->color_range,
sourceFrame->color_range, sws_getCoefficients(convertedFrame->colorspace),
sws_getCoefficients(SWS_CS_ITU709),
convertedFrame->color_range, 0, 1 << 16, 1 << 16); convertedFrame->color_range, 0, 1 << 16, 1 << 16);
// -----------------------------
sws_scale(swsCtx, sourceFrame->data, sourceFrame->linesize, 0, Dimensions.y(), sws_scale(swsCtx, frame->data, frame->linesize, 0, Dimensions.y(),
convertedFrame->data, convertedFrame->linesize); convertedFrame->data, convertedFrame->linesize);
} }
@@ -373,9 +383,13 @@ void Video::restartVideo() {
void Video::dumpAndRefillBuffer() { void Video::dumpAndRefillBuffer() {
std::map<double, Image2D>().swap(frameBuffer); std::map<double, Image2D>().swap(frameBuffer);
if (audioStreamNum != -1) { reinitSound();
Sound.release();
Sound = audioEngine->CreateSound(10);
}
loadTexture(loadNextFrame().second); loadTexture(loadNextFrame().second);
} }
void Video::reinitSound() {
if (audioStreamNum != -1) {
delete Sound.release();
Sound = std::move(audioEngine->CreateSound(10));
}
}