Genesis Commit
This commit is contained in:
1
.gitignore
vendored
1
.gitignore
vendored
@@ -46,3 +46,4 @@ CMakeUserPresets.json
|
|||||||
*.out
|
*.out
|
||||||
*.app
|
*.app
|
||||||
|
|
||||||
|
build/
|
||||||
|
64
CMakeLists.txt
Normal file
64
CMakeLists.txt
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
cmake_minimum_required(VERSION 3.10)

project(ChargeVideo VERSION 1.0)

# Language / tooling setup
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_MODULE_PATH "modules/" ${CMAKE_MODULE_PATH})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
# Debug builds get symbols plus AddressSanitizer
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -fsanitize=address")
endif()

# Rendering stack (Corrade/Magnum) and the FFmpeg libraries via pkg-config
find_package(Corrade REQUIRED Main)
find_package(Magnum REQUIRED GL)
find_package(PkgConfig REQUIRED)
pkg_check_modules(AVFORMAT REQUIRED libavformat)
pkg_check_modules(AVCODEC REQUIRED libavcodec)
pkg_check_modules(AVUTIL REQUIRED libavutil)
pkg_check_modules(SWSCALE REQUIRED libswscale)
pkg_check_modules(SWRESAMPLE REQUIRED libswresample)

add_library(ChargeVideo SHARED "src/ChargeVideo.hpp" "src/Time.cpp"
                               "src/Video.cpp")

target_link_libraries(
  ChargeVideo
  PRIVATE Corrade::Main
          Magnum::GL
          Magnum::Magnum
          ${AVFORMAT_LIBRARIES}
          ${AVCODEC_LIBRARIES}
          ${AVUTIL_LIBRARIES}
          ${SWSCALE_LIBRARIES}
          ${SWRESAMPLE_LIBRARIES})

# Consumers of the build tree use src/ directly; installed consumers use
# the relocated include/ prefix.
target_include_directories(
  ChargeVideo PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
                     $<INSTALL_INTERFACE:include>)

# Library
install(
  TARGETS ChargeVideo
  EXPORT ChargeVideoTargets
  LIBRARY DESTINATION lib
  ARCHIVE DESTINATION lib
  RUNTIME DESTINATION bin)

# include
install(FILES src/ChargeVideo.hpp DESTINATION include/Charge)

# Exported targets so downstream projects can find_package(ChargeVideo)
install(
  EXPORT ChargeVideoTargets
  FILE ChargeVideoTargets.cmake
  NAMESPACE ChargeVideo::
  DESTINATION lib/cmake/ChargeVideo)

include(CMakePackageConfigHelpers)

# Generate the find_package() config file from the .cmake.in template
configure_package_config_file(
  ${CMAKE_CURRENT_SOURCE_DIR}/ChargeVideoConfig.cmake.in
  "${CMAKE_CURRENT_BINARY_DIR}/ChargeVideoConfig.cmake"
  INSTALL_DESTINATION lib/cmake/ChargeVideo)

install(FILES "${CMAKE_CURRENT_BINARY_DIR}/ChargeVideoConfig.cmake"
        DESTINATION lib/cmake/ChargeVideo)
|
6
ChargeVideoConfig.cmake.in
Normal file
6
ChargeVideoConfig.cmake.in
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
@PACKAGE_INIT@

# Pull in the exported targets (ChargeVideo::ChargeVideo)
include("${CMAKE_CURRENT_LIST_DIR}/ChargeVideoTargets.cmake")
message(STATUS "Found ChargeVideo!")

check_required_components(ChargeVideo)
|
125
src/ChargeVideo.hpp
Normal file
125
src/ChargeVideo.hpp
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
#pragma once

// C interfaces of the FFmpeg libraries used for demuxing / decoding /
// pixel and sample conversion.
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavcodec/codec.h>
#include <libavcodec/packet.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/pixdesc.h>
#include <libavutil/pixfmt.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}

#include <Corrade/Containers/Array.h>

#include <Magnum/GL/Texture.h>
#include <Magnum/Image.h>
#include <Magnum/ImageView.h>
#include <Magnum/Magnum.h>

#include <cstdint>
#include <cstdlib>
#include <functional>
#include <queue>
#include <string>
#include <unordered_map> // BUG FIX: was missing, std::unordered_map is used below
#include <vector>

using namespace Corrade;
using namespace Magnum;
using namespace Math::Literals;

namespace ChargeVideo {
// ======================== CLASSES ========================

// Global clock: the application calls AdvanceTime() once per rendered
// frame; it updates the delta-time statistics and drives every hooked
// (currently playing) Video.
class Time {
public:
  // Must be called once per frame by the application loop.
  static void AdvanceTime();
  static float DeltaTime;        // duration of the last frame, in seconds
  static float AverageDeltaTime; // rolling average over ADTMaxSample frames
  static uint16_t ADTMaxSample;  // rolling-average window size (frames)

private:
  static Timeline time;
  static float rollingSum; // running sum of the averaging window
  static uint16_t ADTIndex, videoIDCounter;
  static bool ADTFirstCycle; // true until the window has filled once
  static std::unordered_map<uint16_t, std::function<void()>> videoPlayMethods;
  static std::vector<float> deltaAverage;
  static std::vector<uint16_t> toUnhook; // deferred unhook queue

  // Specific for internal controls
  static uint16_t hookVideo(std::function<void()> videoPlay);
  static void unhookVideo(uint16_t ID);
  friend class Video; // friend allows other classes to use private methods of a
                      // class without having to make it public for all
};

// One video file: demuxes/decodes via FFmpeg and exposes the current
// frame as a Magnum GL texture. Play()/Pause() drive it from the global
// Time clock; AdvanceToNextFrame() gives manual stepping.
class Video {
public:
  Video(std::string path, bool ShouldVideoLoop = true,
        float BufferSizeInSeconds = 1.0f);
  ~Video();

  // Manual Control
  void AdvanceToNextFrame();

  // Automatic play
  void Play();
  void Pause();
  void StopLooping();
  void StartLooping();
  void Restart();

  // Frame and buffer
  GL::Texture2D CurrentFrame;

  // NOTE(review): "Lenght" is a typo but is part of the public interface,
  // so it is kept for backward compatibility.
  float BufferLenghtInSeconds = 1;
  bool isVideoLooping = true, isVideoOver = false, isVideoPaused = false;

  // SAR and Scaling
  Vector2i Dimensions{0, 0};

private:
  // Contexts.
  // BUG FIX: all raw pointers are now initialized to nullptr. Previously
  // they were left indeterminate, so the destructor (and restartVideo())
  // invoked undefined behaviour whenever the constructor bailed out early
  // or the file had no audio stream (aCodecCtx never allocated).
  AVFormatContext *ctx = nullptr;
  const AVCodec *vCodec = nullptr;
  const AVCodec *aCodec = nullptr;
  AVCodecContext *vCodecCtx = nullptr, *aCodecCtx = nullptr;
  AVStream *videoStream = nullptr, *audioStream = nullptr;
  struct SwsContext *swsCtx = NULL; // Visual
  struct SwrContext *swrCtx = NULL; // Audio
  uint16_t ID = 0;                  // Time hook handle; 0 means "not playing"

  // Time specific
  int8_t videoStreamNum = -1, audioStreamNum = -1; // -1 = stream absent
  uint32_t currentFrameNumber = 0;
  float timeSink = 0.0f, frameTime = 0.0f;

  // Buffering
  std::queue<Image2D> frameBuffer;
  // p / z count decoded audio / video frames (debug output only).
  uint32_t bufferMaxFrames = 0, p = 0, z = 0;

  // SAR / Sizing
  uint32_t scaleFactor = 1;

  // Frame handling
  bool frameSet = false; // has texture storage been allocated yet?
  void continueVideo();
  Containers::Array<char> loadNextFrame();
  inline void frameDebug(AVFrame *frame);
  inline void frameSetScaleSAR(AVFrame *frame);
  inline void frameConvert(AVFrame *sourceFrame, AVFrame *convertedFrame);
  inline void frameFlip(AVFrame *frame);

  inline void restartVideo();
  void dumpAndRefillBuffer();

  void loadTexture(Containers::Array<char> data);
  void loadTexture(ImageView2D image);
  Image2D loadImage(Containers::Array<char> data);
};
} // namespace ChargeVideo
|
56
src/Time.cpp
Normal file
56
src/Time.cpp
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
#include "ChargeVideo.hpp"
|
||||||
|
#include <Magnum/Timeline.h>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
using namespace ChargeVideo;

// ================== Video Timing ==================
// Out-of-line definitions of the static Time members declared in
// ChargeVideo.hpp.
float Time::DeltaTime = 0.0f, Time::AverageDeltaTime = 0.0f,
      Time::rollingSum = 0.0f;
// ADTMaxSample: window size (in frames) of the rolling delta average.
uint16_t Time::ADTMaxSample = 90, Time::ADTIndex = 0, Time::videoIDCounter = 0;
bool Time::ADTFirstCycle = true;
std::vector<float> Time::deltaAverage;
// Playback callbacks of the videos currently driven by AdvanceTime(),
// keyed by their hook ID.
std::unordered_map<uint16_t, std::function<void()>> Time::videoPlayMethods;
// Hook IDs queued for deferred removal (see unhookVideo()).
std::vector<uint16_t> Time::toUnhook;

Timeline Time::time{};
|
||||||
|
|
||||||
|
void Time::AdvanceTime() {
|
||||||
|
if (deltaAverage.size() != ADTMaxSample) {
|
||||||
|
deltaAverage.resize(ADTMaxSample, 0.0f);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (time.currentFrameTime() == 0.0f) {
|
||||||
|
time.start();
|
||||||
|
}
|
||||||
|
|
||||||
|
// We are giving average delta for frame timing stablisation
|
||||||
|
DeltaTime = time.currentFrameDuration();
|
||||||
|
rollingSum += DeltaTime - deltaAverage[ADTIndex];
|
||||||
|
deltaAverage[ADTIndex] = DeltaTime;
|
||||||
|
|
||||||
|
// First cycle would be ruined if we use MaxSample since not all the slots
|
||||||
|
// would be filled yet
|
||||||
|
if (ADTFirstCycle && ADTIndex == ADTMaxSample - 1) {
|
||||||
|
ADTFirstCycle = false;
|
||||||
|
}
|
||||||
|
AverageDeltaTime = rollingSum / (ADTFirstCycle ? ADTIndex + 1 : ADTMaxSample);
|
||||||
|
ADTIndex = (ADTIndex + 1) % ADTMaxSample;
|
||||||
|
|
||||||
|
for (auto processVideo : videoPlayMethods) {
|
||||||
|
processVideo.second();
|
||||||
|
}
|
||||||
|
|
||||||
|
for (uint16_t id : toUnhook) {
|
||||||
|
videoPlayMethods.erase(id);
|
||||||
|
}
|
||||||
|
|
||||||
|
time.nextFrame();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Registers a per-frame playback callback and returns the handle used to
// unhook it later. IDs start at 1, so 0 can safely mean "not hooked".
uint16_t Time::hookVideo(std::function<void()> videoPlay) {
  const uint16_t newID = ++videoIDCounter;
  videoPlayMethods[newID] = videoPlay;
  return newID;
}
|
||||||
|
|
||||||
|
// Queues a callback handle for removal; the actual erase happens inside
// AdvanceTime() so a video can unhook itself safely mid-iteration.
void Time::unhookVideo(uint16_t ID) { toUnhook.push_back(ID); }
|
337
src/Video.cpp
Normal file
337
src/Video.cpp
Normal file
@@ -0,0 +1,337 @@
|
|||||||
|
#include "ChargeVideo.hpp"
|
||||||
|
|
||||||
|
#include <Magnum/GL/TextureFormat.h>
|
||||||
|
#include <Magnum/Math/Functions.h>
|
||||||
|
#include <Magnum/PixelFormat.h>
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
|
|
||||||
|
using namespace ChargeVideo;
|
||||||
|
|
||||||
|
// ================== Video Construct/Destruct ==================
|
||||||
|
// ShouldVideoLoop default is true
|
||||||
|
// Opens `path`, finds its video (mandatory) and audio (optional) streams,
// sets up the decoders and — when audio exists — a resampler to stereo
// S16 @ 44100 Hz. On any failure it logs and returns, leaving the object
// in a non-playable state.
Video::Video(std::string path, bool ShouldVideoLoop, float BufferSizeInSeconds)
    : BufferLenghtInSeconds(BufferSizeInSeconds),
      isVideoLooping(ShouldVideoLoop) {
  // Context to hold our data
  ctx = avformat_alloc_context();
  if (!ctx) {
    Utility::Error{} << "Could not allocate space for " << path.c_str();
    return;
  }

  // NOTE(review): on failure avformat_open_input() frees the context and
  // NULLs it, making the free below a safe no-op — confirm against the
  // FFmpeg version in use.
  if (avformat_open_input(&ctx, path.c_str(), NULL, NULL) != 0) {
    Utility::Error{} << "Could not open file " << path.c_str();
    avformat_free_context(ctx);
    return;
  }

  // Pick the first video and first audio stream in the container.
  for (uint8_t x = 0; x < ctx->nb_streams; x++) {
    if (ctx->streams[x]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
      videoStreamNum = x;
      videoStream = ctx->streams[x];
      continue;
    }
    if (ctx->streams[x]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
      audioStreamNum = x;
      audioStream = ctx->streams[x];
      continue;
    }
  }

  // A video stream is mandatory; bail out without one.
  if (videoStreamNum == -1) {
    Utility::Error{} << "Could not find a video stream! " << path.c_str();
    avformat_free_context(ctx);
    return;
  }

  if (audioStreamNum == -1) {
    Utility::Debug{} << "No audio stream was found! Continuing anyway";
  }

  // Actual stream: set up the video decoder with slice multithreading
  // (thread_count 0 lets FFmpeg pick the thread count).
  vCodec = avcodec_find_decoder(videoStream->codecpar->codec_id);
  vCodecCtx = avcodec_alloc_context3(vCodec);
  avcodec_parameters_to_context(vCodecCtx, videoStream->codecpar);

  vCodecCtx->thread_count = 0;
  vCodecCtx->thread_type = FF_THREAD_SLICE;
  avcodec_open2(vCodecCtx, vCodec,
                NULL); // open2 is such a stupid name

  // Some videos do not have audio streams
  if (audioStreamNum != -1) {
    aCodec = avcodec_find_decoder(audioStream->codecpar->codec_id);
    aCodecCtx = avcodec_alloc_context3(aCodec);
    avcodec_parameters_to_context(aCodecCtx, audioStream->codecpar);

    avcodec_open2(aCodecCtx, aCodec, NULL);

    // Resampler: whatever the source layout/format/rate is, convert to
    // interleaved stereo S16 at 44100 Hz.
    AVChannelLayout outLayout = AV_CHANNEL_LAYOUT_STEREO;
    swr_alloc_set_opts2(&swrCtx, &outLayout, AV_SAMPLE_FMT_S16, 44100,
                        &aCodecCtx->ch_layout, aCodecCtx->sample_fmt,
                        aCodecCtx->sample_rate, 0, NULL);
    swr_init(swrCtx);
  }

  // Timing stuff: seconds per frame, and how many frames fit into the
  // requested buffer length.
  frameTime = 1 / av_q2d(videoStream->avg_frame_rate);
  bufferMaxFrames = (1 / frameTime) * BufferLenghtInSeconds;
}
|
||||||
|
|
||||||
|
// Releases every FFmpeg resource owned by this video.
Video::~Video() {
  sws_freeContext(swsCtx); // NULL-safe
  swr_free(&swrCtx);       // NULL-safe, nulls the pointer
  if (videoStreamNum != -1) {
    // Only tear these down after a fully successful open: the constructor's
    // error paths already released ctx and never allocated the codec
    // contexts (videoStreamNum stays -1 on every such path).
    avcodec_free_context(&vCodecCtx);
    // BUG FIX: a context opened with avformat_open_input() must be shut
    // down with avformat_close_input(); avformat_free_context() alone
    // leaks the demuxer's I/O state.
    avformat_close_input(&ctx);
  }
  // BUG FIX: aCodecCtx is only allocated when an audio stream exists;
  // freeing it unconditionally dereferenced an indeterminate pointer.
  // NOTE(review): a file with audio but no video stream still leaks here —
  // fully fixed once the header nullptr-initializes the members.
  if (audioStreamNum != -1 && videoStreamNum != -1) {
    avcodec_free_context(&aCodecCtx);
  }
}
|
||||||
|
|
||||||
|
// ================== Public Video Controls ==================
|
||||||
|
void Video::AdvanceToNextFrame() { loadTexture(loadNextFrame()); }
|
||||||
|
|
||||||
|
void Video::Play() {
|
||||||
|
if (ID != 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
ID = Time::hookVideo(std::bind(&Video::continueVideo, this));
|
||||||
|
isVideoPaused = false;
|
||||||
|
isVideoOver = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Detaches the video from the global clock; decoded state is kept so
// Play() resumes where playback stopped.
void Video::Pause() {
  if (ID == 0)
    return; // not currently playing

  Time::unhookVideo(ID);
  isVideoPaused = true;
  ID = 0;
}
|
||||||
|
|
||||||
|
void Video::Restart() {
|
||||||
|
if (ID == 0) {
|
||||||
|
Play();
|
||||||
|
}
|
||||||
|
restartVideo();
|
||||||
|
dumpAndRefillBuffer();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lets the video run through to its final frame instead of wrapping.
void Video::StopLooping() { isVideoLooping = false; }
|
||||||
|
|
||||||
|
// Makes the video wrap back to the start when it reaches the end.
void Video::StartLooping() { isVideoLooping = true; }
|
||||||
|
|
||||||
|
// ================== Private Video Controls ==================
|
||||||
|
void Video::continueVideo() {
|
||||||
|
// Looping handling
|
||||||
|
if (currentFrameNumber >= videoStream->nb_frames - 2) {
|
||||||
|
if (!isVideoLooping) {
|
||||||
|
isVideoOver = true;
|
||||||
|
Pause(); // Here we did that (check comment below)
|
||||||
|
return; // We remove what we are returning TO
|
||||||
|
}
|
||||||
|
Utility::Debug{} << "Audio" << p << "Video" << z;
|
||||||
|
restartVideo();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timing
|
||||||
|
float variableFrameTime = frameTime - Time::AverageDeltaTime;
|
||||||
|
if (timeSink < variableFrameTime) {
|
||||||
|
timeSink += Time::DeltaTime;
|
||||||
|
|
||||||
|
if (!isVideoOver && frameBuffer.size() < bufferMaxFrames) {
|
||||||
|
frameBuffer.push(loadImage(loadNextFrame()));
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// This allows the lag to not accumillate
|
||||||
|
timeSink -= variableFrameTime;
|
||||||
|
|
||||||
|
if (frameBuffer.size() == 0) {
|
||||||
|
frameBuffer.push(loadImage(loadNextFrame()));
|
||||||
|
}
|
||||||
|
|
||||||
|
loadTexture(frameBuffer.front());
|
||||||
|
frameBuffer.pop();
|
||||||
|
}
|
||||||
|
|
||||||
|
// ======================== HELPERS ========================
|
||||||
|
Containers::Array<char> Video::loadNextFrame() {
|
||||||
|
AVFrame *frame = av_frame_alloc(), *convertedFrame = av_frame_alloc(),
|
||||||
|
*audioFrame = av_frame_alloc(),
|
||||||
|
*convertedAudioFrame = av_frame_alloc();
|
||||||
|
AVPacket *packet = av_packet_alloc();
|
||||||
|
|
||||||
|
// A hard stop if we are out of frames to read
|
||||||
|
while (av_read_frame(ctx, packet) >= 0) {
|
||||||
|
if (static_cast<int8_t>(packet->stream_index) == audioStreamNum) {
|
||||||
|
avcodec_send_packet(aCodecCtx, packet);
|
||||||
|
avcodec_receive_frame(aCodecCtx, audioFrame);
|
||||||
|
if (audioFrame->format != -1) {
|
||||||
|
convertedAudioFrame->format = AV_SAMPLE_FMT_S16;
|
||||||
|
convertedAudioFrame->nb_samples =
|
||||||
|
swr_get_out_samples(swrCtx, audioFrame->nb_samples);
|
||||||
|
convertedAudioFrame->ch_layout = AV_CHANNEL_LAYOUT_STEREO;
|
||||||
|
av_frame_get_buffer(convertedAudioFrame,
|
||||||
|
2); // since it is LRLRLRLRLRLRLR
|
||||||
|
|
||||||
|
swr_convert(swrCtx, convertedAudioFrame->data,
|
||||||
|
convertedAudioFrame->nb_samples, audioFrame->data,
|
||||||
|
audioFrame->nb_samples);
|
||||||
|
|
||||||
|
p++;
|
||||||
|
Utility::Debug{} << "Loaded an audio frame";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (static_cast<int8_t>(packet->stream_index) == videoStreamNum) {
|
||||||
|
// Requests a frame from the decoder
|
||||||
|
avcodec_send_packet(vCodecCtx, packet);
|
||||||
|
avcodec_receive_frame(vCodecCtx, frame);
|
||||||
|
av_packet_unref(packet);
|
||||||
|
|
||||||
|
if (frame->format != -1) {
|
||||||
|
// FrameDebug(frame);
|
||||||
|
frameSetScaleSAR(frame);
|
||||||
|
frameFlip(frame);
|
||||||
|
|
||||||
|
frameConvert(frame, convertedFrame);
|
||||||
|
// FrameDebug(convertedFrame);
|
||||||
|
z++;
|
||||||
|
Utility::Debug{} << "Loaded a video frame";
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
av_packet_unref(packet);
|
||||||
|
}
|
||||||
|
Utility::Debug{} << "Finished Load";
|
||||||
|
// You cannot use strlen(data) it does not work
|
||||||
|
size_t dataSize = av_image_get_buffer_size(
|
||||||
|
static_cast<AVPixelFormat>(convertedFrame->format), Dimensions.x(),
|
||||||
|
Dimensions.y(), 3);
|
||||||
|
Containers::Array<char> data = Containers::Array<char>{NoInit, dataSize};
|
||||||
|
std::memcpy(data.data(), convertedFrame->data[0], dataSize);
|
||||||
|
currentFrameNumber++;
|
||||||
|
|
||||||
|
// Cleanup time cus this is a C library yay (ironic)
|
||||||
|
av_frame_free(
|
||||||
|
&convertedFrame); // Data[0] from here needs to be owned by someone else
|
||||||
|
av_frame_free(&convertedAudioFrame);
|
||||||
|
av_frame_free(&frame);
|
||||||
|
av_frame_free(&audioFrame);
|
||||||
|
av_packet_free(&packet);
|
||||||
|
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wraps raw RGB pixel bytes in a Magnum Image2D, transferring ownership
// of the buffer to the image.
Image2D Video::loadImage(Containers::Array<char> data) {
  return Image2D{PixelFormat::RGB8Unorm, Dimensions, std::move(data)};
}
|
||||||
|
|
||||||
|
// Uploads raw RGB pixel bytes to CurrentFrame via a non-owning view.
void Video::loadTexture(Containers::Array<char> data) {
  loadTexture(ImageView2D{PixelFormat::RGB8Unorm, Dimensions, data});
}
|
||||||
|
|
||||||
|
// Uploads one frame into CurrentFrame. The first call also allocates the
// texture storage, whose size/format is fixed by that first frame.
void Video::loadTexture(ImageView2D image) {
  if (!frameSet) {
    frameSet = true;
    CurrentFrame.setWrapping(GL::SamplerWrapping::ClampToEdge);
    CurrentFrame.setMagnificationFilter(GL::SamplerFilter::Nearest);
    CurrentFrame.setMinificationFilter(GL::SamplerFilter::Nearest);
    CurrentFrame.setStorage(1, GL::textureFormat(image.format()), image.size());
  }
  CurrentFrame.setSubImage(0, {}, image).generateMipmap();
}
|
||||||
|
|
||||||
|
// ======================== INLINES ========================
|
||||||
|
void Video::frameDebug(AVFrame *frame) {
|
||||||
|
Utility::Debug{} << "Frame" << currentFrameNumber << "/"
|
||||||
|
<< videoStream->nb_frames - 2
|
||||||
|
<< "codec:" << avcodec_get_name(vCodecCtx->codec_id)
|
||||||
|
<< "colourspace:"
|
||||||
|
<< av_get_pix_fmt_name(
|
||||||
|
static_cast<AVPixelFormat>(frame->format))
|
||||||
|
<< "SAR:" << frame->sample_aspect_ratio.num << ":"
|
||||||
|
<< frame->sample_aspect_ratio.den
|
||||||
|
<< "strides:" << frame->linesize[0] << frame->linesize[1]
|
||||||
|
<< frame->linesize[2] << frame->linesize[3]
|
||||||
|
<< "Ratio:" << frame->width << "x" << frame->height;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flips a planar frame vertically in-place by pointing each plane at its
// last row and negating the stride, so sws_scale walks it bottom-up.
void Video::frameFlip(AVFrame *frame) {
  // Thank you so much to
  // https://ffmpeg-user.ffmpeg.narkive.com/t6y9mIOC/flip-in-sws-scale#post10
  //
  // Flips image 180 deg due to origin points of YUV420p and RGB24 being
  // different cus of course it is.
  // I had to figure out that U and V channels also need to be flipped but
  // we know that U and V are half of the size of Y so height/2.
  // NOTE(review): assumes 4:2:0 chroma subsampling — other pixel formats
  // would need different plane heights; confirm decoder output.
  frame->data[0] += frame->linesize[0] * (frame->height - 1);
  frame->data[1] += frame->linesize[1] * (frame->height / 2 - 1);
  frame->data[2] += frame->linesize[2] * (frame->height / 2 - 1);
  frame->linesize[0] = -frame->linesize[0];
  frame->linesize[1] = -frame->linesize[1];
  frame->linesize[2] = -frame->linesize[2];
}
|
||||||
|
|
||||||
|
// Converts a decoded frame into an RGB24 frame of Dimensions size using
// swscale; allocates convertedFrame's buffers and lazily creates the
// scaler context on first use (source parameters never change per video).
void Video::frameConvert(AVFrame *sourceFrame, AVFrame *convertedFrame) {
  // Converting YUV420p to RGB24
  convertedFrame->format = AV_PIX_FMT_RGB24;
  convertedFrame->colorspace = AVCOL_SPC_BT709;
  convertedFrame->color_range = AVCOL_RANGE_JPEG;
  convertedFrame->width = Dimensions.x();
  convertedFrame->height = Dimensions.y();
  av_frame_get_buffer(convertedFrame,
                      3); // Proper way to allocate space for data

  if (swsCtx == NULL) {
    swsCtx = sws_getContext(Dimensions.x(), Dimensions.y(),
                            static_cast<AVPixelFormat>(sourceFrame->format),
                            Dimensions.x(), Dimensions.y(),
                            static_cast<AVPixelFormat>(convertedFrame->format),
                            SWS_BICUBIC, NULL, NULL, NULL);
  }
  // TO DO: DO THIS PROPERLY
  // NOTE(review): hard-codes BT.709 coefficients regardless of the
  // source's actual colorspace — confirm acceptable for expected inputs.
  sws_setColorspaceDetails(swsCtx, sws_getCoefficients(SWS_CS_ITU709),
                           sourceFrame->color_range,
                           sws_getCoefficients(SWS_CS_ITU709),
                           convertedFrame->color_range, 0, 1 << 16, 1 << 16);
  // -----------------------------

  sws_scale(swsCtx, sourceFrame->data, sourceFrame->linesize, 0, Dimensions.y(),
            convertedFrame->data, convertedFrame->linesize);
}
|
||||||
|
|
||||||
|
// One-time (first frame only) computation of the display dimensions:
// starts from the coded width/height and, when the codec reports a
// non-square sample aspect ratio, rescales to the reduced display aspect
// ratio so pixels end up square on screen.
void Video::frameSetScaleSAR(AVFrame *frame) {
  // SAR calculations — only run while Dimensions is still unset.
  if (Dimensions.x() == 0) {
    Dimensions.x() = frame->width;
    Dimensions.y() = frame->height;
    if (vCodecCtx->sample_aspect_ratio.num != 0) {
      AVRational SAR = vCodecCtx->sample_aspect_ratio, DAR;
      av_reduce(&DAR.num, &DAR.den, SAR.num * Dimensions.x(),
                SAR.den * Dimensions.y(), INT64_MAX);
      // Just to let the programmer know we have scaling happening due to
      // SAR
      scaleFactor = Math::min(Math::floor(Dimensions.x() / DAR.num),
                              Math::floor(Dimensions.y() / DAR.den));
      Dimensions.x() = DAR.num * scaleFactor;
      Dimensions.y() = DAR.den * scaleFactor;
    }
  }
}
|
||||||
|
|
||||||
|
void Video::restartVideo() {
|
||||||
|
av_seek_frame(ctx, videoStreamNum, 0, AVSEEK_FLAG_BACKWARD);
|
||||||
|
avcodec_flush_buffers(vCodecCtx);
|
||||||
|
avcodec_flush_buffers(aCodecCtx);
|
||||||
|
currentFrameNumber = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Discards every buffered frame and immediately decodes/presents the
// next one so the on-screen texture stays valid.
void Video::dumpAndRefillBuffer() {
  std::queue<Image2D> drained;
  frameBuffer.swap(drained);
  loadTexture(loadNextFrame());
}
|
Reference in New Issue
Block a user