34 #include "../include/FFmpegReader.h"
36 #define ENABLE_VAAPI 0
39 #pragma message "You are compiling with experimental hardware decode"
41 #pragma message "You are compiling only with software decode"
45 #define MAX_SUPPORTED_WIDTH 1950
46 #define MAX_SUPPORTED_HEIGHT 1100
49 #include "libavutil/hwcontext_vaapi.h"
// Partial mirror of FFmpeg's *internal* VAAPIDecodeContext layout, duplicated
// here so Open() can reach through pCodecCtx->priv_data to read va_config
// (see the AVVAAPIHWConfig cast later in this file).  NOTE(review): this is a
// fragile layout-dependent hack — it must match the exact FFmpeg build, or the
// cast is undefined behavior.  Several members and the closing brace are
// elided from this extraction.
51 typedef struct VAAPIDecodeContext {
53 VAEntrypoint va_entrypoint;
55 VAContextID va_context;
57 #if FF_API_STRUCT_VAAPI_CONTEXT
// Legacy struct vaapi_context support, compiled only when FFmpeg still
// exposes the deprecated API (FF_API_STRUCT_VAAPI_CONTEXT).
60 struct vaapi_context *old_context;
61 AVBufferRef *device_ref;
65 AVHWDeviceContext *device;
66 AVVAAPIDeviceContext *hwctx;
68 AVHWFramesContext *frames;
69 AVVAAPIFramesContext *hwfc;
71 enum AVPixelFormat surface_format;
// File-scope globals written by the get_format callback (get_hw_dec_format)
// so Open() can see which hardware pixel format / device type the decoder
// actually negotiated.  AV_PIX_FMT_NONE / AV_HWDEVICE_TYPE_NONE mean
// "no hardware format chosen yet".
82 AVPixelFormat hw_de_av_pix_fmt_global = AV_PIX_FMT_NONE;
83 AVHWDeviceType hw_de_av_device_type_global = AV_HWDEVICE_TYPE_NONE;
// FFmpegReader constructor (member-initializer list only — the signature and
// body are elided from this extraction).  The 99999 values are sentinels
// marking the audio/video PTS offsets as "not yet detected"; see
// UpdatePTSOffset(), which compares against the same literal.
87 : last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
88 audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
89 check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
90 prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
91 current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
// Second FFmpegReader constructor overload — initializer list is identical to
// the first (same sentinel 99999 PTS offsets, same zeroed seek/bookkeeping
// state).  Presumably this overload takes an extra inspect_reader flag
// (guarded below) — TODO confirm against the full source; the signature is
// elided from this extraction.
109 : last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
110 audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
111 check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
112 prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
113 current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
// Only runs the (elided) inspection block when the caller asked for it.
126 if (inspect_reader) {
// Fragment of a proximity test (presumably AudioLocation::is_near — TODO
// confirm): two positions count as "near" when their difference is within
// +/- amount.  Surrounding lines are elided from this extraction.
148 if (abs(diff) <= amount)
// AVCodecContext::get_format callback (installed in Open() via
// pCodecCtx->get_format).  Walks the pixel-format list FFmpeg offers for this
// stream and records the first supported *hardware* format plus its matching
// device type into the two file-scope globals, so Open()/GetAVFrame() know
// which hw path was negotiated.  Returns AV_PIX_FMT_NONE when nothing in the
// list is usable.  NOTE(review): the switch(*p) header, the per-case return/
// break statements and the closing #endif lines are elided from this
// extraction — only the case bodies survive.
159 static enum AVPixelFormat get_hw_dec_format(AVCodecContext *ctx,
const enum AVPixelFormat *pix_fmts)
161 const enum AVPixelFormat *p;
163 for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
// Linux: VA-API and VDPAU.
165 #if defined(__linux__)
167 case AV_PIX_FMT_VAAPI:
168 hw_de_av_pix_fmt_global = AV_PIX_FMT_VAAPI;
169 hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VAAPI;
172 case AV_PIX_FMT_VDPAU:
173 hw_de_av_pix_fmt_global = AV_PIX_FMT_VDPAU;
174 hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VDPAU;
// Windows: DXVA2 and D3D11VA (the enclosing #if defined(_WIN32) is elided).
180 case AV_PIX_FMT_DXVA2_VLD:
181 hw_de_av_pix_fmt_global = AV_PIX_FMT_DXVA2_VLD;
182 hw_de_av_device_type_global = AV_HWDEVICE_TYPE_DXVA2;
185 case AV_PIX_FMT_D3D11:
186 hw_de_av_pix_fmt_global = AV_PIX_FMT_D3D11;
187 hw_de_av_device_type_global = AV_HWDEVICE_TYPE_D3D11VA;
// macOS: VideoToolbox.
191 #if defined(__APPLE__)
193 case AV_PIX_FMT_VIDEOTOOLBOX:
194 hw_de_av_pix_fmt_global = AV_PIX_FMT_VIDEOTOOLBOX;
195 hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
// Cross-platform: CUDA/NVDEC and Intel QSV.
200 case AV_PIX_FMT_CUDA:
201 hw_de_av_pix_fmt_global = AV_PIX_FMT_CUDA;
202 hw_de_av_device_type_global = AV_HWDEVICE_TYPE_CUDA;
206 hw_de_av_pix_fmt_global = AV_PIX_FMT_QSV;
207 hw_de_av_device_type_global = AV_HWDEVICE_TYPE_QSV;
// No hardware format matched — caller falls back to software decode.
216 return AV_PIX_FMT_NONE;
// Whitelist of codec ids for which hardware-accelerated decode is attempted.
// Only the case labels survive in this extraction; presumably the listed
// codecs return a truthy value and everything else returns 0 — TODO confirm
// against the full source.
219 int FFmpegReader::IsHardwareDecodeSupported(
int codecid)
223 case AV_CODEC_ID_H264:
224 case AV_CODEC_ID_MPEG2VIDEO:
225 case AV_CODEC_ID_VC1:
226 case AV_CODEC_ID_WMV1:
227 case AV_CODEC_ID_WMV2:
228 case AV_CODEC_ID_WMV3:
// FFmpegReader::Open() (large fragment; many interior lines elided).
// Opens the container, locates the first video and audio streams, opens both
// codecs, and optionally configures hardware-accelerated decode.
// retry_decode_open drives a retry loop (see the do/while at the bottom):
// 2 = first attempt (try hardware), 1 = retry (fall back to software),
// 0 = done.  Throws InvalidFile / NoStreamsFound / InvalidCodec on failure.
249 if (avformat_open_input(&pFormatCtx, path.c_str(), NULL, NULL) != 0)
250 throw InvalidFile(
"File could not be opened.", path);
253 if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
// Scan all streams and remember the first video and first audio stream index.
259 for (
unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
261 if (
AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
265 if (
AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
269 if (videoStream == -1 && audioStream == -1)
270 throw NoStreamsFound(
"No video or audio streams found in this file.", path);
// ---- Video stream setup ----
273 if (videoStream != -1) {
278 pStream = pFormatCtx->streams[videoStream];
284 AVCodec *pCodec = avcodec_find_decoder(codecId);
285 AVDictionary *opts = NULL;
286 int retry_decode_open = 2;
// Only probe hardware support on the first pass through the retry loop.
291 if (
hw_de_on && (retry_decode_open==2)) {
293 hw_de_supported = IsHardwareDecodeSupported(pCodecCtx->codec_id);
296 retry_decode_open = 0;
301 if (pCodec == NULL) {
302 throw InvalidCodec(
"A valid video codec could not be found for this file.", path);
// Allow experimental codecs (e.g. newer encoders FFmpeg still gates).
306 av_dict_set(&opts,
"strict",
"experimental", 0);
310 int i_decoder_hw = 0;
312 char *adapter_ptr = NULL;
315 fprintf(stderr,
"Hardware decoding device number: %d\n", adapter_num);
// Install the negotiation callback so FFmpeg picks a hw pixel format.
318 pCodecCtx->get_format = get_hw_dec_format;
// Adapter selection: only adapter numbers 0..2 are honored; on Linux they
// map to DRM render nodes /dev/dri/renderD128+N.
320 if (adapter_num < 3 && adapter_num >=0) {
321 #if defined(__linux__)
322 snprintf(adapter,
sizeof(adapter),
"/dev/dri/renderD%d", adapter_num+128);
323 adapter_ptr = adapter;
// Map the configured decoder index to a device type (per platform).
// The case labels are elided; the last assignment in each switch is
// presumably the default fallback — TODO confirm.
325 switch (i_decoder_hw) {
327 hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
330 hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
333 hw_de_av_device_type = AV_HWDEVICE_TYPE_VDPAU;
336 hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
339 hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
343 #elif defined(_WIN32)
346 switch (i_decoder_hw) {
348 hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
351 hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
354 hw_de_av_device_type = AV_HWDEVICE_TYPE_D3D11VA;
357 hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
360 hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
363 #elif defined(__APPLE__)
366 switch (i_decoder_hw) {
368 hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
371 hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
374 hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
// On Linux the render node must also be writable by this process;
// Windows/macOS only need a non-NULL adapter string.
384 #if defined(__linux__)
385 if( adapter_ptr != NULL && access( adapter_ptr, W_OK ) == 0 ) {
386 #elif defined(_WIN32)
387 if( adapter_ptr != NULL ) {
388 #elif defined(__APPLE__)
389 if( adapter_ptr != NULL ) {
// Create the hardware device context and attach it to the codec context.
398 hw_device_ctx = NULL;
400 if (av_hwdevice_ctx_create(&hw_device_ctx, hw_de_av_device_type, adapter_ptr, NULL, 0) >= 0) {
401 if (!(pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx))) {
402 throw InvalidCodec(
"Hardware device reference create failed.", path);
433 throw InvalidCodec(
"Hardware device create failed.", path);
439 if (avcodec_open2(pCodecCtx, pCodec, &opts) < 0)
440 throw InvalidCodec(
"A video codec was found, but could not be opened.", path);
// VA-API only: query the driver's frame-size constraints and fall back to
// software (retry_decode_open = 1) if the stream's coded size is outside
// them.  Relies on the VAAPIDecodeContext layout hack declared above.
444 AVHWFramesConstraints *constraints = NULL;
445 void *hwconfig = NULL;
446 hwconfig = av_hwdevice_hwconfig_alloc(hw_device_ctx);
450 ((AVVAAPIHWConfig *)hwconfig)->config_id = ((VAAPIDecodeContext *)(pCodecCtx->priv_data))->va_config;
451 constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx,hwconfig);
454 if (pCodecCtx->coded_width < constraints->min_width ||
455 pCodecCtx->coded_height < constraints->min_height ||
456 pCodecCtx->coded_width > constraints->max_width ||
457 pCodecCtx->coded_height > constraints->max_height) {
460 retry_decode_open = 1;
463 av_buffer_unref(&hw_device_ctx);
464 hw_device_ctx = NULL;
469 ZmqLogger::Instance()->
AppendDebugMethod(
"\nDecode hardware acceleration is used\n",
"Min width :", constraints->min_width,
"Min Height :", constraints->min_height,
"MaxWidth :", constraints->max_width,
"MaxHeight :", constraints->max_height,
"Frame width :", pCodecCtx->coded_width,
"Frame height :", pCodecCtx->coded_height);
470 retry_decode_open = 0;
472 av_hwframe_constraints_free(&constraints);
// Non-VAAPI path: guard against frames larger than the hard-coded
// max_w/max_h limits (presumably derived from MAX_SUPPORTED_WIDTH/HEIGHT
// at the top of the file — TODO confirm); too large => software retry.
485 if (pCodecCtx->coded_width < 0 ||
486 pCodecCtx->coded_height < 0 ||
487 pCodecCtx->coded_width > max_w ||
488 pCodecCtx->coded_height > max_h ) {
489 ZmqLogger::Instance()->
AppendDebugMethod(
"DIMENSIONS ARE TOO LARGE for hardware acceleration\n",
"Max Width :", max_w,
"Max Height :", max_h,
"Frame width :", pCodecCtx->coded_width,
"Frame height :", pCodecCtx->coded_height);
491 retry_decode_open = 1;
494 av_buffer_unref(&hw_device_ctx);
495 hw_device_ctx = NULL;
499 ZmqLogger::Instance()->
AppendDebugMethod(
"\nDecode hardware acceleration is used\n",
"Max Width :", max_w,
"Max Height :", max_h,
"Frame width :", pCodecCtx->coded_width,
"Frame height :", pCodecCtx->coded_height);
500 retry_decode_open = 0;
508 retry_decode_open = 0;
510 }
// Retry loop closes here: loop again while a software fallback is pending.
while (retry_decode_open);
// ---- Audio stream setup ----
519 if (audioStream != -1) {
524 aStream = pFormatCtx->streams[audioStream];
530 AVCodec *aCodec = avcodec_find_decoder(codecId);
536 if (aCodec == NULL) {
537 throw InvalidCodec(
"A valid audio codec could not be found for this file.", path);
541 AVDictionary *opts = NULL;
542 av_dict_set(&opts,
"strict",
"experimental", 0);
545 if (avcodec_open2(aCodecCtx, aCodec, &opts) < 0)
546 throw InvalidCodec(
"An audio codec was found, but could not be opened.", path);
// Copy container-level metadata (trimmed) into info.metadata.
556 AVDictionaryEntry *tag = NULL;
557 while ((tag = av_dict_get(pFormatCtx->metadata,
"", tag, AV_DICT_IGNORE_SUFFIX))) {
558 QString str_key = tag->key;
559 QString str_value = tag->value;
560 info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
// -1 marks "no previous audio packet location yet" (see GetAudioPTSLocation).
564 previous_packet_location.
frame = -1;
// FFmpegReader::Close() (fragment): drops the pending packet, flushes both
// codec contexts, releases the hardware device context, clears every cache
// and bookkeeping map, closes the container, and resets frame counters.
587 RemoveAVPacket(packet);
593 avcodec_flush_buffers(pCodecCtx);
// Drop our reference to the hw device; FFmpeg frees it with the last ref.
598 av_buffer_unref(&hw_device_ctx);
599 hw_device_ctx = NULL;
605 avcodec_flush_buffers(aCodecCtx);
611 working_cache.
Clear();
612 missing_frames.
Clear();
// Reset all per-open bookkeeping (mirrors the same list cleared in Seek()).
617 processed_video_frames.clear();
618 processed_audio_frames.clear();
619 processing_video_frames.clear();
620 processing_audio_frames.clear();
621 missing_audio_frames.clear();
622 missing_video_frames.clear();
623 missing_audio_frames_source.clear();
624 missing_video_frames_source.clear();
625 checked_frames.clear();
629 avformat_close_input(&pFormatCtx);
// NOTE(review): avformat_close_input() already frees and NULLs pFormatCtx;
// this av_freep() looks redundant — confirm against FFmpeg docs before
// removing, as behavior differs across FFmpeg versions.
630 av_freep(&pFormatCtx);
634 largest_frame_processed = 0;
635 seek_audio_frame_found = 0;
636 seek_video_frame_found = 0;
637 current_video_frame = 0;
638 has_missing_frames =
false;
640 last_video_frame.reset();
// Refreshes the reader's audio-related info fields from the open audio
// stream (fragment: most field assignments are elided from this extraction).
644 void FFmpegReader::UpdateAudioInfo() {
// File size from the container's I/O context; -1 when unavailable.
647 info.
file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
// Prefer the audio stream's duration when it is longer than the current one.
661 if (aStream->duration > 0.0f && aStream->duration >
info.
duration)
// Merge audio-stream metadata (trimmed) into info.metadata.
688 AVDictionaryEntry *tag = NULL;
689 while ((tag = av_dict_get(aStream->metadata,
"", tag, AV_DICT_IGNORE_SUFFIX))) {
690 QString str_key = tag->key;
691 QString str_value = tag->value;
692 info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
// Refreshes the reader's video-related info fields from the open video
// stream (fragment: aspect ratio / fps / duration computations are elided).
696 void FFmpegReader::UpdateVideoInfo() {
703 info.
file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
// Only use the stream's declared pixel aspect ratio when it is valid.
713 if (pStream->sample_aspect_ratio.num != 0) {
// One-time interlace detection from the codec's field_order: progressive
// and unknown orders disable the interlace check; other (elided) cases
// presumably mark the stream interlaced — TODO confirm.
736 if (!check_interlace) {
737 check_interlace =
true;
739 switch(field_order) {
740 case AV_FIELD_PROGRESSIVE:
753 case AV_FIELD_UNKNOWN:
755 check_interlace =
false;
// Track whether a real duration could be determined for this stream.
784 is_duration_known =
false;
787 is_duration_known =
true;
// Merge video-stream metadata (trimmed) into info.metadata.
801 AVDictionaryEntry *tag = NULL;
802 while ((tag = av_dict_get(pStream->metadata,
"", tag, AV_DICT_IGNORE_SUFFIX))) {
803 QString str_key = tag->key;
804 QString str_value = tag->value;
805 info.
metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
// Interior of FFmpegReader::GetFrame() (the signature is elided from this
// extraction).  Validates the reader state, then either reads forward
// sequentially (small positive jump of 1..20 frames) or performs a full
// Seek() before reading.
813 throw ReaderClosed(
"The FFmpegReader is closed. Call Open() before calling this method.", path);
// Requested frames below 1 are clamped/handled (body elided).
816 if (requested_frame < 1)
822 throw InvalidFile(
"Could not detect the duration of the video or audio stream.", path);
// Serialize all stream reading — only one thread decodes at a time.
836 #pragma omp critical (ReadStream)
851 if (last_frame == 0 && requested_frame != 1)
// Small forward jumps are cheaper to decode through than to seek.
856 int64_t diff = requested_frame - last_frame;
857 if (diff >= 1 && diff <= 20) {
859 frame = ReadStream(requested_frame);
// Anything else (backwards, or a large jump): seek first, then read.
864 Seek(requested_frame);
873 frame = ReadStream(requested_frame);
// Main demux/decode loop: pulls packets until the requested frame is found in
// the cache, the packet budget is exhausted, or the stream ends.  Throttles
// when too many frames are mid-processing, dispatches video/audio packets to
// their processors, and finalizes frames via CheckWorkingFrames().
// (Fragment: loop braces and several interior lines are elided.)
882 std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) {
884 bool end_of_stream =
false;
885 bool check_seek =
false;
886 bool frame_finished =
false;
887 int packet_error = -1;
888 int packets_processed = 0;
// Hard cap so a corrupt file cannot spin this loop forever.
892 int max_packets = 4096;
// Allow nested OpenMP parallel regions for the per-frame tasks spawned below.
897 omp_set_nested(
true);
909 packet_error = GetNextPacket();
911 int processing_video_frames_size = 0;
912 int processing_audio_frames_size = 0;
915 processing_video_frames_size = processing_video_frames.size();
916 processing_audio_frames_size = processing_audio_frames.size();
// Back-pressure: wait while too many frames are still being processed.
920 while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) {
923 processing_video_frames_size = processing_video_frames.size();
924 processing_audio_frames_size = processing_audio_frames.size();
// Negative read result == end of stream (or read error); stop demuxing.
928 if (packet_error < 0) {
930 end_of_stream =
true;
935 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ReadStream (GetNextPacket)",
"requested_frame", requested_frame,
"processing_video_frames_size", processing_video_frames_size,
"processing_audio_frames_size", processing_audio_frames_size,
"minimum_packets", minimum_packets,
"packets_processed", packets_processed,
"is_seeking", is_seeking);
// ---- Video packet path ----
940 num_packets_since_video_frame = 0;
944 #pragma omp critical (openshot_seek)
945 check_seek = CheckSeek(
true);
960 frame_finished = GetAVFrame();
963 if (frame_finished) {
// First finished frame establishes the video PTS offset.
965 UpdatePTSOffset(
true);
968 ProcessVideoPacket(requested_frame);
// ---- Audio packet path ----
979 else if (
info.
has_audio && packet->stream_index == audioStream) {
981 num_packets_since_video_frame++;
985 #pragma omp critical (openshot_seek)
986 check_seek = CheckSeek(
false);
1001 UpdatePTSOffset(
false);
// Periodically promote completed working frames to the final cache.
1013 CheckWorkingFrames(
false, requested_frame);
1020 packets_processed++;
// Stop once the requested frame is cached (after a minimum effort) or the
// absolute packet budget is hit.
1023 if ((is_cache_found && packets_processed >= minimum_packets) || packets_processed > max_packets)
1033 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ReadStream (Completed)",
"packets_processed", packets_processed,
"end_of_stream", end_of_stream,
"largest_frame_processed", largest_frame_processed,
"Working Cache Count", working_cache.
Count());
// Final pass: flush any remaining working frames (end_of_stream aware).
1038 CheckWorkingFrames(end_of_stream, requested_frame);
// Fallback frame when the requested one was never produced (body elided).
1054 std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);
// Reads the next packet from the container into a freshly allocated AVPacket.
// On success the member 'packet' is replaced (the old one is released via
// RemoveAVPacket); returns av_read_frame's result (< 0 on EOF/error).
// NOTE(review): raw new AVPacket() — ownership is managed manually through
// RemoveAVPacket; the error-path cleanup lines are elided from this
// extraction, so leak behavior on failure cannot be confirmed here.
1063 int FFmpegReader::GetNextPacket() {
1064 int found_packet = 0;
1065 AVPacket *next_packet;
// Serialize packet allocation/reading across threads.
1066 #pragma omp critical(getnextpacket)
1068 next_packet =
new AVPacket();
1069 found_packet = av_read_frame(pFormatCtx, next_packet);
1074 RemoveAVPacket(packet);
1078 if (found_packet >= 0) {
1080 packet = next_packet;
1086 return found_packet;
// Decodes the current video packet into pFrame.  Uses the modern
// send_packet/receive_frame API (with a hardware-frame download path) and,
// in an elided #else branch, the legacy avcodec_decode_video2 API.
// Returns true-ish (frameFinished) when a complete frame was produced.
// (Fragment: the surrounding #if/#else blocks and loop braces are elided.)
1090 bool FFmpegReader::GetAVFrame() {
1091 int frameFinished = -1;
1096 #pragma omp critical (packet_cache)
1101 ret = avcodec_send_packet(pCodecCtx, packet);
// Snapshot the format/device the get_format callback negotiated.
1105 hw_de_av_pix_fmt = hw_de_av_pix_fmt_global;
1106 hw_de_av_device_type = hw_de_av_device_type_global;
1108 if (ret < 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
// next_frame2 receives the decoded (possibly GPU-resident) frame; when no
// hw decode is active it simply aliases next_frame.
1112 AVFrame *next_frame2;
1120 next_frame2 = next_frame;
1124 ret = avcodec_receive_frame(pCodecCtx, next_frame2);
1125 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
// Hardware path: download the GPU frame into a YUV420P software frame,
// then copy the frame properties (pts, etc.) across.
1134 if (next_frame2->format == hw_de_av_pix_fmt) {
1135 next_frame->format = AV_PIX_FMT_YUV420P;
1136 if ((err = av_hwframe_transfer_data(next_frame,next_frame2,0)) < 0) {
1139 if ((err = av_frame_copy_props(next_frame,next_frame2)) < 0) {
1147 next_frame = next_frame2;
// Copy the decoded image into our own allocation (pFrame) so the decoder
// can reuse its buffers.
1152 if (frameFinished == 0 ) {
1154 av_image_alloc(pFrame->data, pFrame->linesize,
info.
width,
info.
height, (AVPixelFormat)(pStream->codecpar->format), 1);
1155 av_image_copy(pFrame->data, pFrame->linesize, (
const uint8_t**)next_frame->data, next_frame->linesize,
// Legacy (pre-send/receive) decode path, kept for old FFmpeg versions.
1166 avcodec_decode_video2(pCodecCtx, next_frame, &frameFinished, packet);
1172 if (frameFinished) {
1176 av_picture_copy((AVPicture *) pFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt,
info.
width,
1186 return frameFinished;
// Called while is_seeking: decides whether the seek landed close enough.
// If decoding has already reached a frame at/after the seek target, the seek
// overshot — re-seek further back (the backoff grows quadratically with
// seek_count).  Otherwise the seek succeeded and normal reading resumes.
// (Fragment: early-return lines and braces are elided.)
1190 bool FFmpegReader::CheckSeek(
bool is_video) {
// Can't judge the seek until the relevant stream has produced a frame.
1195 if ((is_video_seek && !seek_video_frame_found) || (!is_video_seek && !seek_audio_frame_found))
1203 int64_t max_seeked_frame = seek_audio_frame_found;
1204 if (seek_video_frame_found > max_seeked_frame)
1205 max_seeked_frame = seek_video_frame_found;
// Landed at/after the target => overshot; seek again, further back.
1208 if (max_seeked_frame >= seeking_frame) {
1210 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckSeek (Too far, seek again)",
"is_video_seek", is_video_seek,
"max_seeked_frame", max_seeked_frame,
"seeking_frame", seeking_frame,
"seeking_pts", seeking_pts,
"seek_video_frame_found", seek_video_frame_found,
"seek_audio_frame_found", seek_audio_frame_found);
// Quadratic backoff: each retry rewinds 10 * seek_count^2 frames.
1213 Seek(seeking_frame - (10 * seek_count * seek_count));
1216 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckSeek (Successful)",
"is_video_seek", is_video_seek,
"current_pts", packet->pts,
"seeking_pts", seeking_pts,
"seeking_frame", seeking_frame,
"seek_video_frame_found", seek_video_frame_found,
"seek_audio_frame_found", seek_audio_frame_found);
// Converts the decoded video frame (pFrame) into an RGBA QImage-backed Frame
// and adds it to the working cache.  The heavy conversion (sws_scale) runs in
// an OpenMP task.  Frames far behind the request, or with an unknown number,
// are discarded.  (Fragment: sws context creation and several lines elided.)
1230 void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
1232 int64_t current_frame = ConvertVideoPTStoFrame(GetVideoPTS());
// During a seek, the first decoded video frame records where we landed.
1235 if (!seek_video_frame_found && is_seeking)
1236 seek_video_frame_found = current_frame;
// Skip frames more than 20 behind the request, or unmappable PTS (-1).
1239 if ((current_frame < (requested_frame - 20)) or (current_frame == -1)) {
1241 RemoveAVFrame(pFrame);
1258 AVFrame *my_frame = pFrame;
// Mark this frame as in-flight before handing it to the task.
1263 processing_video_frames[current_frame] = current_frame;
1265 #pragma omp task firstprivate(current_frame, my_frame, height, width, video_length, pix_fmt)
1268 AVFrame *pFrameRGB = NULL;
1270 uint8_t *buffer = NULL;
1274 if (pFrameRGB == NULL)
1275 throw OutOfBoundsFrame(
"Convert Image Broke!", current_frame, video_length);
// Determine the maximum output size.  When attached to a parent clip, the
// clip's scale keyframes (max of scale_x / scale_y curves) enlarge the
// target so zoomed clips stay sharp.
1286 if (max_height <= 0)
1293 float max_scale_x =
parent->scale_x.GetMaxPoint().co.Y;
1294 float max_scale_y =
parent->scale_y.GetMaxPoint().co.Y;
1295 max_width = std::max(
float(max_width), max_width * max_scale_x);
1296 max_height = std::max(
float(max_height), max_height * max_scale_y);
1300 float max_scale_x =
parent->scale_x.GetMaxPoint().co.Y;
1301 float max_scale_y =
parent->scale_y.GetMaxPoint().co.Y;
1302 QSize width_size(max_width * max_scale_x,
1305 max_height * max_scale_y);
1307 if (width_size.width() >= max_width && width_size.height() >= max_height) {
1308 max_width = std::max(max_width, width_size.width());
1309 max_height = std::max(max_height, width_size.height());
1311 max_width = std::max(max_width, height_size.width());
1312 max_height = std::max(max_height, height_size.height());
// Downscale (aspect-preserving) only when the source exceeds both limits.
1323 int original_height = height;
1324 if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
1326 float ratio = float(width) / float(height);
1327 int possible_width = round(max_height * ratio);
1328 int possible_height = round(max_width / ratio);
1330 if (possible_width <= max_width) {
1332 width = possible_width;
1333 height = max_height;
1337 height = possible_height;
1344 #pragma omp critical (video_buffer)
1345 buffer = (uint8_t *) av_malloc(numBytes *
sizeof(uint8_t));
// Fast bilinear by default; bicubic when quality matters (condition elided).
1350 int scale_mode = SWS_FAST_BILINEAR;
1352 scale_mode = SWS_BICUBIC;
1358 sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0,
1359 original_height, pFrameRGB->data, pFrameRGB->linesize);
1362 std::shared_ptr<Frame> f = CreateFrame(current_frame);
// RGBA8888, 4 bytes/pixel — matches the sws output configured above.
1365 f->AddImage(width, height, 4, QImage::Format_RGBA8888, buffer);
1368 working_cache.
Add(f);
// Remember the most recent video frame (used to fill missing frames).
1371 #pragma omp critical (video_buffer)
1372 last_video_frame = f;
1379 RemoveAVFrame(my_frame);
1380 sws_freeContext(img_convert_ctx);
// Move this frame from "processing" to "processed" bookkeeping.
1385 processing_video_frames.erase(current_frame);
1386 processed_video_frames[current_frame] = current_frame;
1390 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessVideoPacket (After)",
"requested_frame", requested_frame,
"current_frame", current_frame,
"f->number", f->number);
// Decodes the current audio packet, resamples it to interleaved S16, splits
// the samples per channel, and distributes them across one or more Frames
// starting at target_frame/starting_sample.  Updates the processing/processed
// audio bookkeeping maps.  (Large fragment: resampler setup, sample-format
// handling and several braces are elided from this extraction.)
1397 void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_frame,
int starting_sample) {
// During a seek, the first decoded audio frame records where we landed.
1399 if (!seek_audio_frame_found && is_seeking)
1400 seek_audio_frame_found = target_frame;
// Skip audio more than 20 frames behind the request.
1403 if (target_frame < (requested_frame - 20)) {
1405 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (Skipped)",
"requested_frame", requested_frame,
"target_frame", target_frame,
"starting_sample", starting_sample);
1412 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (Before)",
"requested_frame", requested_frame,
"target_frame", target_frame,
"starting_sample", starting_sample);
1415 int frame_finished = 0;
1419 int packet_samples = 0;
// Entire decode is serialized — one audio packet at a time.
1424 #pragma omp critical (ProcessAudioPacket)
// Modern decode loop (send/receive); the NULL-data case drains the decoder.
1429 while((packet->size > 0 || (!packet->data && frame_finished)) && ret >= 0) {
1431 ret = avcodec_send_packet(aCodecCtx, packet);
1432 if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
1433 avcodec_send_packet(aCodecCtx, NULL);
1438 ret = avcodec_receive_frame(aCodecCtx, audio_frame);
1441 if(ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
1442 avcodec_flush_buffers(aCodecCtx);
1446 ret = frame_finished;
1449 if (!packet->data && !frame_finished)
// Legacy decode path for old FFmpeg versions.
1454 int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
1458 if (frame_finished) {
1462 int plane_size = -1;
1463 data_size = av_samples_get_buffer_size(&plane_size,
1465 audio_frame->nb_samples,
// Per-channel sample count for this packet.
1473 int pts_remaining_samples = packet_samples /
info.
channels;
1476 int64_t adjusted_pts = packet->pts + audio_pts_offset;
1481 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (Decode Info A)",
"pts_counter", pts_counter,
"PTS", adjusted_pts,
"Offset", audio_pts_offset,
"PTS Diff", adjusted_pts - prev_pts,
"Samples", pts_remaining_samples,
"Sample PTS ratio",
float(adjusted_pts - prev_pts) / pts_remaining_samples);
1482 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (Decode Info B)",
"Sample Diff", pts_remaining_samples - prev_samples - prev_pts,
"Total", pts_total,
"PTS Seconds", audio_seconds,
"Sample Seconds", sample_seconds,
"Seconds Diff", audio_seconds - sample_seconds,
"raw samples", packet_samples);
1485 prev_pts = adjusted_pts;
1486 pts_total += pts_remaining_samples;
1488 prev_samples = pts_remaining_samples;
// Pre-mark every frame this packet will touch as "processing", walking
// forward from the previous packet location until the samples run out.
1493 processing_audio_frames.insert(std::pair<int, int>(previous_packet_location.
frame, previous_packet_location.
frame));
1496 while (pts_remaining_samples) {
1501 int samples = samples_per_frame - previous_packet_location.
sample_start;
1502 if (samples > pts_remaining_samples)
1503 samples = pts_remaining_samples;
1506 pts_remaining_samples -= samples;
1508 if (pts_remaining_samples > 0) {
1510 previous_packet_location.
frame++;
1516 processing_audio_frames.insert(std::pair<int, int>(previous_packet_location.
frame, previous_packet_location.
frame));
// ---- Resample to interleaved signed 16-bit ----
1529 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (ReSample)",
"packet_samples", packet_samples,
"info.channels",
info.
channels,
"info.sample_rate",
info.
sample_rate,
"aCodecCtx->sample_fmt",
AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx),
"AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16);
1534 audio_converted->nb_samples = audio_frame->nb_samples;
1535 av_samples_alloc(audio_converted->data, audio_converted->linesize,
info.
channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
1545 av_opt_set_int(avr,
"out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
// Resampler invocation (function name elided in this extraction).
1554 audio_converted->data,
1555 audio_converted->linesize[0],
1556 audio_converted->nb_samples,
1558 audio_frame->linesize[0],
1559 audio_frame->nb_samples);
// Copy the converted interleaved S16 samples into audio_buf.
1562 memcpy(audio_buf, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) *
info.
channels);
1570 av_free(audio_converted->data[0]);
// ---- Distribute samples per channel across Frames ----
1573 int64_t starting_frame_number = -1;
1574 bool partial_frame =
true;
1575 for (
int channel_filter = 0; channel_filter <
info.
channels; channel_filter++) {
1577 starting_frame_number = target_frame;
1578 int channel_buffer_size = packet_samples /
info.
channels;
1579 float *channel_buffer =
new float[channel_buffer_size];
1582 for (
int z = 0; z < channel_buffer_size; z++)
1583 channel_buffer[z] = 0.0f;
// De-interleave: keep only samples belonging to channel_filter, scaled
// from S16 range into [-1, 1) floats (1 / 2^15).
1589 for (
int sample = 0; sample < packet_samples; sample++) {
1591 if (channel_filter == channel) {
1593 channel_buffer[position] = audio_buf[sample] * (1.0f / (1 << 15));
// Walk the channel buffer forward, filling each Frame from 'start' until
// either the frame is full or the buffer is exhausted.
1609 int start = starting_sample;
1610 int remaining_samples = channel_buffer_size;
1611 float *iterate_channel_buffer = channel_buffer;
1612 while (remaining_samples > 0) {
1617 int samples = samples_per_frame - start;
1618 if (samples > remaining_samples)
1619 samples = remaining_samples;
1622 std::shared_ptr<Frame> f = CreateFrame(starting_frame_number);
// partial_frame: true unless this write exactly fills the frame.
1625 if (samples_per_frame == start + samples)
1626 partial_frame =
false;
1628 partial_frame =
true;
// 0.98f = slight attenuation applied when adding samples (per AddAudio).
1632 f->AddAudio(
true, channel_filter, start, iterate_channel_buffer, samples, 0.98f);
1635 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (f->AddAudio)",
"frame", starting_frame_number,
"start", start,
"samples", samples,
"channel", channel_filter,
"partial_frame", partial_frame,
"samples_per_frame", samples_per_frame);
1638 working_cache.
Add(f);
1641 remaining_samples -= samples;
1644 if (remaining_samples > 0)
1645 iterate_channel_buffer += samples;
1648 starting_frame_number++;
1655 delete[] channel_buffer;
1656 channel_buffer = NULL;
1657 iterate_channel_buffer = NULL;
// Mark every frame this packet touched as processed.
1668 for (int64_t f = target_frame; f < starting_frame_number; f++) {
1672 processing_audio_frames.erase(processing_audio_frames.find(f));
1675 if (processing_audio_frames.count(f) == 0)
1677 processed_audio_frames[f] = f;
// Edge case: packet produced no new frames — still clear the target entry.
1680 if (target_frame == starting_frame_number) {
1682 processing_audio_frames.erase(processing_audio_frames.find(target_frame));
1690 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ProcessAudioPacket (After)",
"requested_frame", requested_frame,
"starting_frame", target_frame,
"end_frame", starting_frame_number - 1);
// Seeks the container to (requested_frame - buffer_amount), preferring the
// video stream when present, after draining in-flight work and clearing all
// caches/bookkeeping.  Seeking near the start (< 20) presumably falls back to
// a rewind-to-frame-1 path — TODO confirm, the branch body is elided.
// (Fragment: av_seek_frame calls and several braces are elided.)
1696 void FFmpegReader::Seek(int64_t requested_frame) {
1698 if (requested_frame < 1)
1699 requested_frame = 1;
1703 int processing_video_frames_size = 0;
1704 int processing_audio_frames_size = 0;
1707 processing_video_frames_size = processing_video_frames.size();
1708 processing_audio_frames_size = processing_audio_frames.size();
1712 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::Seek",
"requested_frame", requested_frame,
"seek_count", seek_count,
"last_frame", last_frame,
"processing_video_frames_size", processing_video_frames_size,
"processing_audio_frames_size", processing_audio_frames_size,
"video_pts_offset", video_pts_offset);
// Wait for all in-flight frame processing to drain before clearing state.
1715 while (processing_video_frames_size + processing_audio_frames_size > 0) {
1718 processing_video_frames_size = processing_video_frames.size();
1719 processing_audio_frames_size = processing_audio_frames.size();
1723 working_cache.
Clear();
1724 missing_frames.
Clear();
// Reset all per-seek bookkeeping (mirrors the same list cleared in Close()).
1729 processing_audio_frames.clear();
1730 processing_video_frames.clear();
1731 processed_video_frames.clear();
1732 processed_audio_frames.clear();
1733 missing_audio_frames.clear();
1734 missing_video_frames.clear();
1735 missing_audio_frames_source.clear();
1736 missing_video_frames_source.clear();
1737 checked_frames.clear();
1742 current_video_frame = 0;
1743 largest_frame_processed = 0;
1744 num_checks_since_final = 0;
1745 num_packets_since_video_frame = 0;
1746 has_missing_frames =
false;
// Near the start of the stream: cheaper to rewind to frame 1 (body elided).
1755 if (requested_frame - buffer_amount < 20) {
1766 if (seek_count == 1) {
1769 seeking_pts = ConvertFrameToVideoPTS(1);
1771 seek_audio_frame_found = 0;
1772 seek_video_frame_found = 0;
1776 bool seek_worked =
false;
1777 int64_t seek_target = 0;
// Video-stream seek path (preferred when the file has video).
1781 seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
1783 fprintf(stderr,
"%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
1786 is_video_seek =
true;
// Audio-stream seek path (fallback).
1793 seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
1795 fprintf(stderr,
"%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
1798 is_video_seek =
false;
// Flush decoder state so no pre-seek frames leak through.
1807 avcodec_flush_buffers(aCodecCtx);
1811 avcodec_flush_buffers(pCodecCtx);
1814 previous_packet_location.
frame = -1;
// Record the target so CheckSeek() can verify where decoding resumes.
1819 if (seek_count == 1) {
1821 seeking_pts = seek_target;
1822 seeking_frame = requested_frame;
1824 seek_audio_frame_found = 0;
1825 seek_video_frame_found = 0;
// Returns the current packet's timestamp, defaulting to 0 when unknown.
// NOTE(review): despite the name this reads packet->dts (decode order), not
// pts — presumably intentional so frames are numbered in decode order; the
// return statement is elided from this extraction.
1849 int64_t FFmpegReader::GetVideoPTS() {
1850 int64_t current_pts = 0;
1851 if (packet->dts != AV_NOPTS_VALUE)
1852 current_pts = packet->dts;
// Lazily captures the stream's initial PTS offset (video or audio), but only
// once: 99999 is the "not yet detected" sentinel set by the constructors.
// The assignment bodies are elided from this extraction.
1859 void FFmpegReader::UpdatePTSOffset(
bool is_video) {
1863 if (video_pts_offset == 99999)
1873 if (audio_pts_offset == 99999)
// Maps a video PTS to a 1-based frame number and tracks gaps: when the
// computed frame jumps ahead of the sequential counter, every skipped frame
// number is recorded in the missing_video_frames maps (keyed both ways) so
// CheckMissingFrame() can later clone the previous image into the gap.
// (Fragment: the frame computation and return are elided.)
1885 int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
1887 pts = pts + video_pts_offset;
1888 int64_t previous_video_frame = current_video_frame;
// First frame seen initializes the sequential counter.
1897 if (current_video_frame == 0)
1898 current_video_frame = frame;
// Same frame as last time: duplicate PTS (handling elided).
1902 if (frame == previous_video_frame) {
1907 current_video_frame++;
// Computed frame ran ahead of the counter => frames were skipped.
1910 if (current_video_frame < frame)
1912 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ConvertVideoPTStoFrame (detected missing frame)",
"calculated frame", frame,
"previous_video_frame", previous_video_frame,
"current_video_frame", current_video_frame);
// Record each missing frame (and the reverse mapping) exactly once.
1917 while (current_video_frame < frame) {
1918 if (!missing_video_frames.count(current_video_frame)) {
1919 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::ConvertVideoPTStoFrame (tracking missing frame)",
"current_video_frame", current_video_frame,
"previous_video_frame", previous_video_frame);
1920 missing_video_frames.insert(std::pair<int64_t, int64_t>(current_video_frame, previous_video_frame));
1921 missing_video_frames_source.insert(std::pair<int64_t, int64_t>(previous_video_frame, current_video_frame));
1925 has_missing_frames =
true;
1928 current_video_frame++;
// Inverse of ConvertVideoPTStoFrame: frame number -> raw stream PTS,
// un-applying the detected start offset (computation lines elided).
1937 int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
1945 return video_pts - video_pts_offset;
// Audio counterpart of ConvertFrameToVideoPTS: frame number -> raw audio
// stream PTS, un-applying the detected start offset (computation elided).
1949 int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
1957 return audio_pts - audio_pts_offset;
// Maps an audio PTS to an AudioLocation (frame number + sample offset within
// that frame).  Snaps to the previous packet's location when "near" it to
// absorb PTS jitter, and records any skipped frames as missing audio frames
// so silence can be inserted later.  (Fragment: the fractional frame
// computation and clamping bodies are elided.)
1961 AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {
1963 pts = pts + audio_pts_offset;
// Split the (fractional) frame position into whole frame + sample offset.
1972 int64_t whole_frame = int64_t(frame);
1975 double sample_start_percentage = frame - double(whole_frame);
1981 int sample_start = round(
double(samples_per_frame) * sample_start_percentage);
// Clamp to valid range (bodies elided).
1984 if (whole_frame < 1)
1986 if (sample_start < 0)
// -1 means "no previous packet yet" (set in Open()/Seek()).
1993 if (previous_packet_location.
frame != -1) {
// Within one frame's worth of samples of the previous location: treat
// the jitter as continuous audio and keep the previous frame number.
1994 if (location.
is_near(previous_packet_location, samples_per_frame, samples_per_frame)) {
1995 int64_t orig_frame = location.
frame;
2000 location.
frame = previous_packet_location.
frame;
2003 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)",
"Source Frame", orig_frame,
"Source Audio Sample", orig_start,
"Target Frame", location.
frame,
"Target Audio Sample", location.
sample_start,
"pts", pts);
// A real gap: record every skipped frame as missing audio.
2010 for (int64_t audio_frame = previous_packet_location.
frame; audio_frame < location.
frame; audio_frame++) {
2011 if (!missing_audio_frames.count(audio_frame)) {
2012 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::GetAudioPTSLocation (tracking missing frame)",
"missing_audio_frame", audio_frame,
"previous_audio_frame", previous_packet_location.
frame,
"new location frame", location.
frame);
2013 missing_audio_frames.insert(std::pair<int64_t, int64_t>(audio_frame, previous_packet_location.
frame - 1));
// Remember where this packet landed for the next call's continuity check.
2020 previous_packet_location = location;
// Returns the working-cache Frame for requested_frame, creating and caching a
// new one if absent.  The cache is consulted twice (double-checked around an
// elided critical section, presumably — TODO confirm the locking lines).
// Also advances largest_frame_processed.
2027 std::shared_ptr<Frame> FFmpegReader::CreateFrame(int64_t requested_frame) {
2029 std::shared_ptr<Frame> output = working_cache.
GetFrame(requested_frame);
// Second lookup after (elided) synchronization — another thread may have
// created the frame in the meantime.
2036 output = working_cache.
GetFrame(requested_frame);
2037 if(output)
return output;
2045 working_cache.
Add(output);
// Track the highest frame number ever created.
2048 if (requested_frame > largest_frame_processed)
2049 largest_frame_processed = requested_frame;
// Decides whether requested_frame is pre-seek "trash": a frame at/after the
// furthest frame reached by the current seek (on whichever stream applies)
// was decoded before the seek landed and must be discarded.
// (Fragment: return statements are elided.)
2056 bool FFmpegReader::IsPartialFrame(int64_t requested_frame) {
2059 bool seek_trash =
false;
// Furthest frame reached by either stream since the seek.
2060 int64_t max_seeked_frame = seek_audio_frame_found;
2061 if (seek_video_frame_found > max_seeked_frame) {
2062 max_seeked_frame = seek_video_frame_found;
2064 if ((
info.
has_audio && seek_audio_frame_found && max_seeked_frame >= requested_frame) ||
2065 (
info.
has_video && seek_video_frame_found && max_seeked_frame >= requested_frame)) {
// Fills in frames previously recorded as missing: clones the source frame's
// image for missing video frames and inserts silence for missing audio
// frames, marking them processed.  Each call bumps a per-frame check counter
// used elsewhere to give up on frames that never complete.
// (Fragment: several braces/returns are elided.)
2073 bool FFmpegReader::CheckMissingFrame(int64_t requested_frame) {
2078 ++checked_frames[requested_frame];
2081 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckMissingFrame",
"requested_frame", requested_frame,
"has_missing_frames", has_missing_frames,
"missing_video_frames.size()", missing_video_frames.size(),
"checked_count", checked_frames[requested_frame]);
2084 std::map<int64_t, int64_t>::iterator itr;
2085 bool found_missing_frame =
false;
// Special-case workaround: MP3 audio + MJPEG cover-art "video" (album art
// is a single image, so most video frames never arrive).  After 8 failed
// checks with audio done, retroactively declare the frame missing and
// reuse the last decoded image.
2094 if (checked_frames[requested_frame] > 8 && !missing_video_frames.count(requested_frame) &&
2095 !processing_audio_frames.count(requested_frame) && processed_audio_frames.count(requested_frame) &&
2096 last_frame && last_video_frame && last_video_frame->has_image_data && aCodecId == AV_CODEC_ID_MP3 && (vCodecId == AV_CODEC_ID_MJPEGB || vCodecId == AV_CODEC_ID_MJPEG)) {
2097 missing_video_frames.insert(std::pair<int64_t, int64_t>(requested_frame, last_video_frame->number));
2098 missing_video_frames_source.insert(std::pair<int64_t, int64_t>(last_video_frame->number, requested_frame));
2099 missing_frames.
Add(last_video_frame);
// ---- Missing video frame: copy the source frame's image ----
2104 if (missing_video_frames.count(requested_frame)) {
2105 int64_t missing_source_frame = missing_video_frames.find(requested_frame)->second;
// The source frame also gets a check-count bump so it is not finalized
// out from under us.
2108 ++checked_frames[missing_source_frame];
2111 std::shared_ptr<Frame> parent_frame = missing_frames.
GetFrame(missing_source_frame);
2112 if (parent_frame == NULL) {
2114 if (parent_frame != NULL) {
2116 missing_frames.
Add(parent_frame);
2121 std::shared_ptr<Frame> missing_frame = CreateFrame(requested_frame);
2124 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckMissingFrame (Is Previous Video Frame Final)",
"requested_frame", requested_frame,
"missing_frame->number", missing_frame->number,
"missing_source_frame", missing_source_frame);
2127 if (parent_frame != NULL) {
2129 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckMissingFrame (AddImage from Previous Video Frame)",
"requested_frame", requested_frame,
"missing_frame->number", missing_frame->number,
"missing_source_frame", missing_source_frame);
// Deep-copy the parent's QImage so the two frames stay independent.
2132 std::shared_ptr<QImage> parent_image = parent_frame->GetImage();
2134 missing_frame->AddImage(std::shared_ptr<QImage>(
new QImage(*parent_image)));
2135 processed_video_frames[missing_frame->number] = missing_frame->number;
// ---- Missing audio frame: insert one frame of silence ----
2141 if (missing_audio_frames.count(requested_frame)) {
2144 std::shared_ptr<Frame> missing_frame = CreateFrame(requested_frame);
2150 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckMissingFrame (Add Silence for Missing Audio Frame)",
"requested_frame", requested_frame,
"missing_frame->number", missing_frame->number,
"samples_per_frame", samples_per_frame);
2153 missing_frame->AddAudioSilence(samples_per_frame);
2154 processed_audio_frames[missing_frame->number] = missing_frame->number;
2157 return found_missing_frame;
// Scan the working cache and promote frames to the final cache once both their
// video and audio portions have been processed, or force-finalize a frame after
// it has been re-checked too many times.
// NOTE(review): this is a Doxygen source listing with many original lines elided
// (e.g. 2168-2179, 2209-2214, 2226-2229, 2243-2248, 2250-2256, 2261-2283), so the
// enclosing loop, braces, and several statements are not visible here. Comments
// below describe only what the visible lines establish.
2161 void FFmpegReader::CheckWorkingFrames(
bool end_of_stream, int64_t requested_frame) {
// Set once a frame exceeds max_checked_count; changes how later frames are counted.
2163 bool checked_count_tripped =
false;
// Retry ceiling: after this many checks a frame is forced ready (see line 2215).
2164 int max_checked_count = 80;
// First, account for any known-missing frame at the requested position.
2167 CheckMissingFrame(requested_frame);
// Drop the current working frame from the working cache.
// (The loop header that binds `f` is among the elided lines.)
2180 working_cache.
Remove(f->number);
2184 CheckMissingFrame(f->number);
2187 int checked_count = 0;
2188 int checked_frames_size = 0;
2190 bool is_video_ready =
false;
2191 bool is_audio_ready =
false;
// A frame is "ready" when its number appears in the processed maps.
2194 is_video_ready = processed_video_frames.count(f->number);
2195 is_audio_ready = processed_audio_frames.count(f->number);
2198 checked_frames_size = checked_frames.size();
// Until the trip flag is set, read the real per-frame check count; afterwards,
// frames before the requested one are treated as already at the ceiling.
2199 if (!checked_count_tripped || f->number >= requested_frame)
2200 checked_count = checked_frames[f->number];
2203 checked_count = max_checked_count;
// Audio is not ready if the most recent packet still targets this frame
// (unless the stream has ended and no more packets can arrive).
2206 if (previous_packet_location.
frame == f->number && !end_of_stream)
2207 is_audio_ready =
false;
// Partial frames produced during a seek are discarded rather than finalized.
2208 bool is_seek_trash = IsPartialFrame(f->number);
// Force-finalize path: frame was checked too many times without becoming ready.
2215 if (checked_count >= max_checked_count && (!is_video_ready || !is_audio_ready)) {
2217 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckWorkingFrames (exceeded checked_count)",
"requested_frame", requested_frame,
"frame_number", f->number,
"is_video_ready", is_video_ready,
"is_audio_ready", is_audio_ready,
"checked_count", checked_count,
"checked_frames_size", checked_frames_size);
2220 checked_count_tripped =
true;
// Fill a missing image by duplicating the last decoded video frame's image.
2222 if (
info.
has_video && !is_video_ready && last_video_frame) {
2224 f->AddImage(std::shared_ptr<QImage>(
new QImage(*last_video_frame->GetImage())));
2225 is_video_ready =
true;
// Audio is likewise forced ready (the guarding condition is elided here).
2230 is_audio_ready =
true;
2235 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckWorkingFrames",
"requested_frame", requested_frame,
"frame_number", f->number,
"is_video_ready", is_video_ready,
"is_audio_ready", is_audio_ready,
"checked_count", checked_count,
"checked_frames_size", checked_frames_size);
// Finalize when fully ready, when the stream has ended, or when the frame
// is seek trash (in which case it is discarded, not kept).
2238 if ((!end_of_stream && is_video_ready && is_audio_ready) || end_of_stream || is_seek_trash) {
2240 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckWorkingFrames (mark frame as final)",
"requested_frame", requested_frame,
"f->number", f->number,
"is_seek_trash", is_seek_trash,
"Working Cache Count", working_cache.
Count(),
"Final Cache Count",
final_cache.
Count(),
"end_of_stream", end_of_stream);
2242 if (!is_seek_trash) {
// Duplicate the last video image into this frame (guard condition elided).
2246 f->AddImage(std::shared_ptr<QImage>(
new QImage(*last_video_frame->GetImage())));
2249 num_checks_since_final = 0;
// If this frame is the source for a known-missing frame, also keep a copy
// in the missing-frames cache so CheckMissingFrame can find it later.
2257 if (missing_video_frames_source.count(f->number)) {
2259 ZmqLogger::Instance()->
AppendDebugMethod(
"FFmpegReader::CheckWorkingFrames (add frame to missing cache)",
"f->number", f->number,
"is_seek_trash", is_seek_trash,
"Missing Cache Count", missing_frames.
Count(),
"Working Cache Count", working_cache.
Count(),
"Final Cache Count",
final_cache.
Count());
2260 missing_frames.
Add(f);
// This frame's bookkeeping entry is no longer needed.
2264 checked_frames.erase(f->number);
2268 working_cache.
Remove(f->number);
// Track the highest frame number finalized so far.
2271 last_frame = f->number;
// Seek-trash branch: remove the frame without finalizing it.
2275 working_cache.
Remove(f->number);
// Estimate the real frame rate by decoding packets and counting how many video
// frames land in each of the first five seconds of the stream.
// NOTE(review): Doxygen listing with elided lines (2287-2289, 2296-2300, 2302-2305,
// 2307-2309, 2311-2312, 2314-2315, 2317-2318, 2320-2324, 2335-2342, 2347-2356,
// 2360+), so loop structure, the computation of `video_seconds`, and how the
// averages are applied to `info.fps` are not visible here.
2286 void FFmpegReader::CheckFPS() {
// Per-second frame counters for seconds 1..5 of the stream.
2290 int first_second_counter = 0;
2291 int second_second_counter = 0;
2292 int third_second_counter = 0;
2293 int forth_second_counter = 0;
2294 int fifth_second_counter = 0;
2295 int frames_detected = 0;
// Stop reading when no more packets are available (negative return).
2301 if (GetNextPacket() < 0)
// Only video-stream packets contribute to the FPS estimate.
2306 if (packet->stream_index == videoStream) {
// Update the video PTS offset (true = video stream).
2310 UpdatePTSOffset(
true);
2313 pts = GetVideoPTS();
// Free the decoded frame immediately; only its timestamp is needed.
2316 RemoveAVFrame(pFrame);
// Normalize the PTS so timestamps start at zero.
2319 pts += video_pts_offset;
// Bucket this frame into the wall-clock second it belongs to.
// (`video_seconds` is computed on an elided line.)
2325 if (video_seconds <= 1.0)
2326 first_second_counter++;
2327 else if (video_seconds > 1.0 && video_seconds <= 2.0)
2328 second_second_counter++;
2329 else if (video_seconds > 2.0 && video_seconds <= 3.0)
2330 third_second_counter++;
2331 else if (video_seconds > 3.0 && video_seconds <= 4.0)
2332 forth_second_counter++;
2333 else if (video_seconds > 4.0 && video_seconds <= 5.0)
2334 fifth_second_counter++;
// Preferred estimate: average of seconds 2-5 (second 1 is skipped,
// presumably to avoid partial-first-second bias — elided lines would confirm).
2343 if (second_second_counter != 0 && third_second_counter != 0 && forth_second_counter != 0 && fifth_second_counter != 0) {
2345 int sum_fps = second_second_counter + third_second_counter + forth_second_counter + fifth_second_counter;
2346 int avg_fps = round(sum_fps / 4.0f);
2357 }
// Fallback estimate when only ~3 seconds of video were detected:
// use the second second's count alone.
else if (second_second_counter != 0 && third_second_counter != 0) {
2359 int sum_fps = second_second_counter;
// Free the pixel buffer of a decoded AVFrame.
// NOTE(review): lines 2384-2386, 2388, and 2390+ are elided in this listing;
// the guard around the free and any av_frame_free call are not visible.
// The free is serialized under an OpenMP critical section named packet_cache.
2383 void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {
2387 #pragma omp critical (packet_cache)
2389 av_freep(&remove_frame->data[0]);
// Dispose of an AVPacket allocated by this reader.
// NOTE(review): lines 2399-2402 are elided; the usual av_packet_unref/av_free_packet
// call that would precede the delete is not visible here — confirm against the
// full source before assuming the packet data is released.
2398 void FFmpegReader::RemoveAVPacket(AVPacket *remove_packet) {
2403 delete remove_packet;
// Return the smallest frame number currently being processed on the video
// side, or -1 if processing_video_frames is empty.
// NOTE(review): braces/lock lines (2408, 2411, 2415-2417) are elided in this
// listing; the original presumably guards the map with a critical section.
2407 int64_t FFmpegReader::GetSmallestVideoFrame() {
2409 std::map<int64_t, int64_t>::iterator itr;
// -1 doubles as the "no frames" sentinel and the initial comparison value.
2410 int64_t smallest_frame = -1;
// Linear scan over the map keys (map is ordered, so the first key is already
// the smallest; the scan is redundant but harmless).
2412 for (itr = processing_video_frames.begin(); itr != processing_video_frames.end(); ++itr) {
2413 if (itr->first < smallest_frame || smallest_frame == -1)
2414 smallest_frame = itr->first;
2418 return smallest_frame;
// Return the smallest frame number currently being processed on the audio
// side, or -1 if processing_audio_frames is empty.
// NOTE(review): braces/lock lines (2423, 2426, 2430-2432) are elided in this
// listing; mirrors GetSmallestVideoFrame above.
2422 int64_t FFmpegReader::GetSmallestAudioFrame() {
2424 std::map<int64_t, int64_t>::iterator itr;
// -1 doubles as the "no frames" sentinel and the initial comparison value.
2425 int64_t smallest_frame = -1;
// Linear scan over the map keys (ordered map; first key is already smallest).
2427 for (itr = processing_audio_frames.begin(); itr != processing_audio_frames.end(); ++itr) {
2428 if (itr->first < smallest_frame || smallest_frame == -1)
2429 smallest_frame = itr->first;
2433 return smallest_frame;
2448 root[
"type"] =
"FFmpegReader";
2449 root[
"path"] = path;
2464 catch (
const std::exception& e) {
2466 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
2477 if (!root[
"path"].isNull())
2478 path = root[
"path"].asString();
#define AV_FREE_CONTEXT(av_context)
#define AV_FREE_FRAME(av_frame)
#define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count)
#define AV_GET_IMAGE_SIZE(pix_fmt, width, height)
#define AV_GET_CODEC_TYPE(av_stream)
#define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context)
#define AV_GET_CODEC_CONTEXT(av_stream, av_codec)
#define AV_FIND_DECODER_CODEC_ID(av_stream)
#define AV_ALLOCATE_FRAME()
#define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height)
#define AV_FREE_PACKET(av_packet)
#define AVCODEC_REGISTER_ALL
#define AVCODEC_MAX_AUDIO_FRAME_SIZE
#define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context)
#define MY_INPUT_BUFFER_PADDING_SIZE
#define AV_GET_SAMPLE_FORMAT(av_stream, av_context)
#define AV_RESET_FRAME(av_frame)
#define FF_NUM_PROCESSORS
#define OPEN_MP_NUM_PROCESSORS
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
int64_t Count()
Count the frames in the queue.
void Add(std::shared_ptr< openshot::Frame > frame)
Add a Frame to the cache.
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)
Get a frame from the cache.
void Remove(int64_t frame_number)
Remove a specific frame.
void Clear()
Clear the cache of all frames.
std::shared_ptr< openshot::Frame > GetSmallestFrame()
Get the smallest frame number.
This class represents a clip (used to arrange readers on the timeline)
FFmpegReader(std::string path)
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame)
Json::Value JsonValue() const override
Generate Json::Value for this object.
CacheMemory final_cache
Final cache object used to hold final frames.
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
virtual ~FFmpegReader()
Destructor.
std::string Json() const override
Get and Set JSON methods.
void Open()
Open File - which is called by the constructor automatically.
void SetJson(const std::string value)
Load JSON string into this object.
This class represents a fraction.
int num
Numerator for the fraction.
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
double ToDouble()
Return this fraction as a double (i.e. 1/2 = 0.5)
int ToInt()
Return a rounded integer of the fraction (for example 30000/1001 returns 30)
int den
Denominator for the fraction.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Exception when no valid codec is found for a file.
Exception for files that can not be found or opened.
Exception for invalid JSON.
Exception when no streams are found in the file.
Exception for frames that are out of bounds.
openshot::ClipBase * parent
openshot::ReaderInfo info
Information about the current media file.
juce::CriticalSection processingCriticalSection
openshot::ClipBase * GetClip()
Parent clip object of this reader (which can be unparented and NULL)
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Exception when a reader is closed, and a frame is requested.
int DE_LIMIT_WIDTH_MAX
Maximum columns that hardware decode can handle.
int MAX_WIDTH
Maximum width for image data (useful for optimizing for a smaller preview or render)
int HW_DE_DEVICE_SET
Which GPU to use to decode (0 is the first)
int DE_LIMIT_HEIGHT_MAX
Maximum rows that hardware decode can handle.
static Settings * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
int HARDWARE_DECODER
Use video codec for faster video decoding (if supported)
int MAX_HEIGHT
Maximum height for image data (useful for optimizing for a smaller preview or render)
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
This namespace is the default namespace for all code in the openshot library.
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround,...
const Json::Value stringToJson(const std::string value)
This struct holds the associated video frame and starting sample # for an audio packet.
bool is_near(AudioLocation location, int samples_per_frame, int64_t amount)
int audio_bit_rate
The bit rate of the audio stream (in bytes)
int video_bit_rate
The bit rate of the video stream (in bytes)
float duration
Length of time (in seconds)
openshot::Fraction audio_timebase
The audio timebase determines how long each audio packet should be played.
int width
The width of the video (in pixels)
int channels
The number of audio channels used in the audio stream.
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
openshot::Fraction display_ratio
The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3)
int height
The height of the video (in pixels)
int pixel_format
The pixel format (i.e. YUV420P, RGB24, etc...)
int64_t video_length
The number of frames in the video stream.
std::string acodec
The name of the audio codec used to encode / decode the video stream.
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
std::string vcodec
The name of the video codec used to encode / decode the video stream.
openshot::Fraction pixel_ratio
The pixel ratio of the video stream as a fraction (i.e. some pixels are not square)
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
bool has_video
Determines if this file has a video stream.
bool has_audio
Determines if this file has an audio stream.
openshot::Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
int video_stream_index
The index of the video stream.
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
int audio_stream_index
The index of the audio stream.
int64_t file_size
Size of file (in bytes)