OpenShot Library | libopenshot 0.2.5 (Clip.cpp source listing)
#include "../include/Clip.h"
#include "../include/FFmpegReader.h"
#include "../include/FrameMapper.h"
#ifdef USE_IMAGEMAGICK
#include "../include/ImageReader.h"
#include "../include/TextReader.h"
#endif
#include "../include/QtImageReader.h"
#include "../include/ChunkReader.h"
#include "../include/DummyReader.h"
// Init default settings for a clip
void Clip::init_settings()
{
    // ...
    init_reader_rotation();
    // ...
    // Default audio waveform color: R=0, G=123, B=255, A=255
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);
    // ...
}
// Init the reader's rotation (if any), taken from the reader's metadata
void Clip::init_reader_rotation() {
    // Check the reader's metadata for a "rotate" entry (common with phone video)
    if (reader && reader->info.metadata.count("rotate") > 0) {
        // ...
        try {
            float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
            // ...
        } catch (const std::exception& e) {}
    }
    // ...
}
// Default constructor (no reader)
Clip::Clip() : resampler(NULL), audio_cache(NULL), reader(NULL), allocated_reader(NULL)
{ /* ... */ }

// Constructor with an existing reader (the caller keeps ownership of the reader)
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), audio_cache(NULL), reader(new_reader), allocated_reader(NULL)
{ /* ... */ }

// Constructor with a file path; the reader is chosen from the file extension
Clip::Clip(std::string path) : resampler(NULL), audio_cache(NULL), reader(NULL), allocated_reader(NULL)
{
    // ...
    // Get the file extension and lower-case it
    std::string ext = get_file_extension(path);
    transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
    // ...
    // Common video and audio file extensions
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
    {
        // ...
    }
    // ...
    // The clip owns whatever reader was created here (deleted in the destructor)
    allocated_reader = reader;
    init_reader_rotation();
}
    // Destructor: delete the reader only if this clip allocated it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
    }
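For reference, the three constructors above differ mainly in reader ownership: a clip built from a path allocates its own reader and deletes it in the destructor (via allocated_reader), while a clip built from an existing ReaderBase* leaves ownership with the caller. A minimal usage sketch (the file path is hypothetical and include paths depend on how libopenshot is installed):

    #include "Clip.h"
    #include "FFmpegReader.h"

    int main() {
        // Clip that creates and owns its reader (freed automatically in ~Clip)
        openshot::Clip from_path("video.mp4");

        // Clip that borrows an existing reader; the caller keeps ownership
        openshot::FFmpegReader my_reader("video.mp4");
        openshot::Clip from_reader(&my_reader);
    }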
// Clip::Reader(ReaderBase* new_reader): set a new reader, then refresh rotation from its metadata
    // ...
    init_reader_rotation();

// Reader(), Open(), Close() and End() all guard against a missing reader with the same error:
    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
// Get an openshot::Frame object for a specific frame number of this clip
std::shared_ptr<Frame> Clip::GetFrame(int64_t requested_frame)
{
    // ... (runs only when a reader has been set; otherwise see the throw below)
    // Adjust out-of-bounds frame numbers (frame numbers start at 1)
    requested_frame = adjust_frame_number_minimum(requested_frame);
    // ...
    // Apply the has_audio / has_video overrides (-1 = undefined, 0 = no, 1 = yes)
    else if (enabled_audio == -1 && reader && !reader->info.has_audio)
        // ...
    else if (enabled_video == -1 && reader && !reader->info.has_audio)
        // ...

    // Apply the 'time' keyframe (if any) to remap the requested frame number
    int64_t new_frame_number = requested_frame;
    int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame));
    // ...
        new_frame_number = time_mapped_number;

    // Get the source frame (access to the reader is serialized)
    std::shared_ptr<Frame> original_frame;
    #pragma omp critical (Clip_GetFrame)
    original_frame = GetOrCreateFrame(new_frame_number);
    // ...
    #pragma omp critical (Clip_GetFrame)
    // ...
    // Deep-copy the image into the frame that will be returned
    frame->AddImage(std::shared_ptr<QImage>(new QImage(*original_frame->GetImage())));
    // ...
    // Re-time audio/video when a time curve is in use
    get_time_mapped_frame(frame, requested_frame);
    // ...
    // Apply this clip's effects, then return the frame
    apply_effects(frame);
    // ...
    // No reader: nothing to return
    throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
// Return the file's extension (the text after the last '.'), e.g. "video.MP4" -> "MP4"
std::string Clip::get_file_extension(std::string path)
{
    return path.substr(path.find_last_of(".") + 1);
}
// Reverse an audio buffer (used when a clip plays backwards)
void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
{
    int number_of_samples = buffer->getNumSamples();
    int channels = buffer->getNumChannels();

    // Copy the samples into a temporary buffer in reverse order
    juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
    // ...
    for (int channel = 0; channel < channels; channel++)
    {
        int n = 0;
        for (int s = number_of_samples - 1; s >= 0; s--, n++)
            reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
    }
    // ...
    // Copy the reversed samples back into the original buffer
    for (int channel = 0; channel < channels; channel++)
        buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
    // ...
}
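The same reverse-into-a-temporary, copy-back pattern, shown as a self-contained single-channel sketch without JUCE (reverse_samples and its vector-based signature are illustrative, not part of libopenshot):

    #include <vector>
    #include <algorithm>

    // Simplified illustration of reverse_buffer's approach:
    // build a reversed copy, then overwrite the original samples with it.
    void reverse_samples(std::vector<float>& samples)
    {
        std::vector<float> reversed(samples.size());
        int n = 0;
        for (int s = static_cast<int>(samples.size()) - 1; s >= 0; s--, n++)
            reversed[n] = samples[s];                                   // copy in reverse order
        std::copy(reversed.begin(), reversed.end(), samples.begin());  // overwrite original
    }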
// Adjust the audio and image of a time-mapped frame (non 1:1 playback speed or reversed direction)
void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
    // Requires an initialized reader
    if (!reader)
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
    // ...
    juce::AudioSampleBuffer *samples = NULL;
    // ...
    // Frame number after the 'time' curve has been applied
    int new_frame_number = frame->number;
    // ...
    int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();
    // SLOW DOWN: the same source frame repeats, so stretch its audio with the resampler
    juce::AudioSampleBuffer *resampled_buffer = NULL;
    int resampled_buffer_size = 0;
    // ...
    samples = new juce::AudioSampleBuffer(channels, number_of_samples);
    // ...
    // Copy this frame's audio samples into the working buffer
    for (int channel = 0; channel < channels; channel++)
        samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
                         number_of_samples, 1.0f);
    // ...
    // Reverse the samples when the clip is playing backwards
    reverse_buffer(samples);
    // ...
    resampled_buffer_size = resampled_buffer->getNumSamples();
    // ...
    // Take only the slice of resampled audio needed for this output frame
    // ('start' is computed from the elided repeat-fraction logic)
    for (int channel = 0; channel < channels; channel++)
        frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
                        number_of_samples, 1.0f);
    // ...
    resampled_buffer = NULL;
    else if (abs(delta) > 1 && abs(delta) < 100) {
        // SPEED UP: combine the audio of several source frames into one output frame
        int start = 0;
        // ... (delta > 0: playing forward)
        // Count the total samples across the delta frames
        int total_delta_samples = 0;
        for (int delta_frame = new_frame_number - (delta - 1);
             delta_frame <= new_frame_number; delta_frame++)
            total_delta_samples += GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
        // ...
        samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
        // ...
        // Append each delta frame's audio into the combined buffer
        for (int delta_frame = new_frame_number - (delta - 1);
             delta_frame <= new_frame_number; delta_frame++) {
            int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
            juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
                                                                                 number_of_delta_samples);
            delta_samples->clear();
            // ...
            for (int channel = 0; channel < channels; channel++)
                delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                       number_of_delta_samples, 1.0f);
            // ...
            // Reverse the samples when playing backwards
            reverse_buffer(delta_samples);
            // ...
            // Copy into the combined buffer at the current offset
            for (int channel = 0; channel < channels; channel++)
                samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                                 number_of_delta_samples, 1.0f);
            // ...
            delete delta_samples;
            delta_samples = NULL;
            // ...
            start += number_of_delta_samples;
        }
        // ... (delta < 0: playing in reverse, so walk the delta frames backwards)
        int total_delta_samples = 0;
        for (int delta_frame = new_frame_number - (delta + 1);
             delta_frame >= new_frame_number; delta_frame--)
            total_delta_samples += GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
        // ...
        samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
        // ...
        for (int delta_frame = new_frame_number - (delta + 1);
             delta_frame >= new_frame_number; delta_frame--) {
            int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
            juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
                                                                                 number_of_delta_samples);
            delta_samples->clear();
            // ...
            for (int channel = 0; channel < channels; channel++)
                delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
                                       number_of_delta_samples, 1.0f);
            // ...
            reverse_buffer(delta_samples);
            // ...
            for (int channel = 0; channel < channels; channel++)
                samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
                                 number_of_delta_samples, 1.0f);
            // ...
            delete delta_samples;
            delta_samples = NULL;
            // ...
            start += number_of_delta_samples;
        }
        // Hand the combined samples to the resampler (ratio of combined length to one frame's length)
        resampler->SetBuffer(samples, float(start) / float(number_of_samples));
        // ...
        // Fetch the resampled audio and add it to the output frame
        // (assumes the elided line obtains 'buffer' from resampler->GetResampledBuffer())
        int resampled_buffer_size = buffer->getNumSamples();
        // ...
        for (int channel = 0; channel < channels; channel++)
            frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);
    }
    else {
        // NORMAL SPEED: no resampling; only reverse the samples when playing backwards
        samples = new juce::AudioSampleBuffer(channels, number_of_samples);
        // ...
        for (int channel = 0; channel < channels; channel++)
            samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);
        // ...
        reverse_buffer(samples);
        // ...
        for (int channel = 0; channel < channels; channel++)
            frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
    }
    // ...
}
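All of the branches above are driven by the clip's time keyframe: its value selects which source frame plays at each output frame, so a steeper curve speeds playback up, a fractional slope repeats frames (slow motion), and a decreasing curve plays in reverse. A hedged sketch of double-speed playback (the path and point values are illustrative; the default keyframe interpolation makes the mapping only roughly linear between the two points):

    #include <memory>
    #include "Clip.h"
    #include "Frame.h"

    int main() {
        openshot::Clip clip("video.mp4");      // hypothetical path
        clip.Open();

        // Map output frames 1..100 onto source frames 1..200 (roughly 2x speed)
        clip.time.AddPoint(1, 1);
        clip.time.AddPoint(100, 200);

        std::shared_ptr<openshot::Frame> f = clip.GetFrame(50);   // roughly source frame 100
        clip.Close();
    }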
// Clamp any frame number below 1 up to 1
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    if (frame_number < 1)
// Get or create the requested frame from this clip's reader
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
{
    std::shared_ptr<Frame> new_frame;
    // ...
    new_frame = reader->GetFrame(number);
    // ...
// Generate the JSON string used by the UI to display this clip's properties
std::string Clip::PropertiesJSON(int64_t requested_frame) const {
    // ...
    // Add basic properties (name, value, type, memo, keyframe, min, max, readonly, frame)
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    // ...
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    // ...
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    // ...
    // Return the styled JSON string
    return root.toStyledString();
}
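The result is a styled JSON string keyed by the property names above ("id", "position", "start", "end", "duration", "waveform", plus the keyframed properties added in the elided lines). A minimal retrieval sketch (the path is hypothetical and the include path depends on your install):

    #include <iostream>
    #include "Clip.h"

    int main() {
        openshot::Clip clip("video.mp4");              // hypothetical path
        std::cout << clip.PropertiesJSON(1) << "\n";   // properties evaluated at frame 1
    }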
// Clip::JsonValue: generate Json::Value for this object
    // ...
    root["scale"] = scale;
    // ...
    root["waveform"] = waveform;
    // ...
    // Serialize this clip's effects as a JSON array
    root["effects"] = Json::Value(Json::arrayValue);
    // ...
    for (auto existing_effect : effects)
        root["effects"].append(existing_effect->JsonValue());

// Clip::SetJson: parse a JSON string and load it into this object
    // ...
    catch (const std::exception& e)
    {
        // Parsing failed, or the data had the wrong shape
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
    // Clip::SetJsonValue: apply whichever keys are present in the JSON
    if (!root["gravity"].isNull())          /* ... */;
    if (!root["scale"].isNull())            /* ... */;
    if (!root["anchor"].isNull())           /* ... */;
    if (!root["display"].isNull())          /* ... */;
    if (!root["mixing"].isNull())           /* ... */;
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())          /* ... */;
    if (!root["scale_y"].isNull())          /* ... */;
    if (!root["location_x"].isNull())       /* ... */;
    if (!root["location_y"].isNull())       /* ... */;
    if (!root["alpha"].isNull())            /* ... */;
    if (!root["rotation"].isNull())         /* ... */;
    if (!root["time"].isNull())             /* ... */;
    if (!root["volume"].isNull())           /* ... */;
    if (!root["wave_color"].isNull())       /* ... */;
    if (!root["crop_width"].isNull())       /* ... */;
    if (!root["crop_height"].isNull())      /* ... */;
    if (!root["crop_x"].isNull())           /* ... */;
    if (!root["crop_y"].isNull())           /* ... */;
    if (!root["shear_x"].isNull())          /* ... */;
    if (!root["shear_y"].isNull())          /* ... */;
    if (!root["channel_filter"].isNull())   /* ... */;
    if (!root["channel_mapping"].isNull())  /* ... */;
    if (!root["has_audio"].isNull())        /* ... */;
    if (!root["has_video"].isNull())        /* ... */;
    if (!root["perspective_c1_x"].isNull()) /* ... */;
    if (!root["perspective_c1_y"].isNull()) /* ... */;
    if (!root["perspective_c2_x"].isNull()) /* ... */;
    if (!root["perspective_c2_y"].isNull()) /* ... */;
    if (!root["perspective_c3_x"].isNull()) /* ... */;
    if (!root["perspective_c3_y"].isNull()) /* ... */;
    if (!root["perspective_c4_x"].isNull()) /* ... */;
    if (!root["perspective_c4_y"].isNull()) /* ... */;
    if (!root["effects"].isNull()) {
        // ...
        // Re-create each effect from its JSON description
        for (const auto existing_effect : root["effects"]) {
            // ...
            if (!existing_effect["type"].isNull()) {
                // ...
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {
                    // ...
                }
            }
        }
    }
    // Re-create the clip's reader from its JSON description
    if (!root["reader"].isNull())
    {
        if (!root["reader"]["type"].isNull())
        {
            // Remember whether the current reader was open, so the new one can match
            bool already_open = false;
            // ...
            already_open = reader->IsOpen();
            // ...
            // Create the new reader based on its "type"
            std::string type = root["reader"]["type"].asString();
            // ...
            if (type == "FFmpegReader") {
                // ...
                reader = new FFmpegReader(root["reader"]["path"].asString(), false);
                // ...
            } else if (type == "QtImageReader") {
                // ...
                reader = new QtImageReader(root["reader"]["path"].asString(), false);
                // ...
#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {
                // ...
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                // ...
            } else if (type == "TextReader") {
                // ...
#endif
            } else if (type == "ChunkReader") {
                // ...
                reader = new ChunkReader(root["reader"]["path"].asString(),
                                         (ChunkVersion) root["reader"]["chunk_version"].asInt());
                // ...
            } else if (type == "DummyReader") {
                // ...
            }
            // ...
            // The clip now owns this reader
            allocated_reader = reader;
            // ...
        }
    }
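The keys used above also mean a clip's reader can be swapped purely through JSON. A minimal, hedged sketch (the path is hypothetical, and only the "reader" keys shown in this listing are used):

    #include "Clip.h"

    int main() {
        openshot::Clip clip;
        // SetJson() parses the string and routes it through SetJsonValue()
        clip.SetJson("{ \"reader\": { \"type\": \"FFmpegReader\", \"path\": \"video.mp4\" } }");
    }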
// Sort this clip's effects list
void Clip::sort_effects()
    // ...

// AddEffect: store the effect, then keep the list sorted
    effects.push_back(effect);
// RemoveEffect
    effects.remove(effect);

// Apply all of this clip's effects to a frame, in order
std::shared_ptr<Frame> Clip::apply_effects(std::shared_ptr<Frame> frame)
{
    for (auto effect : effects)
        frame = effect->GetFrame(frame, frame->number);
    // ...
}
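As a usage sketch, an effect created through EffectInfo (the effect name "Negate" is an assumption here, not something shown in this listing) is applied automatically the next time GetFrame() runs apply_effects():

    #include <memory>
    #include "Clip.h"
    #include "EffectInfo.h"

    int main() {
        openshot::Clip clip("video.mp4");                                           // hypothetical path
        openshot::EffectBase* fx = openshot::EffectInfo().CreateEffect("Negate");   // assumed effect name
        if (fx)
            clip.AddEffect(fx);

        clip.Open();
        std::shared_ptr<openshot::Frame> f = clip.GetFrame(1);  // effects applied inside GetFrame()
        clip.Close();
    }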
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
const Json::Value stringToJson(const std::string value)
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
openshot::Keyframe crop_width
Curve representing width in percent (0.0=0%, 1.0=100%)
int SampleRate()
Get the original sample rate of this frame's audio data.
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
This class returns a listing of all effects supported by libopenshot.
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
openshot::Keyframe crop_height
Curve representing height in percent (0.0=0%, 1.0=100%)
This abstract class is the base class, used by all effects in libopenshot.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
void Open()
Open the internal reader.
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
juce::CriticalSection getFrameCriticalSection
Section lock for multiple threads.
float * GetAudioSamples(int channel)
Get an array of sample data.
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
openshot::Keyframe crop_y
Curve representing Y offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
void Close()
Close the internal reader.
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
openshot::Keyframe crop_x
Curve representing X offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
This namespace is the default namespace for all code in the openshot library.
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
std::string PropertiesJSON(int64_t requested_frame) const override
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
openshot::ReaderInfo info
Information about the current media file.
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
This class is used as a simple, dummy reader, which always returns a blank frame.
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
GravityType
This enumeration determines how clips are aligned to their parent container.
float duration
Length of time (in seconds)
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
This class represents a single frame of video (i.e. image & audio data)
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
virtual ~Clip()
Destructor.
bool has_video
Determines if this file has a video stream.
int width
The width of the video (in pixels)
std::string Json() const override
Get and Set JSON methods.
Exception when too many seek attempts happen.
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
@ FRAME_DISPLAY_NONE
Do not display the frame number.
Fraction GetRepeatFraction(int64_t index) const
Get the fraction that represents how many times this value is repeated in the curve.
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
void AddAudioSilence(int numSamples)
Add audio silence.
Exception for frames that are out of bounds.
Json::Value JsonValue() const
Generate Json::Value for this object.
@ GRAVITY_TOP
Align clip to the top center of its parent.
This class represents a color (used on the timeline and clips)
openshot::FrameDisplayType display
The format to display the frame number (if any)
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
std::string Id() const
Get basic properties.
This class is used to resample audio data for many sequential frames.
int height
The height of the video (in pixels)
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, then mix (louder, but could cause pops if the sum exceeds 100%)
int num
Numerator for the fraction.
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
int den
Denominator for the fraction.
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
bool has_audio
Determines if this file has an audio stream.
virtual bool IsOpen()=0
Determine if reader is open or closed.
float End() const
Override End() method.
Exception for invalid JSON.
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame)
Get an openshot::Frame object for a specific frame number of this clip.
void AddImage(int new_width, int new_height, int bytes_per_pixel, QImage::Format type, const unsigned char *pixels_)
Add (or replace) pixel data to the frame.
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
openshot::Keyframe green
Curve representing the green value (0 - 255)
float Duration() const
Get the length of this clip (in seconds)
void SetClip(openshot::ClipBase *clip)
Set parent clip object of this reader.
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
int GetAudioChannelsCount()
Get number of audio channels.
openshot::GravityType crop_gravity
Cropping needs to have a gravity to determine what side we are cropping.
float end
The position in seconds to end playing (used to trim the ending of a clip)
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
float start
The position in seconds to start playing (used to trim the beginning of a clip)
openshot::ReaderBase * Reader()
Get the current reader.
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
openshot::Keyframe volume
Curve representing the volume (0 to 1)
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
Json::Value JsonValue() const
Generate Json::Value for this object.
int64_t GetLength() const
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
void AddAudio(bool replaceSamples, int destChannel, int destStartSample, const float *source, int numSamples, float gainToApplyToSource)
Add audio samples to a specific channel.
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Exception when a reader is closed, and a frame is requested.
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
openshot::Keyframe channel_filter
Audio channel filter and mappings.
std::shared_ptr< QImage > GetImage()
Get pointer to Qt QImage image object.
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
int64_t GetCount() const
Get the number of points (i.e. # of points)
juce::AudioSampleBuffer * GetResampledBuffer()
Get the resampled audio buffer.
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Clip()
Default Constructor.
This abstract class is the base class, used by all readers in libopenshot.
float Position() const
Get position on timeline (in seconds)
std::string previous_properties
This string contains the previous JSON properties.
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
openshot::ChannelLayout ChannelsLayout()
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips' volume keyframes, so that the sum does not exceed 100%.
virtual void Close()=0
Close the reader (and any resources it was consuming)
AnchorType
This enumeration determines what parent a clip should be aligned to.
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
openshot::Keyframe has_audio
Override has_video and has_audio properties of clip (and their readers)
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
@ SCALE_NONE
Do not scale the clip.
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
float Start() const
Get start position (in seconds) of clip (trim start of video)
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
void SetJson(const std::string value)
Load JSON string into this object.
int64_t number
This is the frame number (starting at 1)
Json::Value JsonValue() const override
Generate Json::Value for this object.
openshot::Keyframe red
Curve representing the red value (0 - 255)
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
int GetAudioSamplesCount()
Get number of audio samples.
int channels
The number of audio channels used in the audio stream.
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
openshot::Color wave_color
Curve representing the color of the audio wave form.
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
void SetBuffer(juce::AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
double GetValue(int64_t index) const
Get the value at a specific index.
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....