/**
 * @file
 * @brief Source file for Timeline class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @ref License
 */

/* LICENSE
 *
 * Copyright (c) 2008-2019 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */
#include "../include/Timeline.h"

using namespace openshot;

// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
    is_open(false), auto_map_clips(true), managed_cache(true)
{
    // Create CrashHandler and Attach (in case of errors)
    CrashHandler::Instance();

    // Init viewport size (curve based, because it can be animated)
    viewport_scale = Keyframe(100.0);
    viewport_x = Keyframe(0.0);
    viewport_y = Keyframe(0.0);

    // Init background color
    color.red = Keyframe(0.0);
    color.green = Keyframe(0.0);
    color.blue = Keyframe(0.0);

    // Init FileInfo struct (clear all values)
    info.width = width;
    info.height = height;
    info.fps = fps;
    info.sample_rate = sample_rate;
    info.channels = channels;
    info.channel_layout = channel_layout;
    info.video_timebase = fps.Reciprocal();
    info.duration = 60 * 30; // 30 minute default duration
    info.has_audio = true;
    info.has_video = true;
    info.video_length = info.fps.ToFloat() * info.duration;
    info.display_ratio = openshot::Fraction(width, height);
    info.display_ratio.Reduce();
    info.pixel_ratio = openshot::Fraction(1, 1);

    // Init max image size
    SetMaxSize(info.width, info.height);

    // Init cache
    final_cache = new CacheMemory();
    final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}
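
/* Example usage of the constructor above (an illustrative sketch; the media
 * parameters below are assumptions chosen for the example, not values taken
 * from this file):
 *
 *     // 1080p timeline at 30 fps, with 44.1 kHz stereo audio
 *     Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
 *     t.Open();                                  // start consuming resources
 *     std::shared_ptr<Frame> f = t.GetFrame(1);  // render the first frame
 *     t.Close();
 */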

// Destructor (auto-closes the timeline and frees all allocated resources)
Timeline::~Timeline() {
    if (is_open)
        // Auto Close if not already
        Close();

    // Free all allocated frame mappers
    std::set<FrameMapper *>::iterator it;
    for (it = allocated_frame_mappers.begin(); it != allocated_frame_mappers.end(); ) {
        // Dereference and clean up FrameMapper object
        FrameMapper *mapper = (*it);
        mapper->Reader(NULL);
        mapper->Close();
        delete mapper;
        // Remove reference and proceed to next element
        it = allocated_frame_mappers.erase(it);
    }

    // Destroy previous cache (if managed by timeline)
    if (managed_cache && final_cache) {
        delete final_cache;
        final_cache = NULL;
    }
}

// Add an openshot::Clip to the timeline
void Timeline::AddClip(Clip* clip)
{
    // All clips should be converted to the frame rate of this timeline
    if (auto_map_clips)
        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);

    // Add clip to list
    clips.push_back(clip);

    // Sort clips
    sort_clips();
}
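
/* Example (an illustrative sketch; the file name, position, and layer are
 * assumptions chosen for the example):
 *
 *     Clip *c = new Clip("input.mp4"); // a reader is created from the path
 *     c->Position(2.0);                // place clip 2 seconds into the timeline
 *     c->Layer(1);                     // higher layers are composited on top
 *     t.AddClip(c);                    // clip is mapped to the timeline's fps
 */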

// Add an effect to the timeline
void Timeline::AddEffect(EffectBase* effect)
{
    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();
}

// Remove an effect from the timeline
void Timeline::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);
}

// Remove an openshot::Clip from the timeline
void Timeline::RemoveClip(Clip* clip)
{
    clips.remove(clip);
}

// Apply a FrameMapper to a clip which matches the settings of this timeline
void Timeline::apply_mapper_to_clip(Clip* clip)
{
    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Determine type of reader
    ReaderBase* clip_reader = NULL;
    if (clip->Reader()->Name() == "FrameMapper")
    {
        // Get the existing reader
        clip_reader = (ReaderBase*) clip->Reader();

    } else {

        // Create a new FrameMapper to wrap the current reader
        FrameMapper* mapper = new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
        allocated_frame_mappers.insert(mapper);
        clip_reader = (ReaderBase*) mapper;
    }

    // Update the mapping
    FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
    clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);

    // Update clip reader
    clip->Reader(clip_reader);
}

// Apply the timeline's framerate and samplerate to all clips
void Timeline::ApplyMapperToClips()
{
    // Clear all cached frames
    ClearAllCache();

    // Loop through all clips
    for (auto clip : clips)
    {
        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);
    }
}

// Calculate time of a frame number, based on a framerate
double Timeline::calculate_time(int64_t number, Fraction rate)
{
    // Get float version of fps fraction
    double raw_fps = rate.ToFloat();

    // Return the time (in seconds) of this frame
    return double(number - 1) / raw_fps;
}
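
/* Worked example: frames are 1-based, so frame 1 maps to t = 0. At a rate of
 * 30/1 fps, calculate_time(61, Fraction(30, 1)) returns (61 - 1) / 30.0 = 2.0
 * seconds. The inverse convention, "position * fps + 1", is what converts
 * seconds back into frame numbers throughout the rest of this file.
 */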

// Apply effects to the source frame (if any)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer);

    // Find Effects at this position and layer
    for (auto effect : effects)
    {
        // Does the effect intersect the current requested time
        long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
        long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble()) + 1;

        bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer);

        // Effect is visible
        if (does_effect_intersect)
        {
            // Determine the frame needed for this effect (based on the position on the timeline)
            long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
            long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect);

            // Apply the effect to this frame
            frame = effect->GetFrame(frame, effect_frame_number);
        }

    } // end effect loop

    // Return modified frame
    return frame;
}
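
/* Worked example of the intersection math above (the values are assumptions
 * for illustration): at 30 fps, an effect with Position() = 2.0 s and
 * Duration() = 1.0 s spans timeline frames 61 through 91. For timeline frame
 * 75 and Start() = 0.0, the effect-local frame is 75 - 61 + 1 = 15.
 */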

// Get or generate a blank frame
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
{
    std::shared_ptr<Frame> new_frame;

    // Init some basic properties about this frame
    int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);

    try {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        #pragma omp critical (T_GetOrCreateFrame)
        new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));

        // Return real frame
        return new_frame;

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const TooManySeeks & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame);

    // Create blank frame
    new_frame = std::make_shared<Frame>(number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels);
    #pragma omp critical (T_GetOrCreateFrame)
    {
        new_frame->SampleRate(info.sample_rate);
        new_frame->ChannelsLayout(info.channel_layout);
    }
    return new_frame;
}

// Process a new layer of video or audio
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
{
    // Get the clip's frame & image
    std::shared_ptr<Frame> source_frame;
    #pragma omp critical (T_addLayer)
    source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

    // No frame found... so bail
    if (!source_frame)
        return;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);

    /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
    if (source_clip->Waveform())
    {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);

        // Get the color of the waveform
        int red = source_clip->wave_color.red.GetInt(clip_frame_number);
        int green = source_clip->wave_color.green.GetInt(clip_frame_number);
        int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
        int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

        // Generate Waveform Dynamically (the size of the timeline)
        std::shared_ptr<QImage> source_image;
        #pragma omp critical (T_addLayer)
        source_image = source_frame->GetWaveform(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, red, green, blue, alpha);
        source_frame->AddImage(std::shared_ptr<QImage>(source_image));
    }

    /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
     * effects on the top clip. */
    if (is_top_clip && source_frame) {
        #pragma omp critical (T_addLayer)
        source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
    }

    // Declare an image to hold the source frame's image
    std::shared_ptr<QImage> source_image;

    /* COPY AUDIO - with correct volume */
    if (source_clip->Reader()->info.has_audio) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);

        if (source_frame->GetAudioChannelsCount() == info.channels && source_clip->has_audio.GetInt(clip_frame_number) != 0)
            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
            {
                // Get volume from previous frame and this frame
                float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1);
                float volume = source_clip->volume.GetValue(clip_frame_number);
                int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
                int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)

                // Apply volume mixing strategy
                if (source_clip->mixing == VOLUME_MIX_AVERAGE && max_volume > 1.0) {
                    // Don't allow this clip to exceed 100% (divide volume equally between all overlapping clips with volume)
                    previous_volume = previous_volume / max_volume;
                    volume = volume / max_volume;
                }
                else if (source_clip->mixing == VOLUME_MIX_REDUCE && max_volume > 1.0) {
                    // Reduce clip volume by a bit, hoping it will prevent exceeding 100% (but it is very possible it will)
                    previous_volume = previous_volume * 0.77;
                    volume = volume * 0.77;
                }

                // If channel filter enabled, check for correct channel (and skip non-matching channels)
                if (channel_filter != -1 && channel_filter != channel)
                    continue; // skip to next channel

                // If no volume on this frame or previous frame, do nothing
                if (previous_volume == 0.0 && volume == 0.0)
                    continue; // skip to next channel

                // If channel mapping disabled, just use the current channel
                if (channel_mapping == -1)
                    channel_mapping = channel;

                // Apply ramp to source frame (if needed)
                if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
                    source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

                // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
                // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
                // number of samples returned is variable... and does not match the number expected.
                // This is a crude solution at best. =)
                if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
                    // Force timeline frame to match the source frame
                    #pragma omp critical (T_addLayer)
                    new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

                // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together, so
                // be sure to set the gains correctly, so the sum does not exceed 1.0 (or audio distortion will happen).
                #pragma omp critical (T_addLayer)
                new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);

            }
        else
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);

    }

    // Skip out if video was disabled or only an audio frame (no visualisation in use)
    if (source_clip->has_video.GetInt(clip_frame_number) == 0 ||
        (!source_clip->Waveform() && !source_clip->Reader()->info.has_video))
        // Skip the rest of the image processing for performance reasons
        return;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);

    // Get actual frame image data
    source_image = source_frame->GetImage();

    /* ALPHA & OPACITY */
    if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
    {
        float alpha = source_clip->alpha.GetValue(clip_frame_number);

        // Get source image's pixels
        unsigned char *pixels = (unsigned char *) source_image->bits();

        // Loop through pixels (4 bytes per pixel, alpha stored in the 4th byte)
        for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
        {
            // Get the alpha value from the pixel
            int A = pixels[byte_index + 3];

            // Apply alpha to pixel
            pixels[byte_index + 3] = A * alpha;
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number);
    }

    /* RESIZE SOURCE IMAGE - based on scale type */
    QSize source_size = source_image->size();
    switch (source_clip->scale)
    {
        case (SCALE_FIT): {
            // keep aspect ratio
            source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_STRETCH): {
            // ignore aspect ratio
            source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::IgnoreAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_CROP): {
            QSize width_size(Settings::Instance()->MAX_WIDTH, round(Settings::Instance()->MAX_WIDTH / (float(source_size.width()) / float(source_size.height()))));
            QSize height_size(round(Settings::Instance()->MAX_HEIGHT / (float(source_size.height()) / float(source_size.width()))), Settings::Instance()->MAX_HEIGHT);

            // respect aspect ratio
            if (width_size.width() >= Settings::Instance()->MAX_WIDTH && width_size.height() >= Settings::Instance()->MAX_HEIGHT)
                source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
            else
                source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
        case (SCALE_NONE): {
            // Calculate ratio of source size to project size
            // Even with no scaling, previews need to be adjusted correctly
            // (otherwise NONE scaling draws the frame image outside of the preview)
            float source_width_ratio = source_size.width() / float(info.width);
            float source_height_ratio = source_size.height() / float(info.height);
            source_size.scale(Settings::Instance()->MAX_WIDTH * source_width_ratio, Settings::Instance()->MAX_HEIGHT * source_height_ratio, Qt::KeepAspectRatio);

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_NONE)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height());
            break;
        }
    }

    /* CROP GRAVITY - Adjust the crop anchor point based on the crop gravity */
    float crop_x = source_clip->crop_x.GetValue(clip_frame_number);
    float crop_y = source_clip->crop_y.GetValue(clip_frame_number);
    float crop_w = source_clip->crop_width.GetValue(clip_frame_number);
    float crop_h = source_clip->crop_height.GetValue(clip_frame_number);
    switch (source_clip->crop_gravity)
    {
        case (GRAVITY_TOP_LEFT):
            // This is only here to prevent unused-enum warnings
            break;
        case (GRAVITY_TOP):
            crop_x += 0.5;
            break;
        case (GRAVITY_TOP_RIGHT):
            crop_x += 1.0;
            break;
        case (GRAVITY_LEFT):
            crop_y += 0.5;
            break;
        case (GRAVITY_CENTER):
            crop_x += 0.5;
            crop_y += 0.5;
            break;
        case (GRAVITY_RIGHT):
            crop_x += 1.0;
            crop_y += 0.5;
            break;
        case (GRAVITY_BOTTOM_LEFT):
            crop_y += 1.0;
            break;
        case (GRAVITY_BOTTOM):
            crop_x += 0.5;
            crop_y += 1.0;
            break;
        case (GRAVITY_BOTTOM_RIGHT):
            crop_x += 1.0;
            crop_y += 1.0;
            break;
    }

    /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
    float x = 0.0; // left
    float y = 0.0; // top

    // Adjust size for scale x and scale y
    float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
    float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
    float scaled_source_width = source_size.width() * sx;
    float scaled_source_height = source_size.height() * sy;

    switch (source_clip->gravity)
    {
        case (GRAVITY_TOP_LEFT):
            // This is only here to prevent unused-enum warnings
            break;
        case (GRAVITY_TOP):
            x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
            break;
        case (GRAVITY_TOP_RIGHT):
            x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
            break;
        case (GRAVITY_LEFT):
            y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_CENTER):
            x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
            y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_RIGHT):
            x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
            y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center
            break;
        case (GRAVITY_BOTTOM_LEFT):
            y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
            break;
        case (GRAVITY_BOTTOM):
            x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center
            y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
            break;
        case (GRAVITY_BOTTOM_RIGHT):
            x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right
            y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom
            break;
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height);

    /* LOCATION, ROTATION, AND SCALE */
    float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
    x += (Settings::Instance()->MAX_WIDTH * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
    y += (Settings::Instance()->MAX_HEIGHT * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
    float shear_x = source_clip->shear_x.GetValue(clip_frame_number);
    float shear_y = source_clip->shear_y.GetValue(clip_frame_number);

    bool transformed = false;
    QTransform transform;

    // Transform source image (if needed)
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

    if (!isEqual(r, 0)) {
        // ROTATE CLIP (around its center point)
        float origin_x = x + (scaled_source_width / 2.0);
        float origin_y = y + (scaled_source_height / 2.0);
        transform.translate(origin_x, origin_y);
        transform.rotate(r);
        transform.translate(-origin_x, -origin_y);
        transformed = true;
    }

    if (!isEqual(x, 0) || !isEqual(y, 0)) {
        // TRANSLATE/MOVE CLIP
        transform.translate(x, y);
        transformed = true;
    }

    // SCALE CLIP (if needed)
    float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
    float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;

    if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
        transform.scale(source_width_scale, source_height_scale);
        transformed = true;
    }

    if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
        // SHEAR HEIGHT/WIDTH
        transform.shear(shear_x, shear_y);
        transformed = true;
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed);

    /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
    std::shared_ptr<QImage> new_image;
    #pragma omp critical (T_addLayer)
    new_image = new_frame->GetImage();

    // Load timeline's new frame image into a QPainter
    QPainter painter(new_image.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)... if any
    if (transformed)
        painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image, crop_x * source_image->width(), crop_y * source_image->height(), crop_w * source_image->width(), crop_h * source_image->height());

    // Draw frame #'s on top of image (if needed)
    if (source_clip->display != FRAME_DISPLAY_NONE) {
        std::stringstream frame_number_str;
        switch (source_clip->display)
        {
            case (FRAME_DISPLAY_NONE):
                // This is only here to prevent unused-enum warnings
                break;

            case (FRAME_DISPLAY_CLIP):
                frame_number_str << clip_frame_number;
                break;

            case (FRAME_DISPLAY_TIMELINE):
                frame_number_str << timeline_frame_number;
                break;

            case (FRAME_DISPLAY_BOTH):
                frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
                break;
        }

        // Draw frame number on top of image
        painter.setPen(QColor("#ffffff"));
        painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
    }

    painter.end();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed);
}

// Update the list of 'opened' clips
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size());

    // Is clip already in the 'opened' list?
    bool clip_found = open_clips.count(clip);

    if (clip_found && !does_clip_intersect)
    {
        // Remove clip from 'opened' list, because it's closed now
        open_clips.erase(clip);

        // Close clip
        clip->Close();
    }
    else if (!clip_found && does_clip_intersect)
    {
        // Add clip to 'opened' list, because it's missing
        open_clips[clip] = clip;

        try {
            // Open the clip
            clip->Open();

        } catch (const InvalidFile & e) {
            // ...
        }
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size());
}

// Sort clips by position on the timeline
void Timeline::sort_clips()
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size());

    // sort clips
    clips.sort(CompareClips());
}

// Sort effects by position on the timeline
void Timeline::sort_effects()
{
    // sort effects
    effects.sort(CompareEffects());
}

// Close the reader (and any resources it was consuming)
void Timeline::Close()
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close");

    // Close all open clips
    for (auto clip : clips)
    {
        // Open or Close this clip, based on if it's intersecting or not
        update_open_clips(clip, false);
    }

    // Mark timeline as closed
    is_open = false;

    // Clear cache
    final_cache->Clear();
}

// Open the reader (and start consuming resources)
void Timeline::Open()
{
    is_open = true;
}

// Compare 2 floating point numbers for equality
bool Timeline::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}
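
/* Worked example: isEqual(0.1 + 0.2, 0.3) returns true, since the rounding
 * error (about 5.6e-17) is far below the 1e-6 tolerance. This guards the
 * volume-ramp and transform checks in add_layer() against floating point
 * noise that would otherwise trigger needless work.
 */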

// Get an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
{
    // Adjust out of bounds frame number
    if (requested_frame < 1)
        requested_frame = 1;

    // Check cache
    std::shared_ptr<Frame> frame;
    #pragma omp critical (T_GetFrame)
    frame = final_cache->GetFrame(requested_frame);
    if (frame) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame);

        // Return cached frame
        return frame;
    }
    else
    {
        // Create a scoped lock, allowing only a single thread to run the following code at one time
        const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

        // Check for open reader (or throw exception)
        if (!is_open)
            throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.");

        // Check cache again (due to locking)
        #pragma omp critical (T_GetFrame)
        frame = final_cache->GetFrame(requested_frame);
        if (frame) {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame);

            // Return cached frame
            return frame;
        }

        // Minimum number of frames to process (for performance reasons)
        int minimum_frames = OPEN_MP_NUM_PROCESSORS;

        // Get a list of clips that intersect with the requested section of timeline
        // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
        std::vector<Clip*> nearby_clips;
        #pragma omp critical (T_GetFrame)
        nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);

        omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
        // Allow nested OpenMP sections
        omp_set_nested(true);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS);
        // GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
        // Determine all clip frames, and request them in order (to keep resampled audio in sequence)
        for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
        {
            // Loop through clips
            for (auto clip : nearby_clips)
            {
                long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
                long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

                bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
                if (does_clip_intersect)
                {
                    // Get clip frame #
                    long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
                    long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
                    // Cache clip object
                    clip->GetFrame(clip_frame_number);
                }
            }
        }
        #pragma omp parallel
        {
            // Loop through all requested frames
            #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) schedule(static,1)
            for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
            {
                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num());

                // Init some basic properties about this frame
                int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);

                // Create blank frame (which will become the requested frame)
                std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels));
                #pragma omp critical (T_GetFrame)
                {
                    new_frame->AddAudioSilence(samples_in_frame);
                    new_frame->SampleRate(info.sample_rate);
                    new_frame->ChannelsLayout(info.channel_layout);
                }

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);

                // Add Background Color to 1st layer (if animated or not black)
                if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
                    (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
                    new_frame->AddColor(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, color.GetColorHex(frame_number));

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());

                // Find Clips near this time
                for (auto clip : nearby_clips)
                {
                    long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
                    long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

                    bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);

                    // Debug output
                    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect);

                    // Clip is visible
                    if (does_clip_intersect)
                    {
                        // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
                        bool is_top_clip = true;
                        float max_volume = 0.0;
                        for (auto nearby_clip : nearby_clips)
                        {
                            long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
                            long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
                            long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
                            long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;

                            // Determine if top clip
                            if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
                                nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
                                nearby_clip_start_position > clip_start_position && is_top_clip == true) {
                                is_top_clip = false;
                            }

                            // Determine max volume of overlapping clips
                            if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
                                nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
                                nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
                                max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
                            }
                        }

                        // Determine the frame needed for this clip (based on the position on the timeline)
                        long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
                        long clip_frame_number = frame_number - clip_start_position + clip_start_frame;

                        // Debug output
                        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);

                        // Add clip's frame as layer
                        add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);

                    } else
                        // Debug output
                        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect);

                } // end clip loop

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);

                // Set frame # on mapped frame
                #pragma omp ordered
                {
                    new_frame->SetFrameNumber(frame_number);

                    // Add final frame to cache
                    final_cache->Add(new_frame);
                }

            } // end frame loop
        } // end parallel

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num());

        // Return frame (or blank frame)
        return final_cache->GetFrame(requested_frame);
    }
}
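
/* Example (an illustrative sketch): requesting frames sequentially lets the
 * look-ahead above (minimum_frames = OPEN_MP_NUM_PROCESSORS) fill the cache,
 * so most calls after the first batch are served directly from final_cache:
 *
 *     for (int64_t n = 1; n <= 100; n++) {
 *         std::shared_ptr<Frame> f = t.GetFrame(n); // cached after first batch
 *         // ... display or encode f ...
 *     }
 */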

// Find intersecting clips (or non intersecting clips)
std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
{
    // Find matching clips
    std::vector<Clip*> matching_clips;

    // Calculate the min and max requested frame numbers
    float min_requested_frame = requested_frame;
    float max_requested_frame = requested_frame + (number_of_frames - 1);

    // Re-Sort Clips (since they likely changed)
    sort_clips();

    // Find Clips at this time
    for (auto clip : clips)
    {
        // Does clip intersect the current requested time
        long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
        long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

        bool does_clip_intersect =
            (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
            (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect);

        // Open (or schedule for closing) this clip, based on if it's intersecting or not
        #pragma omp critical (reader_lock)
        update_open_clips(clip, does_clip_intersect);

        // Clip is visible
        if (does_clip_intersect && include)
            // Add the intersecting clip
            matching_clips.push_back(clip);

        else if (!does_clip_intersect && !include)
            // Add the non-intersecting clip
            matching_clips.push_back(clip);

    } // end clip loop

    // return list
    return matching_clips;
}
956 
957 // Set the cache object used by this reader
958 void Timeline::SetCache(CacheBase* new_cache) {
959  // Destroy previous cache (if managed by timeline)
960  if (managed_cache && final_cache) {
961  delete final_cache;
962  final_cache = NULL;
963  managed_cache = false;
964  }
965 
966  // Set new cache
967  final_cache = new_cache;
968 }
969 
970 // Generate JSON string of this object
971 std::string Timeline::Json() const {
972 
973  // Return formatted string
974  return JsonValue().toStyledString();
975 }
976 
977 // Generate Json::Value for this object
978 Json::Value Timeline::JsonValue() const {
979 
980  // Create root json object
981  Json::Value root = ReaderBase::JsonValue(); // get parent properties
982  root["type"] = "Timeline";
983  root["viewport_scale"] = viewport_scale.JsonValue();
984  root["viewport_x"] = viewport_x.JsonValue();
985  root["viewport_y"] = viewport_y.JsonValue();
986  root["color"] = color.JsonValue();
987 
988  // Add array of clips
989  root["clips"] = Json::Value(Json::arrayValue);
990 
991  // Find Clips at this time
992  for (const auto existing_clip : clips)
993  {
994  root["clips"].append(existing_clip->JsonValue());
995  }
996 
997  // Add array of effects
998  root["effects"] = Json::Value(Json::arrayValue);
999 
1000  // loop through effects
1001  for (const auto existing_effect: effects)
1002  {
1003  root["effects"].append(existing_effect->JsonValue());
1004  }
1005 
1006  // return JsonValue
1007  return root;
1008 }
1009 
1010 // Load JSON string into this object
1011 void Timeline::SetJson(const std::string value) {
1012 
1013  // Get lock (prevent getting frames while this happens)
1014  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1015 
1016  // Parse JSON string into JSON objects
1017  try
1018  {
1019  const Json::Value root = openshot::stringToJson(value);
1020  // Set all values that match
1021  SetJsonValue(root);
1022  }
1023  catch (const std::exception& e)
1024  {
1025  // Error parsing JSON (or missing keys)
1026  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
1027  }
1028 }
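
/* Example (an illustrative sketch; the keys mirror JsonValue() above, the
 * values are assumptions chosen for the example):
 *
 *     t.SetJson("{ \"duration\": 300.0, \"clips\": [], \"effects\": [] }");
 */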

// Load Json::Value into this object
void Timeline::SetJsonValue(const Json::Value root) {

    // Close timeline before we do anything (this also removes all open and closing clips)
    bool was_open = is_open;
    Close();

    // Set parent data
    ReaderBase::SetJsonValue(root);

    if (!root["clips"].isNull()) {
        // Clear existing clips
        clips.clear();

        // loop through clips
        for (const Json::Value existing_clip : root["clips"]) {
            // Create Clip
            Clip *c = new Clip();

            // Load Json into Clip
            c->SetJsonValue(existing_clip);

            // Add Clip to Timeline
            AddClip(c);
        }
    }

    if (!root["effects"].isNull()) {
        // Clear existing effects
        effects.clear();

        // loop through effects
        for (const Json::Value existing_effect : root["effects"]) {
            // Create Effect
            EffectBase *e = NULL;

            if (!existing_effect["type"].isNull()) {
                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to Timeline
                    AddEffect(e);
                }
            }
        }
    }

    if (!root["duration"].isNull()) {
        // Update duration (and video length) of timeline
        info.duration = root["duration"].asDouble();
        info.video_length = info.fps.ToFloat() * info.duration;
    }

    // Re-open if needed
    if (was_open)
        Open();
}

// Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
void Timeline::ApplyJsonDiff(std::string value) {

    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Process the JSON change array, loop through each item
        for (const Json::Value change : root) {
            std::string change_key = change["key"][(uint)0].asString();

            // Process each type of change
            if (change_key == "clips")
                // Apply to CLIPS
                apply_json_to_clips(change);

            else if (change_key == "effects")
                // Apply to EFFECTS
                apply_json_to_effects(change);

            else
                // Apply to TIMELINE
                apply_json_to_timeline(change);

        }
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}
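
/* Example diff (an illustrative sketch; the id and position are assumptions).
 * The "key" format follows apply_json_to_clips() below; this change moves an
 * existing clip to 5.0 seconds:
 *
 *     t.ApplyJsonDiff(
 *         "[{\"type\": \"update\","
 *         "  \"key\": [\"clips\", {\"id\": \"CLIP_1\"}],"
 *         "  \"value\": {\"id\": \"CLIP_1\", \"position\": 5.0}}]");
 */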

// Apply JSON diff to clips
void Timeline::apply_json_to_clips(Json::Value change) {

    // Get key and type of change
    std::string change_type = change["type"].asString();
    std::string clip_id = "";
    Clip *existing_clip = NULL;

    // Find id of clip (if any)
    for (auto key_part : change["key"]) {
        // Get each change
        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull()) {
                // Set the id
                clip_id = key_part["id"].asString();

                // Find matching clip in timeline (if any)
                for (auto c : clips)
                {
                    if (c->Id() == clip_id) {
                        existing_clip = c;
                        break; // clip found, exit loop
                    }
                }
                break; // id found, exit loop
            }
        }
    }

    // Check for a more specific key (targeting this clip's effects)
    // For example: ["clips", {"id": "123"}, "effects", {"id": "432"}]
    if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
    {
        // This change is actually targeting a specific effect under a clip (and not the clip)
        Json::Value key_part = change["key"][3];

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull())
            {
                // Set the id
                std::string effect_id = key_part["id"].asString();

                // Find matching effect in timeline (if any)
                std::list<EffectBase*> effect_list = existing_clip->Effects();
                for (auto e : effect_list)
                {
                    if (e->Id() == effect_id) {
                        // Apply the change to the effect directly
                        apply_json_to_effects(change, e);

                        // Calculate start and end frames that this impacts, and remove those frames from the cache
                        int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
                        int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
                        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);

                        return; // effect found, don't update clip
                    }
                }
            }
        }
    }

    // Calculate start and end frames that this impacts, and remove those frames from the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine type of change operation
    if (change_type == "insert") {

        // Create new clip
        Clip *clip = new Clip();
        clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
        AddClip(clip); // Add clip to timeline

        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);

    } else if (change_type == "update") {

        // Update existing clip
        if (existing_clip) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove cache on clip's Reader (if found)
            if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
                existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update clip properties from JSON
            existing_clip->SetJsonValue(change["value"]);

            // Apply framemapper (or update existing framemapper)
            apply_mapper_to_clip(existing_clip);
        }

    } else if (change_type == "delete") {

        // Remove existing clip
        if (existing_clip) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove clip from timeline
            RemoveClip(existing_clip);
        }

    }

}

// Apply JSON diff to effects
void Timeline::apply_json_to_effects(Json::Value change) {

    // Get key and type of change
    std::string change_type = change["type"].asString();
    EffectBase *existing_effect = NULL;

    // Find id of an effect (if any)
    for (auto key_part : change["key"]) {

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull())
            {
                // Set the id
                std::string effect_id = key_part["id"].asString();

                // Find matching effect in timeline (if any)
                for (auto e : effects)
                {
                    if (e->Id() == effect_id) {
                        existing_effect = e;
                        break; // effect found, exit loop
                    }
                }
                break; // id found, exit loop
            }
        }
    }

    // Now that we found the effect, apply the change to it
    if (existing_effect || change_type == "insert")
        // Apply change to effect
        apply_json_to_effects(change, existing_effect);
}

// Apply JSON diff to effects (if you already know which effect needs to be updated)
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {

    // Get key and type of change
    std::string change_type = change["type"].asString();

    // Calculate start and end frames that this impacts, and remove those frames from the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine type of change operation
    if (change_type == "insert") {

        // Determine type of effect
        std::string effect_type = change["value"]["type"].asString();

        // Create Effect
        EffectBase *e = NULL;

        // Init the matching effect object
        if ( (e = EffectInfo().CreateEffect(effect_type)) ) {

            // Load Json into Effect
            e->SetJsonValue(change["value"]);

            // Add Effect to Timeline
            AddEffect(e);
        }

    } else if (change_type == "update") {

        // Update existing effect
        if (existing_effect) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update effect properties from JSON
            existing_effect->SetJsonValue(change["value"]);
        }

    } else if (change_type == "delete") {

        // Remove existing effect
        if (existing_effect) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
            int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove effect from timeline
            RemoveEffect(existing_effect);
        }

    }
}

// Apply JSON diff to timeline properties
void Timeline::apply_json_to_timeline(Json::Value change) {

    // Get key and type of change
    std::string change_type = change["type"].asString();
    std::string root_key = change["key"][(uint)0].asString();
    std::string sub_key = "";
    if (change["key"].size() >= 2)
        sub_key = change["key"][(uint)1].asString();

    // Clear entire cache
    final_cache->Clear();

    // Determine type of change operation
    if (change_type == "insert" || change_type == "update") {

        // INSERT / UPDATE
        // Check for valid property
        if (root_key == "color")
            // Set color
            color.SetJsonValue(change["value"]);
        else if (root_key == "viewport_scale")
            // Set viewport scale
            viewport_scale.SetJsonValue(change["value"]);
        else if (root_key == "viewport_x")
            // Set viewport x offset
            viewport_x.SetJsonValue(change["value"]);
        else if (root_key == "viewport_y")
            // Set viewport y offset
            viewport_y.SetJsonValue(change["value"]);
        else if (root_key == "duration") {
            // Update duration (and video length) of timeline
            info.duration = change["value"].asDouble();
            info.video_length = info.fps.ToFloat() * info.duration;
        }
1382  else if (root_key == "width")
1383  // Set width
1384  info.width = change["value"].asInt();
1385  else if (root_key == "height")
1386  // Set height
1387  info.height = change["value"].asInt();
1388  else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
1389  // Set fps fraction
1390  if (!change["value"]["num"].isNull())
1391  info.fps.num = change["value"]["num"].asInt();
1392  if (!change["value"]["den"].isNull())
1393  info.fps.den = change["value"]["den"].asInt();
1394  }
1395  else if (root_key == "fps" && sub_key == "num")
1396  // Set fps.num
1397  info.fps.num = change["value"].asInt();
1398  else if (root_key == "fps" && sub_key == "den")
1399  // Set fps.den
1400  info.fps.den = change["value"].asInt();
1401  else if (root_key == "display_ratio" && sub_key == "" && change["value"].isObject()) {
1402  // Set display_ratio fraction
1403  if (!change["value"]["num"].isNull())
1404  info.display_ratio.num = change["value"]["num"].asInt();
1405  if (!change["value"]["den"].isNull())
1406  info.display_ratio.den = change["value"]["den"].asInt();
1407  }
1408  else if (root_key == "display_ratio" && sub_key == "num")
1409  // Set display_ratio.num
1410  info.display_ratio.num = change["value"].asInt();
1411  else if (root_key == "display_ratio" && sub_key == "den")
1412  // Set display_ratio.den
1413  info.display_ratio.den = change["value"].asInt();
1414  else if (root_key == "pixel_ratio" && sub_key == "" && change["value"].isObject()) {
1415  // Set pixel_ratio fraction
1416  if (!change["value"]["num"].isNull())
1417  info.pixel_ratio.num = change["value"]["num"].asInt();
1418  if (!change["value"]["den"].isNull())
1419  info.pixel_ratio.den = change["value"]["den"].asInt();
1420  }
1421  else if (root_key == "pixel_ratio" && sub_key == "num")
1422  // Set pixel_ratio.num
1423  info.pixel_ratio.num = change["value"].asInt();
1424  else if (root_key == "pixel_ratio" && sub_key == "den")
1425  // Set pixel_ratio.den
1426  info.pixel_ratio.den = change["value"].asInt();
1427 
1428  else if (root_key == "sample_rate")
1429  // Set sample rate
1430  info.sample_rate = change["value"].asInt();
1431  else if (root_key == "channels")
1432  // Set channels
1433  info.channels = change["value"].asInt();
1434  else if (root_key == "channel_layout")
1435  // Set channel layout
1436  info.channel_layout = (ChannelLayout) change["value"].asInt();
1437  else
1438  // Error parsing JSON (or missing keys)
1439  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1440 
1441 
1442  } else if (change["type"].asString() == "delete") {
1443 
1444  // DELETE / RESET
1445  // Reset the following properties (since we can't delete them)
1446  if (root_key == "color") {
1447  color = Color();
1448  color.red = Keyframe(0.0);
1449  color.green = Keyframe(0.0);
1450  color.blue = Keyframe(0.0);
1451  }
1452  else if (root_key == "viewport_scale")
1453  viewport_scale = Keyframe(1.0);
1454  else if (root_key == "viewport_x")
1455  viewport_x = Keyframe(0.0);
1456  else if (root_key == "viewport_y")
1457  viewport_y = Keyframe(0.0);
1458  else
1459  // Error parsing JSON (or missing keys)
1460  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1461 
1462  }
1463 
1464 }
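
The update branches above are normally driven through ApplyJsonDiff() (defined earlier in this file at line 1092), which parses an array of change objects and dispatches on each one's "type" and "key" path. A minimal sketch of an fps update fed through that entry point; the exact key-path encoding is inferred from how root_key and sub_key are parsed in this function, so treat the diff layout as an assumption rather than a documented wire format:

    // Sketch: one "update" change targeting the root key "fps".
    // An object value routes to the fps-fraction branch at line 1388 above.
    #include "Timeline.h"

    void set_ntsc_fps(openshot::Timeline& timeline) {
        std::string diff = R"([
            {"type": "update",
             "key": ["fps"],
             "value": {"num": 30000, "den": 1001}}
        ])";
        timeline.ApplyJsonDiff(diff);
    }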
1465 
1466 // Clear all caches
1467 void Timeline::ClearAllCache() {
1468 
1469  // Get lock (prevent getting frames while this happens)
1470  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1471 
1472  // Clear primary cache
1473  final_cache->Clear();
1474 
1475  // Loop through all clips
1476  for (auto clip : clips)
1477  {
1478  // Clear cache on clip
1479  clip->Reader()->GetCache()->Clear();
1480 
1481  // Clear nested Reader (if any)
1482  if (clip->Reader()->Name() == "FrameMapper") {
1483  FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
1484  if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
1485  nested_reader->Reader()->GetCache()->Clear();
1486  }
1487 
1488  }
1489 }
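
ClearAllCache() drops the timeline's own cache, then every clip's reader cache, and finally any reader nested inside a FrameMapper, so it is the safe (if blunt) way to invalidate frames after a bulk change. A short usage sketch, continuing the hypothetical diff from the previous example:

    // Sketch: after mutating the project, flush every cache layer so the
    // next GetFrame() re-renders instead of serving stale frames.
    timeline.ApplyJsonDiff(diff);        // e.g. the fps update sketched above
    timeline.ClearAllCache();            // timeline, clip, and mapper caches
    auto frame = timeline.GetFrame(1);   // rebuilt from the underlying readers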
1490 
1491 // Set Max Image Size (used for performance optimization). Convenience function for setting
1492 // Settings::Instance()->MAX_WIDTH and Settings::Instance()->MAX_HEIGHT.
1493 void Timeline::SetMaxSize(int width, int height) {
1494  // Maintain aspect ratio regardless of what size is passed in
1495  QSize display_ratio_size = QSize(info.display_ratio.num * info.pixel_ratio.ToFloat(), info.display_ratio.den * info.pixel_ratio.ToFloat());
1496  QSize proposed_size = QSize(std::min(width, info.width), std::min(height, info.height));
1497 
1498  // Scale QSize up to proposed size
1499  display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio);
1500 
1501  // Set max size
1502  Settings::Instance()->MAX_WIDTH = display_ratio_size.width();
1503  Settings::Instance()->MAX_HEIGHT = display_ratio_size.height();
1504 }
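
A worked example of the scaling math above, assuming a 1920x1080 timeline with a 16:9 display ratio and square pixels (pixel_ratio 1/1): display_ratio_size starts at 16x9, the proposed size is clamped to QSize(1280, 1000), and QSize::scale() with Qt::KeepAspectRatio grows 16x9 by the smaller factor, min(1280/16, 1000/9) = 80, yielding 1280x720:

    // Sketch: cap preview rendering without distorting the frame.
    using namespace openshot;
    Timeline timeline(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
    timeline.SetMaxSize(1280, 1000);
    // Settings::Instance()->MAX_WIDTH  == 1280
    // Settings::Instance()->MAX_HEIGHT == 720   (aspect ratio preserved)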
void ApplyJsonDiff(std::string value)
Apply a specially formatted JSON object, which represents a change to the timeline (add, update, delete). This is primarily designed to keep the timeline (and its child objects... such as clips and effects) in sync with another application... such as OpenShot Video Editor (http://www.openshot.org).
Definition: Timeline.cpp:1092
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Color.cpp:126
Display the timeline's frame number.
Definition: Enums.h:72
void Close()
Close the internal reader.
Definition: Clip.cpp:259
int MAX_HEIGHT
Maximum height for image data (useful for optimizing for a smaller preview or render) ...
Definition: Settings.h:101
int num
Numerator for the fraction.
Definition: Fraction.h:47
ReaderBase * Reader()
Get the current reader.
Definition: FrameMapper.cpp:72
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:66
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:45
Keyframe viewport_scale
Curve representing the scale of the viewport (0 to 100)
Definition: Timeline.h:257
std::string Id() const
Get basic properties.
Definition: ClipBase.h:76
openshot::Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:226
Align clip to the bottom right of its parent.
Definition: Enums.h:48
void SetCache(CacheBase *new_cache)
Definition: Timeline.cpp:958
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes) ...
Definition: Clip.h:253
float Start() const
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:79
int width
The width of the video (in pixels)
Definition: ReaderBase.h:68
Do not scale the clip.
Definition: Enums.h:57
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:44
float duration
Length of time (in seconds)
Definition: ReaderBase.h:65
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:215
openshot::Keyframe crop_x
Curve representing X offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:232
void Reduce()
Reduce this fraction (i.e. 640/480 = 4/3)
Definition: Fraction.cpp:74
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
Definition: CacheBase.cpp:49
Scale the clip until both height and width fill the canvas (cropping the overlap) ...
Definition: Enums.h:54
Evenly divide the overlapping clips' volume keyframes, so that the sum does not exceed 100%...
Definition: Enums.h:80
Keyframe viewport_y
Curve representing the y coordinate for the viewport.
Definition: Timeline.h:259
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Definition: Clip.h:143
Fraction Reciprocal()
Return the reciprocal as a Fraction.
Definition: Fraction.cpp:84
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:212
void Close()
Close the openshot::FrameMapper and internal reader.
const Json::Value stringToJson(const std::string value)
Definition: Json.cpp:33
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:97
#define OPEN_MP_NUM_PROCESSORS
openshot::FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:146
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:223
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:213
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:337
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:62
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:236
Do not display the frame number.
Definition: Enums.h:70
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Definition: Clip.cpp:295
Align clip to the top right of its parent.
Definition: Enums.h:42
Align clip to the bottom left of its parent.
Definition: Enums.h:46
std::list< openshot::EffectBase * > Effects()
Return the list of effects on the timeline.
Definition: Clip.h:171
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:286
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
Definition: ZmqLogger.cpp:179
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:50
Exception for missing JSON Change key.
Definition: Exceptions.h:253
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:547
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: EffectBase.cpp:117
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Clip.cpp:815
virtual openshot::CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:63
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ReaderBase.cpp:116
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:95
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
int MAX_WIDTH
Maximum width for image data (useful for optimizing for a smaller preview or render) ...
Definition: Settings.h:98
bool Waveform()
Waveform property.
Definition: Clip.h:208
int64_t video_length
The number of frames in the video stream.
Definition: ReaderBase.h:75
openshot::Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:248
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:49
int height
The height of the video (in pixels)
Definition: ReaderBase.h:67
Align clip to the bottom center of its parent.
Definition: Enums.h:47
virtual void Remove(int64_t frame_number)=0
Remove a specific frame.
void SetMaxSize(int width, int height)
Definition: Timeline.cpp:1493
Align clip to the top left of its parent.
Definition: Enums.h:40
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:218
openshot::Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition: ReaderBase.h:77
Exception for files that can not be found or opened.
Definition: Exceptions.h:173
void ClearAllCache()
Clear all cache for this timeline instance, and all clips, mappers, and readers under it...
Definition: Timeline.cpp:1467
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:219
static CrashHandler * Instance()
Json::Value JsonValue() const override
Generate Json::Value for this object.
Definition: Timeline.cpp:978
void ApplyMapperToClips()
Apply the timeline's framerate and samplerate to all clips.
Definition: Timeline.cpp:167
std::shared_ptr< Frame > GetFrame(int64_t requested_frame)
Definition: Timeline.cpp:725
This class represents a fraction.
Definition: Fraction.h:45
std::string GetColorHex(int64_t frame_number)
Get the HEX value of a color at a specific frame.
Definition: Color.cpp:67
openshot::Keyframe has_audio
Override has_video and has_audio properties of clip (and their readers)
Definition: Clip.h:252
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
Get a frame from the cache.
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:84
juce::CriticalSection getFrameCriticalSection
Section lock for multiple threads.
Definition: ReaderBase.h:101
All cache managers in libopenshot are based on this CacheBase class.
Definition: CacheBase.h:49
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround...
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:43
void AddClip(Clip *clip)
Add an openshot::Clip to the timeline.
Definition: Timeline.cpp:101
void Close()
Close the timeline reader (and any resources it was consuming)
Definition: Timeline.cpp:694
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
Definition: Clip.h:249
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:56
Display the clip's internal frame number.
Definition: Enums.h:71
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Timeline.cpp:1031
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ReaderBase.cpp:171
openshot::ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:111
std::string Json() const override
Get and Set JSON methods.
Definition: Timeline.cpp:971
Exception for frames that are out of bounds.
Definition: Exceptions.h:285
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:139
void Open()
Open the internal reader.
Definition: Clip.cpp:242
This class represents a color (used on the timeline and clips)
Definition: Color.h:45
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:45
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%) ...
Definition: Enums.h:81
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:44
void Open()
Open the reader (and start consuming resources)
Definition: Timeline.cpp:713
float Duration() const
Get the length of this clip (in seconds)
Definition: ClipBase.h:81
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: KeyFrame.cpp:362
Display both the clip's and timeline's frame number.
Definition: Enums.h:73
This namespace is the default namespace for all code in the openshot library.
Do not apply pull-down techniques, just repeat or skip entire frames.
Definition: FrameMapper.h:64
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: KeyFrame.cpp:329
virtual void Clear()=0
Clear the cache of all frames.
void RemoveClip(Clip *clip)
Remove an openshot::Clip from the timeline.
Definition: Timeline.cpp:132
void RemoveEffect(EffectBase *effect)
Remove an effect from the timeline.
Definition: Timeline.cpp:126
void SetJson(const std::string value)
Load JSON string into this object.
Definition: Timeline.cpp:1011
Exception for invalid JSON.
Definition: Exceptions.h:205
int64_t GetCount() const
Get the number of points in this keyframe's curve.
Definition: KeyFrame.cpp:510
double GetValue(int64_t index) const
Get the value at a specific index.
Definition: KeyFrame.cpp:262
openshot::Fraction display_ratio
The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3) ...
Definition: ReaderBase.h:73
Keyframe viewport_x
Curve representing the x coordinate for the viewport.
Definition: Timeline.h:258
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition: Clip.h:144
void Reader(openshot::ReaderBase *new_reader)
Set the current reader.
Definition: Clip.cpp:219
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:48
openshot::GravityType crop_gravity
Cropping needs to have a gravity to determine what side we are cropping.
Definition: Clip.h:229
static Settings * Instance()
Create or get an instance of this settings singleton (invoke the class with this method) ...
Definition: Settings.cpp:41
openshot::Keyframe crop_y
Curve representing Y offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:233
virtual void Add(std::shared_ptr< openshot::Frame > frame)=0
Add a Frame to the cache.
openshot::Fraction pixel_ratio
The pixel ratio of the video stream as a fraction (i.e. some pixels are not square) ...
Definition: ReaderBase.h:72
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:237
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:214
Color color
Background color of timeline canvas.
Definition: Timeline.h:262
float Position() const
Get position on timeline (in seconds)
Definition: ClipBase.h:77
Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout)
Default Constructor for the timeline (which sets the canvas width, height, and FPS) ...
Definition: Timeline.cpp:36
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:46
Align clip to the top center of its parent.
Definition: Enums.h:41
openshot::Keyframe alpha
Curve representing the alpha value (0 - 255)
Definition: Color.h:51
int den
Denominator for the fraction.
Definition: Fraction.h:48
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:83
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Definition: KeyFrame.h:64
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:55
void AddEffect(EffectBase *effect)
Add an effect to the timeline.
Definition: Timeline.cpp:116
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:147
openshot::Keyframe crop_width
Curve representing width in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:230
openshot::Keyframe crop_height
Curve representing height in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:231
virtual ~Timeline()
Definition: Timeline.cpp:76
This class is a memory-based cache manager for Frame objects.
Definition: CacheMemory.h:51
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:70
int Layer() const
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.h:78
double ToDouble()
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:49
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:82
Exception when too many seek attempts happen.
Definition: Exceptions.h:369
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: Color.cpp:95