OpenShot Library | libopenshot 0.1.9
Timeline.cpp
1 /**
2  * @file
3  * @brief Source file for Timeline class
4  * @author Jonathan Thomas <jonathan@openshot.org>
5  *
6  * @section LICENSE
7  *
8  * Copyright (c) 2008-2014 OpenShot Studios, LLC
9  * <http://www.openshotstudios.com/>. This file is part of
10  * OpenShot Library (libopenshot), an open-source project dedicated to
11  * delivering high quality video editing and animation solutions to the
12  * world. For more information visit <http://www.openshot.org/>.
13  *
14  * OpenShot Library (libopenshot) is free software: you can redistribute it
15  * and/or modify it under the terms of the GNU Lesser General Public License
16  * as published by the Free Software Foundation, either version 3 of the
17  * License, or (at your option) any later version.
18  *
19  * OpenShot Library (libopenshot) is distributed in the hope that it will be
20  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22  * GNU Lesser General Public License for more details.
23  *
24  * You should have received a copy of the GNU Lesser General Public License
25  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
26  */
27 
28 #include "../include/Timeline.h"
29 
30 using namespace openshot;
31 
32 // Default Constructor for the timeline (which sets the canvas width and height)
33 Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
34  is_open(false), auto_map_clips(true)
35 {
36  // Create CrashHandler and Attach (in case of errors)
37  CrashHandler::Instance();
38 
39  // Init viewport size (curve based, because it can be animated)
40  viewport_scale = Keyframe(100.0);
41  viewport_x = Keyframe(0.0);
42  viewport_y = Keyframe(0.0);
43 
44  // Init background color
45  color.red = Keyframe(0.0);
46  color.green = Keyframe(0.0);
47  color.blue = Keyframe(0.0);
48 
49  // Init FileInfo struct (clear all values)
50  info.width = width;
51  info.height = height;
52  info.fps = fps;
53  info.sample_rate = sample_rate;
54  info.channels = channels;
55  info.channel_layout = channel_layout;
56  info.video_timebase = fps.Reciprocal();
57  info.duration = 60 * 30; // 30 minute default duration
58  info.has_audio = true;
59  info.has_video = true;
60  info.video_length = info.fps.ToFloat() * info.duration;
61 
62  // Init max image size
63  SetMaxSize(info.width, info.height);
64 
65  // Init cache
66  final_cache = new CacheMemory();
67  final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
68 }
69 
70 // Add an openshot::Clip to the timeline
71 void Timeline::AddClip(Clip* clip)
72 {
73  // All clips should be converted to the frame rate of this timeline
74  if (auto_map_clips)
75  // Apply framemapper (or update existing framemapper)
76  apply_mapper_to_clip(clip);
77 
78  // Add clip to list
79  clips.push_back(clip);
80 
81  // Sort clips
82  sort_clips();
83 }
84 
85 // Add an effect to the timeline
86 void Timeline::AddEffect(EffectBase* effect)
87 {
88  // Add effect to list
89  effects.push_back(effect);
90 
91  // Sort effects
92  sort_effects();
93 }
94 
95 // Remove an effect from the timeline
96 void Timeline::RemoveEffect(EffectBase* effect)
97 {
98  effects.remove(effect);
99 }
100 
101 // Remove an openshot::Clip from the timeline
102 void Timeline::RemoveClip(Clip* clip)
103 {
104  clips.remove(clip);
105 }
106 
107 // Apply a FrameMapper to a clip which matches the settings of this timeline
108 void Timeline::apply_mapper_to_clip(Clip* clip)
109 {
110  // Get lock (prevent getting frames while this happens)
111  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
112 
113  // Determine type of reader
114  ReaderBase* clip_reader = NULL;
115  if (clip->Reader()->Name() == "FrameMapper")
116  {
117  // Get the existing reader
118  clip_reader = (ReaderBase*) clip->Reader();
119 
120  } else {
121 
122  // Create a new FrameMapper to wrap the current reader
123  clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
124  }
125 
126  // Update the mapping
127  FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
128  clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
129 
130  // Update timeline offset
131  float time_diff = 0 - clip->Position() + clip->Start();
132  int clip_offset = -round(time_diff * info.fps.ToFloat());
133 
134  if (clip_offset != 0)
135  // Adjust the offset by 1 (since we want to avoid frame 0)
136  clip_offset += 1;
137 
138  clip_mapped_reader->SetTimelineFrameOffset(clip_offset);
139 
140  // Update clip reader
141  clip->Reader(clip_reader);
142 }
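// Worked example of the offset math above (illustrative values, not from the original
// source): a clip with Position() = 2.0s and Start() = 0.5s on a 30-fps timeline gives
// time_diff = -2.0 + 0.5 = -1.5s, so clip_offset = -round(-1.5 * 30) = 45, which is
// then shifted to 46 to avoid landing on frame 0.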
143 
144 // Apply the timeline's framerate and samplerate to all clips
145 void Timeline::ApplyMapperToClips()
146 {
147  // Clear all cached frames
148  ClearAllCache();
149 
150  // Loop through all clips
151  list<Clip*>::iterator clip_itr;
152  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
153  {
154  // Get clip object from the iterator
155  Clip *clip = (*clip_itr);
156 
157  // Apply framemapper (or update existing framemapper)
158  apply_mapper_to_clip(clip);
159  }
160 }
161 
162 // Calculate time of a frame number, based on a framerate
163 double Timeline::calculate_time(int64_t number, Fraction rate)
164 {
165  // Get float version of fps fraction
166  double raw_fps = rate.ToFloat();
167 
168  // Return the time (in seconds) of this frame
169  return double(number - 1) / raw_fps;
170 }
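// Worked example (illustrative): frame numbers are 1-based, so with rate = 30/1,
// calculate_time(1, rate) == 0.0 and calculate_time(31, rate) == 1.0 seconds.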
171 
172 // Apply effects to the source frame (if any)
173 std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
174 {
175  // Debug output
176  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1, "", -1);
177 
178  // Find Effects at this position and layer
179  list<EffectBase*>::iterator effect_itr;
180  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
181  {
182  // Get effect object from the iterator
183  EffectBase *effect = (*effect_itr);
184 
185  // Does the effect intersect the current requested time
186  long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
187  long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble()) + 1;
188 
189  bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);
190 
191  // Debug output
192  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);
193 
194  // Effect is visible
195  if (does_effect_intersect)
196  {
197  // Determine the frame needed for this effect (based on its position on the timeline)
198  long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
199  long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
200 
201  // Debug output
202  ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect, "", -1, "", -1, "", -1, "", -1);
203 
204  // Apply the effect to this frame
205  frame = effect->GetFrame(frame, effect_frame_number);
206  }
207 
208  } // end effect loop
209 
210  // Return modified frame
211  return frame;
212 }
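// Worked example of the mapping above (illustrative values): an effect at
// Position() = 10.0s with Start() = 0 on a 30-fps timeline starts at timeline frame
// round(10 * 30) + 1 = 301; for timeline_frame_number = 310, the effect is asked for
// effect_frame_number = 310 - 301 + 1 = 10.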
213 
214 // Get or generate a blank frame
215 std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
216 {
217  std::shared_ptr<Frame> new_frame;
218 
219  // Init some basic properties about this frame
220  int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);
221 
222  try {
223  // Debug output
224  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);
225 
226  // Set max image size (used for performance optimization)
227  clip->SetMaxSize(info.width, info.height);
228 
229  // Attempt to get a frame (but this could fail if a reader has just been closed)
230  #pragma omp critical (T_GetOrCreateFrame)
231  new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));
232 
233  // Return real frame
234  return new_frame;
235 
236  } catch (const ReaderClosed & e) {
237  // Reader was closed; fall through and build a blank frame below
238  } catch (const TooManySeeks & e) {
239  // Too many seeks; fall through and build a blank frame below
240  } catch (const OutOfBoundsFrame & e) {
241  // Frame out of bounds; fall through and build a blank frame below
242  }
243 
244  // Debug output
245  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);
246 
247  // Create blank frame
248  new_frame = std::make_shared<Frame>(number, max_width, max_height, "#000000", samples_in_frame, info.channels);
249  #pragma omp critical (T_GetOrCreateFrame)
250  {
251  new_frame->SampleRate(info.sample_rate);
252  new_frame->ChannelsLayout(info.channel_layout);
253  }
254  return new_frame;
255 }
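// Worked example for GetSamplesPerFrame (illustrative): at 30 fps and 44100 Hz every
// frame carries 44100 / 30 = 1470 samples; for a non-integer rate such as 30000/1001
// the per-frame count alternates (1471 or 1472), so rounding error never accumulates.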
256 
257 // Process a new layer of video or audio
258 void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip)
259 {
260  // Get the clip's frame & image
261  std::shared_ptr<Frame> source_frame;
262  #pragma omp critical (T_addLayer)
263  source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
264 
265  // No frame found... so bail
266  if (!source_frame)
267  return;
268 
269  // Debug output
270  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);
271 
272  /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
273  if (source_clip->Waveform())
274  {
275  // Debug output
276  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
277 
278  // Get the color of the waveform
279  int red = source_clip->wave_color.red.GetInt(clip_frame_number);
280  int green = source_clip->wave_color.green.GetInt(clip_frame_number);
281  int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
282  int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);
283 
284  // Generate Waveform Dynamically (the size of the timeline)
285  std::shared_ptr<QImage> source_image;
286  #pragma omp critical (T_addLayer)
287  source_image = source_frame->GetWaveform(max_width, max_height, red, green, blue, alpha);
288  source_frame->AddImage(std::shared_ptr<QImage>(source_image));
289  }
290 
291  /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
292  * effects on the top clip. */
293  if (is_top_clip && source_frame)
294  source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
295 
296  // Declare an image to hold the source frame's image
297  std::shared_ptr<QImage> source_image;
298 
299  /* COPY AUDIO - with correct volume */
300  if (source_clip->Reader()->info.has_audio) {
301 
302  // Debug output
303  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);
304 
305  if (source_frame->GetAudioChannelsCount() == info.channels)
306  for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
307  {
308  float initial_volume = 1.0f;
309  float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1); // previous frame's percentage of volume (0 to 1)
310  float volume = source_clip->volume.GetValue(clip_frame_number); // percentage of volume (0 to 1)
311  int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
312  int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)
313 
314  // If channel filter enabled, check for correct channel (and skip non-matching channels)
315  if (channel_filter != -1 && channel_filter != channel)
316  continue; // skip to next channel
317 
318  // If channel mapping disabled, just use the current channel
319  if (channel_mapping == -1)
320  channel_mapping = channel;
321 
322  // If no ramp needed, set initial volume = clip's volume
323  if (isEqual(previous_volume, volume))
324  initial_volume = volume;
325 
326  // Apply ramp to source frame (if needed)
327  if (!isEqual(previous_volume, volume))
328  source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
329 
330  // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
331  // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
332  // number of samples returned is variable... and does not match the number expected.
333  // This is a crude solution at best. =)
334  if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
335  // Force timeline frame to match the source frame
336  #pragma omp critical (T_addLayer)
337  new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
338 
339  // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are
340  // added together, so set each gain carefully; if the sum exceeds 1.0, audio distortion will occur.
341  #pragma omp critical (T_addLayer)
342  new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);
343 
344  }
345  else
346  // Debug output
347  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);
348 
349  }
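 // Note on the ramp above: when the keyframed volume changes between frames (e.g. a
 // fade from 0.5 to 1.0), ApplyGainRamp ramps the gain smoothly from previous_volume
 // to volume across the frame's samples, instead of applying a single step that would
 // produce an audible click.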
350 
351  // Skip out if only an audio frame
352  if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
353  // Skip the rest of the image processing for performance reasons
354  return;
355 
356  // Debug output
357  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
358 
359  // Get actual frame image data
360  source_image = source_frame->GetImage();
361 
362  /* ALPHA & OPACITY */
363  if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
364  {
365  float alpha = source_clip->alpha.GetValue(clip_frame_number);
366 
367  // Get source image's pixels
368  unsigned char *pixels = (unsigned char *) source_image->bits();
369 
370  // Loop through pixels
371  for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
372  {
373  // Get the alpha value from the pixel
374  int A = pixels[byte_index + 3];
375 
376  // Apply the clip's alpha to the pixel's alpha channel
377  pixels[byte_index + 3] = A * alpha;
378  }
379 
380  // Debug output
381  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
382  }
383 
384  /* RESIZE SOURCE IMAGE - based on scale type */
385  QSize source_size = source_image->size();
386  switch (source_clip->scale)
387  {
388  case (SCALE_FIT):
389  // keep aspect ratio
390  source_size.scale(max_width, max_height, Qt::KeepAspectRatio);
391 
392  // Debug output
393  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
394  break;
395 
396  case (SCALE_STRETCH):
397  // ignore aspect ratio
398  source_size.scale(max_width, max_height, Qt::IgnoreAspectRatio);
399 
400  // Debug output
401  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
402  break;
403 
404  case (SCALE_CROP):
405  QSize width_size(max_width, round(max_width / (float(source_size.width()) / float(source_size.height()))));
406  QSize height_size(round(max_height / (float(source_size.height()) / float(source_size.width()))), max_height);
407 
408  // respect aspect ratio
409  if (width_size.width() >= max_width && width_size.height() >= max_height)
410  source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
411  else
412  source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);
413 
414  // Debug output
415  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height(), "", -1, "", -1, "", -1);
416  break;
417  }
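 // Worked example (illustrative): for a 640x480 source on a 1920x1080 canvas,
 // SCALE_FIT yields 1440x1080 (letterboxed), SCALE_STRETCH yields 1920x1080
 // (aspect ratio ignored), and SCALE_CROP yields 1920x1440 (the canvas is filled
 // and the vertical overflow is cropped).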
418 
419  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
420  float x = 0.0; // left
421  float y = 0.0; // top
422 
423  // Adjust size for scale x and scale y
424  float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
425  float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
426  float scaled_source_width = source_size.width() * sx;
427  float scaled_source_height = source_size.height() * sy;
428 
429  switch (source_clip->gravity)
430  {
431  case (GRAVITY_TOP):
432  x = (max_width - scaled_source_width) / 2.0; // center
433  break;
434  case (GRAVITY_TOP_RIGHT):
435  x = max_width - scaled_source_width; // right
436  break;
437  case (GRAVITY_LEFT):
438  y = (max_height - scaled_source_height) / 2.0; // center
439  break;
440  case (GRAVITY_CENTER):
441  x = (max_width - scaled_source_width) / 2.0; // center
442  y = (max_height - scaled_source_height) / 2.0; // center
443  break;
444  case (GRAVITY_RIGHT):
445  x = max_width - scaled_source_width; // right
446  y = (max_height - scaled_source_height) / 2.0; // center
447  break;
448  case (GRAVITY_BOTTOM_LEFT):
449  y = (max_height - scaled_source_height); // bottom
450  break;
451  case (GRAVITY_BOTTOM):
452  x = (max_width - scaled_source_width) / 2.0; // center
453  y = (max_height - scaled_source_height); // bottom
454  break;
455  case (GRAVITY_BOTTOM_RIGHT):
456  x = max_width - scaled_source_width; // right
457  y = (max_height - scaled_source_height); // bottom
458  break;
459  }
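 // Worked example (illustrative): with a 1920x1080 canvas and a 960x540 scaled
 // source, GRAVITY_CENTER yields x = (1920-960)/2 = 480 and y = (1080-540)/2 = 270,
 // while GRAVITY_BOTTOM_RIGHT yields x = 960, y = 540.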
460 
461  // Debug output
462  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height);
463 
464  /* LOCATION, ROTATION, AND SCALE */
465  float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
466  x += (max_width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
467  y += (max_height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
468  float shear_x = source_clip->shear_x.GetValue(clip_frame_number);
469  float shear_y = source_clip->shear_y.GetValue(clip_frame_number);
470 
471  bool transformed = false;
472  QTransform transform;
473 
474  // Transform source image (if needed)
475  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
476 
477  if (!isEqual(r, 0)) {
478  // ROTATE CLIP
479  float origin_x = x + (scaled_source_width / 2.0);
480  float origin_y = y + (scaled_source_height / 2.0);
481  transform.translate(origin_x, origin_y);
482  transform.rotate(r);
483  transform.translate(-origin_x,-origin_y);
484  transformed = true;
485  }
486 
487  if (!isEqual(x, 0) || !isEqual(y, 0)) {
488  // TRANSLATE/MOVE CLIP
489  transform.translate(x, y);
490  transformed = true;
491  }
492 
493  // SCALE CLIP (if needed)
494  float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
495  float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
496 
497  if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
498  transform.scale(source_width_scale, source_height_scale);
499  transformed = true;
500  }
501 
502  if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
503  // SHEAR HEIGHT/WIDTH
504  transform.shear(shear_x, shear_y);
505  transformed = true;
506  }
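 // Note: the QTransform calls above compose rather than overwrite one another; the
 // translate/rotate/translate sequence is the standard Qt idiom for rotating about
 // the clip's center, and the later translate/scale/shear calls compose with it
 // before the transform is handed to the painter below.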
507 
508  // Debug output
509  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);
510 
511  /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
512  std::shared_ptr<QImage> new_image;
513  #pragma omp critical (T_addLayer)
514  new_image = new_frame->GetImage();
515 
516  // Load timeline's new frame image into a QPainter
517  QPainter painter(new_image.get());
518  painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
519 
520  // Apply transform (translate, rotate, scale)... if any
521  if (transformed)
522  painter.setTransform(transform);
523 
524  // Composite a new layer onto the image
525  painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
526  painter.drawImage(0, 0, *source_image);
527 
528  // Draw frame #'s on top of image (if needed)
529  if (source_clip->display != FRAME_DISPLAY_NONE) {
530  stringstream frame_number_str;
531  switch (source_clip->display)
532  {
533  case (FRAME_DISPLAY_CLIP):
534  frame_number_str << clip_frame_number;
535  break;
536 
537  case (FRAME_DISPLAY_TIMELINE):
538  frame_number_str << timeline_frame_number;
539  break;
540 
541  case (FRAME_DISPLAY_BOTH):
542  frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
543  break;
544  }
545 
546  // Draw frame number on top of image
547  painter.setPen(QColor("#ffffff"));
548  painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
549  }
550 
551  painter.end();
552 
553  // Debug output
554  ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1, "", -1, "", -1);
555 }
556 
557 // Update the list of 'opened' clips
558 void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
559 {
560  ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);
561 
562  // is clip already in list?
563  bool clip_found = open_clips.count(clip);
564 
565  if (clip_found && !does_clip_intersect)
566  {
567  // Remove clip from 'opened' list, because it's closed now
568  open_clips.erase(clip);
569 
570  // Close clip
571  clip->Close();
572  }
573  else if (!clip_found && does_clip_intersect)
574  {
575  // Add clip to 'opened' list, because it's missing
576  open_clips[clip] = clip;
577 
578  // Open the clip
579  clip->Open();
580  }
581 
582  // Debug output
583  ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
584 }
585 
586 // Sort clips by position on the timeline
587 void Timeline::sort_clips()
588 {
589  // Debug output
590  ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);
591 
592  // sort clips
593  clips.sort(CompareClips());
594 }
595 
596 // Sort effects by position on the timeline
597 void Timeline::sort_effects()
598 {
599  // sort effects
600  effects.sort(CompareEffects());
601 }
602 
603 // Close the reader (and any resources it was consuming)
604 void Timeline::Close()
605 {
606  ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
607 
608  // Close all open clips
609  list<Clip*>::iterator clip_itr;
610  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
611  {
612  // Get clip object from the iterator
613  Clip *clip = (*clip_itr);
614 
615  // Open or Close this clip, based on if it's intersecting or not
616  update_open_clips(clip, false);
617  }
618 
619  // Mark timeline as closed
620  is_open = false;
621 
622  // Clear cache
623  final_cache->Clear();
624 }
625 
626 // Open the reader (and start consuming resources)
627 void Timeline::Open()
628 {
629  is_open = true;
630 }
631 
632 // Compare 2 floating point numbers for equality
633 bool Timeline::isEqual(double a, double b)
634 {
635  return fabs(a - b) < 0.000001;
636 }
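// Example (illustrative): isEqual(0.1 + 0.2, 0.3) returns true even though
// 0.1 + 0.2 != 0.3 in IEEE-754 doubles, since the difference is far below the
// 0.000001 tolerance used above.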
637 
638 // Get an openshot::Frame object for a specific frame number of this reader.
639 std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
640 {
641  // Adjust out of bounds frame number
642  if (requested_frame < 1)
643  requested_frame = 1;
644 
645  // Check cache
646  std::shared_ptr<Frame> frame;
647  #pragma omp critical (T_GetFrame)
648  frame = final_cache->GetFrame(requested_frame);
649  if (frame) {
650  // Debug output
651  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);
652 
653  // Return cached frame
654  return frame;
655  }
656  else
657  {
658  // Create a scoped lock, allowing only a single thread to run the following code at one time
659  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
660 
661  // Check for open reader (or throw exception)
662  if (!is_open)
663  throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");
664 
665  // Check cache again (due to locking)
666  #pragma omp critical (T_GetFrame)
667  frame = final_cache->GetFrame(requested_frame);
668  if (frame) {
669  // Debug output
670  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);
671 
672  // Return cached frame
673  return frame;
674  }
675 
676  // Minimum number of frames to process (for performance reasons)
677  int minimum_frames = OPEN_MP_NUM_PROCESSORS;
678 
679  // Get a list of clips that intersect with the requested section of timeline
680  // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
681  vector<Clip*> nearby_clips;
682  #pragma omp critical (T_GetFrame)
683  nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);
684 
685  omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
686  // Allow nested OpenMP sections
687  omp_set_nested(true);
688 
689  // Debug output
690  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);
691 
692  // GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
693  // Determine all clip frames, and request them in order (to keep resampled audio in sequence)
694  for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
695  {
696  // Loop through clips
697  for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
698  {
699  // Get clip object from the iterator
700  Clip *clip = nearby_clips[clip_index];
701  long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
702  long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
703 
704  bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
705  if (does_clip_intersect)
706  {
707  // Get clip frame #
708  long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
709  long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
710  // Cache clip object
711  clip->GetFrame(clip_frame_number);
712  }
713  }
714  }
715 
716  #pragma omp parallel
717  {
718  // Loop through all requested frames
719  #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames)
720  for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
721  {
722  // Debug output
723  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);
724 
725  // Init some basic properties about this frame
726  int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);
727 
728  // Create blank frame (which will become the requested frame)
729  std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, max_width, max_height, "#000000", samples_in_frame, info.channels));
730  #pragma omp critical (T_GetFrame)
731  {
732  new_frame->AddAudioSilence(samples_in_frame);
733  new_frame->SampleRate(info.sample_rate);
734  new_frame->ChannelsLayout(info.channel_layout);
735  }
736 
737  // Debug output
738  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);
739 
740  // Add Background Color to 1st layer (if animated or not black)
741  if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) ||
742  (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
743  new_frame->AddColor(max_width, max_height, color.GetColorHex(frame_number));
744 
745  // Debug output
746  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1, "", -1);
747 
748  // Find Clips near this time
749  for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
750  {
751  // Get clip object from the iterator
752  Clip *clip = nearby_clips[clip_index];
753  long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
754  long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
755 
756  bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
757 
758  // Debug output
759  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1, "", -1);
760 
761  // Clip is visible
762  if (does_clip_intersect)
763  {
764  // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
765  bool is_top_clip = true;
766  for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
767  {
768  Clip *nearby_clip = nearby_clips[top_clip_index];
769  long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
770  long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
771 
772  if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
773  nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
774  nearby_clip_start_position > clip_start_position) {
775  is_top_clip = false;
776  break;
777  }
778  }
779 
780  // Determine the frame needed for this clip (based on the position on the timeline)
781  long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
782  long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
783 
784  // Debug output
785  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number, "", -1, "", -1);
786 
787  // Add clip's frame as layer
788  add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip);
789 
790  } else
791  // Debug output
792  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1, "", -1);
793 
794  } // end clip loop
795 
796  // Debug output
797  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);
798 
799  // Set frame # on mapped frame
800  #pragma omp critical (T_GetFrame)
801  {
802  new_frame->SetFrameNumber(frame_number);
803 
804  // Add final frame to cache
805  final_cache->Add(new_frame);
806  }
807 
808  } // end frame loop
809  } // end parallel
810 
811  // Debug output
812  ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);
813 
814  // Return frame (or blank frame)
815  return final_cache->GetFrame(requested_frame);
816  }
817 }
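// Usage sketch (illustrative only; "my_clip" is a hypothetical, already-configured Clip):
//   Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
//   t.AddClip(&my_clip);
//   t.Open();
//   std::shared_ptr<Frame> f = t.GetFrame(1); // cached frames are returned directly
//   t.Close();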
818 
819 
820 // Find intersecting clips (or non-intersecting clips, when include == false)
821 vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include)
822 {
823  // Find matching clips
824  vector<Clip*> matching_clips;
825 
826  // Calculate the requested frame range
827  float min_requested_frame = requested_frame;
828  float max_requested_frame = requested_frame + (number_of_frames - 1);
829 
830  // Re-Sort Clips (since they likely changed)
831  sort_clips();
832 
833  // Find Clips at this time
834  list<Clip*>::iterator clip_itr;
835  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
836  {
837  // Get clip object from the iterator
838  Clip *clip = (*clip_itr);
839 
840  // Does clip intersect the current requested time
841  long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
842  long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
843 
844  bool does_clip_intersect =
845  (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
846  (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);
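 // (Since min_requested_frame <= max_requested_frame, this condition is equivalent to
 //  clip_start_position <= max_requested_frame && clip_end_position >= min_requested_frame,
 //  i.e. the clip's frame range overlaps the requested range.)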
847 
848  // Debug output
849  ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect, "", -1);
850 
851  // Open (or schedule for closing) this clip, based on if it's intersecting or not
852  #pragma omp critical (reader_lock)
853  update_open_clips(clip, does_clip_intersect);
854 
855  // Clip is visible
856  if (does_clip_intersect && include)
857  // Add the intersecting clip
858  matching_clips.push_back(clip);
859 
860  else if (!does_clip_intersect && !include)
861  // Add the non-intersecting clip
862  matching_clips.push_back(clip);
863 
864  } // end clip loop
865 
866  // return list
867  return matching_clips;
868 }
869 
870 // Set the cache object used by this reader
871 void Timeline::SetCache(CacheBase* new_cache) {
872  // Set new cache
873  final_cache = new_cache;
874 }
875 
876 // Generate JSON string of this object
877 string Timeline::Json() {
878 
879  // Return formatted string
880  return JsonValue().toStyledString();
881 }
882 
883 // Generate Json::JsonValue for this object
884 Json::Value Timeline::JsonValue() {
885 
886  // Create root json object
887  Json::Value root = ReaderBase::JsonValue(); // get parent properties
888  root["type"] = "Timeline";
889  root["viewport_scale"] = viewport_scale.JsonValue();
890  root["viewport_x"] = viewport_x.JsonValue();
891  root["viewport_y"] = viewport_y.JsonValue();
892  root["color"] = color.JsonValue();
893 
894  // Add array of clips
895  root["clips"] = Json::Value(Json::arrayValue);
896 
897  // Loop through all clips
898  list<Clip*>::iterator clip_itr;
899  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
900  {
901  // Get clip object from the iterator
902  Clip *existing_clip = (*clip_itr);
903  root["clips"].append(existing_clip->JsonValue());
904  }
905 
906  // Add array of effects
907  root["effects"] = Json::Value(Json::arrayValue);
908 
909  // loop through effects
910  list<EffectBase*>::iterator effect_itr;
911  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
912  {
913  // Get effect object from the iterator
914  EffectBase *existing_effect = (*effect_itr);
915  root["effects"].append(existing_effect->JsonValue());
916  }
917 
918  // return JsonValue
919  return root;
920 }
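// The resulting JSON has roughly this shape (abbreviated sketch):
//   { "type": "Timeline", "viewport_scale": {...}, "viewport_x": {...},
//     "viewport_y": {...}, "color": {...}, "clips": [...], "effects": [...],
//     plus the properties inherited from ReaderBase (width, height, fps, etc.) }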
921 
922 // Load JSON string into this object
923 void Timeline::SetJson(string value) {
924 
925  // Get lock (prevent getting frames while this happens)
926  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
927 
928  // Parse JSON string into JSON objects
929  Json::Value root;
930  Json::Reader reader;
931  bool success = reader.parse( value, root );
932  if (!success)
933  // Raise exception
934  throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
935 
936  try
937  {
938  // Set all values that match
939  SetJsonValue(root);
940  }
941  catch (const exception& e)
942  {
943  // Error parsing JSON (or missing keys)
944  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
945  }
946 }
947 
948 // Load Json::JsonValue into this object
949 void Timeline::SetJsonValue(Json::Value root) {
950 
951  // Close timeline before we do anything (this also removes all open and closing clips)
952  bool was_open = is_open;
953  Close();
954 
955  // Set parent data
956  ReaderBase::SetJsonValue(root);
957 
958  if (!root["clips"].isNull()) {
959  // Clear existing clips
960  clips.clear();
961 
962  // loop through clips
963  for (int x = 0; x < root["clips"].size(); x++) {
964  // Get each clip
965  Json::Value existing_clip = root["clips"][x];
966 
967  // Create Clip
968  Clip *c = new Clip();
969 
970  // Load Json into Clip
971  c->SetJsonValue(existing_clip);
972 
973  // Add Clip to Timeline
974  AddClip(c);
975  }
976  }
977 
978  if (!root["effects"].isNull()) {
979  // Clear existing effects
980  effects.clear();
981 
982  // loop through effects
983  for (int x = 0; x < root["effects"].size(); x++) {
984  // Get each effect
985  Json::Value existing_effect = root["effects"][x];
986 
987  // Create Effect
988  EffectBase *e = NULL;
989 
990  if (!existing_effect["type"].isNull()) {
991  // Create instance of effect
992  e = EffectInfo().CreateEffect(existing_effect["type"].asString());
993 
994  // Load Json into Effect
995  e->SetJsonValue(existing_effect);
996 
997  // Add Effect to Timeline
998  AddEffect(e);
999  }
1000  }
1001  }
1002 
1003  if (!root["duration"].isNull()) {
1004  // Update duration of timeline
1005  info.duration = root["duration"].asDouble();
1006  final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
1007  }
1008 
1009  // Re-open if needed
1010  if (was_open)
1011  Open();
1012 }
1013 
1014 // Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
1015 void Timeline::ApplyJsonDiff(string value) {
1016 
1017  // Get lock (prevent getting frames while this happens)
1018  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1019 
1020  // Parse JSON string into JSON objects
1021  Json::Value root;
1022  Json::Reader reader;
1023  bool success = reader.parse( value, root );
1024  if (!success || !root.isArray())
1025  // Raise exception
1026  throw InvalidJSON("JSON could not be parsed (or is invalid).", "");
1027 
1028  try
1029  {
1030  // Process the JSON change array, loop through each item
1031  for (int x = 0; x < root.size(); x++) {
1032  // Get each change
1033  Json::Value change = root[x];
1034  string root_key = change["key"][(uint)0].asString();
1035 
1036  // Process each type of change
1037  if (root_key == "clips")
1038  // Apply to CLIPS
1039  apply_json_to_clips(change);
1040 
1041  else if (root_key == "effects")
1042  // Apply to EFFECTS
1043  apply_json_to_effects(change);
1044 
1045  else
1046  // Apply to TIMELINE
1047  apply_json_to_timeline(change);
1048 
1049  }
1050  }
1051  catch (const exception& e)
1052  {
1053  // Error parsing JSON (or missing keys)
1054  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
1055  }
1056 }
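// Example diff payload (illustrative only; "C1" is a hypothetical clip id):
//   [{ "type": "update",
//      "key": ["clips", {"id": "C1"}],
//      "value": { "position": 4.5, "start": 0.0, "end": 10.0 } }]
// The first key element selects the branch: "clips", "effects", or a timeline property.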
1057 
1058 // Apply JSON diff to clips
1059 void Timeline::apply_json_to_clips(Json::Value change) {
1060 
1061  // Get key and type of change
1062  string change_type = change["type"].asString();
1063  string clip_id = "";
1064  Clip *existing_clip = NULL;
1065 
1066  // Find id of clip (if any)
1067  for (int x = 0; x < change["key"].size(); x++) {
1068  // Get each change
1069  Json::Value key_part = change["key"][x];
1070 
1071  if (key_part.isObject()) {
1072  // Check for id
1073  if (!key_part["id"].isNull()) {
1074  // Set the id
1075  clip_id = key_part["id"].asString();
1076 
1077  // Find matching clip in timeline (if any)
1078  list<Clip*>::iterator clip_itr;
1079  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
1080  {
1081  // Get clip object from the iterator
1082  Clip *c = (*clip_itr);
1083  if (c->Id() == clip_id) {
1084  existing_clip = c;
1085  break; // clip found, exit loop
1086  }
1087  }
1088  break; // id found, exit loop
1089  }
1090  }
1091  }
1092 
1093  // Check for a more specific key (targeting this clip's effects)
1094  // For example: ["clips", {"id":123}, "effects", {"id":432}]
1095  if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
1096  {
1097  // This change is actually targeting a specific effect under a clip (and not the clip itself)
1098  Json::Value key_part = change["key"][3];
1099 
1100  if (key_part.isObject()) {
1101  // Check for id
1102  if (!key_part["id"].isNull())
1103  {
1104  // Set the id
1105  string effect_id = key_part["id"].asString();
1106 
1107  // Find matching effect in timeline (if any)
1108  list<EffectBase*> effect_list = existing_clip->Effects();
1109  list<EffectBase*>::iterator effect_itr;
1110  for (effect_itr=effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
1111  {
1112  // Get effect object from the iterator
1113  EffectBase *e = (*effect_itr);
1114  if (e->Id() == effect_id) {
1115  // Apply the change to the effect directly
1116  apply_json_to_effects(change, e);
1117 
1118  // Calculate start and end frames that this impacts, and remove those frames from the cache
1119  int64_t new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1120  int64_t new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1121  final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1122 
1123  return; // effect found, don't update clip
1124  }
1125  }
1126  }
1127  }
1128  }
1129 
1130  // Calculate start and end frames that this impacts, and remove those frames from the cache
1131  if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
1132  int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
1133  int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
1134  final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1135  }
1136 
1137  // Determine type of change operation
1138  if (change_type == "insert") {
1139 
1140  // Create new clip
1141  Clip *clip = new Clip();
1142  clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
1143  AddClip(clip); // Add clip to timeline
1144 
1145  } else if (change_type == "update") {
1146 
1147  // Update existing clip
1148  if (existing_clip) {
1149 
1150  // Calculate start and end frames that this impacts, and remove those frames from the cache
1151  int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1152  int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1153  final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1154 
1155  // Remove cache on clip's Reader (if found)
1156  if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
1157  existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);
1158 
1159  // Update clip properties from JSON
1160  existing_clip->SetJsonValue(change["value"]);
1161 
1162  // Clear any cached image sizes (since size might have changed)
1163  existing_clip->SetMaxSize(0, 0); // force clearing of cached image size
1164  if (existing_clip->Reader()) {
1165  existing_clip->Reader()->SetMaxSize(0, 0);
1166  if (existing_clip->Reader()->Name() == "FrameMapper") {
1167  FrameMapper *nested_reader = (FrameMapper *) existing_clip->Reader();
1168  if (nested_reader->Reader())
1169  nested_reader->Reader()->SetMaxSize(0, 0);
1170  }
1171  }
1172  }
1173 
1174  } else if (change_type == "delete") {
1175 
1176  // Remove existing clip
1177  if (existing_clip) {
1178 
1179  // Calculate start and end frames that this impacts, and remove those frames from the cache
1180  int64_t old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
1181  int64_t old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
1182  final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1183 
1184  // Remove clip from timeline
1185  RemoveClip(existing_clip);
1186  }
1187 
1188  }
1189 
1190 }
1191 
1192 // Apply JSON diff to effects
1193 void Timeline::apply_json_to_effects(Json::Value change) {
1194 
1195  // Get key and type of change
1196  string change_type = change["type"].asString();
1197  EffectBase *existing_effect = NULL;
1198 
1199  // Find id of an effect (if any)
1200  for (int x = 0; x < change["key"].size(); x++) {
1201  // Get each change
1202  Json::Value key_part = change["key"][x];
1203 
1204  if (key_part.isObject()) {
1205  // Check for id
1206  if (!key_part["id"].isNull())
1207  {
1208  // Set the id
1209  string effect_id = key_part["id"].asString();
1210 
1211  // Find matching effect in timeline (if any)
1212  list<EffectBase*>::iterator effect_itr;
1213  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
1214  {
1215  // Get effect object from the iterator
1216  EffectBase *e = (*effect_itr);
1217  if (e->Id() == effect_id) {
1218  existing_effect = e;
1219  break; // effect found, exit loop
1220  }
1221  }
1222  break; // id found, exit loop
1223  }
1224  }
1225  }
1226 
1227  // Now that we found the effect, apply the change to it
1228  if (existing_effect || change_type == "insert")
1229  // Apply change to effect
1230  apply_json_to_effects(change, existing_effect);
1231 }
1232 
1233 // Apply JSON diff to effects (if you already know which effect needs to be updated)
1234 void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) {
1235 
1236  // Get key and type of change
1237  string change_type = change["type"].asString();
1238 
1239  // Calculate start and end frames that this impacts, and remove those frames from the cache
1240  if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
1241  int64_t new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
1242  int64_t new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
1243  final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
1244  }
1245 
1246  // Determine type of change operation
1247  if (change_type == "insert") {
1248 
1249  // Determine type of effect
1250  string effect_type = change["value"]["type"].asString();
1251 
1252  // Create Effect
1253  EffectBase *e = NULL;
1254 
1255  // Init the matching effect object
1256  e = EffectInfo().CreateEffect(effect_type);
1257 
1258  // Load Json into Effect
1259  e->SetJsonValue(change["value"]);
1260 
1261  // Add Effect to Timeline
1262  AddEffect(e);
1263 
1264  } else if (change_type == "update") {
1265 
1266  // Update existing effect
1267  if (existing_effect) {
1268 
1269  // Calculate start and end frames that this impacts, and remove those frames from the cache
1270  int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
1271  int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
1272  final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1273 
1274  // Update effect properties from JSON
1275  existing_effect->SetJsonValue(change["value"]);
1276  }
1277 
1278  } else if (change_type == "delete") {
1279 
1280  // Remove existing effect
1281  if (existing_effect) {
1282 
1283  // Calculate start and end frames that this impacts, and remove those frames from the cache
1284  int64_t old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
1285  int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
1286  final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
1287 
1288  // Remove effect from timeline
1289  RemoveEffect(existing_effect);
1290  }
1291 
1292  }
1293 }
1294 
1295 // Apply JSON diff to timeline properties
1296 void Timeline::apply_json_to_timeline(Json::Value change) {
1297 
1298  // Get key and type of change
1299  string change_type = change["type"].asString();
1300  string root_key = change["key"][(uint)0].asString();
1301  string sub_key = "";
1302  if (change["key"].size() >= 2)
1303  sub_key = change["key"][(uint)1].asString();
1304 
1305  // Clear entire cache
1306  final_cache->Clear();
1307 
1308  // Determine type of change operation
1309  if (change_type == "insert" || change_type == "update") {
1310 
1311  // INSERT / UPDATE
1312  // Check for valid property
1313  if (root_key == "color")
1314  // Set color
1315  color.SetJsonValue(change["value"]);
1316  else if (root_key == "viewport_scale")
1317  // Set viewport scale
1318  viewport_scale.SetJsonValue(change["value"]);
1319  else if (root_key == "viewport_x")
1320  // Set viewport x offset
1321  viewport_x.SetJsonValue(change["value"]);
1322  else if (root_key == "viewport_y")
1323  // Set viewport y offset
1324  viewport_y.SetJsonValue(change["value"]);
1325  else if (root_key == "duration") {
1326  // Update duration of timeline
1327  info.duration = change["value"].asDouble();
1328  final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
1329  }
1330  else if (root_key == "width")
1331  // Set width
1332  info.width = change["value"].asInt();
1333  else if (root_key == "height")
1334  // Set height
1335  info.height = change["value"].asInt();
1336  else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
1337  // Set fps fraction
1338  if (!change["value"]["num"].isNull())
1339  info.fps.num = change["value"]["num"].asInt();
1340  if (!change["value"]["den"].isNull())
1341  info.fps.den = change["value"]["den"].asInt();
1342  }
1343  else if (root_key == "fps" && sub_key == "num")
1344  // Set fps.num
1345  info.fps.num = change["value"].asInt();
1346  else if (root_key == "fps" && sub_key == "den")
1347  // Set fps.den
1348  info.fps.den = change["value"].asInt();
1349  else if (root_key == "sample_rate")
1350  // Set sample rate
1351  info.sample_rate = change["value"].asInt();
1352  else if (root_key == "channels")
1353  // Set channels
1354  info.channels = change["value"].asInt();
1355  else if (root_key == "channel_layout")
1356  // Set channel layout
1357  info.channel_layout = (ChannelLayout) change["value"].asInt();
1358 
1359  else
1360 
1361  // Error parsing JSON (or missing keys)
1362  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1363 
1364 
1365  } else if (change["type"].asString() == "delete") {
1366 
1367  // DELETE / RESET
1368  // Reset the following properties (since we can't delete them)
1369  if (root_key == "color") {
1370  color = Color();
1371  color.red = Keyframe(0.0);
1372  color.green = Keyframe(0.0);
1373  color.blue = Keyframe(0.0);
1374  }
1375  else if (root_key == "viewport_scale")
1376  viewport_scale = Keyframe(1.0);
1377  else if (root_key == "viewport_x")
1378  viewport_x = Keyframe(0.0);
1379  else if (root_key == "viewport_y")
1380  viewport_y = Keyframe(0.0);
1381  else
1382  // Error parsing JSON (or missing keys)
1383  throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());
1384 
1385  }
1386 
1387 }
1388 
1389 // Clear all caches
1390 void Timeline::ClearAllCache() {
1391 
1392  // Get lock (prevent getting frames while this happens)
1393  const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
1394 
1395  // Clear primary cache
1396  final_cache->Clear();
1397 
1398  // Loop through all clips
1399  list<Clip*>::iterator clip_itr;
1400  for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
1401  {
1402  // Get clip object from the iterator
1403  Clip *clip = (*clip_itr);
1404 
1405  // Clear cache on clip (if the reader exposes one)
1406  if (clip->Reader()->GetCache()) clip->Reader()->GetCache()->Clear();
1407 
1408  // Clear nested Reader (if any)
1409  if (clip->Reader()->Name() == "FrameMapper") {
1410  FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
1411  if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
1412  nested_reader->Reader()->GetCache()->Clear();
1413  }
1414 
1415  }
1416 }
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Timeline.cpp:949
int max_height
The maximium image height needed by this clip (used for optimizations)
Definition: ReaderBase.h:103
Display the timeline&#39;s frame number.
Definition: Enums.h:69
void Close()
Close the internal reader.
Definition: Clip.cpp:222
string Json()
Get and Set JSON methods.
Definition: Timeline.cpp:877
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Color.cpp:92
int num
Numerator for the fraction.
Definition: Fraction.h:44
Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:220
ReaderBase * Reader()
Get the current reader.
Definition: FrameMapper.cpp:70
CriticalSection getFrameCriticalSection
Section lock for multiple threads.
Definition: ReaderBase.h:99
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:66
EffectBase * CreateEffect(string effect_type)
Definition: EffectInfo.cpp:42
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:42
Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:46
Keyframe viewport_scale
Curve representing the scale of the viewport (0 to 100)
Definition: Timeline.h:249
Align clip to the bottom right of its parent.
Definition: Enums.h:45
void SetCache(CacheBase *new_cache)
Get the cache object used by this reader.
Definition: Timeline.cpp:871
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: KeyFrame.cpp:321
ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:83
GravityType gravity
The gravity of a clip determines where it snaps to it&#39;s parent.
Definition: Clip.h:151
Keyframe alpha
Curve representing the alpha value (0 - 255)
Definition: Color.h:48
int width
The width of the video (in pixels)
Definition: ReaderBase.h:67
Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:230
virtual std::shared_ptr< Frame > GetFrame(std::shared_ptr< Frame > frame, int64_t frame_number)=0
This method is required for all derived classes of EffectBase, and returns a modified openshot::Frame...
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:41
Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:45
float duration
Length of time (in seconds)
Definition: ReaderBase.h:64
string GetColorHex(int64_t frame_number)
Get the HEX value of a color at a specific frame.
Definition: Color.cpp:64
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Timeline.cpp:884
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
Definition: CacheBase.cpp:46
Scale the clip until both height and width fill the canvas (cropping the overlap) ...
Definition: Enums.h:51
Keyframe viewport_y
Curve representing the y coordinate for the viewport.
Definition: Timeline.h:251
Fraction Reciprocal()
Return the reciprocal as a Fraction.
Definition: Fraction.cpp:81
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:95
int Layer()
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.h:84
#define OPEN_MP_NUM_PROCESSORS
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:234
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:61
Do not display the frame number.
Definition: Enums.h:67
std::shared_ptr< Frame > GetFrame(int64_t requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Definition: Clip.cpp:258
Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:233
Align clip to the top right of its parent.
Definition: Enums.h:39
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: EffectBase.cpp:69
Align clip to the bottom left of its parent.
Definition: Enums.h:43
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Clip.cpp:815
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: KeyFrame.cpp:362
Exception for missing JSON Change key.
Definition: Exceptions.h:182
Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:221
Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:222
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ReaderBase.h:143
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:62
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:109
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:47
virtual std::shared_ptr< Frame > GetFrame(int64_t frame_number)=0
Get a frame from the cache.
Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:243
bool Waveform()
Waveform property.
Definition: Clip.h:215
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ClipBase.h:97
int64_t video_length
The number of frames in the video stream.
Definition: ReaderBase.h:74
ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition: Clip.h:152
int height
The height of the video (in pixels)
Definition: ReaderBase.h:66
Align clip to the bottom center of its parent.
Definition: Enums.h:44
virtual void Remove(int64_t frame_number)=0
Remove a specific frame.
string Id()
Get basic properties.
Definition: ClipBase.h:82
Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:255
void ClearAllCache()
Clear all cache for this timeline instance, and all clips, mappers, and readers under it...
Definition: Timeline.cpp:1390
float Position()
Get position on timeline (in seconds)
Definition: ClipBase.h:83
static CrashHandler * Instance()
void ApplyMapperToClips()
Apply the timeline's frame rate and sample rate to all clips.
Definition: Timeline.cpp:145
void Reader(ReaderBase *new_reader)
Set the current reader.
Definition: Clip.cpp:188
list< EffectBase * > Effects()
Return the list of effects on the timeline.
Definition: Clip.h:178
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
Definition: ZmqLogger.cpp:162
FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:154
std::shared_ptr< Frame > GetFrame(int64_t requested_frame)
Definition: Timeline.cpp:639
This class represents a fraction.
Definition: Fraction.h:42
All cache managers in libopenshot are based on this CacheBase class.
Definition: CacheBase.h:45
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
Definition: Clip.h:256
virtual void Add(std::shared_ptr< Frame > frame)=0
Add a Frame to the cache.
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround...
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:40
void AddClip(Clip *clip)
Add an openshot::Clip to the timeline.
Definition: Timeline.cpp:71
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ReaderBase.cpp:106
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: EffectBase.cpp:109
void Close()
Close the timeline reader (and any resources it was consuming)
Definition: Timeline.cpp:604
int GetInt(int64_t index)
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:248
Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:226
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ReaderBase.cpp:155
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:53
Display the clip's internal frame number.
Definition: Enums.h:68
vector< Point > Points
Vector of all Points.
Definition: KeyFrame.h:92
ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:111
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:244
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:69
Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition: ReaderBase.h:76
Exception for frames that are out of bounds.
Definition: Exceptions.h:202
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:139
void Open()
Open the internal reader.
Definition: Clip.cpp:205
This class represents a color (used on the timeline and clips)
Definition: Color.h:42
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:38
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:41
void Open()
Open the reader (and start consuming resources)
Definition: Timeline.cpp:627
void ApplyJsonDiff(string value)
Apply a special formatted JSON object, which represents a change to the timeline (add, update, delete) This is primarily designed to keep the timeline (and its child objects... such as clips and effects) in sync with another application... such as OpenShot Video Editor (http://www.openshot.org).
Definition: Timeline.cpp:1015
double GetValue(int64_t index)
Get the value at a specific index.
Definition: KeyFrame.cpp:226
Display both the clip's and timeline's frame number.
Definition: Enums.h:70
This namespace is the default namespace for all code in the openshot library.
Do not apply pull-down techniques, just repeat or skip entire frames.
Definition: FrameMapper.h:64
virtual void Clear()=0
Clear the cache of all frames.
void RemoveClip(Clip *clip)
Remove an openshot::Clip from the timeline.
Definition: Timeline.cpp:102
void RemoveEffect(EffectBase *effect)
Remove an effect from the timeline.
Definition: Timeline.cpp:96
Exception for invalid JSON.
Definition: Exceptions.h:152
Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:225
virtual CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
Keyframe viewport_x
Curve representing the x coordinate for the viewport.
Definition: Timeline.h:250
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Color.cpp:129
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:219
Color color
Background color of timeline canvas.
Definition: Timeline.h:254
Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout)
Default Constructor for the timeline (which sets the canvas width and height and FPS) ...
Definition: Timeline.cpp:33
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:45
Align clip to the top center of its parent.
Definition: Enums.h:38
void SetJson(string value)
Load JSON string into this object.
Definition: Timeline.cpp:923
int den
Denominator for the fraction.
Definition: Fraction.h:45
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:82
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Definition: KeyFrame.h:64
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:52
void AddEffect(EffectBase *effect)
Add an effect to the timeline.
Definition: Timeline.cpp:86
int max_width
The maximum image width needed by this clip (used for optimizations)
Definition: ReaderBase.h:102
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number); see the worked example after this listing.
Definition: Frame.cpp:505
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Clip.cpp:735
float Duration()
Get the length of this clip (in seconds)
Definition: ClipBase.h:87
This class is a memory-based cache manager for Frame objects.
Definition: CacheMemory.h:48
float Start()
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:85
double ToDouble()
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:46
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:81
Exception when too many seek attempts happen.
Definition: Exceptions.h:254
void SetTimelineFrameOffset(int64_t offset)
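
As referenced at the GetSamplesPerFrame() entry above, a worked example of the samples-per-frame arithmetic (a sketch of the simple integer-rate case only; the library method also takes the frame number, so rounding error can be distributed across frames at non-integer rates such as 30000/1001):

    #include <cmath>
    #include <cstdio>

    int main() {
        const int sample_rate = 44100;   // audio samples per second
        const double fps = 30.0 / 1.0;   // Fraction(30, 1) as a double

        // Each video frame covers sample_rate / fps audio samples:
        // 44100 / 30 = 1470 samples per frame.
        const long samples_per_frame = std::lround(sample_rate / fps);
        std::printf("%ld samples per frame\n", samples_per_frame);
        return 0;
    }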