/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @ref License
 */

/* LICENSE
 *
 * Copyright (c) 2008-2019 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include "Clip.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#ifdef USE_IMAGEMAGICK
	#include "ImageReader.h"
	#include "TextReader.h"
#endif
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"

using namespace openshot;

// Init default settings for a clip
void Clip::init_settings()
{
	// Init clip settings
	Position(0.0);
	Layer(0);
	Start(0.0);
	End(0.0);
	gravity = GRAVITY_CENTER;
	scale = SCALE_FIT;
	anchor = ANCHOR_CANVAS;
	display = FRAME_DISPLAY_NONE;
	mixing = VOLUME_MIX_NONE;
	waveform = false;
	previous_properties = "";
	parentObjectId = "";

	// Init scale curves
	scale_x = Keyframe(1.0);
	scale_y = Keyframe(1.0);

	// Init location curves
	location_x = Keyframe(0.0);
	location_y = Keyframe(0.0);

	// Init alpha
	alpha = Keyframe(1.0);

	// Init time & volume
	time = Keyframe(1.0);
	volume = Keyframe(1.0);

	// Init audio waveform color
	wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

	// Init shear and perspective curves
	shear_x = Keyframe(0.0);
	shear_y = Keyframe(0.0);
	origin_x = Keyframe(0.5);
	origin_y = Keyframe(0.5);
	perspective_c1_x = Keyframe(-1.0);
	perspective_c1_y = Keyframe(-1.0);
	perspective_c2_x = Keyframe(-1.0);
	perspective_c2_y = Keyframe(-1.0);
	perspective_c3_x = Keyframe(-1.0);
	perspective_c3_y = Keyframe(-1.0);
	perspective_c4_x = Keyframe(-1.0);
	perspective_c4_y = Keyframe(-1.0);

	// Init audio channel filter and mappings
	channel_filter = Keyframe(-1.0);
	channel_mapping = Keyframe(-1.0);

	// Init audio and video overrides
	has_audio = Keyframe(-1.0);
	has_video = Keyframe(-1.0);

	// Initialize the attached object and attached clip as null pointers
	parentTrackedObject = nullptr;
	parentClipObject = NULL;

	// Init reader info struct
	init_reader_settings();
}

// Init reader info details
void Clip::init_reader_settings() {
	if (reader) {
		// Init rotation (if any)
		init_reader_rotation();

		// Initialize info struct
		info = reader->info;
	}
}

// Init reader's rotation (if any)
void Clip::init_reader_rotation() {
	// Don't init rotation if clip has keyframes
	if (rotation.GetCount() > 0)
		return;

	// Init rotation
	if (reader && reader->info.metadata.count("rotate") > 0) {
		// Use reader metadata rotation (if any)
		// This is typical with cell phone videos filmed in different orientations
		try {
			float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
			rotation = Keyframe(rotate_metadata);
		} catch (const std::exception& e) {}
	}
	else
		// Default no rotation
		rotation = Keyframe(0.0);
}
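
// For example (illustrative): a phone video whose container carries the
// metadata entry rotate=90 will initialize this clip's rotation curve to a
// constant 90.0, so the frame displays upright without any user keyframes.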

// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	init_settings();

	// Open and Close the reader (to set the duration of the clip)
	Open();
	Close();

	// Update duration and set parent
	if (reader) {
		End(reader->info.duration);
		reader->ParentClip(this);
		// Init reader info struct
		init_reader_settings();
	}
}

// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	init_settings();

	// Get file extension (and convert to lower case)
	std::string ext = get_file_extension(path);
	std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

	// Determine if this is a common video format
	if (ext=="avi" || ext=="mov" || ext=="mkv" ||  ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
		ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
	{
		try
		{
			// Open common video format
			reader = new openshot::FFmpegReader(path);

		} catch(...) { }
	}
	if (ext=="osp")
	{
		try
		{
			// Open OpenShot project file (loads as a Timeline reader)
			reader = new openshot::Timeline(path, true);

		} catch(...) { }
	}

	// If no video found, try each reader
	if (!reader)
	{
		try
		{
			// Try an image reader
			reader = new openshot::QtImageReader(path);

		} catch(...) {
			try
			{
				// Try a video reader
				reader = new openshot::FFmpegReader(path);

			} catch(...) { }
		}
	}

	// Update duration and set parent
	if (reader) {
		End(reader->info.duration);
		reader->ParentClip(this);
		allocated_reader = reader;
		// Init reader info struct
		init_reader_settings();
	}
}
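
// Example usage (an illustrative sketch; the media path below is hypothetical):
//
//   openshot::Clip c("/home/user/video.mp4"); // reader chosen by file extension
//   c.Open();
//   std::shared_ptr<openshot::Frame> f = c.GetFrame(1);
//   c.Close();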

// Destructor
Clip::~Clip()
{
	// Delete the reader if clip created it
	if (allocated_reader) {
		delete allocated_reader;
		allocated_reader = NULL;
	}

	// Close the resampler
	if (resampler) {
		delete resampler;
		resampler = NULL;
	}
}

// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
	// Search for the tracked object on the timeline
	Timeline* parentTimeline = (Timeline *) ParentTimeline();

	if (parentTimeline) {
		// Create a smart pointer to the tracked object from the timeline
		std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
		Clip* clipObject = parentTimeline->GetClip(object_id);

		// Check for valid tracked object
		if (trackedObject){
			SetAttachedObject(trackedObject);
		}
		else if (clipObject) {
			SetAttachedClip(clipObject);
		}
	}
	return;
}

// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
	parentTrackedObject = trackedObject;
	return;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject){
	parentClipObject = clipObject;
	return;
}

/// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
	// set reader pointer
	reader = new_reader;

	// set parent
	reader->ParentClip(this);

	// Init reader info struct
	init_reader_settings();
}

/// Get the current reader
ReaderBase* Clip::Reader()
{
	if (reader)
		return reader;
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");
}
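
// Example usage (an illustrative sketch; the media path below is hypothetical).
// A reader attached this way is NOT owned by the clip (only readers the clip
// creates itself are tracked in allocated_reader and deleted in ~Clip), so
// the caller must keep it alive for the lifetime of the clip:
//
//   openshot::FFmpegReader r("/home/user/video.mp4");
//   openshot::Clip c;
//   c.Reader(&r);       // attach the externally-owned reader
//   c.Reader()->Open(); // the getter returns the same pointer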

// Open the internal reader
void Clip::Open()
{
	if (reader)
	{
		// Open the reader
		reader->Open();
		is_open = true;

		// Copy Reader info to Clip
		info = reader->info;

		// Set some clip properties from the file reader
		if (end == 0.0)
			End(reader->info.duration);
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");
}

// Close the internal reader
void Clip::Close()
{
	is_open = false;
	if (reader) {
		ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

		// Close the reader
		reader->Close();
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");
}

// Get end position of clip (trim end of video), which can be affected by the time curve.
float Clip::End() const
{
	// if a time curve is present, use its length
	if (time.GetCount() > 1)
	{
		// Determine the FPS of this clip
		float fps = 24.0;
		if (reader)
			// file reader
			fps = reader->info.fps.ToFloat();
		else
			// Throw error if reader not initialized
			throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");

		return float(time.GetLength()) / fps;
	}
	else
		// just use the duration (as detected by the reader)
		return end;
}
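
// Worked example (illustrative): with a reader at 24 fps and a time curve
// whose length is 48 frames, End() returns 48 / 24.0 == 2.0 seconds, i.e.
// the time-mapped length takes precedence over the trimmed `end` value.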

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t frame_number)
{
	// Check for open reader (or throw exception)
	if (!is_open)
		throw ReaderClosed("The Clip is closed.  Call Open() before calling this method.");

	if (reader)
	{
		// Adjust out of bounds frame number
		frame_number = adjust_frame_number_minimum(frame_number);

		// Get the original frame and pass it to GetFrame overload
		std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
		return GetFrame(original_frame, frame_number, NULL);
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");
}

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number)
{
	// Check for open reader (or throw exception)
	if (!is_open)
		throw ReaderClosed("The Clip is closed.  Call Open() before calling this method.");

	if (reader)
	{
		// Adjust out of bounds frame number
		frame_number = adjust_frame_number_minimum(frame_number);

		// Get the original frame and pass it to GetFrame overload
		std::shared_ptr<Frame> original_frame = GetOrCreateFrame(frame_number);
		return GetFrame(original_frame, frame_number, NULL);
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number, openshot::TimelineInfoStruct* options)
{
	// Check for open reader (or throw exception)
	if (!is_open)
		throw ReaderClosed("The Clip is closed.  Call Open() before calling this method.");

	if (reader)
	{
		// Adjust out of bounds frame number
		frame_number = adjust_frame_number_minimum(frame_number);

		// Is a time map detected?
		int64_t new_frame_number = frame_number;
		int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(frame_number));
		if (time.GetLength() > 1)
			new_frame_number = time_mapped_number;

		// Now that we have re-mapped what frame number is needed, go and get the frame pointer
		std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);

		// Get time mapped frame number (used to increase speed, change direction, etc...)
		// TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set)
		get_time_mapped_frame(original_frame, new_frame_number);

		// Apply local effects to the frame (if any)
		apply_effects(original_frame);

		// Apply global timeline effects (i.e. transitions & masks... if any)
		if (timeline != NULL && options != NULL) {
			if (options->is_top_clip) {
				// Apply global timeline effects (only to top clip... if overlapping, pass in timeline frame number)
				Timeline* timeline_instance = (Timeline*) timeline;
				original_frame = timeline_instance->apply_effects(original_frame, background_frame->number, Layer());
			}
		}

		// Apply keyframe / transforms
		apply_keyframes(original_frame, background_frame->GetImage());

		// Return processed 'frame'
		return original_frame;
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");
}

// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
	// Find the matching effect (if any)
	for (const auto& effect : effects) {
		if (effect->Id() == id) {
			return effect;
		}
	}
	return nullptr;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
	// return last part of path
	return path.substr(path.find_last_of(".") + 1);
}

// Reverse an audio buffer
void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
{
	int number_of_samples = buffer->getNumSamples();
	int channels = buffer->getNumChannels();

	// Reverse array (create new buffer to hold the reversed version)
	juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
	reversed->clear();

	for (int channel = 0; channel < channels; channel++)
	{
		int n=0;
		for (int s = number_of_samples - 1; s >= 0; s--, n++)
			reversed->getWritePointer(channel)[n] = buffer->getReadPointer(channel)[s];
	}

	// Copy the samples back to the original array
	buffer->clear();
	// Loop through channels, and get audio samples
	for (int channel = 0; channel < channels; channel++)
		// Get the audio samples for this channel
		buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

	delete reversed;
	reversed = NULL;
}

// Adjust the audio and image of a time mapped frame
void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.");

	// Check for a valid time map curve
	if (time.GetLength() > 1)
	{
		const GenericScopedLock<juce::CriticalSection> lock(getFrameCriticalSection);

		// create buffer and resampler
		juce::AudioSampleBuffer *samples = NULL;
		if (!resampler)
			resampler = new AudioResampler();

		// Get new frame number
		int new_frame_number = frame->number;

		// Get delta (difference in previous Y value)
		int delta = int(round(time.GetDelta(frame_number)));

		// Init audio vars
		int channels = reader->info.channels;
		int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

		// Only resample audio if needed
		if (reader->info.has_audio) {
			// Determine if we are speeding up or slowing down
			if (time.GetRepeatFraction(frame_number).den > 1) {
				// SLOWING DOWN AUDIO
				// Resample data, and return new buffer pointer
				juce::AudioSampleBuffer *resampled_buffer = NULL;

				// SLOW DOWN audio (split audio)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
									 number_of_samples, 1.0f);

				// Reverse the samples (if needed)
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Resample audio to be X times slower (where X is the denominator of the repeat fraction)
				resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

				// Resample the data (since it's the 1st slice)
				resampled_buffer = resampler->GetResampledBuffer();

				// Just take the samples we need for the requested frame
				int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
				if (start > 0)
					start -= 1;
				for (int channel = 0; channel < channels; channel++)
					// Add new (slower) samples, to the frame object
					frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
										number_of_samples, 1.0f);

				// Clean up (the resampler owns this buffer, so only the pointer is dropped)
				resampled_buffer = NULL;

			}
			else if (abs(delta) > 1 && abs(delta) < 100) {
				int start = 0;
				if (delta > 0) {
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}
				else {
					// SPEED UP IN REVERSE (multiple frames of audio), as long as it's not more than X frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}

				// Resample audio to be X times faster (where X is the ratio of collected samples to the frame's sample count)
				resampler->SetBuffer(samples, float(start) / float(number_of_samples));

				// Resample data, and return new buffer pointer
				juce::AudioSampleBuffer *buffer = resampler->GetResampledBuffer();

				// Add the newly resized audio samples to the current frame
				for (int channel = 0; channel < channels; channel++)
					// Add new (faster) samples, to the frame object
					frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

				// Clean up (the resampler owns this buffer, so only the pointer is dropped)
				buffer = NULL;
			}
			else {
				// Use the samples on this frame (but maybe reverse them if needed)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

				// reverse the samples
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Add reversed samples to the frame object
				for (int channel = 0; channel < channels; channel++)
					frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
			}

			delete samples;
			samples = NULL;
		}
	}
}

// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
	// Never return a frame number 0 or below
	if (frame_number < 1)
		return 1;
	else
		return frame_number;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
{
	try {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number);

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		auto reader_frame = reader->GetFrame(number);

		// Return real frame
		if (reader_frame) {
			// Create a new copy of reader frame
			// This allows a clip to modify the pixels and audio of this frame without
			// changing the underlying reader's frame data
			auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
			reader_copy->SampleRate(reader_frame->SampleRate());
			reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
			return reader_copy;
		}

	} catch (const ReaderClosed & e) {
		// ...
	} catch (const OutOfBoundsFrame & e) {
		// ...
	}

	// Estimate # of samples needed for this frame
	int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "estimated_samples_in_frame", estimated_samples_in_frame);

	// Create blank frame
	auto new_frame = std::make_shared<Frame>(
		number, reader->info.width, reader->info.height,
		"#000000", estimated_samples_in_frame, reader->info.channels);
	new_frame->SampleRate(reader->info.sample_rate);
	new_frame->ChannelsLayout(reader->info.channel_layout);
	new_frame->AddAudioSilence(estimated_samples_in_frame);
	return new_frame;
}

// Generate JSON string of this object
std::string Clip::Json() const {

	// Return formatted string
	return JsonValue().toStyledString();
}

// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

	// Generate JSON properties list
	Json::Value root;
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
	root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
	root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
	root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
	if (!parentObjectId.empty()) {
		root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);
	} else {
		root["parentObjectId"] = add_property_json("Parent", 0.0, "string", "", NULL, -1, -1, false, requested_frame);
	}
	// Add gravity choices (dropdown style)
	root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

	// Add scale choices (dropdown style)
	root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
	root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
	root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
	root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

	// Add frame number display choices (dropdown style)
	root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
	root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
	root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
	root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

	// Add volume mixing choices (dropdown style)
	root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

	// Add waveform choices (dropdown style)
	root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
	root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

	// Add the parentTrackedObject's properties
	if (parentTrackedObject)
	{
		// Convert Clip's frame position to Timeline's frame position
		long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
		long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
		double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

		// Get attached object's parent clip properties
		std::map< std::string, float > trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
		double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
		// Get attached object properties
		std::map< std::string, float > trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

		// Correct the parent Tracked Object properties by the clip's reference system
		float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
		float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
		float parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
		float parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
		float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];

		// Add the parent Tracked Object properties to JSON
		root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
		root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
		root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
		root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
		root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
		root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
		root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	}
	// Add the parentClipObject's properties
	else if (parentClipObject)
	{
		// Convert Clip's frame position to Timeline's frame position
		long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
		long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
		double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

		// Correct the parent Clip Object properties by the clip's reference system
		float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
		float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
		float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
		float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
		float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
		float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
		float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

		// Add the parent Clip Object properties to JSON
		root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
		root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
		root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
		root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
		root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
		root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
		root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	}
	else
	{
		// Add this clip's own properties to JSON
		root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
		root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
		root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
		root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
		root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
		root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
		root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	}

	// Keyframes
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
	root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
	root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

	// Add enable audio/video choices (dropdown style)
	root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
	root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
	root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);

	// Return formatted string
	return root.toStyledString();
}

// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

	// Create root json object
	Json::Value root = ClipBase::JsonValue(); // get parent properties
	root["parentObjectId"] = parentObjectId;
	root["gravity"] = gravity;
	root["scale"] = scale;
	root["anchor"] = anchor;
	root["display"] = display;
	root["mixing"] = mixing;
	root["waveform"] = waveform;
	root["scale_x"] = scale_x.JsonValue();
	root["scale_y"] = scale_y.JsonValue();
	root["location_x"] = location_x.JsonValue();
	root["location_y"] = location_y.JsonValue();
	root["alpha"] = alpha.JsonValue();
	root["rotation"] = rotation.JsonValue();
	root["time"] = time.JsonValue();
	root["volume"] = volume.JsonValue();
	root["wave_color"] = wave_color.JsonValue();
	root["shear_x"] = shear_x.JsonValue();
	root["shear_y"] = shear_y.JsonValue();
	root["origin_x"] = origin_x.JsonValue();
	root["origin_y"] = origin_y.JsonValue();
	root["channel_filter"] = channel_filter.JsonValue();
	root["channel_mapping"] = channel_mapping.JsonValue();
	root["has_audio"] = has_audio.JsonValue();
	root["has_video"] = has_video.JsonValue();
	root["perspective_c1_x"] = perspective_c1_x.JsonValue();
	root["perspective_c1_y"] = perspective_c1_y.JsonValue();
	root["perspective_c2_x"] = perspective_c2_x.JsonValue();
	root["perspective_c2_y"] = perspective_c2_y.JsonValue();
	root["perspective_c3_x"] = perspective_c3_x.JsonValue();
	root["perspective_c3_y"] = perspective_c3_y.JsonValue();
	root["perspective_c4_x"] = perspective_c4_x.JsonValue();
	root["perspective_c4_y"] = perspective_c4_y.JsonValue();

	// Add array of effects
	root["effects"] = Json::Value(Json::arrayValue);

	// loop through effects
	for (auto existing_effect : effects)
	{
		root["effects"].append(existing_effect->JsonValue());
	}

	if (reader)
		root["reader"] = reader->JsonValue();
	else
		root["reader"] = Json::Value(Json::objectValue);

	// return JsonValue
	return root;
}

// Load JSON string into this object
void Clip::SetJson(const std::string value) {

	// Parse JSON string into JSON objects
	try
	{
		const Json::Value root = openshot::stringToJson(value);
		// Set all values that match
		SetJsonValue(root);
	}
	catch (const std::exception& e)
	{
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
	}
}
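
// Example round-trip (an illustrative sketch):
//
//   openshot::Clip c;
//   std::string json = c.Json(); // serialize the clip's current state
//   c.SetJson(json);             // parse it back (throws InvalidJSON on bad input)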

// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

	// Set parent data
	ClipBase::SetJsonValue(root);

	// Set data from Json (if key is found)
	if (!root["parentObjectId"].isNull()){
		parentObjectId = root["parentObjectId"].asString();
		if (!parentObjectId.empty()){
			AttachToObject(parentObjectId);
		} else{
			parentTrackedObject = nullptr;
			parentClipObject = NULL;
		}
	}
	if (!root["gravity"].isNull())
		gravity = (GravityType) root["gravity"].asInt();
	if (!root["scale"].isNull())
		scale = (ScaleType) root["scale"].asInt();
	if (!root["anchor"].isNull())
		anchor = (AnchorType) root["anchor"].asInt();
	if (!root["display"].isNull())
		display = (FrameDisplayType) root["display"].asInt();
	if (!root["mixing"].isNull())
		mixing = (VolumeMixType) root["mixing"].asInt();
	if (!root["waveform"].isNull())
		waveform = root["waveform"].asBool();
	if (!root["scale_x"].isNull())
		scale_x.SetJsonValue(root["scale_x"]);
	if (!root["scale_y"].isNull())
		scale_y.SetJsonValue(root["scale_y"]);
	if (!root["location_x"].isNull())
		location_x.SetJsonValue(root["location_x"]);
	if (!root["location_y"].isNull())
		location_y.SetJsonValue(root["location_y"]);
	if (!root["alpha"].isNull())
		alpha.SetJsonValue(root["alpha"]);
	if (!root["rotation"].isNull())
		rotation.SetJsonValue(root["rotation"]);
	if (!root["time"].isNull())
		time.SetJsonValue(root["time"]);
	if (!root["volume"].isNull())
		volume.SetJsonValue(root["volume"]);
	if (!root["wave_color"].isNull())
		wave_color.SetJsonValue(root["wave_color"]);
	if (!root["shear_x"].isNull())
		shear_x.SetJsonValue(root["shear_x"]);
	if (!root["shear_y"].isNull())
		shear_y.SetJsonValue(root["shear_y"]);
	if (!root["origin_x"].isNull())
		origin_x.SetJsonValue(root["origin_x"]);
	if (!root["origin_y"].isNull())
		origin_y.SetJsonValue(root["origin_y"]);
	if (!root["channel_filter"].isNull())
		channel_filter.SetJsonValue(root["channel_filter"]);
	if (!root["channel_mapping"].isNull())
		channel_mapping.SetJsonValue(root["channel_mapping"]);
	if (!root["has_audio"].isNull())
		has_audio.SetJsonValue(root["has_audio"]);
	if (!root["has_video"].isNull())
		has_video.SetJsonValue(root["has_video"]);
	if (!root["perspective_c1_x"].isNull())
		perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
	if (!root["perspective_c1_y"].isNull())
		perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
	if (!root["perspective_c2_x"].isNull())
		perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
	if (!root["perspective_c2_y"].isNull())
		perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
	if (!root["perspective_c3_x"].isNull())
		perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
	if (!root["perspective_c3_y"].isNull())
		perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
	if (!root["perspective_c4_x"].isNull())
		perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
	if (!root["perspective_c4_y"].isNull())
		perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
	if (!root["effects"].isNull()) {

		// Clear existing effects
		effects.clear();

		// loop through effects
		for (const auto& existing_effect : root["effects"]) {
			// Create Effect
			EffectBase *e = NULL;
			if (!existing_effect["type"].isNull()) {

				// Create instance of effect
				if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

					// Load Json into Effect
					e->SetJsonValue(existing_effect);

					// Add Effect to this clip
					AddEffect(e);
				}
			}
		}
	}
	if (!root["reader"].isNull()) // does Json contain a reader?
	{
		if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
		{
			// Close previous reader (if any)
			bool already_open = false;
			if (reader)
			{
				// Track if reader was open
				already_open = reader->IsOpen();

				// Close and delete existing reader (if any)
				reader->Close();
				delete reader;
				reader = NULL;
			}

			// Create new reader (and load properties)
			std::string type = root["reader"]["type"].asString();

			if (type == "FFmpegReader") {

				// Create new reader
				reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
				reader->SetJsonValue(root["reader"]);

			} else if (type == "QtImageReader") {

				// Create new reader
				reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
				reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
			} else if (type == "ImageReader") {

				// Create new reader
				reader = new ImageReader(root["reader"]["path"].asString(), false);
				reader->SetJsonValue(root["reader"]);

			} else if (type == "TextReader") {

				// Create new reader
				reader = new TextReader();
				reader->SetJsonValue(root["reader"]);
#endif

			} else if (type == "ChunkReader") {

				// Create new reader
				reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
				reader->SetJsonValue(root["reader"]);

			} else if (type == "DummyReader") {

				// Create new reader
				reader = new openshot::DummyReader();
				reader->SetJsonValue(root["reader"]);

			} else if (type == "Timeline") {

				// Create new reader (always load from file again)
				// This prevents FrameMappers from being loaded by accident
				reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
			}

			// mark as managed reader and set parent
			if (reader) {
				reader->ParentClip(this);
				allocated_reader = reader;
			}

			// Re-Open reader (if needed)
			if (already_open)
				reader->Open();
		}
	}
}

// Sort effects by order
void Clip::sort_effects()
{
	// sort effects
	effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
	// Set parent clip pointer
	effect->ParentClip(this);

	// Add effect to list
	effects.push_back(effect);

	// Sort effects
	sort_effects();

	// Get the parent timeline of this clip
	Timeline* parentTimeline = (Timeline *) ParentTimeline();

	if (parentTimeline)
		effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
	// Add Tracked Object to Timeline
	if (effect->info.has_tracked_object){

		// Check if this clip has a parent timeline
		if (parentTimeline){

			effect->ParentTimeline(parentTimeline);

			// Iterate through effect's vector of Tracked Objects
			for (auto const& trackedObject : effect->trackedObjects){

				// Cast the Tracked Object as TrackedObjectBBox
				std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

				// Set the Tracked Object's parent clip to this
				trackedObjectBBox->ParentClip(this);

				// Add the Tracked Object to the timeline
				parentTimeline->AddTrackedObject(trackedObjectBBox);
			}
		}
	}
#endif

	// Clear cache
	cache.Clear();
}
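
// Example usage (an illustrative sketch; the effect name passed to
// EffectInfo::CreateEffect is an assumption, and depends on which effects
// are compiled into the library):
//
//   openshot::EffectBase* e = openshot::EffectInfo().CreateEffect("Negate");
//   if (e)
//       clip.AddEffect(e); // parented to the clip and kept sorted by order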

// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
	effects.remove(effect);
}

// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame)
{
	// Find Effects at this position and layer
	for (auto effect : effects)
	{
		// Apply the effect to this frame
		frame = effect->GetFrame(frame, frame->number);

	} // end effect loop
}

// Compare 2 floating point numbers for equality
bool Clip::isEqual(double a, double b)
{
	return fabs(a - b) < 0.000001;
}

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
	// Skip if video is disabled, or if this is an audio-only frame with no waveform visualisation in use
	if (has_video.GetInt(frame->number) == 0 ||
		(!Waveform() && !Reader()->info.has_video))
		// Skip the rest of the image processing for performance reasons
		return;

	// Get image from clip
	std::shared_ptr<QImage> source_image = frame->GetImage();

	/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
	if (Waveform())
	{
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());

		// Get the color of the waveform
		int red = wave_color.red.GetInt(frame->number);
		int green = wave_color.green.GetInt(frame->number);
		int blue = wave_color.blue.GetInt(frame->number);
		int alpha = wave_color.alpha.GetInt(frame->number);

		// Generate Waveform Dynamically (the size of the timeline)
		source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
	}

	// Size of final image
	int width = background_canvas->width();
	int height = background_canvas->height();

	// Get transform from clip's keyframes
	QTransform transform = get_transform(frame, width, height);

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);

	// Load timeline's new frame image into a QPainter
	QPainter painter(background_canvas.get());
	painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

	// Apply transform (translate, rotate, scale)
	painter.setTransform(transform);

	// Composite a new layer onto the image
	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	painter.drawImage(0, 0, *source_image);

	if (timeline) {
		Timeline *t = (Timeline *) timeline;

		// Draw frame #'s on top of image (if needed)
		if (display != FRAME_DISPLAY_NONE) {
			std::stringstream frame_number_str;
			switch (display) {
				case (FRAME_DISPLAY_NONE):
					// This is only here to prevent unused-enum warnings
					break;

				case (FRAME_DISPLAY_CLIP):
					frame_number_str << frame->number;
					break;

				case (FRAME_DISPLAY_TIMELINE):
					frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
					break;

				case (FRAME_DISPLAY_BOTH):
					frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
					break;
			}

			// Draw frame number on top of image
			painter.setPen(QColor("#ffffff"));
			painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
		}
	}
	painter.end();

	// Add new QImage to frame
	frame->AddImage(background_canvas);
}
1321 
1322 // Apply keyframes to the source frame (if any)
get_transform(std::shared_ptr<Frame> frame,int width,int height)1323 QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
1324 {
1325     // Get image from clip
1326     std::shared_ptr<QImage> source_image = frame->GetImage();
1327 
1328 	/* ALPHA & OPACITY */
1329 	if (alpha.GetValue(frame->number) != 1.0)
1330 	{
1331 		float alpha_value = alpha.GetValue(frame->number);
1332 
1333 		// Get source image's pixels
1334 		unsigned char *pixels = source_image->bits();
1335 
1336 		// Loop through pixels
1337 		for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
1338 		{
1339 			// Apply alpha to pixel values (since we use a premultiplied value, we must
1340 			// multiply the alpha with all colors).
1341 			pixels[byte_index + 0] *= alpha_value;
1342 			pixels[byte_index + 1] *= alpha_value;
1343 			pixels[byte_index + 2] *= alpha_value;
1344 			pixels[byte_index + 3] *= alpha_value;
1345 		}
1346 
1347 		// Debug output
1348 		ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
1349 	}

	/* RESIZE SOURCE IMAGE - based on scale type */
	QSize source_size = source_image->size();

	// Apply stretch scale to correctly fit the bounding-box
	if (parentTrackedObject) {
		scale = SCALE_STRETCH;
	}

	switch (scale)
	{
		case (SCALE_FIT): {
			source_size.scale(width, height, Qt::KeepAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
			break;
		}
		case (SCALE_STRETCH): {
			source_size.scale(width, height, Qt::IgnoreAspectRatio);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
			break;
		}
		case (SCALE_CROP): {
			source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
			break;
		}
		case (SCALE_NONE): {
			// The image was already sized relative to the preview window
			// (i.e. the timeline / preview ratio) when it was read, so no
			// further scaling is needed here.
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
			break;
		}
	}
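
	// Worked example (illustrative sizes): scaling a 640x480 source into a
	// 1920x1080 canvas gives 1440x1080 for SCALE_FIT (letterboxed),
	// 1920x1080 for SCALE_STRETCH (aspect ratio ignored), and 1920x1440 for
	// SCALE_CROP (overflows vertically and is cropped by the canvas).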

	// Initialize parent object's properties (Clip or Tracked Object)
	float parentObject_location_x = 0.0;
	float parentObject_location_y = 0.0;
	float parentObject_scale_x = 1.0;
	float parentObject_scale_y = 1.0;
	float parentObject_shear_x = 0.0;
	float parentObject_shear_y = 0.0;
	float parentObject_rotation = 0.0;

	// Get the parentClipObject properties
	if (parentClipObject) {

		// Convert this clip's frame number to the timeline's frame number
		long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
		long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
		double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

		// Get parent object's properties (Clip)
		parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
		parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
		parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
		parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
		parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
		parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
		parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
	}
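
	// Worked example (illustrative values): with Position() = 2.0s,
	// Start() = 0.5s, and 30 fps, clip frame 1 maps to timeline frame
	// 1 + (round(2.0 * 30) + 1) - ((0.5 * 30) + 1) = 1 + 61 - 16 = 46.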

	// Get the parentTrackedObject properties
	if (parentTrackedObject) {

		// Convert this clip's frame number to the timeline's frame number
		long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
		long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
		double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;

		// Get parentTrackedObject's parent clip's properties
		std::map<std::string, float> trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);

		if (!trackedObjectParentClipProperties.empty())
		{
			// The tracked object is attached to a clip: look up the tracked
			// box at the parent clip's own frame number
			float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];

			// Access the parentTrackedObject's properties
			std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

			// Get the Tracked Object's properties and correct them by the clip's reference system
			parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
			parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
			parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
			parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
			parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
		}
		else
		{
			// No parent clip: look up the tracked box at this timeline frame number
			std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);

			// Get the Tracked Object's properties and correct them by the clip's reference system
			parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
			parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
			parentObject_scale_x = trackedObjectProperties["w"] * trackedObjectProperties["sx"];
			parentObject_scale_y = trackedObjectProperties["h"] * trackedObjectProperties["sy"];
			parentObject_rotation = trackedObjectProperties["r"];
		}
	}
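
	// Note: cx/cy hold the tracked box center in normalized [0, 1] image
	// coordinates, so subtracting 0.5 converts them into offsets from the
	// canvas center -- the same convention used by location_x / location_y.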

	/* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
	float x = 0.0; // left
	float y = 0.0; // top

	// Adjust size for scale x and scale y
	float sx = scale_x.GetValue(frame->number); // percentage X scale
	float sy = scale_y.GetValue(frame->number); // percentage Y scale

	// Multiply the clip's scale by the parent object's scale (skip degenerate 0.0 values)
	if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
		sx *= parentObject_scale_x;
		sy *= parentObject_scale_y;
	}
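	// (The parent scale defaults to 1.0 above, so this multiplication is a
	// no-op when no parent clip or tracked object is attached.)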

	float scaled_source_width = source_size.width() * sx;
	float scaled_source_height = source_size.height() * sy;

	switch (gravity)
	{
		case (GRAVITY_TOP_LEFT):
			// This is only here to prevent unused-enum warnings
			break;
		case (GRAVITY_TOP):
			x = (width - scaled_source_width) / 2.0; // center
			break;
		case (GRAVITY_TOP_RIGHT):
			x = width - scaled_source_width; // right
			break;
		case (GRAVITY_LEFT):
			y = (height - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_CENTER):
			x = (width - scaled_source_width) / 2.0; // center
			y = (height - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_RIGHT):
			x = width - scaled_source_width; // right
			y = (height - scaled_source_height) / 2.0; // center
			break;
		case (GRAVITY_BOTTOM_LEFT):
			y = (height - scaled_source_height); // bottom
			break;
		case (GRAVITY_BOTTOM):
			x = (width - scaled_source_width) / 2.0; // center
			y = (height - scaled_source_height); // bottom
			break;
		case (GRAVITY_BOTTOM_RIGHT):
			x = width - scaled_source_width; // right
			y = (height - scaled_source_height); // bottom
			break;
	}
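
	// Worked example (illustrative sizes): a 960x540 scaled source on a
	// 1920x1080 canvas yields x = 480, y = 270 for GRAVITY_CENTER, and
	// x = 960, y = 540 for GRAVITY_BOTTOM_RIGHT.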

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);

	QTransform transform;

	/* LOCATION, ROTATION, AND SCALE */
	float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
	x += (width * (location_x.GetValue(frame->number) + parentObject_location_x)); // move in percentage of final width
	y += (height * (location_y.GetValue(frame->number) + parentObject_location_y)); // move in percentage of final height
	float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
	float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
	float origin_x_value = origin_x.GetValue(frame->number);
	float origin_y_value = origin_y.GetValue(frame->number);

	// Transform source image (if needed)
	ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

	if (!isEqual(x, 0) || !isEqual(y, 0)) {
		// TRANSLATE/MOVE CLIP
		transform.translate(x, y);
	}
	if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
		// ROTATE CLIP (around origin_x, origin_y)
		float origin_x_offset = (scaled_source_width * origin_x_value);
		float origin_y_offset = (scaled_source_height * origin_y_value);
		transform.translate(origin_x_offset, origin_y_offset);
		transform.rotate(r);
		transform.shear(shear_x_value, shear_y_value);
		transform.translate(-origin_x_offset, -origin_y_offset);
	}
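	// Note: QTransform applies the most recently appended operation to source
	// points first. Pixels are therefore shifted so (origin_x, origin_y) sits
	// at the origin, sheared and rotated there, shifted back, and finally
	// translated to (x, y) -- i.e. the rotation pivots around the origin point.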
	// SCALE CLIP (if needed)
	float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
	float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
	if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
		transform.scale(source_width_scale, source_height_scale);
	}
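
	// Worked example (illustrative sizes): a 1280x960 image laid out at
	// 1440x1080 by SCALE_FIT with sx = sy = 1.0 gives a scale factor of
	// 1440 / 1280 = 1080 / 960 = 1.125 on both axes. Since scale() is
	// appended last, pixels are scaled before the rotation and translation
	// above, which is why the origin offsets use the scaled dimensions.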

	return transform;
}