# -*- coding: utf-8 -*-
# Pitivi video editor
# Copyright (c) 2011, Benjamin M. Schwartz <bens@alum.mit.edu>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301, USA.
# TODO reimplement after GES port
20"""Automatic alignment of `Clip`s."""
21import array
22import os
23import time

from gi.repository import GLib
from gi.repository import Gst
from gi.repository import Gtk


try:
    import numpy
except ImportError:
    numpy = None

from gettext import gettext as _

import pitivi.configure as configure

from pitivi.utils.ui import beautify_ETA
from pitivi.utils.misc import call_false
from pitivi.utils.extract import Extractee, RandomAccessAudioExtractor
from pitivi.utils.loggable import Loggable


def nextpow2(x):
    """Return the smallest power of 2 that is >= x."""
    a = 1
    while a < x:
        a *= 2
    return a


def submax(left, middle, right):
    """
    Find the maximum of a quadratic function from three samples.

    Given samples from a quadratic P(x) at x=-1, 0, and 1, find the x
    that extremizes P.  This is useful for determining the subsample
    position of the extremum given three samples around the observed
    extreme.

    @param left: value at x=-1
    @type left: L{float}
    @param middle: value at x=0
    @type middle: L{float}
    @param right: value at x=1
    @type right: L{float}
    @returns: value of x that extremizes the interpolating quadratic
    @rtype: L{float}
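
    For example (values chosen purely for illustration): sampling
    P(x) = -(x - 0.25)**2 at x = -1, 0, 1 gives -1.5625, -0.0625 and
    -0.5625, and submax(-1.5625, -0.0625, -0.5625) returns 0.25, the
    location of the true maximum.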

    """
    L = middle - left   # L and R are both positive if middle is the
    R = middle - right  # observed max of the integer samples
    return 0.5 * (L - R) / (L + R)
    # Derivation: Consider a quadratic q(x) := P(0) - P(x).  Then q(x) has
    # two roots, one at 0 and one at z, and the extreme is at (0+z)/2
    # (i.e. at z/2)
    # q(x) = b*x*(x-z)  # b may be positive or negative
    # q(1) = b*(1 - z) = R
    # q(-1) = b*(1 + z) = L
    # (1+z)/(1-z) = L/R  (from here it's just algebra to find z)
    # z + 1 = L/R - (L/R)*z
    # z*(1 + L/R) = L/R - 1
    # z = (L/R - 1)/(L/R + 1) = (L-R)/(L+R)


def rigidalign(reference, targets):
    """
    Estimate the relative shift between reference and targets.

    The algorithm works by subtracting the mean, and then locating
    the maximum of the cross-correlation.  For inputs of length M{N},
    the running time is M{O(C{len(targets)}*N*log(N))}.

    @param reference: the waveform to regard as fixed
    @type reference: Sequence(Number)
    @param targets: the waveforms that should be aligned to reference
    @type targets: Sequence(Sequence(Number))
    @returns: The shift necessary to bring each target into alignment
        with the reference.  The returned shift may not be an integer,
        indicating that the best alignment would be achieved by a
        non-integer shift and appropriate interpolation.
    @rtype: Sequence(Number)
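
    Illustrative usage (hypothetical data; the exact result may include a
    small subsample correction)::

        ref = numpy.array([0., 0., 1., 2., 1., 0., 0., 0.])
        tgt = numpy.array([0., 0., 0., 0., 1., 2., 1., 0.])  # ref delayed by 2
        shifts = rigidalign(ref, [tgt])  # shifts[0] is approximately -2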

    """
    # L is the maximum size of a cross-correlation between the
    # reference and any of the targets.
    L = len(reference) + max(len(t) for t in targets) - 1
    # We round up L to the next power of 2 for speed in the FFT.
    L = nextpow2(L)
    reference = reference - numpy.mean(reference)
    fref = numpy.fft.rfft(reference, L).conj()
    shifts = []
    for t in targets:
        t = t - numpy.mean(t)
        # Compute cross-correlation
        xcorr = numpy.fft.irfft(fref * numpy.fft.rfft(t, L))
        # shift maximizes dotproduct(t[shift:],reference)
        # int() to convert numpy.int32 to python int
        shift = int(numpy.argmax(xcorr))
        subsample_shift = submax(xcorr[(shift - 1) % L],
                                 xcorr[shift],
                                 xcorr[(shift + 1) % L])
        shift = shift + subsample_shift
        # shift is now a float indicating the interpolated maximum
        if shift >= len(t):  # Negative shifts appear large and positive
            shift -= L       # This corrects them to be negative
        shifts.append(-shift)
        # Sign reversed to move the target instead of the reference
    return shifts


def _findslope(a):
    # Helper function for affinealign
    # The provided matrix a contains a bright line whose slope we want to know,
    # against a noisy background.
    # The line starts at 0,0.  If the slope is positive, it runs toward the
    # center of the matrix (i.e. toward (-1,-1))
    # If the slope is negative, it wraps from 0,0 to 0,-1 and continues toward
    # the center, (i.e. toward (-1,0)).
    # The line segment terminates at the midline along the X direction.
    # We locate the line by simply checking the sum along each possible line
    # up to the Y-max edge of a.  The caller sets the limit by choosing the
    # size of a.
    # The function returns a floating-point slope assuming that the matrix
    # has "square pixels".
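    # Illustrative example (values chosen for clarity, not from real data):
    # for a 16x8 matrix whose bright line follows y = x/2, the sums peak at
    # end == 2 (with X halved to 4), so the function returns 2/4 == 0.5.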
    Y, X = a.shape
    X //= 2
    x_pos = numpy.arange(1, X)
    x_neg = numpy.arange(2 * X - 1, X, -1)
    best_end = 0
    max_sum = 0
    for end in range(Y):
        y = (x_pos * end) // X
        s = numpy.sum(a[y, x_pos])
        if s > max_sum:
            max_sum = s
            best_end = end
        s = numpy.sum(a[y, x_neg])
        if s > max_sum:
            max_sum = s
            best_end = -end
    return float(best_end) / X


def affinealign(reference, targets, max_drift=0.02):
    """ EXPERIMENTAL FUNCTION.

    Perform an affine registration between a reference and a number of
    targets.  Designed for aligning the amplitude envelopes of recordings of
    the same event by different devices.

    NOTE: This method is currently NOT USED by Pitivi, as it has proven both
    unnecessary and unusable.  So far every test case has been registered
    successfully by rigidalign, and until Pitivi supports time-stretching of
    audio, the drift calculation cannot actually be used.

    @param reference: the reference signal to which others will be registered
    @type reference: array(number)
    @param targets: the signals to register
    @type targets: ordered iterable(array(number))
    @param max_drift: the maximum absolute clock drift rate
                  (i.e. stretch factor) that will be considered during search
    @type max_drift: positive L{float}
    @return: (offsets, drifts).  offsets[i] is the point in reference at which
           targets[i] starts.  drifts[i] is the speed of targets[i] relative to
           the reference (positive is faster, meaning the target should be
           slowed down to be in sync with the reference)
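
    Illustrative usage (hypothetical envelope arrays)::

        offsets, drifts = affinealign(reference, [target], max_drift=0.02)
        # offsets[0]: sample index in reference where target starts
        # drifts[0]: relative speed of target (positive means faster)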
    """
    L = len(reference) + max(len(t) for t in targets) - 1
    L2 = nextpow2(L)
    bsize = int(20. / max_drift)  # NEEDS TUNING
    num_blocks = nextpow2(1.0 * len(reference) // bsize)  # NEEDS TUNING
    bspace = (len(reference) - bsize) // num_blocks
    reference -= numpy.mean(reference)

    # Construct FFT'd reference blocks
    freference_blocks = numpy.zeros((L2 // 2 + 1, num_blocks),
                                    dtype=complex)
    for i in range(num_blocks):
        s = i * bspace
        tmp = numpy.zeros((L2,))
        tmp[s:s + bsize] = reference[s:s + bsize]
        freference_blocks[:, i] = numpy.fft.rfft(tmp, L2).conj()
    freference_blocks[:10, :] = 0  # High-pass to ignore slow volume variations

    offsets = []
    drifts = []
    for t in targets:
        t -= numpy.mean(t)
        ft = numpy.fft.rfft(t, L2)
        # fxcorr is the FFT'd cross-correlation with the reference blocks
        fxcorr_blocks = numpy.zeros((L2 // 2 + 1, num_blocks),
                                    dtype=complex)
        for i in range(num_blocks):
            fxcorr_blocks[:, i] = ft * freference_blocks[:, i]
            fxcorr_blocks[:, i] /= numpy.sqrt(
                numpy.sum(fxcorr_blocks[:, i] ** 2))
        del ft
        # At this point xcorr_blocks would show a distinct bright line, nearly
        # orthogonal to time, indicating where each of these blocks found their
        # peak.  Each point on this line represents the time in t where block i
        # found its match.  The time-intercept gives the time in b at which the
        # reference starts, and the slope gives the amount by which the
        # reference is faster relative to b.

        # The challenge now is to find this line.  Our strategy is to reduce the
        # search to one dimension by first finding the slope.
        # The Fourier Transform of a smooth real line in 2D is an orthogonal
        # line through the origin, with phase that gives its position.
        # Unfortunately this line is not clearly visible in fxcorr_blocks, so
        # we discard the phase (by taking the absolute value) and then inverse
        # transform.  This places the line at the origin, so we can find its
        # slope.

        # Construct the half-autocorrelation matrix
        # (A true autocorrelation matrix would be ifft(abs(fft(x))**2), but this
        # is just ifft(abs(fft(x))).)
        # Construction is stepwise partly in an attempt to save memory
        # The width is 2*num_blocks in order to avoid overlapping positive and
        # negative correlations
        halfautocorr = numpy.fft.fft(fxcorr_blocks, 2 * num_blocks, 1)
        halfautocorr = numpy.abs(halfautocorr)
        halfautocorr = numpy.fft.ifft(halfautocorr, None, 1)
        halfautocorr = numpy.fft.irfft(halfautocorr, None, 0)
        # Now it's actually the half-autocorrelation.
        # Chop out the bit we don't care about
        halfautocorr = halfautocorr[:int(bspace * num_blocks * max_drift), :]
        # Remove the local-correlation peak.
        halfautocorr[-1:2, -1:2] = 0  # NEEDS TUNING
        # Normalize each column (appears to be necessary)
        for i in range(2 * num_blocks):
            halfautocorr[:, i] /= numpy.sqrt(
                numpy.sum(halfautocorr[:, i] ** 2))
        drift = _findslope(halfautocorr) / bspace
        del halfautocorr

        # inverse transform and shift everything into alignment
        xcorr_blocks = numpy.fft.irfft(fxcorr_blocks, None, 0)
        del fxcorr_blocks
        # TODO: see if phase ramps are worthwhile here
        for i in range(num_blocks):
            blockcenter = i * bspace + bsize / 2
            shift = int(blockcenter * drift)
            if shift > 0:
                temp = xcorr_blocks[:shift, i].copy()
                xcorr_blocks[:-shift, i] = xcorr_blocks[shift:, i].copy()
                xcorr_blocks[-shift:, i] = temp
            elif shift < 0:
                temp = xcorr_blocks[shift:, i].copy()
                xcorr_blocks[-shift:, i] = xcorr_blocks[:shift, i].copy()
                xcorr_blocks[:-shift, i] = temp

        # xcorr is the drift-compensated cross-correlation
        xcorr = numpy.sum(xcorr_blocks, axis=1)
        del xcorr_blocks

        offset = numpy.argmax(xcorr)
        del xcorr
        if offset >= len(t):
            offset -= L2

        # now offset is the point in target at which reference starts and
        # drift is the speed with which the reference drifts relative to the
        # target.  We reverse these relationships for the caller.
        slope = 1 + drift
        offsets.append(-offset / slope)
        drifts.append(1 / slope - 1)
    return offsets, drifts


def getAudioTrack(clip):
    """
    Helper function for getting an audio track from a Clip

    @param clip: The Clip from which to locate an audio track
    @type clip: L{Clip}
    @returns: An audio track from clip, or None if clip has no audio track
    @rtype: audio L{TrackElement} or L{NoneType}
    """
    for track in clip.track_elements:
        if track.stream_type == AudioStream:
            return track
    return None


class ProgressMeter:

    """Abstract interface representing a progress meter."""

    def addWatcher(self, function):
        """ Add a progress watching callback function.  This callback will
        always be called from the main thread.

        @param function: a function to call with progress updates.
        @type function: callable(fractional_progress, time_remaining_text).
            fractional_progress is a float normalized to [0,1].
            time_remaining_text is a localized text string indicating the
            estimated time remaining.
        """
        raise NotImplementedError


class ProgressAggregator(ProgressMeter):

    """A ProgressMeter that aggregates progress reports.

    Reports from multiple sources are combined into a unified progress
    report.

    """

    def __init__(self):
        # _targets is a list giving the size of each task.
        self._targets = []
        # _portions is a list of the same length as _targets, indicating
        # the portion of each task that has been completed (initially 0).
        self._portions = []
        self._start = time.time()
        self._watchers = []

    def getPortionCB(self, target):
        """Prepare a new input for the Aggregator.

        Given a target size (in arbitrary units, which should be consistent
        across all calls on a single ProgressAggregator object), this returns
        a callback that can be used to update progress on this portion of
        the task.

        @param target: the total task size for this portion
        @type target: number
        @returns: a callback that can be used to inform the Aggregator of
            subsequent updates to this portion
        @rtype: function(x), where x should be a number indicating the
            absolute amount of this subtask that has been completed.
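
        Illustrative usage (hypothetical sizes)::

            aggregator = ProgressAggregator()
            report = aggregator.getPortionCB(1000)  # subtask of 1000 units
            report(250)  # 250 of the 1000 units are now complete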

        """
        i = len(self._targets)
        self._targets.append(target)
        self._portions.append(0)

        def cb(thusfar):
            self._portions[i] = thusfar
            GLib.idle_add(self._callForward)
        return cb

    def addWatcher(self, function):
        self._watchers.append(function)

    def _callForward(self):
        # This function always returns False so that it may be safely
        # invoked via GLib.idle_add(). Use of idle_add() is necessary
        # to ensure that watchers are always called from the main thread,
        # even if progress updates are received from other threads.
        total_target = sum(self._targets)
        total_completed = sum(self._portions)
        if total_target == 0:
            return False
        frac = min(1.0, float(total_completed) / total_target)
        now = time.time()
        remaining = (now - self._start) * (1 - frac) / frac
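        # For example, if 30 seconds have elapsed and frac is 0.75, the
        # estimate is 30 * 0.25 / 0.75 = 10 seconds remaining.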
        for function in self._watchers:
            function(frac, beautify_ETA(int(remaining * Gst.SECOND)))
        return False


class EnvelopeExtractee(Extractee, Loggable):

    """Class that computes the envelope of a 1-D signal (audio).

    The envelope is defined as the sum of the absolute value of the signal
    over each block.  This class computes the envelope incrementally,
    so that the entire signal does not ever need to be stored.
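
    For example (illustrative values): with a blocksize of 3, the signal
    [1, -2, 3, -4, 5, -6] has the envelope [6, 15].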

    """

    def __init__(self, blocksize, callback, *cbargs):
        """
        @param blocksize: the number of samples in a block
        @type blocksize: L{int}
        @param callback: a function to call when the extraction is complete.
            The function's first argument will be a numpy array
            representing the envelope, and any later argument to this
            function will be passed as subsequent arguments to callback.

        """
        Loggable.__init__(self)
        self._blocksize = blocksize
        self._cb = callback
        self._cbargs = cbargs
        self._blocks = numpy.zeros((0,), dtype=numpy.float32)
        self._empty = array.array('f', [])
        # self._samples buffers up to self._threshold samples, before
        # their envelope is computed and stored in self._blocks, in order
        # to amortize some of the function call overheads.
        self._samples = array.array('f', [])
        self._threshold = 2000 * blocksize
        self._progress_watchers = []

    def receive(self, a):
        self._samples.extend(a)
        if len(self._samples) < self._threshold:
            return
        else:
            self._process_samples()

    def addWatcher(self, w):
        """
        Add a function to call with progress updates.

        @param w: callback function
        @type w: function(# of samples received so far)

        """
        self._progress_watchers.append(w)

    def _process_samples(self):
        excess = len(self._samples) % self._blocksize
        if excess != 0:
            samples_to_process = self._samples[:-excess]
            self._samples = self._samples[-excess:]
        else:
            samples_to_process = self._samples
            self._samples = array.array('f', [])
        self.debug("Adding %s samples to %s blocks",
                   len(samples_to_process), len(self._blocks))
        newblocks = len(samples_to_process) // self._blocksize
        samples_abs = numpy.abs(
            samples_to_process).reshape((newblocks, self._blocksize))
        self._blocks.resize((len(self._blocks) + newblocks,))
        # This numpy.sum() call relies on samples_abs being a
        # floating-point type. If samples_abs.dtype is int16
        # then the sum may overflow.
        self._blocks[-newblocks:] = numpy.sum(samples_abs, 1)
        for w in self._progress_watchers:
            w(self._blocksize * len(self._blocks) + excess)

    def finalize(self):
        self._process_samples()  # absorb any remaining buffered samples
        self._cb(self._blocks, *self._cbargs)


class AutoAligner(Loggable):

    """
    Class for aligning a set of L{Clip}s automatically.

    The alignment is based on their contents, so that the shifted tracks
    are synchronized.  The current implementation only analyzes audio
    data, so timeline objects without an audio track cannot be aligned.
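
    Typical use (the names here are illustrative, not part of this module)::

        aligner = AutoAligner(selected_clips, done_callback)
        progress_meter = aligner.start()
        progress_meter.addWatcher(update_progress_bar)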

    """

    BLOCKRATE = 25
    """
    @ivar BLOCKRATE: The number of amplitude blocks per second.

    The AutoAligner works by computing the "amplitude envelope" of each
    audio stream.  We define an amplitude envelope as the absolute value
    of the audio samples, downsampled to a low samplerate.  This
    samplerate, in Hz, is given by BLOCKRATE.  (It is given this name
    because the downsampling filter is implemented by very simple
    averaging over blocks, i.e. a box filter.)  25 Hz appears to be a
    good choice because it evenly divides all common audio samplerates
    (e.g. 11025 and 8000). Lower blockrate requires less CPU time but
    produces less accurate alignment.  Higher blockrate is the reverse
    (and also cannot evenly divide all samplerates).
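
    For example, a 44100 Hz audio track gives 44100 // 25 = 1764 samples
    per envelope block.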

    """

    def __init__(self, clips, callback):
        """
        @param clips: an iterable of L{Clip}s.
            In this implementation, only L{Clip}s with at least one
            audio track will be aligned.
        @type clips: iter(L{Clip})
        @param callback: A function to call when alignment is complete.  No
            arguments will be provided.
        @type callback: function

        """
        Loggable.__init__(self)
        # self._clips maps each object to its envelope.  The values
        # are initially None prior to envelope extraction.
        self._clips = dict.fromkeys(clips)
        self._callback = callback
        # stack of (Track, Extractee) pairs waiting to be processed
        # When start() is called, the stack will be populated, and then
        # processed sequentially.  Only one item from the stack will be
        # actively in process at a time.
        self._extraction_stack = []

    @staticmethod
    def canAlign(clips):
        """
        Can an AutoAligner align these objects?

        Determine whether a group of timeline objects can all
        be aligned together by an AutoAligner.

        @param clips: a group of timeline objects
        @type clips: iterable(L{Clip})
        @returns: True iff the objects can be aligned.
        @rtype: L{bool}

        """
        # numpy is a "soft dependency".  If you're running without numpy,
        # this False return value is your only warning not to
        # use the AutoAligner, which will crash immediately.
        return numpy is not None and all(
            getAudioTrack(t) is not None for t in clips)

    def _extractNextEnvelope(self):
        audiotrack, extractee = self._extraction_stack.pop()
        r = RandomAccessAudioExtractor(audiotrack.factory,
                                       audiotrack.stream)
        r.extract(extractee, audiotrack.in_point,
                  audiotrack.out_point - audiotrack.in_point)
        return False

    def _envelopeCb(self, array, clip):
        self.debug("Receiving envelope for %s", clip)
        self._clips[clip] = array
        if self._extraction_stack:
            self._extractNextEnvelope()
        else:  # This was the last envelope
            self._performShifts()
            self._callback()

    def start(self):
        """
        Initiate the auto-alignment process.

        @returns: a L{ProgressMeter} indicating the progress of the
            alignment
        @rtype: L{ProgressMeter}

        """
        progress_aggregator = ProgressAggregator()
        pairs = []  # (Clip, {audio}TrackElement) pairs
        for clip in list(self._clips.keys()):
            audiotrack = getAudioTrack(clip)
            if audiotrack is not None:
                pairs.append((clip, audiotrack))
            else:  # forget any Clip without an audio track
                self._clips.pop(clip)
        if len(pairs) >= 2:
            for clip, audiotrack in pairs:
                # blocksize is the number of samples per block
                blocksize = audiotrack.stream.rate // self.BLOCKRATE
                extractee = EnvelopeExtractee(
                    blocksize, self._envelopeCb, clip)
                # numsamples is the total number of samples in the track,
                # which is used by progress_aggregator to determine
                # the percent completion.
                numsamples = ((audiotrack.duration / Gst.SECOND) *
                              audiotrack.stream.rate)
                extractee.addWatcher(
                    progress_aggregator.getPortionCB(numsamples))
                self._extraction_stack.append((audiotrack, extractee))
            # After we return, start the extraction cycle.
            # This GLib.idle_add call should not be necessary;
            # we should be able to invoke _extractNextEnvelope directly
            # here.  However, there is some as-yet-unexplained
            # race condition between the Python GIL, GTK UI updates,
            # GLib mainloop, and pygst multithreading, resulting in
            # occasional deadlocks during autoalignment.
            # This call to idle_add() reportedly eliminates the deadlock.
            # No one knows why.
            GLib.idle_add(self._extractNextEnvelope)
        else:  # We can't do anything without at least two audio tracks
            # After we return, call the callback function (once)
            GLib.idle_add(call_false, self._callback)
        return progress_aggregator

    def _chooseReference(self):
        """
        Chooses the timeline object to use as a reference.

        This function currently selects the one with the lowest priority,
        i.e. the one that appears highest in the GUI.  The behavior of this
        function affects user interaction, because the user may want to
        determine which object moves and which stays put.

        @returns: the timeline object with lowest priority.
        @rtype: L{Clip}

        """
        def priority(clip):
            return clip.priority
        return min(iter(self._clips.keys()), key=priority)

    def _performShifts(self):
        self.debug("performing shifts")
        reference = self._chooseReference()
        # By using pop(), this line also removes the reference
        # Clip and its envelope from further consideration,
        # saving some CPU time in rigidalign.
        reference_envelope = self._clips.pop(reference)
        # We call list() because we need a reliable ordering of the pairs
        # (in Python 3, dict.items() returns a view, not a list)
        pairs = list(self._clips.items())
        envelopes = [p[1] for p in pairs]
        offsets = rigidalign(reference_envelope, envelopes)
        for (movable, envelope), offset in zip(pairs, offsets):
            # tshift is the offset rescaled to units of nanoseconds
            tshift = int((offset * Gst.SECOND) / self.BLOCKRATE)
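            # For example (illustrative numbers): an offset of 5 envelope
            # blocks at BLOCKRATE 25 becomes 5 * Gst.SECOND / 25 = 0.2 s.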
            self.debug("Shifting %s to %i ns from %i",
                       movable, tshift, reference.start)
            newstart = reference.start + tshift
            if newstart >= 0:
                movable.start = newstart
            else:
                # Timeline objects must always have a non-negative start
                # point, so if alignment would move an object to start at
                # negative time, we instead make it start at zero and chop
                # off the required amount at the beginning.
                movable.start = 0
                movable.in_point = movable.in_point - newstart
                movable.duration += newstart


class AlignmentProgressDialog:

    """ Dialog indicating the progress of the auto-alignment process.
        Code derived from L{RenderingProgressDialog}, but greatly simplified
        (read-only, no buttons)."""

    def __init__(self, app):
        self.builder = Gtk.Builder()
        self.builder.add_from_file(
            os.path.join(configure.get_ui_dir(), "alignmentprogress.ui"))
        self.builder.connect_signals(self)

        self.window = self.builder.get_object("align-progress")
        self.progressbar = self.builder.get_object("progressbar")
        # Parent this dialog with mainwindow
        # set_transient_for allows this dialog to properly
        # minimize together with the mainwindow.  This method is
        # taken from RenderingProgressDialog.  In both cases, it appears
        # to work correctly, although there is a known bug for Gnome 3 in
        # RenderingProgressDialog (bug #652917)
        self.window.set_transient_for(app.gui)

        # FIXME: Add a cancel button

    def updatePosition(self, fraction, estimated):
        self.progressbar.set_fraction(fraction)
        self.window.set_title(_("%d%% Analyzed") % int(100 * fraction))
        if estimated:
            self.progressbar.set_text(_("About %s left") % estimated)


if __name__ == '__main__':
    # Simple command-line test
    from sys import argv
    names = argv[1:]
    envelopes = [numpy.fromfile(n) for n in names]
    reference = envelopes[-1]
    offsets, drifts = affinealign(reference, envelopes, 0.02)
    print(offsets, drifts)
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    for o, d, e in zip(offsets, drifts, envelopes):
        t = o + (1 + d) * numpy.arange(len(e))
        ax.plot(t, e / numpy.sqrt(numpy.sum(e ** 2)))
    plt.show()