1__author__ = "Joshua Zosky"
2
3"""
4    Copyright 2015 Joshua Zosky
5    joshua.e.zosky@gmail.com
6
7    This file is part of "RetroTS".
8    "RetroTS" is free software: you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation, either version 3 of the License, or
11    (at your option) any later version.
12    "RetroTS" is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16    You should have received a copy of the GNU General Public License
17    along with "RetroTS".  If not, see <http://www.gnu.org/licenses/>.
18"""
19
20import numpy
21
22# from scipy import *
23# from scipy.fftpack import fft
from numpy import real, zeros, ones, nonzero
from numpy.fft import fft, ifft
27
28# rcr: omit new style sub-library of pylab
29from pylab import plot, subplot, show, text, figure
30from scipy.signal import lfilter, firwin
31from scipy.interpolate import interp1d
32import glob
33
34"""
35function [r, e] = PeakFinder(var_vector, Opt)
36In python, the above command looks like:
37import PeakFinder as PF
38(r, e) = PF.peak_finder(var_vector, **opt)
39where opt is a dictionary with the parameters for the function
40"""
41# numpy.set_printoptions(threshold='nan')
42
43# for checking whether quotients are sufficiently close to integral
44g_epsilon = 0.00001
45
46
47def fftsegs(ww, po, nv):
48    """
49    Returns the segements that are to be used for fft calculations.
50    Example: (bli, ble, num) = fftsegs (100, 70, 1000);
51    :param ww: Segment width (in number of samples)
52    :param po: Percent segment overlap
53    :param nv: Total number of samples in original symbol
54    :return: (bli, ble, num): bli, ble: Two Nblck x 1 vectors defining the segments' starting and ending indices;
55                                num: An nv x 1 vector containing the number of segments each sample belongs to
56    return_tuple = (ww, po, nv)
57    return return_tuple
58    """
59    if ww == 0:
60        po = 0
61        ww = nv
    elif ww < 32 or ww > nv:  # window too small or larger than the signal
63        print("Error fftsegs: Bad value for window width of %d" % ww)
64        return
65    out = 0
66    while out == 0:
67        # clear bli ble
68        bli = []
69        ble = []
70        # % How many blocks?
        jmp = int(numpy.floor((100 - po) * ww / 100))  # jump from block to block (int, used as an index step)
72        nblck = nv / jmp
73        ib = 0
74        cnt = 0
75        while ble == [] or ble[-1] < (nv - 1):
76            bli.append(ib)
            ble.append(min(ib + ww - 1, nv - 1))  # 0-based: last valid index is nv - 1
78            cnt += 1
79            ib += jmp
80        # If the last block is too small, spread the love
81        if ble[-1] - bli[-1] < (0.1 * ww):  # Too small a last block, merge
82            ble[-2] = ble[-1]  # into previous
83            cnt -= 1
84            del ble[-1]
85            del bli[-1]
86            out = 1
87        elif ble[-1] - bli[-1] < (0.75 * ww):  # Too large to merge, spread it
88            ww = ww + numpy.floor((ble[-1] - bli[-1]) / nblck)
89            out = 0
90        else:  # Last block big enough, proceed
91            out = 1
    # now figure out the number of estimates each point of the time series gets
    num = zeros((nv, 1))
    # each segment contributes one estimate to every sample it covers
    for seg_start, seg_end in zip(bli, ble):
        num[seg_start : seg_end + 1] += 1
105    return bli, ble, num
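
# Sketch (illustrative, not executed): with ww == 0 the whole series becomes a
# single segment, e.g.
#     bli, ble, num = fftsegs(0, 0, 8)
#     # expected: bli == [0], ble == [7], num == an 8 x 1 array of ones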
106
107
108def analytic_signal(vi, windwidth, percover, win):
    nvi = len(vi)
    h = zeros(nvi, dtype=complex)  # analytic-signal estimates, summed over segments
    bli, ble, num = fftsegs(windwidth, percover, nvi)
112    for ii in range(len(bli)):
113        v = vi[bli[ii] : ble[ii] + 1]
114        nv = len(v)
115        if win == 1:
116            fv = fft(v * numpy.hamming(nv))
117        else:
118            fv = fft(v)
        wind = zeros(v.size)
        # zero negative frequencies, double positive frequencies (Hilbert weighting)
        if nv % 2 == 0:
            wind[0] = 1  # keep DC
            wind[nv // 2] = 1  # keep Nyquist
            wind[1 : nv // 2] = 2  # double positive frequencies
        else:
            wind[0] = 1  # keep DC
            wind[1 : (nv + 1) // 2] = 2  # double positive frequencies
        h[bli[ii] : ble[ii] + 1] += ifft(fv * wind)  # add this segment's estimate
    for i in range(len(h)):
        h[i] /= num[i, 0]  # normalize by the number of segments covering sample i
132    return h
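
# Sketch (illustrative, not executed): for the default call -- windwidth=0,
# percover=0, win=0, i.e. one un-windowed segment -- the result should match
# scipy.signal.hilbert:
#     from scipy.signal import hilbert
#     h_ref = hilbert(sig)                 # sig: any real-valued 1-D array
#     h_loc = analytic_signal(sig, 0, 0, 0)
#     # numpy.allclose(h_ref, h_loc) is expected to hold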
133
134
135def remove_pn_duplicates(tp, vp, tn, vn, quiet):
    ok = zeros(len(tp), dtype=numpy.int32)  # indices of peaks to keep; index 0 (first peak) is always kept
139    j = 0
140    for i in range(1, min(len(tp), len(tn))):
141        #  0.3 is the minimum time before the next beat
142        if (tp[i] != tp[i - 1]) and (tn[i] != tn[i - 1]) and (tp[i] - tp[i - 1] > 0.3):
143            j += 1
146            ok[j] = i
147        else:
148            if not quiet:
149                print("Dropped peak at %s sec" % tp[i])
150    ok = ok[: j + 1]
151    tp = tp[ok]
152    vp = vp[ok]
153    tn = tn[ok]
154    vn = vn[ok]
155    return tp, vp, tn, vn
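
# Sketch (illustrative, not executed): peaks closer than 0.3 s to the previous
# one are treated as duplicates and dropped, e.g.
#     tp = numpy.array([1.0, 1.1, 2.0]); vp = numpy.array([5.0, 6.0, 7.0])
#     tn = numpy.array([1.5, 1.6, 2.5]); vn = numpy.array([-5.0, -6.0, -7.0])
#     tp2, vp2, tn2, vn2 = remove_pn_duplicates(tp, vp, tn, vn, 1)
#     # expected: tp2 == [1.0, 2.0] (the 1.1 s peak is only 0.1 s after 1.0 s)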
156
157
158def remove_duplicates(t, v, quiet):
159    j = 0
    for i in range(1, len(t)):  # len(t) + 1 would index past the end of t
161        #  0.3 is the minimum time before the next beat
162        if t[i] != t[i - 1] and (t[i] - t[i - 1]) > 0.3:
163            j += 1
164            t[j] = t[i]
165            v[j] = v[i]
166        elif quiet == 0:
167            print("Dropped peak at %s sec" % t[i])
168    t = t[: j + 1]
169    v = v[: j + 1]
170    return t, v
171
172
173def clean_resamp(v):
174    i_nan = nonzero(numpy.isnan(v))  # the bad
175    i_nan = i_nan[0]
176    i_good = nonzero(numpy.isfinite(v))  # the good
177    i_good = i_good[0]
178    for i in range(0, len(i_nan)):
179        if i_nan[i] < i_good[0]:
180            v[i_nan[i]] = v[i_good[0]]
181        elif i_nan[i] > i_good[-1]:
182            v[i_nan[i]] = v[i_good[-1]]
183        else:
184            print("Error: Unexpected NaN case")
185            v[i_nan[i]] = 0
186    return v
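
# Sketch (illustrative, not executed): NaNs before the first / after the last
# finite sample are filled with the nearest finite value, e.g.
#     v = numpy.array([numpy.nan, 1.0, 2.0, numpy.nan])
#     # expected: clean_resamp(v) -> [1.0, 1.0, 2.0, 2.0]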
187
188
189def peak_finder(var_v, filename=None, v=None):
190    """,
191                phys_fs=(1 / 0.025),
192                zero_phase_offset=0.5,
193                quiet=0,
194                resample_fs=(1 / 0.025),
195                frequency_cutoff=10,
196                fir_order=80,
197                interpolation_style='linear',
198                demo=0,
199                as_window_width=0,
200                as_percover=0,
201                as_fftwin=0,
202                sep_dups=0):
203    """
204    """
205    Example: PeakFinder('Resp*.1D') or PeakFinder(var_vector) where var_vector is a column vector.
206    If var_vector is a matrix, each column is processed separately.
207    :param var_vector: column vector--list of list(s)
208    :param phys_fs: Sampling frequency
209    :param zero_phase_offset: Fraction of the period that corresponds to a phase of 0
210                                0.5 means the middle of the period, 0 means the 1st peak
211    :param quiet:
212    :param resample_fs:
213    :param frequency_cutoff:
214    :param fir_order: BC ???
215    :param interpolation_style:
216    :param demo:
217    :param as_window_width:
218    :param as_percover:
219    :param fftwin:
220    :param sep_dups:
221    :return: [r, e] r = Peak of var_vector; e = error value
222    """
223    # #clear all but var_vector (useful if I run this function as as script)
224    # keep('var_vector', 'opt')
225    var_vector = dict(
226        phys_fs=(1 / 0.025),
227        zero_phase_offset=0.5,
228        quiet=0,
229        resample_fs=(1 / 0.025),
230        frequency_cutoff=10,
231        fir_order=80,
232        interpolation_style="linear",
233        demo=0,
234        as_window_width=0,
235        as_percover=0,
236        as_fftwin=0,
237        sep_dups=0,
238    )
239    var_vector.update(var_v)
240    default_div = 1 / 0.025
241    if (var_vector["phys_fs"] != default_div) and (
242        var_vector["resample_fs"] == default_div
243    ):
244        var_vector["resample_fs"] = var_vector["phys_fs"]
245    if var_vector["demo"]:
246        var_vector["quiet"] = 0
247    else:
248        pause = False  # pause off
249    e = False  # default value for e
250    r = {}
251    # Some filtering
252    nyquist_filter = var_vector["phys_fs"] / 2.0
253    # w[1] = 0.1/filter_nyquist  # Cut frequencies below 0.1Hz
254    # w[2] = var_vector['frequency_cutoff']/filter_nyquist  # Upper cut off frequency normalized
255    # b = signal.firwin(var_vector['fir_order'], w, 'bandpass')  # FIR filter of order 40
256    w = var_vector["frequency_cutoff"] / nyquist_filter
    b = firwin(
        numtaps=(var_vector["fir_order"] + 1), cutoff=w, window="hamming"
    )  # low-pass FIR filter of order fir_order (default 80)
260    b = numpy.array(b)
261    no_dups = 1  # Remove duplicates that might come up when improving peak location
262    """
263    if isinstance(var_vector, str):
264        L = glob(var_vector)  # NEED TO CONVERT ZGLOBB INTO LIST MAKER OF FILE OBJECTS; I.E. type(L) == list
265        nl = len(L)
266        #if isinstance(L, (int, long, float, complex)):
267            #print 'Error: File (%s) not found\n', var_vector
268            #e = True
269            #return e
270    else:
271        L = []
272        nl = len(var_vector)
273        if nl < 1:
274            print 'Error: No vectors\n', nl
275            e = True
276            return e
277    """
278    nl = 1  # temporary, delete this line when above lines get fixed with glob
279    # del(r) # "Must clear it. Or next line fails" -- Probably unnecessary in Python
280    r_list = []
281    for i in range(nl):
282        r = {
283            "v_name": filename,
284            "t": [],
285            "x": [],
286            "iz": [],  # zero crossing (peak) locations
287            "p_trace": [],
288            "tp_trace": [],
289            "n_trace": [],
290            "tn_trace": [],
291            "prd": [],
292            "t_mid_prd": [],
293            "p_trace_mid_prd": [],
294            "phase": [],
295            "rv": [],
296            "rvt": [],
297        }
298        r_list.append(r)
299    """
300    for i_column in range(nl):
301        if L and not os.path.isdir(L):
302            r_list[i_column]['v_name'] = '%s%s' % (sys.path, L[i_column]['name'])"""
303    # with open(r['v_name'], "rb") as f:
304    # v = f.read()
305    # v = map(int, v)
306    if v is None:
307        v = []
308        with open(r["v_name"], "rb") as h:
309            for line in h:
310                v.append(float(line.strip()))
311
312    v_np = numpy.asarray(v)
313    # else:
314    # r_list[i_column]['v_name'] = 'vector input col %d' % i_column
315    # v = var_vector[i_column]
316
317    window_width = 0.2  # Window for adjusting peak location in seconds
318    # Remove the mean
319    v_np_mean = numpy.mean(v_np)
320    v_np = v_np - v_np_mean
321    r["v"] = v_np  # Store it for debugging (found it is used in phase estimator)
322    # Filter both ways to cancel phase shift
323    v_np = lfilter(b, 1, v_np, axis=0)
324    v_np = numpy.flipud(v_np)
325    v_np = lfilter(b, 1, v_np)
326    v_np = numpy.flipud(v_np)
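    # Note (sketch, not executed): the two lfilter/flipud passes above amount to
    # zero-phase filtering; scipy.signal.filtfilt does the same with extra edge
    # padding, e.g. for a demeaned input x:
    #     from scipy.signal import filtfilt
    #     v_zero_phase = filtfilt(b, 1, x)
    # The results are expected to be close, but not identical.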
327    # Get the analytic signal
328    r["x"] = analytic_signal(
329        v_np,
330        var_vector["as_window_width"] * var_vector["phys_fs"],
331        var_vector["as_percover"],
332        var_vector["as_fftwin"],
333    )
334
335    # Using local version to illustrate, can use hilbert
336    # Doing ffts over smaller windows can improve peak detection in the few instances that go undetected but
337    # what value to use is not clear and there seems to be at times more errors introduced in the lower envelope.
338    nt = len(r["x"])
339
340    # force 't' to have the same length as 'x', rather than trusting
341    # divisions (when it should come out evenly)  5 Jun, 2017 [rickr]
342    #
343    # r['t'] = numpy.arange(0.,
344    #                       float(nt) / var_vector['phys_fs'],
345    #                       (1. / var_vector['phys_fs']))  # # % FIX FIX FIX
346    fsi = 1.0 / var_vector["phys_fs"]
347    r["t"] = numpy.array([i * fsi for i in range(len(r["x"]))])
348
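    # Find zero crossings of imag(Z): the product of consecutive imaginary
    # samples is <= 0 exactly at a sign change, and pol records the crossing
    # direction (+1 rising, -1 falling).
    # Sketch (illustrative): for imag(Z) = [-1, 1, 2, -3] the products are
    # [-1, 2, -6], so crossings are at indices 0 and 2, with pol = [+1, -1].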
349    iz = nonzero((r["x"][0 : nt - 1].imag * r["x"][1:nt].imag) <= 0)
350    iz = iz[0]
351    polall = -numpy.sign(r["x"][0 : nt - 1].imag - r["x"][1:nt].imag)
352    pk = (r["x"][iz]).real
353    pol = polall[iz]
354    tiz = []
355    for i in iz:
356        tiz.append(r["t"][i])
357    ppp = nonzero(pol > 0)
358    ppp = ppp[0]
359    p_trace = []
360    tp_trace = []
361    for i in ppp:
362        p_trace.append(pk[i])
363        tp_trace.append(tiz[i])
364    ppp = nonzero(pol < 0)
365    ppp = ppp[0]
366    n_trace = []
367    tn_trace = []
368    for i in ppp:
369        n_trace.append(pk[i])
370        tn_trace.append(tiz[i])
371
372    if var_vector["quiet"] == 0:
373        print(
374            "--> Load signal\n--> Smooth signal\n--> Calculate analytic signal Z\n--> Find zero crossing of imag(Z)\n"
375        )
376        figure(1)
377        subplot(211)
378        plot(r["t"], real(r["x"]), "g")
379        # plot (r_list[i_column].t, imag(r_list[i_column]['x']),'g')
380        plot(tp_trace, p_trace, "ro")
381        plot(tn_trace, n_trace, "bo")
382        # plot (r_list[i_column].t, abs(r_list[i_column]['x']),'k')
383        subplot(413)
384        vn = real(r["x"]) / (abs(r["x"]) + numpy.spacing(1))
385        plot(r["t"], vn, "g")
386        ppp = nonzero(pol > 0)
387        ppp = ppp[0]
388        for i in ppp:
389            plot(tiz[i], vn[iz[i]], "ro")
390        ppp = nonzero(pol < 0)
391        ppp = ppp[0]
392        for i in ppp:
393            plot(tiz[i], vn[iz[i]], "bo")
394        if var_vector["demo"]:
395            # need to add a pause here - JZ
396            # uiwait(msgbox('Press button to resume', 'Pausing', 'modal'))
397            pass
398
399    # Some polishing
400    if 1 == 1:
401        nww = numpy.ceil((window_width / 2) * var_vector["phys_fs"])
402        pkp = pk
403        r["iz"] = iz
404        for i in range(len(iz)):
405            ###################### left off here, turns out there's a
406            # difference in floating point precision in the calculation
407            # of r['x'], maybe look into the reason why they'd be different
408
409            # force these to ints    17 May 2017 [DNielson]
410            n0 = int(max(2, iz[i] - nww))
411            n1 = int(min(nt, iz[i] + nww))
412            temp = (r["x"][n0 : n1 + 1]).real
413            if pol[i] > 0:
414                xx, ixx = numpy.max(temp, 0), numpy.argmax(temp, 0)
415            else:
416                xx, ixx = numpy.min(temp, 0), numpy.argmin(temp, 0)
417            r["iz"][i] = n0 + ixx - 1
418            pkp[i] = xx
421        tizp = r["t"][r["iz"]]
422
423        ppp = nonzero(pol > 0)
424        ppp = ppp[0]
425        r["p_trace"] = pkp[ppp]
426        r["tp_trace"] = tizp[ppp]
427        ppp = nonzero(pol < 0)
428        ppp = ppp[0]
429        r["n_trace"] = pkp[ppp]
430        r["tn_trace"] = tizp[ppp]
431
432        if no_dups:
433            # remove duplicates
434            if var_vector["sep_dups"]:
435                print("YOU SHOULD NOT BE USING THIS.\n")
436                print(" left here for the record\n")
437                [r["tp_trace"], r["p_trace"]] = remove_duplicates(
438                    r["tp_trace"], r["p_trace"], var_vector["quiet"]
439                )
440                [r["tn_trace"], r["n_trace"]] = remove_duplicates(
441                    r["tn_trace"], r["n_trace"], var_vector["quiet"]
442                )
            else:
                (
                    r["tp_trace"],
                    r["p_trace"],
                    r["tn_trace"],
                    r["n_trace"],
                ) = remove_pn_duplicates(
                    r["tp_trace"],
                    r["p_trace"],
                    r["tn_trace"],
                    r["n_trace"],
                    var_vector["quiet"],
                )
453            if len(r["p_trace"]) != len(r["n_trace"]):
454                print("Bad news in tennis shoes. I'm outta here.\n")
455                e = True
456                return r, e
457
458        if var_vector["quiet"] == 0:
459            print("--> Improved peak location\n--> Removed duplicates")
460            # style.use('ggplot')
461            subplot(211)
462            plot(r["tp_trace"], r["p_trace"], "r+", r["tp_trace"], r["p_trace"], "r")
463            plot(r["tn_trace"], r["n_trace"], "b+", r["tn_trace"], r["n_trace"], "b")
464            if var_vector["demo"]:
465                # need to add a pause here - JZ
466                # uiwait(msgbox('Press button to resume', 'Pausing', 'modal'))
467                pass
468    else:
469        tizp = tiz
470        r["iz"] = iz
471        pkp = pk
472        r["p_trace"] = p_trace
        r["n_trace"] = n_trace  # This seems like a mistake; the .m file's version is highly suspect
478
479    # Calculate the period
480    nptrc = len(r["tp_trace"])
481    print(r["tp_trace"])
482    r["prd"] = r["tp_trace"][1:nptrc] - r["tp_trace"][0 : nptrc - 1]
483    r["p_trace_mid_prd"] = (r["p_trace"][1:nptrc] + r["p_trace"][0 : nptrc - 1]) / 2.0
484    r["t_mid_prd"] = (r["tp_trace"][1:nptrc] + r["tp_trace"][0 : nptrc - 1]) / 2.0
485    if var_vector["quiet"] == 0:
486        print("--> Calculated the period (from beat to beat)\n")
487        subplot(211)
488        plot(r["t_mid_prd"], r["p_trace_mid_prd"], "kx")
489        for i in range(0, len(r["prd"])):
490            text(r["t_mid_prd"][i], r["p_trace_mid_prd"][i], ("%.2f" % r["prd"][i]))
491        if var_vector["demo"]:
492            # need to add a pause here - JZ
493            # uiwait(msgbox('Press button to resume', 'Pausing', 'modal'))
494            pass
495        show()
496
497    if var_vector["interpolation_style"] != "":
498        # Interpolate to slice sampling time grid:
499        step_interval = 1.0 / var_vector["resample_fs"]
500        # r['tR'] = numpy.arange(0, max(r['t']) + step_interval, step_interval)
501
502        # allow for a ratio that is barely below an integer
503        #                               5 Jun, 2017 [rickr]
504        step_size = int(max(r["t"]) / step_interval + g_epsilon) + 1
505        r["tR"] = []
506
507        for i in range(0, step_size):
508            r["tR"].append(i * step_interval)
509        """
510        with open('tR.csv', 'w') as f:
511            for i in r['tR']:
512                f.write("%s\n" % i)
513        # Squeeze these arrays down from an [x,y] shape to an [x,] shape in order to use interp1d
514        r['tp_trace'] = r['tp_trace'].squeeze()
515        r['p_trace'] = r['p_trace'].squeeze()
516        """
517        r["p_trace_r"] = interp1d(
518            r["tp_trace"],
519            r["p_trace"],
520            var_vector["interpolation_style"],
521            bounds_error=False,
522        )
523        r["p_trace_r"] = r["p_trace_r"](r["tR"])
524        # r['tn_trace'] = r['tn_trace'].squeeze()
525        # r['n_trace'] = r['n_trace'].squeeze()
526        r["n_trace_r"] = interp1d(
527            r["tn_trace"],
528            r["n_trace"],
529            var_vector["interpolation_style"],
530            bounds_error=False,
531        )(r["tR"])
532        # r['t_mid_prd'] = r['t_mid_prd'].squeeze()
533        # r['prd'] = r['prd'].squeeze()
534        r["prdR"] = interp1d(
535            r["t_mid_prd"],
536            r["prd"],
537            var_vector["interpolation_style"],
538            bounds_error=False,
539        )(r["tR"])
540        # You get NaN when tR exceeds original signal time, so set those
541        # to the last interpolated value
542        r["p_trace_r"] = clean_resamp(r["p_trace_r"])
543        r["n_trace_r"] = clean_resamp(r["n_trace_r"])
544        r["prdR"] = clean_resamp(r["prdR"])
545
546        # if (i_column != nl):
547        # input('Hit enter to proceed...','s')
548        # if var_vector['quiet'] == 0:
549        # plotsign2(1)
550
551    return r, e
552