/*************************************************************************/
/*                                                                       */
/*                Centre for Speech Technology Research                  */
/*                     University of Edinburgh, UK                       */
/*                       Copyright (c) 1996,1997                         */
/*                        All Rights Reserved.                           */
/*                                                                       */
/*  Permission is hereby granted, free of charge, to use and distribute  */
/*  this software and its documentation without restriction, including   */
/*  without limitation the rights to use, copy, modify, merge, publish,  */
/*  distribute, sublicense, and/or sell copies of this work, and to      */
/*  permit persons to whom this work is furnished to do so, subject to   */
/*  the following conditions:                                            */
/*   1. The code must retain the above copyright notice, this list of    */
/*      conditions and the following disclaimer.                         */
/*   2. Any modifications must be clearly marked as such.                */
/*   3. Original authors' names are not deleted.                         */
/*   4. The authors' names are not used to endorse or promote products   */
/*      derived from this software without specific prior written        */
/*      permission.                                                      */
/*                                                                       */
/*  THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK        */
/*  DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING      */
/*  ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT   */
/*  SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE     */
/*  FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES    */
/*  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN   */
/*  AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,          */
/*  ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF       */
/*  THIS SOFTWARE.                                                       */
/*                                                                       */
/*************************************************************************/
/*                      Author :  Alan W Black                           */
/*                      Date   :  April 1996                             */
/*-----------------------------------------------------------------------*/
/*                                                                       */
/* Duff intonation                                                       */
/*                                                                       */
/*=======================================================================*/
#include <cstdio>
#include <cmath>
#include "festival.h"
#include "intonation.h"

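// The no-op intonation pass: predicts no accents or boundary tones, and
// simply returns the utterance unchanged.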
LISP FT_Intonation_Default_Utt(LISP utt)
{
    return utt;
}

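// FT_Int_Targets_Default_Utt: the simplest target prediction.  It reads
// "start" and "end" F0 values (in Hz, defaults 130 and 110) from the
// Scheme variable duffint_params, creates a "Target" relation, and adds
// one target at time 0 on the first segment and one at the end of the
// last segment, i.e. a single linear fall across the utterance.
//
// A minimal Scheme setup sketch (the parameter names come from this file;
// the Int_Method setting follows the usual Festival convention and is
// illustrative rather than prescribed here):
//   (set! duffint_params '((start 130) (end 110)))
//   (Parameter.set 'Int_Method 'DuffInt)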
LISP FT_Int_Targets_Default_Utt(LISP utt)
{
    // Predict intonation from labels etc (producing F0 contour)
    EST_Utterance *u = get_c_utt(utt);
    EST_Item *s;
    EST_Relation *seg;
    LISP params;
    float start,end;

    *cdebug << "Intonation duff module\n";

    // should create some random targets
    params = siod_get_lval("duffint_params",NULL);
    start = get_param_float("start",params,130.0);
    end = get_param_float("end",params,110.0);
    u->create_relation("Target");

    seg = u->relation("Segment");

    if (seg->length() == 0)
        return utt;

    add_target(u,seg->first(),0,start);
    s = seg->last();
    add_target(u,s,(float)ffeature(s,"segment_end"),end);

    return utt;
}

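// FT_Int_Targets_Relation_Utt: instead of sparse targets, build a complete
// F0 contour as an EST_Track with a fixed 10ms frame shift.  The contour
// falls linearly from "start" to "end" Hz over the duration of the "coefs"
// track found on the head item of the named relation, and is stored as the
// single item of a new "f0" relation under the feature "f0".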
LISP FT_Int_Targets_Relation_Utt(LISP utt, LISP relname)
{
    // Predict intonation from labels etc (producing F0 contour)
    EST_Utterance *u = get_c_utt(utt);
    EST_Track *pm = 0;
    LISP params;
    float start,end;
    int n_frames;

    *cdebug << "Intonation duff module\n";

    // should create some random targets
    params = siod_get_lval("duffint_params",NULL);
    start = get_param_float("start",params,130.0);
    end = get_param_float("end",params,110.0);

    pm = track(u->relation(get_c_string(relname))->head()->f("coefs"));

    float pp = 1.0/start;
    //    float end_time = ((float)pm->num_frames()) * pp;
    float end_time = pm->end();

    n_frames = (int)ceil(end_time/pp);
    *cdebug << "n_frames: " << n_frames << endl;
    *cdebug << "end_time: " << end_time << endl;

    EST_Track *f0 = new EST_Track;
    f0->resize(n_frames, 1);
    f0->fill_time(0.01);   // 10ms frame shift

    // Linear interpolation from start to end Hz: slope m (Hz per second)
    // and intercept c.  E.g. with start=130, end=110 and end_time=2.0s,
    // m is -10 Hz/s, so frame i gets 130 - 0.1*i Hz.
    float m = (end-start)/end_time;
    float c = start;

    for (int i = 0; i < n_frames; ++i)
        f0->a(i) = (m * ((float) i) * 0.01) + c;

    u->create_relation("f0");
    EST_Item *item = u->relation("f0")->append();
    item->set_val("f0", est_val(f0));

    return utt;
}