/**
 * @file   ngram_decode.c
 *
 * <EN>
 * @brief N-gram based word prediction for the 2nd pass.
 *
 * These functions return next word candidates in the 2nd recognition
 * pass of Julius, i.e. N-gram based stack decoding.
 *
 * Given a partial sentence hypothesis, the beginning frame of the
 * hypothesis is first estimated from the word trellis.  Then the words
 * whose end survived in the word trellis around the estimated frame are
 * extracted, and returned together with their N-gram probabilities.
 *
 * In Julius, ngram_firstwords(), ngram_nextwords() and ngram_acceptable()
 * are called from the main search function wchmm_fbs().  In Julian,
 * the corresponding functions in dfa_decode.c are used instead.
 * </EN>
 *
 * @author Akinobu Lee
 * @date   Fri Jul  8 14:57:51 2005
 *
 * $Revision: 1.3 $
 *
 */
/*
 * Copyright (c) 1991-2007 Kawahara Lab., Kyoto University
 * Copyright (c) 2000-2005 Shikano Lab., Nara Institute of Science and Technology
 * Copyright (c) 2005-2007 Julius project team, Nagoya Institute of Technology
 * All rights reserved
 */

#include <julius/julius.h>

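/*
 * Illustrative sketch of how these functions are driven in the 2nd pass
 * (the real driver is wchmm_fbs() in the stack decoding module; the stack
 * operations and variable names below are placeholders, not actual APIs):
 *
 *   n = ngram_firstwords(nw, peseqlen, maxnw, r);    // initial word(s)
 *   while (a hypothesis "hypo" is popped from the stack) {
 *     if (ngram_acceptable(hypo, r)) {
 *       // hypo reaches the beginning of input: output it as a sentence
 *     } else {
 *       n = ngram_nextwords(hypo, nw, maxnw, r);     // expand candidates
 *       // connect each nw[i] to hypo, rescore, and push back to the stack
 *     }
 *   }
 */
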
/**
 * <EN>
 * qsort callback function to sort next word candidates by their word ID.
 *
 * @param a [in] element 1
 * @param b [in] element 2
 *
 * @return 1 if the word ID of a is greater than that of b, -1 if smaller,
 * or 0 if they are equal.
 * </EN>
 */
static int
compare_nw(NEXTWORD **a, NEXTWORD **b)
{
  if ((*a)->id > (*b)->id) return 1;
  if ((*a)->id < (*b)->id) return -1;
  return 0;
}

/**
 * <EN>
 * Find a word from the list of next word candidates.
 *
 * @param nw [in] list of next word candidates, sorted by word ID
 * @param w [in] word ID to search for
 * @param num [in] length of @a nw
 *
 * @return the pointer to the NEXTWORD data if found, or NULL if not found.
 * </EN>
 */
/* find the next word candidate whose id is 'w' (binary search over the sorted list) */
static NEXTWORD *
search_nw(NEXTWORD **nw, WORD_ID w, int num)
{
  int left, right, mid;
  NEXTWORD *tmp;

  if (num == 0) return NULL;
  left = 0;
  right = num - 1;
  while (left < right) {
    mid = (left + right) / 2;
    if ((nw[mid])->id < w) {
      left = mid + 1;
    } else {
      right = mid;
    }
  }
  tmp = nw[left];
  if (tmp->id == w) {
    return tmp;
  } else {
    return NULL;
  }
}

/**
 * <EN>
 * Compute backward N-gram score from forward N-gram.
 * </EN>
 *
 * @param ngram [in] N-gram data structure
 * @param w [in] word sequence
 * @param wlen [in] length of @a w
 *
 * @return the backward probability of the word w[0].
 *
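 * In the log domain, with @a w given in forward (time) order, the value
 * returned for a trigram is (a sketch of what the code below computes):
 *
 *   [log P(w0) + log P(w1|w0) + log P(w2|w0,w1)] - [log P(w1) + log P(w2|w1)]
 *     = log P(w0,w1,w2) - log P(w1,w2)
 *     = log P(w0 | w1,w2)
 *
 * i.e. the probability of the earliest word w0 given the words that follow
 * it, obtained using only forward N-gram probabilities.
 *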
 */
static LOGPROB
ngram_forw2back(NGRAM_INFO *ngram, WORD_ID *w, int wlen)
{
  int i;
  LOGPROB p1, p2;

  p1 = 0.0;
  for(i = 1; i < ngram->n; i++) {
    if (i >= wlen) break;
    p1 += ngram_prob(ngram, i, &(w[1]));
  }
  p2 = 0.0;
  for(i = 0; i < ngram->n; i++) {
    if (i >= wlen) break;
    p2 += ngram_prob(ngram, i+1, w);
  }

  return(p2 - p1);
}

/**
 * <EN>
 * @brief  Extract next word candidates from the word trellis.
 *
 * This function extracts the trellis words whose word end has survived
 * in the word trellis at the specified frame.  Their N-gram probabilities
 * are then computed, and the words are appended to the current list of
 * next word candidates.
 *
 * @param r [in] recognition process instance
 * @param nw [i/o] list of next word candidates (new words will be appended at @a oldnum)
 * @param oldnum [in] number of words already stored in @a nw
 * @param hypo [in] the source sentence hypothesis
 * @param t [in] specified frame
 *
 * @return the total number of words stored in @a nw after the extraction.
 * </EN>
 */
static int
pick_backtrellis_words(RecogProcess *r, NEXTWORD **nw, int oldnum, NODE *hypo, short t)
{
  int i;
  WORD_ID w;
  LOGPROB rawscore;
#ifdef WPAIR
  int w_old = WORD_INVALID;
#endif
  int num;
  WORD_ID cnword[MAX_N];        ///< Last N-1 non-transparent words in the hypothesis
  WORD_ID cnwordrev[MAX_N];     ///< The same word context, in reversed order
  int cnnum;                    ///< Num of found non-transparent words (<= N-1)
  int last_trans;               ///< Num of skipped transparent words

  BACKTRELLIS *bt;
  WORD_INFO *winfo;
  NGRAM_INFO *ngram;
  LOGPROB lm_weight2, lm_penalty2, lm_penalty_trans;

  num = oldnum;
  bt = r->backtrellis;
  winfo = r->lm->winfo;
  ngram = r->lm->ngram;
  lm_weight2 = r->config->lmp.lm_weight2;
  lm_penalty2 = r->config->lmp.lm_penalty2;
  lm_penalty_trans = r->config->lmp.lm_penalty_trans;

  /* set word contexts to cnword[] from 1 considering transparent words */
  if (ngram) {
    cnnum = 0;
    last_trans = 0;
    for(i=hypo->seqnum-1;i>=0;i--) {
      if (! winfo->is_transparent[hypo->seq[i]]) {
        cnword[cnnum+1] = hypo->seq[i];
        cnnum++;
        if (cnnum >= ngram->n - 1) break;
      } else {
        last_trans++;
      }
    }
    if (ngram->dir == DIR_RL) {
      for(i=0;i<cnnum;i++) {
        cnwordrev[cnnum-1-i] = cnword[i+1];
      }
    }
    /* use ngram id */
    if (ngram->dir == DIR_RL) {
      for(i=0;i<cnnum;i++) cnwordrev[i] = winfo->wton[cnwordrev[i]];
    } else {
      for(i=0;i<cnnum;i++) cnword[i+1] = winfo->wton[cnword[i+1]];
    }
  }

  /* lookup survived words in backtrellis on time frame 't' */
  for (i=0;i<bt->num[t];i++) {
    w = (bt->rw[t][i])->wid;
#ifdef WORD_GRAPH
    /* only words on the word graphs are expanded */
    if (!(bt->rw[t][i])->within_wordgraph) continue;
#endif /* WORD_GRAPH */
#ifdef WPAIR
    /* some words have the same word ID but different previous words;
       only one of them is opened here (the best one will be selected later
       by next_word()) */
    if (w == w_old) continue;   /* backtrellis is sorted by word ID */
    else w_old = w;
#endif /* WPAIR */
    /* skip if the word already exists in the list */
    if (search_nw(nw, w, oldnum) != NULL) continue;

    /* compute LM probability of the word */
    if (ngram) {
      /* compute N-gram probability */
      if (ngram->dir == DIR_RL) {
        /* just compute N-gram prob of the word candidate */
        cnwordrev[cnnum] = winfo->wton[w];
        rawscore = ngram_prob(ngram, cnnum + 1, cnwordrev);
      } else {
        cnword[0] = winfo->wton[w];
        rawscore = ngram_forw2back(ngram, cnword, cnnum + 1);
      }
#ifdef CLASS_NGRAM
      rawscore += winfo->cprob[w];
#endif
    }
    if (r->lmvar == LM_NGRAM_USER) {
      /* call user-defined function */
      /* be careful that the word context is ordered in backward direction */
      rawscore = (*(r->lm->lmfunc.lmprob))(winfo, hypo->seq, hypo->seqnum, w, rawscore);
    }

    nw[num]->tre   = bt->rw[t][i];
    nw[num]->id    = w;
    nw[num]->lscore = rawscore * lm_weight2 + lm_penalty2;
    if (winfo->is_transparent[w]) {
      /*nw[num]->lscore -= (LOGPROB)last_trans * TRANS_RENZOKU_PENALTY;*/
      if (winfo->is_transparent[hypo->seq[hypo->seqnum-1]]) {
        nw[num]->lscore += lm_penalty_trans;
      }
    }

    /* j_printf("%d: %s added\n", num, winfo->wname[nw[num]->id]); */
    num++;
  }

  return num;
}

/**
 * <EN>
 * @brief  Determine next word candidates from the word trellis.
 *
 * This function builds a list of next word candidates by looking up
 * the word trellis around the specified frame, within a margin of
 * lookup_range frames.  If the same word exists in several nearby frames,
 * only the occurrence nearest to the specified frame is chosen.
 *
 * @param r [in] recognition process instance
 * @param nw [out] pointer to hold the extracted words as a list of next word candidates
 * @param hypo [in] partial sentence hypothesis from which the words will be expanded
 * @param tm [in] center time frame to look up the words
 * @param t_end [in] right frame boundary for the lookup
 *
 * @return the number of next word candidates stored in @a nw.
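 *
 * As an illustrative example: with @a tm = 100, lookup_range = 3 and
 * PREFER_CENTER_ON_TRELLIS_LOOKUP defined, the frames are scanned in the
 * order 100, 99, 101, 98, 102; since a word already in the list is skipped,
 * the occurrence nearest to the center frame is the one that is kept.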
 * </EN>
 */
static int
get_backtrellis_words(RecogProcess *r, NEXTWORD **nw, NODE *hypo, short tm, short t_end)
{
  int num = 0;
  int t, t_step;
  int oldnum = 0;

  BACKTRELLIS *bt;
  int lookup_range;

  if (tm < 0) return(0);

  bt = r->backtrellis;
  lookup_range = r->config->pass2.lookup_range;

#ifdef PREFER_CENTER_ON_TRELLIS_LOOKUP
  /* fix for 3.2 (01/10/18 by ri) */
  /* scan frames before and after the center; a frame nearer to the center
     has higher priority */
  for (t_step = 0; t_step < lookup_range; t_step++) {
    /* before or center */
    t = tm - t_step;
    if (t < 0 || t > bt->framelen - 1 || t >= t_end) continue;
    num = pick_backtrellis_words(r, nw, oldnum, hypo, t);
    if (num > oldnum) {
      qsort(nw, num, sizeof(NEXTWORD *),
            (int (*)(const void *, const void *))compare_nw);
      oldnum = num;
    }
    if (t_step == 0) continue;  /* center */
    /* after */
    t = tm + t_step;
    if (t < 0 || t > bt->framelen - 1 || t >= t_end) continue;
    num = pick_backtrellis_words(r, nw, oldnum, hypo, t);
    if (num > oldnum) {
      qsort(nw, num, sizeof(NEXTWORD *),
            (int (*)(const void *, const void *))compare_nw);
      oldnum = num;
    }
  }

#else

  /* before the center frame */
  for(t = tm; t >= tm - lookup_range; t--) {
    if (t < 0) break;
    num = pick_backtrellis_words(r, nw, oldnum, hypo, t);
    if (num > oldnum) {
      qsort(nw, num, sizeof(NEXTWORD *),
            (int (*)(const void *, const void *))compare_nw);
      oldnum = num;
    }
  }
  /* after the center frame */
  for(t = tm + 1; t < tm + lookup_range; t++) {
    if (t > bt->framelen - 1) break;
    if (t >= t_end) break;
    num = pick_backtrellis_words(r, nw, oldnum, hypo, t);
    if (num > oldnum) {
      qsort(nw, num, sizeof(NEXTWORD *),
            (int (*)(const void *, const void *))compare_nw);
      oldnum = num;
    }
  }
#endif

  return num;
}

/**
 * <EN>
 * @brief  Remove non-expansion words from the list.
 *
 * Remove words that should not be expanded from the next word list.
 *
 * @param nw [i/o] list of next word candidates (will be shrunk by removing some words)
 * @param hypo [in] partial sentence hypothesis from which the words will be expanded
 * @param num [in] current number of next words in @a nw
 * @param winfo [in] word dictionary
 *
 * @return the new number of words in @a nw
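 *
 * Specifically, as implemented below: if the hypothesis already ends with
 * the sentence-head silence word, no word is expanded at all; the
 * sentence-tail silence word is never expanded; and, when
 * FIX_35_INHIBIT_SAME_WORD_EXPANSION is defined, the same trellis word as
 * the last word of the hypothesis is not expanded again.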
 * </EN>
 */
static int
limit_nw(NEXTWORD **nw, NODE *hypo, int num, WORD_INFO *winfo)
{
  int src, dst;
  int newnum;

  /* no hypothesis will be generated after "<s>" */
  if (hypo->seq[hypo->seqnum-1] == winfo->head_silwid) {
    return(0);
  }

  dst = 0;
  for (src=0; src<num; src++) {
    if (nw[src]->id == winfo->tail_silwid) {
      /* do not expand </s> (it can only appear as the initial hypothesis) */
      continue;
    }
#ifdef FIX_35_INHIBIT_SAME_WORD_EXPANSION
    /* inhibit expanding exactly the same trellis word twice */
    if (nw[src]->tre == hypo->tre) continue;
#endif

    if (src != dst) memcpy(nw[dst], nw[src], sizeof(NEXTWORD));
    dst++;
  }
  newnum = dst;

  return newnum;
}


/**
 * <EN>
 * @brief  Get the initial word hypotheses at the beginning of the search.
 *
 * In N-gram based recognition, the initial hypothesis is fixed to the
 * sentence-tail silence word, since the 2nd pass proceeds backward from the
 * end of the input.  The exception is short-pause segmentation mode, where
 * the initial hypothesis is chosen from the words that survived on the last
 * input frame in the first pass.
 *
 * @param nw [out] pointer to hold the initial word candidates
 * @param peseqlen [in] input frame length
 * @param maxnw [in] maximum number of words that can be stored in @a nw
 * @param r [in] recognition process instance
 *
 * @return the number of words extracted and stored to @a nw.
 * </EN>
 *
 * @callgraph
 * @callergraph
 */
int
ngram_firstwords(NEXTWORD **nw, int peseqlen, int maxnw, RecogProcess *r)
{

  if (r->config->successive.enabled) {
    /* in sp segment mode */
    if (r->sp_break_2_begin_word != WORD_INVALID) {
      /* the initial hypothesis is the best word survived on the last frame of
         the segment */
      nw[0]->id = r->sp_break_2_begin_word;
    } else {
      /* we are at the last segment of the sentence: the initial hypothesis
         is the word-end silence word */
      nw[0]->id = r->lm->winfo->tail_silwid;
    }
  } else {
    /* initial hypothesis should be the word-end silence word */
    nw[0]->id = r->lm->winfo->tail_silwid;
  }

#ifdef FIX_PENALTY
  nw[0]->lscore = 0.0;
#else
  nw[0]->lscore = r->config->lmp.lm_penalty2;
#endif

  return 1;                     /* number of words = 1 */
}

/**
 * <EN>
 * @brief  Return the list of next word candidates.
 *
 * Given a partial sentence hypothesis "hypo", it returns the list of
 * next word candidates.  Actually, it extracts from the word trellis the
 * words whose word-end node has survived near the estimated beginning
 * frame of the last word, "hypo->estimated_next_t", and stores them in
 * "nw" together with their N-gram probabilities.
 *
 * @param hypo [in] source partial sentence hypothesis
 * @param nw [out] pointer to store the list of next word candidates (should be already allocated)
 * @param maxnw [in] maximum number of words that can be stored in @a nw
 * @param r [in] recognition process instance
 *
 * @return the number of extracted next word candidates in @a nw.
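 *
 * A minimal usage sketch (illustrative only: @a hypo, @a maxnw and @a r come
 * from the surrounding search; in Julius the caller is the main 2nd-pass
 * search wchmm_fbs(), and the @a nw array is allocated and managed there,
 * not as shown here):
 *
 * @code
 * NEXTWORD **nw;   // assumed to be pre-allocated with maxnw elements
 * int i, n;
 *
 * n = ngram_nextwords(hypo, nw, maxnw, r);
 * for (i = 0; i < n; i++) {
 *   // nw[i]->id     : word ID of the candidate
 *   // nw[i]->lscore : LM score, already weighted by lm_weight2 + lm_penalty2
 *   // nw[i]->tre    : corresponding trellis word found on the 1st pass
 * }
 * @endcode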
 * </EN>
 * @callgraph
 * @callergraph
 */
int
ngram_nextwords(NODE *hypo, NEXTWORD **nw, int maxnw, RecogProcess *r)
{
  int num, num2;

  if (hypo->seqnum == 0) {
    j_internal_error("ngram_nextwords: hypo contains no word\n");
  }

  /* get survived words on backtrellis at the estimated end frame */
  num = get_backtrellis_words(r, nw, hypo, hypo->estimated_next_t, hypo->bestt);

  /* exclude unallowed words */
  num2 = limit_nw(nw, hypo, num, r->lm->winfo);

  if (debug2_flag) jlog("DEBUG: ngram_decode: %d-%d=%d unfolded\n", num, num - num2, num2);

  return(num2);
}

/**
 * <EN>
 * @brief  Acceptance check.
 *
 * Return whether the given partial hypothesis is acceptable as a sentence
 * and can be treated as a final search candidate.  In N-gram mode, it checks
 * whether the last word is the beginning-of-sentence silence (silhead).
 *
 * @param hypo [in] partial sentence hypothesis to be examined
 * @param r [in] recognition process instance
 *
 * @return TRUE if acceptable as a sentence, or FALSE if not.
 * </EN>
 * @callgraph
 * @callergraph
 */
boolean
ngram_acceptable(NODE *hypo, RecogProcess *r)
{

  if (r->config->successive.enabled) {
    /* the last word should be equal to the first word of the best hypothesis
       on the 1st pass */
    if (hypo->seq[hypo->seqnum-1] == r->sp_break_2_end_word) {
      return TRUE;
    }
  } else {
    /* the last word should be head silence word */
    if (hypo->seq[hypo->seqnum-1] == r->lm->winfo->head_silwid) {
      return TRUE;
    }
  }
  return FALSE;
}

/* end of file */