#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <limits.h>

#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/alphabet.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/structured_domains.h"
#include "ViennaRNA/unstructured_domains.h"
#include "ViennaRNA/loops/external.h"

#ifdef __GNUC__
# define INLINE inline
#else
# define INLINE
#endif

#define SPEEDUP_HC  1

#include "external_hc.inc"
#include "external_sc_pf.inc"

struct vrna_mx_pf_aux_el_s {
  FLT_OR_DBL  *qq;
  FLT_OR_DBL  *qq1;

  int         qqu_size;
  FLT_OR_DBL  **qqu;
};

/*
 #################################
 # PRIVATE FUNCTION DECLARATIONS #
 #################################
 */

PRIVATE INLINE FLT_OR_DBL
reduce_ext_ext_fast(vrna_fold_compound_t        *fc,
                    int                         i,
                    int                         j,
                    struct vrna_mx_pf_aux_el_s  *aux_mx,
                    vrna_callback_hc_evaluate   *evaluate,
                    struct hc_ext_def_dat       *hc_dat_local,
                    struct sc_ext_exp_dat       *sc_wrapper);


PRIVATE INLINE FLT_OR_DBL
reduce_ext_stem_fast(vrna_fold_compound_t       *fc,
                     int                        i,
                     int                        j,
                     struct vrna_mx_pf_aux_el_s *aux_mx,
                     vrna_callback_hc_evaluate  *evaluate,
                     struct hc_ext_def_dat      *hc_dat_local,
                     struct sc_ext_exp_dat      *sc_wrapper);


PRIVATE INLINE FLT_OR_DBL
reduce_ext_up_fast(vrna_fold_compound_t       *fc,
                   int                        i,
                   int                        j,
                   struct vrna_mx_pf_aux_el_s *aux_mx,
                   vrna_callback_hc_evaluate  *evaluate,
                   struct hc_ext_def_dat      *hc_dat_local,
                   struct sc_ext_exp_dat      *sc_wrapper);


PRIVATE INLINE FLT_OR_DBL
split_ext_fast(vrna_fold_compound_t       *fc,
               int                        i,
               int                        j,
               struct vrna_mx_pf_aux_el_s *aux_mx,
               vrna_callback_hc_evaluate  *evaluate,
               struct hc_ext_def_dat      *hc_dat_local,
               struct sc_ext_exp_dat      *sc_wrapper);


PRIVATE FLT_OR_DBL
exp_E_ext_fast(vrna_fold_compound_t       *fc,
               int                        i,
               int                        j,
               struct vrna_mx_pf_aux_el_s *aux_mx);


/*
 #################################
 # BEGIN OF FUNCTION DEFINITIONS #
 #################################
 */
PUBLIC FLT_OR_DBL
vrna_exp_E_ext_stem(unsigned int      type,
                    int               n5d,
                    int               n3d,
                    vrna_exp_param_t  *p)
{
  double energy = 1.0;

  if (n5d >= 0 && n3d >= 0)
    energy = p->expmismatchExt[type][n5d][n3d];
  else if (n5d >= 0)
    energy = p->expdangle5[type][n5d];
  else if (n3d >= 0)
    energy = p->expdangle3[type][n3d];

  if (type > 2)
    energy *= p->expTermAU;

  return (FLT_OR_DBL)energy;
}
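
/*
 * Usage sketch (not part of this translation unit; `type`, `s5`, and `s3`
 * are hypothetical values): compute the Boltzmann weight of a stem that
 * ends in the exterior loop, with both mismatch nucleotides available.
 * Pass -1 for n5d/n3d to omit the corresponding dangling end, e.g. at a
 * sequence end or across a strand break.
 *
 *   vrna_exp_param_t  *params = vrna_exp_params(NULL);  // default model
 *   unsigned int      type    = 1;                      // e.g. a CG pair
 *   FLT_OR_DBL        w       = vrna_exp_E_ext_stem(type, s5, s3, params);
 *   ...
 *   free(params);
 */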


PUBLIC struct vrna_mx_pf_aux_el_s *
vrna_exp_E_ext_fast_init(vrna_fold_compound_t *fc)
{
  struct vrna_mx_pf_aux_el_s *aux_mx = NULL;

  if (fc) {
    unsigned int              u;
    int                       i, j, max_j, d, n, turn, ij, *iidx, with_ud;
    FLT_OR_DBL                *q, **q_local;
    vrna_callback_hc_evaluate *evaluate;
    struct hc_ext_def_dat     hc_dat_local;
    struct sc_ext_exp_dat     sc_wrapper;
    vrna_ud_t                 *domains_up;

    n           = (int)fc->length;
    iidx        = fc->iindx;
    turn        = fc->exp_params->model_details.min_loop_size;
    domains_up  = fc->domains_up;
    with_ud     = (domains_up && domains_up->exp_energy_cb);

    if (fc->hc->type == VRNA_HC_WINDOW)
      evaluate = prepare_hc_ext_def_window(fc, &hc_dat_local);
    else
      evaluate = prepare_hc_ext_def(fc, &hc_dat_local);

    init_sc_ext_exp(fc, &sc_wrapper);

    /* allocate memory for helper arrays */
    aux_mx =
      (struct vrna_mx_pf_aux_el_s *)vrna_alloc(sizeof(struct vrna_mx_pf_aux_el_s));
    aux_mx->qq        = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2));
    aux_mx->qq1       = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2));
    aux_mx->qqu_size  = 0;
    aux_mx->qqu       = NULL;

    /* pre-processing ligand binding production rule(s) and auxiliary memory */
    if (with_ud) {
      int ud_max_size = 0;
      for (u = 0; u < domains_up->uniq_motif_count; u++)
        if (ud_max_size < domains_up->uniq_motif_size[u])
          ud_max_size = domains_up->uniq_motif_size[u];

      aux_mx->qqu_size  = ud_max_size;
      aux_mx->qqu       = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * (ud_max_size + 1));

      for (u = 0; u <= ud_max_size; u++)
        aux_mx->qqu[u] = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (n + 2));
    }

    if (fc->hc->type == VRNA_HC_WINDOW) {
      q_local = fc->exp_matrices->q_local;
      max_j   = MIN2(turn + 1, fc->window_size);
      max_j   = MIN2(max_j, n);
      for (j = 1; j <= max_j; j++)
        for (i = 1; i <= j; i++)
          q_local[i][j] =
            reduce_ext_up_fast(fc, i, j, aux_mx, evaluate, &hc_dat_local, &sc_wrapper);
    } else {
      q = fc->exp_matrices->q;
      for (d = 0; d <= turn; d++)
        for (i = 1; i <= n - d; i++) {
          j   = i + d;
          ij  = iidx[i] - j;

          q[ij] = reduce_ext_up_fast(fc, i, j, aux_mx, evaluate, &hc_dat_local, &sc_wrapper);
        }

      if ((fc->aux_grammar) && (fc->aux_grammar->cb_aux_exp_f)) {
        for (d = 0; d <= turn; d++)
          for (i = 1; i <= n - d; i++) {
            j   = i + d;
            ij  = iidx[i] - j;

            q[ij] += fc->aux_grammar->cb_aux_exp_f(fc, i, j, fc->aux_grammar->data);
          }
      }
    }
  }

  return aux_mx;
}
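
/*
 * Typical fill-loop lifecycle of these helpers (informal sketch of how a
 * partition function fill routine drives them; `n`, `turn`, `q`, and
 * `my_iindx` are placeholder names, not part of this API):
 *
 *   struct vrna_mx_pf_aux_el_s *aux_mx = vrna_exp_E_ext_fast_init(fc);
 *
 *   for (j = turn + 2; j <= n; j++) {
 *     for (i = j - turn - 1; i >= 1; i--)
 *       q[my_iindx[i] - j] = vrna_exp_E_ext_fast(fc, i, j, aux_mx);
 *
 *     vrna_exp_E_ext_fast_rotate(aux_mx);  // current column becomes "j - 1"
 *   }
 *
 *   vrna_exp_E_ext_fast_free(aux_mx);
 */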


PUBLIC void
vrna_exp_E_ext_fast_rotate(struct vrna_mx_pf_aux_el_s *aux_mx)
{
  if (aux_mx) {
    int         u;
    FLT_OR_DBL  *tmp;

    tmp         = aux_mx->qq1;
    aux_mx->qq1 = aux_mx->qq;
    aux_mx->qq  = tmp;

    /* rotate auxiliary arrays for unstructured domains */
    if (aux_mx->qqu) {
      tmp = aux_mx->qqu[aux_mx->qqu_size];
      for (u = aux_mx->qqu_size; u > 0; u--)
        aux_mx->qqu[u] = aux_mx->qqu[u - 1];
      aux_mx->qqu[0] = tmp;
    }
  }
}


PUBLIC void
vrna_exp_E_ext_fast_free(struct vrna_mx_pf_aux_el_s *aux_mx)
{
  if (aux_mx) {
    int u;

    free(aux_mx->qq);
    free(aux_mx->qq1);

    if (aux_mx->qqu) {
      for (u = 0; u <= aux_mx->qqu_size; u++)
        free(aux_mx->qqu[u]);

      free(aux_mx->qqu);
    }

    free(aux_mx);
  }
}


PUBLIC FLT_OR_DBL
vrna_exp_E_ext_fast(vrna_fold_compound_t        *fc,
                    int                         i,
                    int                         j,
                    struct vrna_mx_pf_aux_el_s  *aux_mx)
{
  if (fc) {
    if (j < i) {
      int t = j;
      vrna_message_warning(
        "vrna_exp_E_ext_fast: i (%d) larger than j (%d)! Swapping coordinates...",
        i,
        j);
      j = i;
      i = t;
    } else if ((j < 1) || (i < 1)) {
      vrna_message_warning(
        "vrna_exp_E_ext_fast: Indices too small [i = %d, j = %d]! "
        "Refusing to compute anything...",
        i,
        j);
      return 0.;
    } else if (j > fc->length) {
      vrna_message_warning(
        "vrna_exp_E_ext_fast: Indices exceed sequence length (%d) [i = %d, j = %d]! "
        "Refusing to compute anything...",
        fc->length,
        i,
        j);
      return 0.;
    }

    return exp_E_ext_fast(fc, i, j, aux_mx);
  }

  return 0.;
}


PUBLIC void
vrna_exp_E_ext_fast_update(vrna_fold_compound_t       *fc,
                           int                        j,
                           struct vrna_mx_pf_aux_el_s *aux_mx)
{
  int                       k, turn;
  FLT_OR_DBL                **q;
  vrna_callback_hc_evaluate *evaluate;
  struct hc_ext_def_dat     hc_dat_local;
  struct sc_ext_exp_dat     sc_wrapper;

  /*
   *  init exterior loop contributions for small segments [i, j]
   *  that can only be unpaired. This needs to be done for each
   *  j just before any contributions for [i, j] are computed
   */
  if ((fc) && (fc->hc->type == VRNA_HC_WINDOW)) {
    turn      = fc->exp_params->model_details.min_loop_size;
    q         = fc->exp_matrices->q_local;
    evaluate  = prepare_hc_ext_def_window(fc, &hc_dat_local);
    init_sc_ext_exp(fc, &sc_wrapper);

    for (k = j; k >= MAX2(1, j - turn); k--)
      q[k][j] = reduce_ext_up_fast(fc, k, j, aux_mx, evaluate, &hc_dat_local, &sc_wrapper);
  }
}
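
/*
 * Sliding-window usage sketch (assumes local/window mode, i.e.
 * fc->hc->type == VRNA_HC_WINDOW; `n` and `winsize` and the exact loop
 * bounds are illustrative only): refresh the short, necessarily unpaired
 * segments ending at column j before any [i, j] contributions for that
 * column are computed.
 *
 *   for (j = 1; j <= n; j++) {
 *     vrna_exp_E_ext_fast_update(fc, j, aux_mx);
 *     for (i = j - 1; i >= MAX2(1, j - winsize + 1); i--)
 *       q_local[i][j] = vrna_exp_E_ext_fast(fc, i, j, aux_mx);
 *     vrna_exp_E_ext_fast_rotate(aux_mx);
 *   }
 */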


PRIVATE INLINE FLT_OR_DBL
reduce_ext_ext_fast(vrna_fold_compound_t        *fc,
                    int                         i,
                    int                         j,
                    struct vrna_mx_pf_aux_el_s  *aux_mx,
                    vrna_callback_hc_evaluate   *evaluate,
                    struct hc_ext_def_dat       *hc_dat_local,
                    struct sc_ext_exp_dat       *sc_wrapper)
{
  int           u;
  FLT_OR_DBL    q_temp, q_temp2, q, *qq1, **qqu, *scale;
  vrna_ud_t     *domains_up;
  sc_ext_exp_cb *sc_red_ext;

  domains_up  = fc->domains_up;
  qq1         = aux_mx->qq1;
  qqu         = aux_mx->qqu;
  scale       = fc->exp_matrices->scale;
  sc_red_ext  = sc_wrapper->red_ext;

  q = 0.;

  if (evaluate(i, j, i, j - 1, VRNA_DECOMP_EXT_EXT, hc_dat_local)) {
    q_temp = qq1[i] * scale[1];

    if (sc_red_ext)
      q_temp *= sc_red_ext(i, j, i, j - 1, sc_wrapper);

    if ((domains_up) && (domains_up->exp_energy_cb)) {
      int cnt;
      for (cnt = 0; cnt < domains_up->uniq_motif_count; cnt++) {
        u = domains_up->uniq_motif_size[cnt];
        if (j - u >= i) {
          if (evaluate(i, j, i, j - u, VRNA_DECOMP_EXT_EXT, hc_dat_local)) {
            q_temp2 = qqu[u][i] *
                      domains_up->exp_energy_cb(fc,
                                                j - u + 1,
                                                j,
                                                VRNA_UNSTRUCTURED_DOMAIN_EXT_LOOP | VRNA_UNSTRUCTURED_DOMAIN_MOTIF,
                                                domains_up->data) *
                      scale[u];

            if (sc_red_ext)
              q_temp2 *= sc_red_ext(i, j, i, j - u, sc_wrapper);

            q_temp += q_temp2;
          }
        }
      }
    }

    q = q_temp;
  }

  return q;
}
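
/*
 * Note on the auxiliary arrays used above: after each call to
 * vrna_exp_E_ext_fast_rotate(), qq1[i] holds the value qq[i] had for the
 * previous column, i.e. the contribution of segment [i, j - 1] whose last
 * component ends at j - 1. Multiplying by scale[1] therefore appends one
 * unpaired nucleotide at position j. Likewise, qqu[u][i] stores the column
 * that lies u positions back, so a bound ligand motif of size u ending at
 * j is appended via qqu[u][i] * scale[u].
 */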


PRIVATE INLINE FLT_OR_DBL
reduce_ext_stem_fast(vrna_fold_compound_t       *fc,
                     int                        i,
                     int                        j,
                     struct vrna_mx_pf_aux_el_s *aux_mx,
                     vrna_callback_hc_evaluate  *evaluate,
                     struct hc_ext_def_dat      *hc_dat_local,
                     struct sc_ext_exp_dat      *sc_wrapper)
{
  short             **S, **S5, **S3, *S1, *S2, s5, s3;
  unsigned int      type, *sn, n, s, n_seq, **a2s;
  int               *idx, circular;
  FLT_OR_DBL        qbt, q_temp, qb;
  vrna_exp_param_t  *pf_params;
  vrna_md_t         *md;
  sc_ext_exp_cb     *sc_red_stem;

  sc_red_stem = sc_wrapper->red_stem;
  n           = fc->length;
  sn          = fc->strand_number;
  pf_params   = fc->exp_params;
  md          = &(pf_params->model_details);
  circular    = md->circ;
  idx         = fc->iindx;
  qb          = (fc->hc->type == VRNA_HC_WINDOW) ?
                fc->exp_matrices->qb_local[i][j] :
                fc->exp_matrices->qb[idx[i] - j];
  qbt = 0.;

  /* exterior loop part with stem (i, j) */
  if (evaluate(i, j, i, j, VRNA_DECOMP_EXT_STEM, hc_dat_local)) {
    q_temp = qb;

    switch (fc->type) {
      case VRNA_FC_TYPE_SINGLE:
        S1      = fc->sequence_encoding;
        S2      = fc->sequence_encoding2;
        type    = vrna_get_ptype_md(S2[i], S2[j], md);
        s5      = (((i > 1) || circular) && (sn[i] == sn[i - 1])) ? S1[i - 1] : -1;
        s3      = (((j < n) || circular) && (sn[j + 1] == sn[j])) ? S1[j + 1] : -1;
        q_temp  *= vrna_exp_E_ext_stem(type, s5, s3, pf_params);
        break;

      case VRNA_FC_TYPE_COMPARATIVE:
        n_seq = fc->n_seq;
        S     = fc->S;
        S5    = fc->S5;
        S3    = fc->S3;
        a2s   = fc->a2s;
        for (s = 0; s < n_seq; s++) {
          type    = vrna_get_ptype_md(S[s][i], S[s][j], md);
          q_temp  *= vrna_exp_E_ext_stem(type,
                                         ((a2s[s][i] > 1) || circular) ? S5[s][i] : -1,
                                         ((a2s[s][j] < a2s[s][n]) || circular) ? S3[s][j] : -1,
                                         pf_params);
        }
        break;
    }

    if (sc_red_stem)
      q_temp *= sc_red_stem(i, j, i, j, sc_wrapper);

    qbt += q_temp;
  }

  return qbt;
}
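
/*
 * Note: in single-sequence mode the dangling ends s5/s3 are the encoded
 * neighbors of the closing pair; -1 disables a dangle at a sequence end
 * (unless the molecule is circular) or across a strand break. In the
 * comparative (alignment) case the stem weight is the product of
 * vrna_exp_E_ext_stem() over all aligned sequences, which corresponds to
 * summing their individual energy contributions.
 */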


PRIVATE INLINE FLT_OR_DBL
reduce_ext_up_fast(vrna_fold_compound_t       *fc,
                   int                        i,
                   int                        j,
                   struct vrna_mx_pf_aux_el_s *aux_mx,
                   vrna_callback_hc_evaluate  *evaluate,
                   struct hc_ext_def_dat      *hc_dat_local,
                   struct sc_ext_exp_dat      *sc_wrapper)
{
  int               u;
  FLT_OR_DBL        qbt, q_temp, *scale;
  vrna_ud_t         *domains_up;
  sc_ext_exp_red_up *sc_red_up;

  sc_red_up = sc_wrapper->red_up;

  scale       = fc->exp_matrices->scale;
  domains_up  = fc->domains_up;
  qbt         = 0.;

  if (evaluate(i, j, i, j, VRNA_DECOMP_EXT_UP, hc_dat_local)) {
    u       = j - i + 1;
    q_temp  = scale[u];

    if (sc_red_up)
      q_temp *= sc_red_up(i, j, sc_wrapper);

    qbt += q_temp;

    if ((domains_up) && (domains_up->exp_energy_cb)) {
      qbt += q_temp *
             domains_up->exp_energy_cb(fc,
                                       i, j,
                                       VRNA_UNSTRUCTURED_DOMAIN_EXT_LOOP,
                                       domains_up->data);
    }
  }

  return qbt;
}
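
/*
 * Note: an entirely unpaired segment [i, j] contributes no energy term in
 * the standard model, so its Boltzmann weight is just the rescaling factor
 * scale[j - i + 1], times any soft-constraint and ligand-binding weights.
 * E.g. without constraints, a 5 nt unpaired stretch contributes scale[5].
 */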


PRIVATE INLINE FLT_OR_DBL
split_ext_fast(vrna_fold_compound_t       *fc,
               int                        i,
               int                        j,
               struct vrna_mx_pf_aux_el_s *aux_mx,
               vrna_callback_hc_evaluate  *evaluate,
               struct hc_ext_def_dat      *hc_dat_local,
               struct sc_ext_exp_dat      *sc_wrapper)
{
  int               *idx, k, ij1, factor;
  FLT_OR_DBL        qbt, *q, *qq, *qqq;
  sc_ext_exp_split  *sc_split;

  sc_split = sc_wrapper->split;

  idx = fc->iindx;
  q   = (fc->hc->type == VRNA_HC_WINDOW) ?
        fc->exp_matrices->q_local[i] :
        fc->exp_matrices->q + idx[i];
  qq  = aux_mx->qq;
  qbt = 0.;

  /*
   *  use an auxiliary array qqq that already contains soft constraint
   *  contributions when we split the exterior loops
   */
  if (sc_split) {
    /* pre-process qq array */
    qqq = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (j - i + 1));
    qqq -= i;

    for (k = j; k > i; k--)
      qqq[k] = qq[k] * sc_split(i, j, k, sc_wrapper);
  } else {
    /* no soft constraints, use qq array as-is */
    qqq = qq;
  }

  /*
   *  the index for accessing the q array now is:
   *
   *  (a) q[-(j - 1)] for global, and
   *  (b) q[j - 1] for local structure prediction
   *
   *  Thus, we may use a single variable to address both cases:
   *  k = j:
   *    ij1 = (j - 1) * factor = j * factor - factor
   *  k = j - 1:
   *    ij1 = (j - 2) * factor = j * factor - 2 * factor = j * factor - factor - factor
   *  ...
   */
  factor  = (fc->hc->type == VRNA_HC_WINDOW) ? 1 : -1;
  ij1     = factor * (j - 1);

  /* do actual decomposition (skip hard constraint checks if we use default settings) */
#if SPEEDUP_HC
  /*
   *  Checking whether we actually are provided with hard constraints and
   *  otherwise not evaluating the default ones within the loop drastically
   *  increases speed. However, once we check for the split point between
   *  strands in hard constraints, we have to think of something else...
   */
  if ((evaluate == &hc_ext_cb_def) || (evaluate == &hc_ext_cb_def_window)) {
    for (k = j; k > i; k--) {
      qbt += q[ij1] *
             qqq[k];
      ij1 -= factor;
    }
  } else {
    for (k = j; k > i; k--) {
      if (evaluate(i, j, k - 1, k, VRNA_DECOMP_EXT_EXT_EXT, hc_dat_local))
        qbt += q[ij1] *
               qqq[k];

      ij1 -= factor;
    }
  }

#else
  for (k = j; k > i; k--) {
    if (evaluate(i, j, k - 1, k, VRNA_DECOMP_EXT_EXT_EXT, hc_dat_local))
      qbt += q[ij1] *
             qqq[k];

    ij1 -= factor;
  }
#endif

  if (qqq != qq) {
    qqq += i;
    free(qqq);
  }

  return qbt;
}
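
/*
 * Worked example of the ij1 arithmetic above: in the global case the row
 * pointer is q + idx[i], and entry (i, k - 1) lives at offset -(k - 1), so
 * with factor = -1 the loop starts at ij1 = -(j - 1) for k = j, and each
 * `ij1 -= factor` step advances to -(j - 2), -(j - 3), ... In window mode
 * the row is q_local[i] and factor = 1, so the very same loop body walks
 * the offsets j - 1, j - 2, ... instead.
 */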


PRIVATE FLT_OR_DBL
exp_E_ext_fast(vrna_fold_compound_t       *fc,
               int                        i,
               int                        j,
               struct vrna_mx_pf_aux_el_s *aux_mx)
{
  int                       *iidx, ij, with_ud, with_gquad;
  FLT_OR_DBL                qbt1, *qq, **qqu, *G, **G_local;
  vrna_md_t                 *md;
  vrna_exp_param_t          *pf_params;
  vrna_ud_t                 *domains_up;
  vrna_callback_hc_evaluate *evaluate;
  struct hc_ext_def_dat     hc_dat_local;
  struct sc_ext_exp_dat     sc_wrapper;

  qq          = aux_mx->qq;
  qqu         = aux_mx->qqu;
  pf_params   = fc->exp_params;
  md          = &(pf_params->model_details);
  domains_up  = fc->domains_up;
  with_gquad  = md->gquad;
  with_ud     = (domains_up && domains_up->exp_energy_cb);

  if (fc->hc->type == VRNA_HC_WINDOW)
    evaluate = prepare_hc_ext_def_window(fc, &hc_dat_local);
  else
    evaluate = prepare_hc_ext_def(fc, &hc_dat_local);

  init_sc_ext_exp(fc, &sc_wrapper);

  qbt1 = 0.;

  /* all exterior loop parts [i, j] with exactly one stem (i, u), i < u < j */
  qbt1 += reduce_ext_ext_fast(fc, i, j, aux_mx, evaluate, &hc_dat_local, &sc_wrapper);
  /* exterior loop part with stem (i, j) */
  qbt1 += reduce_ext_stem_fast(fc, i, j, aux_mx, evaluate, &hc_dat_local, &sc_wrapper);

  if (with_gquad) {
    if (fc->hc->type == VRNA_HC_WINDOW) {
      G_local = fc->exp_matrices->G_local;
      qbt1    += G_local[i][j];
    } else {
      G     = fc->exp_matrices->G;
      iidx  = fc->iindx;
      ij    = iidx[i] - j;
      qbt1  += G[ij];
    }
  }

  qq[i] = qbt1;

  if (with_ud)
    qqu[0][i] = qbt1;

  /* the entire stretch [i, j] is unpaired */
  qbt1 += reduce_ext_up_fast(fc, i, j, aux_mx, evaluate, &hc_dat_local, &sc_wrapper);

  qbt1 += split_ext_fast(fc, i, j, aux_mx, evaluate, &hc_dat_local, &sc_wrapper);

  /* apply auxiliary grammar rule for exterior loop case */
  if ((fc->aux_grammar) && (fc->aux_grammar->cb_aux_exp_f))
    qbt1 += fc->aux_grammar->cb_aux_exp_f(fc, i, j, fc->aux_grammar->data);

  free_sc_ext_exp(&sc_wrapper);

  return qbt1;
}
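
/*
 * Informal summary of the decomposition assembled above (soft constraints,
 * unstructured domains, and hard-constraint filters omitted for brevity):
 *
 *   qq[i]   = qq1[i] * scale[1]           last component, 3' unpaired tail
 *           + qb(i, j) * w_stem(i, j)     component closed by pair (i, j)
 *           + G(i, j)                     g-quadruplex, if enabled
 *
 *   q(i, j) = qq[i]
 *           + scale[j - i + 1]            segment completely unpaired
 *           + sum over k in (i, j] of q(i, k - 1) * qq[k]
 */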


/*
 *###########################################
 *# deprecated functions below              #
 *###########################################
 */

#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY

PUBLIC FLT_OR_DBL
exp_E_Stem(int              type,
           int              si1,
           int              sj1,
           int              extLoop,
           vrna_exp_param_t *P)
{
  double  energy  = 1.0;
  double  d5      = (si1 >= 0) ? P->expdangle5[type][si1] : 1.;
  double  d3      = (sj1 >= 0) ? P->expdangle3[type][sj1] : 1.;

  if (si1 >= 0 && sj1 >= 0)
    energy = (extLoop) ? P->expmismatchExt[type][si1][sj1] : P->expmismatchM[type][si1][sj1];
  else
    energy = d5 * d3;

  if (type > 2)
    energy *= P->expTermAU;

  if (!extLoop)
    energy *= P->expMLintern[type];

  return (FLT_OR_DBL)energy;
}


PUBLIC FLT_OR_DBL
exp_E_ExtLoop(int               type,
              int               si1,
              int               sj1,
              vrna_exp_param_t  *P)
{
  return vrna_exp_E_ext_stem(type, si1, sj1, P);
}


#endif