1 #ifdef HAVE_CONFIG_H
2 #include "config.h"
3 #endif
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <math.h>
8 #include <ctype.h>
9 #include <string.h>
10 #include "ViennaRNA/fold_vars.h"
11 #include "ViennaRNA/alphabet.h"
12 #include "ViennaRNA/utils/basic.h"
13 #include "ViennaRNA/constraints/hard.h"
14 #include "ViennaRNA/constraints/soft.h"
15 #include "ViennaRNA/loops/external.h"
16 #include "ViennaRNA/gquad.h"
17 #include "ViennaRNA/structured_domains.h"
18 #include "ViennaRNA/unstructured_domains.h"
19 #include "ViennaRNA/loops/internal.h"
20
21
22 #ifdef __GNUC__
23 # define INLINE inline
24 #else
25 # define INLINE
26 #endif
27
28 #include "internal_hc.inc"
29 #include "internal_sc_pf.inc"
30
31 /*
32 #################################
33 # PRIVATE FUNCTION DECLARATIONS #
34 #################################
35 */
36
/* Boltzmann weight of all interior loops closed by pair (i, j), i < j */
PRIVATE FLT_OR_DBL
exp_E_int_loop(vrna_fold_compound_t *fc,
               int                  i,
               int                  j);


/* Boltzmann weight of exterior interior loops enclosing pair (p, q)
 * (circular RNA case, evaluated "outside-in") */
PRIVATE FLT_OR_DBL
exp_E_ext_int_loop(vrna_fold_compound_t *fc,
                   int                  p,
                   int                  q);


/* Boltzmann weight of the single interior loop delimited by the
 * closing pair (i, j) and the enclosed pair (k, l) */
PRIVATE FLT_OR_DBL
exp_E_interior_loop(vrna_fold_compound_t *fc,
                    int                  i,
                    int                  j,
                    int                  k,
                    int                  l);
55
56
57 /*
58 #################################
59 # BEGIN OF FUNCTION DEFINITIONS #
60 #################################
61 */
62 PUBLIC FLT_OR_DBL
vrna_exp_E_int_loop(vrna_fold_compound_t * fc,int i,int j)63 vrna_exp_E_int_loop(vrna_fold_compound_t *fc,
64 int i,
65 int j)
66 {
67 FLT_OR_DBL q = 0.;
68
69 if ((fc) && (i > 0) && (j > 0)) {
70 if (j < i) {
71 /* Note: j < i indicates that we want to evaluate exterior int loop (for circular RNAs)! */
72 if (fc->hc->type == VRNA_HC_WINDOW) {
73 vrna_message_warning(
74 "vrna_exp_E_int_loop: invalid sequence positions for pair (i,j) = (%d,%d)!",
75 i,
76 j);
77 } else {
78 q = exp_E_ext_int_loop(fc, j, i);
79 }
80 } else {
81 q = exp_E_int_loop(fc, i, j);
82 }
83 }
84
85 return q;
86 }
87
88
89 PUBLIC FLT_OR_DBL
vrna_exp_E_interior_loop(vrna_fold_compound_t * fc,int i,int j,int k,int l)90 vrna_exp_E_interior_loop(vrna_fold_compound_t *fc,
91 int i,
92 int j,
93 int k,
94 int l)
95 {
96 if (fc)
97 return exp_E_interior_loop(fc, i, j, k, l);
98
99 return 0.;
100 }
101
102
/*
 *  Sum the Boltzmann weights of all loop decompositions where the base
 *  pair (i, j) closes an interior loop: stacked pairs, 5'/3' bulges,
 *  generic interior loops, and (optionally) interior loops containing a
 *  G-quadruplex. Hard constraints (evaluate/hc_mx/hc_up), soft constraints
 *  (sc_wrapper) and unstructured domains (domains_up) are honored.
 *  Works for single sequences and comparative (alignment) fold compounds,
 *  and for both global and sliding-window (local) matrices.
 */
PRIVATE FLT_OR_DBL
exp_E_int_loop(vrna_fold_compound_t *fc,
               int                  i,
               int                  j)
{
  unsigned char         sliding_window, hc_decompose_ij, hc_decompose_kl;
  char                  *ptype, **ptype_local;
  unsigned char         *hc_mx, **hc_mx_local;
  short                 *S1, **SS, **S5, **S3;
  unsigned int          *sn, *se, *ss, n_seq, s, **a2s, n;
  int                   *rtype, noclose, *my_iindx, *jindx, *hc_up, ij,
                        with_gquad, with_ud;
  FLT_OR_DBL            qbt1, q_temp, *qb, **qb_local, *G, *scale;
  vrna_exp_param_t      *pf_params;
  vrna_md_t             *md;
  vrna_ud_t             *domains_up;
  eval_hc               *evaluate;
  struct hc_int_def_dat hc_dat_local;
  struct sc_int_exp_dat sc_wrapper;

  /* dispatch between global matrices (qb, hc->mx) and sliding-window
   * matrices (qb_local, hc->matrix_local); comparative mode uses the
   * per-sequence encodings SS/S5/S3 and alignment-to-sequence map a2s */
  sliding_window  = (fc->hc->type == VRNA_HC_WINDOW) ? 1 : 0;
  n               = fc->length;
  n_seq           = (fc->type == VRNA_FC_TYPE_SINGLE) ? 1 : fc->n_seq;
  sn              = fc->strand_number;
  se              = fc->strand_end;
  ss              = fc->strand_start;
  ptype           = (fc->type == VRNA_FC_TYPE_SINGLE) ? (sliding_window ? NULL : fc->ptype) : NULL;
  ptype_local     =
    (fc->type == VRNA_FC_TYPE_SINGLE) ? (sliding_window ? fc->ptype_local : NULL) : NULL;
  S1          = (fc->type == VRNA_FC_TYPE_SINGLE) ? fc->sequence_encoding : NULL;
  SS          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S;
  S5          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S5;
  S3          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S3;
  a2s         = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->a2s;
  qb          = (sliding_window) ? NULL : fc->exp_matrices->qb;
  G           = (sliding_window) ? NULL : fc->exp_matrices->G;
  qb_local    = (sliding_window) ? fc->exp_matrices->qb_local : NULL;
  scale       = fc->exp_matrices->scale;
  my_iindx    = fc->iindx;
  jindx       = fc->jindx;
  hc_mx       = (sliding_window) ? NULL : fc->hc->mx;
  hc_mx_local = (sliding_window) ? fc->hc->matrix_local : NULL;
  hc_up       = fc->hc->up_int;
  pf_params   = fc->exp_params;
  md          = &(pf_params->model_details);
  with_gquad  = md->gquad;
  domains_up  = fc->domains_up;
  with_ud     = ((domains_up) && (domains_up->exp_energy_cb)) ? 1 : 0;
  rtype       = &(md->rtype[0]);
  qbt1        = 0.;
  evaluate    = prepare_hc_int_def(fc, &hc_dat_local);

  init_sc_int_exp(fc, &sc_wrapper);

  /* linear index for the global ptype array; unused in window mode */
  ij = (sliding_window) ? 0 : jindx[j] + i;

  hc_decompose_ij = (sliding_window) ? hc_mx_local[i][j - i] : hc_mx[n * i + j];

  /* CONSTRAINED INTERIOR LOOP start */
  if (hc_decompose_ij & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) {
    unsigned int  type, type2, *tt;
    int           k, l, kl, last_k, first_l, u1, u2, turn, noGUclosure;

    turn        = md->min_loop_size;
    noGUclosure = md->noGUclosure;
    tt          = NULL;
    type        = 0;

    /* pair type of the closing pair (i, j); window-mode ptype is addressed
     * as ptype[i][j - i], hence the j + i argument */
    if (fc->type == VRNA_FC_TYPE_SINGLE)
      type = sliding_window ?
             vrna_get_ptype_window(i, j + i, ptype_local) :
             vrna_get_ptype(ij, ptype);

    /* types 3/4 are the GU/UG pairs rejected by the noGUclosure option */
    noclose = ((noGUclosure) && (type == 3 || type == 4)) ? 1 : 0;

    if (fc->type == VRNA_FC_TYPE_COMPARATIVE) {
      /* pre-compute the closing pair type for each aligned sequence */
      tt = (unsigned int *)vrna_alloc(sizeof(unsigned int) * n_seq);
      for (s = 0; s < n_seq; s++)
        tt[s] = vrna_get_ptype_md(SS[s][i], SS[s][j], md);
    }

    /* handle stacks separately */
    k = i + 1;
    l = j - 1;
    if ((k < l) && (sn[i] == sn[k]) && (sn[l] == sn[j])) {
      kl              = (sliding_window) ? 0 : jindx[l] + k;
      hc_decompose_kl = (sliding_window) ? hc_mx_local[k][l - k] : hc_mx[n * k + l];

      if ((hc_decompose_kl & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) &&
          (evaluate(i, j, k, l, &hc_dat_local))) {
        q_temp = (sliding_window) ? qb_local[k][l] : qb[my_iindx[k] - l];

        switch (fc->type) {
          case VRNA_FC_TYPE_SINGLE:
            /* enclosed pair is read in reverse orientation (rtype) */
            type2 = sliding_window ?
                    rtype[vrna_get_ptype_window(k, l + k, ptype_local)] :
                    rtype[vrna_get_ptype(kl, ptype)];

            q_temp *= exp_E_IntLoop(0,
                                    0,
                                    type,
                                    type2,
                                    S1[i + 1],
                                    S1[j - 1],
                                    S1[k - 1],
                                    S1[l + 1],
                                    pf_params);

            break;

          case VRNA_FC_TYPE_COMPARATIVE:
            /* product over all sequences in the alignment */
            for (s = 0; s < n_seq; s++) {
              type2   = vrna_get_ptype_md(SS[s][l], SS[s][k], md);
              q_temp  *= exp_E_IntLoop(0,
                                       0,
                                       tt[s],
                                       type2,
                                       S3[s][i],
                                       S5[s][j],
                                       S5[s][k],
                                       S3[s][l],
                                       pf_params);
            }
            break;
        }

        /* soft constraint contribution for this loop */
        if (sc_wrapper.pair)
          q_temp *= sc_wrapper.pair(i, j, k, l, &sc_wrapper);

        /* scale[2]: the two nucleotides of the closing pair added here */
        qbt1 += q_temp *
                scale[2];
      }
    }

    if (!noclose) {
      /* only proceed if the enclosing pair is allowed */

      /* handle bulges in 5' side */
      l = j - 1;
      if ((l > i + 2) && (sn[j] == sn[l])) {
        /* upper bound for k limited by min hairpin size, MAXLOOP,
         * the allowed unpaired stretch after i, and the strand end */
        last_k = l - turn - 1;

        if (last_k > i + 1 + MAXLOOP)
          last_k = i + 1 + MAXLOOP;

        if (last_k > i + 1 + hc_up[i + 1])
          last_k = i + 1 + hc_up[i + 1];

        if (last_k > se[sn[i]])
          last_k = se[sn[i]];

        u1 = 1;

        k   = i + 2;
        kl  = (sliding_window) ? 0 : jindx[l] + k;
        /* shift base pointer so hc_mx[k] addresses entry (l, k);
         * NOTE(review): relies on hc->mx being stored symmetrically —
         * confirm against vrna_hc_t layout */
        hc_mx += n * l;

        for (; k <= last_k; k++, u1++, kl++) {
          hc_decompose_kl = (sliding_window) ? hc_mx_local[k][l - k] : hc_mx[k];

          if ((hc_decompose_kl & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) &&
              (evaluate(i, j, k, l, &hc_dat_local))) {
            q_temp = (sliding_window) ? qb_local[k][l] : qb[my_iindx[k] - l];

            switch (fc->type) {
              case VRNA_FC_TYPE_SINGLE:
                type2 = sliding_window ?
                        rtype[vrna_get_ptype_window(k, l + k, ptype_local)] :
                        rtype[vrna_get_ptype(kl, ptype)];

                /* reject GU/UG enclosed pairs when noGUclosure is set */
                if ((noGUclosure) && (type2 == 3 || type2 == 4))
                  continue;

                q_temp *= exp_E_IntLoop(u1,
                                        0,
                                        type,
                                        type2,
                                        S1[i + 1],
                                        S1[j - 1],
                                        S1[k - 1],
                                        S1[l + 1],
                                        pf_params);

                break;

              case VRNA_FC_TYPE_COMPARATIVE:
                for (s = 0; s < n_seq; s++) {
                  /* per-sequence bulge length excluding gap columns */
                  int u1_local = a2s[s][k - 1] - a2s[s][i];
                  type2   = vrna_get_ptype_md(SS[s][l], SS[s][k], md);
                  q_temp  *= exp_E_IntLoop(u1_local,
                                           0,
                                           tt[s],
                                           type2,
                                           S3[s][i],
                                           S5[s][j],
                                           S5[s][k],
                                           S3[s][l],
                                           pf_params);
                }
                break;
            }

            if (sc_wrapper.pair)
              q_temp *= sc_wrapper.pair(i, j, k, l, &sc_wrapper);

            /* u1 unpaired bases plus the closing pair */
            qbt1 += q_temp *
                    scale[u1 + 2];

            if (with_ud) {
              /* additional term: unstructured-domain motif covering
               * the bulged-out 5' segment */
              q_temp *= domains_up->exp_energy_cb(fc,
                                                  i + 1, k - 1,
                                                  VRNA_UNSTRUCTURED_DOMAIN_INT_LOOP,
                                                  domains_up->data);
              qbt1 += q_temp *
                      scale[u1 + 2];
            }
          }
        }

        /* undo the base-pointer shift */
        hc_mx -= n * l;
      }

      /* handle bulges in 3' side */
      k = i + 1;
      if ((k < j - 2) && (sn[i] == sn[k])) {
        first_l = k + turn + 1;
        if (first_l < j - 1 - MAXLOOP)
          first_l = j - 1 - MAXLOOP;

        if (first_l < ss[sn[j]])
          first_l = ss[sn[j]];

        u2 = 1;
        /* hc_mx[l] now addresses entry (k, l) */
        hc_mx += n * k;

        for (l = j - 2; l >= first_l; l--, u2++) {
          /* abort once the unpaired stretch l+1..j-1 violates hc_up */
          if (u2 > hc_up[l + 1])
            break;

          kl              = (sliding_window) ? 0 : jindx[l] + k;
          hc_decompose_kl = (sliding_window) ? hc_mx_local[k][l - k] : hc_mx[l];

          if ((hc_decompose_kl & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) &&
              (evaluate(i, j, k, l, &hc_dat_local))) {
            q_temp = (sliding_window) ? qb_local[k][l] : qb[my_iindx[k] - l];

            switch (fc->type) {
              case VRNA_FC_TYPE_SINGLE:
                type2 = sliding_window ?
                        rtype[vrna_get_ptype_window(k, l + k, ptype_local)] :
                        rtype[vrna_get_ptype(kl, ptype)];

                if ((noGUclosure) && (type2 == 3 || type2 == 4))
                  continue;

                q_temp *= exp_E_IntLoop(0,
                                        u2,
                                        type,
                                        type2,
                                        S1[i + 1],
                                        S1[j - 1],
                                        S1[k - 1],
                                        S1[l + 1],
                                        pf_params);

                break;

              case VRNA_FC_TYPE_COMPARATIVE:
                for (s = 0; s < n_seq; s++) {
                  int u2_local = a2s[s][j - 1] - a2s[s][l];
                  type2   = vrna_get_ptype_md(SS[s][l], SS[s][k], md);
                  q_temp  *= exp_E_IntLoop(0,
                                           u2_local,
                                           tt[s],
                                           type2,
                                           S3[s][i],
                                           S5[s][j],
                                           S5[s][k],
                                           S3[s][l],
                                           pf_params);
                }
                break;
            }

            if (sc_wrapper.pair)
              q_temp *= sc_wrapper.pair(i, j, k, l, &sc_wrapper);

            qbt1 += q_temp *
                    scale[u2 + 2];

            if (with_ud) {
              /* motif covering the bulged-out 3' segment */
              q_temp *= domains_up->exp_energy_cb(fc,
                                                  l + 1, j - 1,
                                                  VRNA_UNSTRUCTURED_DOMAIN_INT_LOOP,
                                                  domains_up->data);
              qbt1 += q_temp *
                      scale[u2 + 2];
            }
          }
        }

        hc_mx -= n * k;
      }

      /* last but not least, all other internal loops */
      last_k = j - turn - 3;

      if (last_k > i + MAXLOOP + 1)
        last_k = i + MAXLOOP + 1;

      if (last_k > i + 1 + hc_up[i + 1])
        last_k = i + 1 + hc_up[i + 1];

      if (last_k > se[sn[i]])
        last_k = se[sn[i]];

      u1 = 1;

      for (k = i + 2; k <= last_k; k++, u1++) {
        /* lower bound for l: min hairpin size, total loop size cap
         * (u1 already consumed on the 5' side), and strand start */
        first_l = k + turn + 1;

        if (first_l < j - 1 - MAXLOOP + u1)
          first_l = j - 1 - MAXLOOP + u1;

        if (first_l < ss[sn[j]])
          first_l = ss[sn[j]];

        u2 = 1;

        hc_mx += n * k;

        for (l = j - 2; l >= first_l; l--, u2++) {
          if (hc_up[l + 1] < u2)
            break;

          kl              = (sliding_window) ? 0 : jindx[l] + k;
          hc_decompose_kl = (sliding_window) ? hc_mx_local[k][l - k] : hc_mx[l];

          if ((hc_decompose_kl & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) &&
              (evaluate(i, j, k, l, &hc_dat_local))) {
            q_temp = (sliding_window) ? qb_local[k][l] : qb[my_iindx[k] - l];

            switch (fc->type) {
              case VRNA_FC_TYPE_SINGLE:
                type2 = sliding_window ?
                        rtype[vrna_get_ptype_window(k, l + k, ptype_local)] :
                        rtype[vrna_get_ptype(kl, ptype)];

                if ((noGUclosure) && (type2 == 3 || type2 == 4))
                  continue;

                q_temp *= exp_E_IntLoop(u1,
                                        u2,
                                        type,
                                        type2,
                                        S1[i + 1],
                                        S1[j - 1],
                                        S1[k - 1],
                                        S1[l + 1],
                                        pf_params);

                break;

              case VRNA_FC_TYPE_COMPARATIVE:
                for (s = 0; s < n_seq; s++) {
                  int u1_local  = a2s[s][k - 1] - a2s[s][i];
                  int u2_local  = a2s[s][j - 1] - a2s[s][l];
                  type2   = vrna_get_ptype_md(SS[s][l], SS[s][k], md);
                  q_temp  *= exp_E_IntLoop(u1_local,
                                           u2_local,
                                           tt[s],
                                           type2,
                                           S3[s][i],
                                           S5[s][j],
                                           S5[s][k],
                                           S3[s][l],
                                           pf_params);
                }

                break;
            }

            if (sc_wrapper.pair)
              q_temp *= sc_wrapper.pair(i, j, k, l, &sc_wrapper);

            qbt1 += q_temp *
                    scale[u1 + u2 + 2];

            if (with_ud) {
              FLT_OR_DBL q5, q3;

              /* unstructured-domain motifs: 5' segment only, 3' segment
               * only, or both segments simultaneously */
              q5 = domains_up->exp_energy_cb(fc,
                                             i + 1, k - 1,
                                             VRNA_UNSTRUCTURED_DOMAIN_INT_LOOP,
                                             domains_up->data);
              q3 = domains_up->exp_energy_cb(fc,
                                             l + 1, j - 1,
                                             VRNA_UNSTRUCTURED_DOMAIN_INT_LOOP,
                                             domains_up->data);

              qbt1 += q_temp *
                      q5 *
                      scale[u1 + u2 + 2];
              qbt1 += q_temp *
                      q3 *
                      scale[u1 + u2 + 2];
              qbt1 += q_temp *
                      q5 *
                      q3 *
                      scale[u1 + u2 + 2];
            }
          }
        }

        hc_mx -= n * k;
      }

      /* interior loops enclosing a G-quadruplex instead of a base pair */
      if ((with_gquad) && (!noclose)) {
        switch (fc->type) {
          case VRNA_FC_TYPE_SINGLE:
            if (sliding_window) {
              /* no G-Quadruplex support for sliding window partition function yet! */
            } else if (sn[j] == sn[i]) {
              qbt1 += exp_E_GQuad_IntLoop(i, j, type, S1, G, scale, my_iindx, pf_params);
            }

            break;

          case VRNA_FC_TYPE_COMPARATIVE:
            if (sliding_window) {
              /* no G-Quadruplex support for sliding window partition function yet! */
            } else {
              qbt1 += exp_E_GQuad_IntLoop_comparative(i, j,
                                                      tt,
                                                      fc->S_cons,
                                                      S5, S3, a2s,
                                                      G,
                                                      scale,
                                                      my_iindx,
                                                      (int)n_seq,
                                                      pf_params);
            }

            break;
        }
      }
    }

    free(tt);
  }

  free_sc_int_exp(&sc_wrapper);

  return qbt1;
}
558
559
/*
 *  Boltzmann weight of all "exterior" interior loops for circular RNAs:
 *  the pair (i, j) is enclosed from the outside by some pair (k, l) with
 *  j < k <= l <= n, and the unpaired regions wrap around the sequence
 *  ends (segments j+1..k-1, l+1..n and 1..i-1). Global matrices only —
 *  the caller guards against sliding-window mode.
 */
PRIVATE FLT_OR_DBL
exp_E_ext_int_loop(vrna_fold_compound_t *fc,
                   int                  i,
                   int                  j)
{
  unsigned char         *hc_mx, eval_loop;
  short                 *S, *S2, **SS, **S5, **S3;
  unsigned int          *tt, n_seq, s, **a2s, type, type2;
  int                   k, l, u1, u2, u3, qmin, with_ud,
                        n, *my_iindx, *hc_up, turn,
                        u1_local, u2_local, u3_local;
  FLT_OR_DBL            q, q_temp, *qb, *scale;
  vrna_exp_param_t      *pf_params;
  vrna_md_t             *md;
  vrna_ud_t             *domains_up;
  eval_hc               *evaluate;
  struct hc_int_def_dat hc_dat_local;
  struct sc_int_exp_dat sc_wrapper;

  n           = fc->length;
  n_seq       = (fc->type == VRNA_FC_TYPE_SINGLE) ? 1 : fc->n_seq;
  S           = (fc->type == VRNA_FC_TYPE_SINGLE) ? fc->sequence_encoding : NULL;
  S2          = (fc->type == VRNA_FC_TYPE_SINGLE) ? fc->sequence_encoding2 : NULL;
  SS          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S;
  S5          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S5;
  S3          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S3;
  a2s         = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->a2s;
  my_iindx    = fc->iindx;
  qb          = fc->exp_matrices->qb;
  scale       = fc->exp_matrices->scale;
  hc_mx       = fc->hc->mx;
  hc_up       = fc->hc->up_int;
  pf_params   = fc->exp_params;
  md          = &(pf_params->model_details);
  turn        = md->min_loop_size;
  type        = 0;
  tt          = NULL;
  domains_up  = fc->domains_up;
  with_ud     = ((domains_up) && (domains_up->exp_energy_cb)) ? 1 : 0;

  q = 0.;

  evaluate = prepare_hc_int_def(fc, &hc_dat_local);

  init_sc_int_exp(fc, &sc_wrapper);

  /* CONSTRAINED INTERIOR LOOP start */
  if (hc_mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) {
    /* prepare necessary variables */
    /* seen from outside, the pair (i, j) is read in reverse (j, i) */
    if (fc->type == VRNA_FC_TYPE_SINGLE) {
      type = vrna_get_ptype_md(S2[j], S2[i], md);
    } else {
      tt = (unsigned int *)vrna_alloc(sizeof(unsigned int) * n_seq);

      for (s = 0; s < n_seq; s++)
        tt[s] = vrna_get_ptype_md(SS[s][j], SS[s][i], md);
    }

    for (k = j + 1; k < n; k++) {
      /* u2: unpaired bases between j and k */
      u2 = k - j - 1;
      /* total unpaired size (u2 plus the fixed 5' tail 1..i-1) capped */
      if (u2 + i - 1 > MAXLOOP)
        break;

      if (hc_up[j + 1] < u2)
        break;

      /* smallest l such that u1 + u2 + u3 <= MAXLOOP */
      qmin = u2 + i - 1 + n - MAXLOOP;
      if (qmin < k + turn + 1)
        qmin = k + turn + 1;

      for (l = n; l >= qmin; l--) {
        /* u1: 5' tail 1..i-1; u3: 3' tail l+1..n (wraps around) */
        u1  = i - 1;
        u3  = n - l;
        if (hc_up[l + 1] < (u1 + u3))
          break;

        if (u1 + u2 + u3 > MAXLOOP)
          continue;

        eval_loop = hc_mx[n * k + l] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP;

        if (eval_loop && evaluate(i, j, k, l, &hc_dat_local)) {
          q_temp = qb[my_iindx[k] - l];

          switch (fc->type) {
            case VRNA_FC_TYPE_SINGLE:
              type2 = vrna_get_ptype_md(S2[l], S2[k], md);

              /* regular interior loop */
              q_temp *=
                exp_E_IntLoop(u2,
                              u1 + u3,
                              type,
                              type2,
                              S[j + 1],
                              S[i - 1],
                              S[k - 1],
                              S[l + 1],
                              pf_params);
              break;

            case VRNA_FC_TYPE_COMPARATIVE:
              for (s = 0; s < n_seq; s++) {
                /* per-sequence segment lengths excluding gap columns */
                type2     = vrna_get_ptype_md(SS[s][l], SS[s][k], md);
                u1_local  = a2s[s][i - 1];
                u2_local  = a2s[s][k - 1] - a2s[s][j];
                u3_local  = a2s[s][n] - a2s[s][l];
                q_temp    *= exp_E_IntLoop(u2_local,
                                           u1_local + u3_local,
                                           tt[s],
                                           type2,
                                           S3[s][j],
                                           S5[s][i],
                                           S5[s][k],
                                           S3[s][l],
                                           pf_params);
              }
              break;
          }

          /* soft constraints specific to exterior interior loops */
          if (sc_wrapper.pair_ext)
            q_temp *= sc_wrapper.pair_ext(i, j, k, l, &sc_wrapper);

          q += q_temp *
               scale[u1 + u2 + u3];

          if (with_ud) {
            FLT_OR_DBL q5, q3;

            /* unstructured-domain motifs in the j..k segment (q5)
             * and/or the wrap-around l..i segment (q3) */
            q5  = q3 = 0.;
            u1  = i - 1;
            u2  = k - j - 1;
            u3  = n - l;

            if (u2 > 0) {
              q5 = domains_up->exp_energy_cb(fc,
                                             j + 1, k - 1,
                                             VRNA_UNSTRUCTURED_DOMAIN_INT_LOOP,
                                             domains_up->data);
            }

            if (u1 + u3 > 0) {
              /* NOTE(review): motif spans l+1..i-1 across the origin —
               * presumably the callback handles wrap-around; confirm */
              q3 = domains_up->exp_energy_cb(fc,
                                             l + 1, i - 1,
                                             VRNA_UNSTRUCTURED_DOMAIN_INT_LOOP,
                                             domains_up->data);
            }

            q += q_temp *
                 q5 *
                 scale[u1 + u2 + u3];
            q += q_temp *
                 q3 *
                 scale[u1 + u2 + u3];
            q += q_temp *
                 q5 *
                 q3 *
                 scale[u1 + u2 + u3];
          }
        }
      }
    }
  }

  free(tt);
  free_sc_int_exp(&sc_wrapper);

  return q;
}
729
730
/*
 *  Boltzmann weight of one specific interior loop: closing pair (i, j),
 *  enclosed pair (k, l), with i < k <= l < j. Checks strand continuity,
 *  hard constraints (pair contexts and unpaired stretches), applies soft
 *  constraints and unstructured-domain motif contributions. Supports both
 *  global and sliding-window matrices, single and comparative compounds.
 *  Returns 0. whenever the configuration is disallowed.
 */
PRIVATE FLT_OR_DBL
exp_E_interior_loop(vrna_fold_compound_t *fc,
                    int                  i,
                    int                  j,
                    int                  k,
                    int                  l)
{
  unsigned char         sliding_window, type, type2;
  char                  *ptype, **ptype_local;
  unsigned char         *hc_mx, **hc_mx_local, eval_loop, hc_decompose_ij, hc_decompose_kl;
  short                 *S1, **SS, **S5, **S3;
  unsigned int          n, *sn, n_seq, s, **a2s;
  int                   u1, u2, *rtype, *jindx, *hc_up;
  FLT_OR_DBL            qbt1, q_temp, *scale;
  vrna_exp_param_t      *pf_params;
  vrna_md_t             *md;
  vrna_ud_t             *domains_up;
  eval_hc               *evaluate;
  struct hc_int_def_dat hc_dat_local;
  struct sc_int_exp_dat sc_wrapper;

  sliding_window  = (fc->hc->type == VRNA_HC_WINDOW) ? 1 : 0;
  n               = fc->length;
  n_seq           = (fc->type == VRNA_FC_TYPE_SINGLE) ? 1 : fc->n_seq;
  ptype           = (fc->type == VRNA_FC_TYPE_SINGLE) ? (sliding_window ? NULL : fc->ptype) : NULL;
  ptype_local     =
    (fc->type == VRNA_FC_TYPE_SINGLE) ? (sliding_window ? fc->ptype_local : NULL) : NULL;
  S1          = (fc->type == VRNA_FC_TYPE_SINGLE) ? fc->sequence_encoding : NULL;
  SS          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S;
  S5          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S5;
  S3          = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S3;
  a2s         = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->a2s;
  jindx       = fc->jindx;
  hc_mx       = (sliding_window) ? NULL : fc->hc->mx;
  hc_mx_local = (sliding_window) ? fc->hc->matrix_local : NULL;
  hc_up       = fc->hc->up_int;
  pf_params   = fc->exp_params;
  sn          = fc->strand_number;
  md          = &(pf_params->model_details);
  scale       = fc->exp_matrices->scale;
  domains_up  = fc->domains_up;
  rtype       = &(md->rtype[0]);
  qbt1        = 0.;
  /* unpaired lengths of the 5' (i..k) and 3' (l..j) loop segments */
  u1          = k - i - 1;
  u2          = j - l - 1;

  /* both loop strands must stay on a single strand each */
  if ((sn[k] != sn[i]) || (sn[j] != sn[l]))
    return qbt1;

  /* hard-constraint limits on the unpaired stretches */
  if (hc_up[l + 1] < u2)
    return qbt1;

  if (hc_up[i + 1] < u1)
    return qbt1;

  evaluate = prepare_hc_int_def(fc, &hc_dat_local);

  init_sc_int_exp(fc, &sc_wrapper);

  /* (i, j) must be allowed as closing pair, (k, l) as enclosed pair */
  hc_decompose_ij = (sliding_window) ? hc_mx_local[i][j - i] : hc_mx[n * i + j];
  hc_decompose_kl = (sliding_window) ? hc_mx_local[k][l - k] : hc_mx[n * k + l];
  eval_loop       = ((hc_decompose_ij & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) &&
                     (hc_decompose_kl & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC)) ?
                    1 : 0;

  /* discard this configuration if (p,q) is not allowed to be enclosed pair of an interior loop */
  if (eval_loop && evaluate(i, j, k, l, &hc_dat_local)) {
    q_temp = 0;

    switch (fc->type) {
      case VRNA_FC_TYPE_SINGLE:
        type = (sliding_window) ?
               vrna_get_ptype_window(i, j, ptype_local) :
               vrna_get_ptype(jindx[j] + i, ptype);
        /* enclosed pair read in reverse orientation (rtype) */
        type2 = (sliding_window) ?
                rtype[vrna_get_ptype_window(k, l, ptype_local)] :
                rtype[vrna_get_ptype(jindx[l] + k, ptype)];

        q_temp = exp_E_IntLoop(u1,
                               u2,
                               type,
                               type2,
                               S1[i + 1],
                               S1[j - 1],
                               S1[k - 1],
                               S1[l + 1],
                               pf_params);

        break;

      case VRNA_FC_TYPE_COMPARATIVE:
        q_temp = 1.;

        /* product over the aligned sequences; segment lengths are
         * measured per sequence, excluding gap columns */
        for (s = 0; s < n_seq; s++) {
          int u1_local  = a2s[s][k - 1] - a2s[s][i];
          int u2_local  = a2s[s][j - 1] - a2s[s][l];
          type    = vrna_get_ptype_md(SS[s][i], SS[s][j], md);
          type2   = vrna_get_ptype_md(SS[s][l], SS[s][k], md);
          q_temp  *= exp_E_IntLoop(u1_local,
                                   u2_local,
                                   type,
                                   type2,
                                   S3[s][i],
                                   S5[s][j],
                                   S5[s][k],
                                   S3[s][l],
                                   pf_params);
        }

        break;
    }

    /* soft constraints */
    if (sc_wrapper.pair)
      q_temp *= sc_wrapper.pair(i, j, k, l, &sc_wrapper);

    /* rescale: u1 + u2 unpaired bases plus the closing pair */
    qbt1 += q_temp *
            scale[u1 + u2 + 2];

    /* unstructured domains */
    if (domains_up && domains_up->exp_energy_cb) {
      FLT_OR_DBL qq5, qq3;

      qq5 = qq3 = 0.;

      if (u1 > 0) {
        qq5 = domains_up->exp_energy_cb(fc,
                                        i + 1, k - 1,
                                        VRNA_UNSTRUCTURED_DOMAIN_INT_LOOP,
                                        domains_up->data);
      }

      if (u2 > 0) {
        qq3 = domains_up->exp_energy_cb(fc,
                                        l + 1, j - 1,
                                        VRNA_UNSTRUCTURED_DOMAIN_INT_LOOP,
                                        domains_up->data);
      }

      qbt1 += q_temp *
              qq5 *
              scale[u1 + u2 + 2];  /* only motifs in 5' part */
      qbt1 += q_temp *
              qq3 *
              scale[u1 + u2 + 2];  /* only motifs in 3' part */
      qbt1 += q_temp *
              qq5 *
              qq3 *
              scale[u1 + u2 + 2];  /* motifs in both parts */
    }
  }

  free_sc_int_exp(&sc_wrapper);

  return qbt1;
}
887