1 #ifdef HAVE_CONFIG_H
2 #include "config.h"
3 #endif
4
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <math.h>
8 #include <ctype.h>
9 #include <string.h>
10 #include <limits.h>
11
12 #include "ViennaRNA/fold_vars.h"
13 #include "ViennaRNA/params/default.h"
14 #include "ViennaRNA/utils/basic.h"
15 #include "ViennaRNA/alphabet.h"
16 #include "ViennaRNA/constraints/hard.h"
17 #include "ViennaRNA/constraints/soft.h"
18 #include "ViennaRNA/gquad.h"
19 #include "ViennaRNA/structured_domains.h"
20 #include "ViennaRNA/unstructured_domains.h"
21 #include "ViennaRNA/loops/external.h"
22 #include "ViennaRNA/utils/higher_order_functions.h"
23
24 #ifdef __GNUC__
25 # define INLINE inline
26 #else
27 # define INLINE
28 #endif
29
30 #ifdef VRNA_WITH_SVM
31 #include "ViennaRNA/zscore.h" /* for VRNA_ZSCORE_* macros */
32 #endif
33
34
35 #include "external_hc.inc"
36 #include "external_sc.inc"
37
38 #ifdef VRNA_WITH_SVM
39 #include "ViennaRNA/zscore_dat.inc"
40 #endif
41
42 /*
43 #################################
44 # PRIVATE FUNCTION DECLARATIONS #
45 #################################
46 */
/* extend f5 (5' fill) / f3 (3' fill, sliding window) by an unpaired 3'/5' nucleotide
 * or an unstructured domain motif */
PRIVATE INLINE int
reduce_f5_up(vrna_fold_compound_t       *fc,
             int                        j,
             vrna_callback_hc_evaluate  *evaluate,
             struct hc_ext_def_dat      *hc_dat_local,
             struct sc_f5_dat           *sc_wrapper);


PRIVATE INLINE int
reduce_f3_up(vrna_fold_compound_t       *fc,
             int                        i,
             vrna_callback_hc_evaluate  *evaluate,
             struct hc_ext_def_dat      *hc_dat_local,
             struct sc_f3_dat           *sc_wrapper);


/* collect energies of all stems ending at j (f5) / starting at i (f3),
 * for the various dangling-end models:
 * d0 = no dangles, d2 = both sides, d5/d3 = 5'/3' side only, d53 = both (odd models) */
PRIVATE INLINE int *
get_stem_contributions_d0(vrna_fold_compound_t      *fc,
                          int                       j,
                          vrna_callback_hc_evaluate *evaluate,
                          struct hc_ext_def_dat     *hc_dat_local,
                          struct sc_f5_dat          *sc_wrapper);


PRIVATE INLINE int *
f3_get_stem_contributions_d0(vrna_fold_compound_t       *fc,
                             int                        i,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f3_dat           *sc_wrapper);


PRIVATE INLINE int *
get_stem_contributions_d2(vrna_fold_compound_t      *fc,
                          int                       j,
                          vrna_callback_hc_evaluate *evaluate,
                          struct hc_ext_def_dat     *hc_dat_local,
                          struct sc_f5_dat          *sc_wrapper);


PRIVATE INLINE int *
f3_get_stem_contributions_d2(vrna_fold_compound_t       *fc,
                             int                        i,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f3_dat           *sc_wrapper);


PRIVATE INLINE int *
f5_get_stem_contributions_d5(vrna_fold_compound_t       *fc,
                             int                        j,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f5_dat           *sc_wrapper);


PRIVATE INLINE int *
f3_get_stem_contributions_d3(vrna_fold_compound_t       *fc,
                             int                        i,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f3_dat           *sc_wrapper);


PRIVATE INLINE int *
f5_get_stem_contributions_d3(vrna_fold_compound_t       *fc,
                             int                        j,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f5_dat           *sc_wrapper);


PRIVATE INLINE int *
f3_get_stem_contributions_d5(vrna_fold_compound_t       *fc,
                             int                        i,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f3_dat           *sc_wrapper);


PRIVATE INLINE int *
f5_get_stem_contributions_d53(vrna_fold_compound_t      *fc,
                              int                       j,
                              vrna_callback_hc_evaluate *evaluate,
                              struct hc_ext_def_dat     *hc_dat_local,
                              struct sc_f5_dat          *sc_wrapper);


PRIVATE INLINE int *
f3_get_stem_contributions_d53(vrna_fold_compound_t      *fc,
                              int                       i,
                              vrna_callback_hc_evaluate *evaluate,
                              struct hc_ext_def_dat     *hc_dat_local,
                              struct sc_f3_dat          *sc_wrapper);


/* combine a pre-computed stem contribution array with the f5/f3 DP matrix */
PRIVATE INLINE int
decompose_f5_ext_stem(vrna_fold_compound_t  *fc,
                      int                   j,
                      int                   *stems);


PRIVATE INLINE int
decompose_f3_ext_stem(vrna_fold_compound_t  *fc,
                      int                   i,
                      int                   max_j,
                      int                   *stems);


/* full decomposition steps (exterior part + stem) for each dangle model */
PRIVATE INLINE int
decompose_f5_ext_stem_d0(vrna_fold_compound_t       *fc,
                         int                        j,
                         vrna_callback_hc_evaluate  *evaluate,
                         struct hc_ext_def_dat      *hc_dat_local,
                         struct sc_f5_dat           *sc_wrapper);


PRIVATE INLINE int
decompose_f3_ext_stem_d0(vrna_fold_compound_t       *fc,
                         int                        i,
                         vrna_callback_hc_evaluate  *evaluate,
                         struct hc_ext_def_dat      *hc_dat_local,
                         struct sc_f3_dat           *sc_wrapper);


PRIVATE INLINE int
decompose_f5_ext_stem_d2(vrna_fold_compound_t       *fc,
                         int                        j,
                         vrna_callback_hc_evaluate  *evaluate,
                         struct hc_ext_def_dat      *hc_dat_local,
                         struct sc_f5_dat           *sc_wrapper);


PRIVATE INLINE int
decompose_f3_ext_stem_d2(vrna_fold_compound_t       *fc,
                         int                        i,
                         vrna_callback_hc_evaluate  *evaluate,
                         struct hc_ext_def_dat      *hc_dat_local,
                         struct sc_f3_dat           *sc_wrapper);


PRIVATE INLINE int
decompose_f5_ext_stem_d1(vrna_fold_compound_t       *fc,
                         int                        j,
                         vrna_callback_hc_evaluate  *evaluate,
                         struct hc_ext_def_dat      *hc_dat_local,
                         struct sc_f5_dat           *sc_wrapper);


PRIVATE INLINE int
decompose_f3_ext_stem_d1(vrna_fold_compound_t       *fc,
                         int                        i,
                         vrna_callback_hc_evaluate  *evaluate,
                         struct hc_ext_def_dat      *hc_dat_local,
                         struct sc_f3_dat           *sc_wrapper);


/* G-quadruplex contributions to f5 / f3 */
PRIVATE INLINE int
add_f5_gquad(vrna_fold_compound_t       *fc,
             int                        j,
             vrna_callback_hc_evaluate  *evaluate,
             struct hc_ext_def_dat      *hc_dat_local,
             struct sc_f5_dat           *sc_wrapper);


PRIVATE INLINE int
add_f3_gquad(vrna_fold_compound_t       *fc,
             int                        i,
             vrna_callback_hc_evaluate  *evaluate,
             struct hc_ext_def_dat      *hc_dat_local,
             struct sc_f3_dat           *sc_wrapper);
219
220 /*
221 #################################
222 # BEGIN OF FUNCTION DEFINITIONS #
223 #################################
224 */
PUBLIC int
vrna_E_ext_loop_5(vrna_fold_compound_t *fc)
{
  /*
   * Fill the exterior-loop DP matrix f5 from 5' to 3'.
   * f5[j] holds the minimum free energy of segment [1..j] as part of the
   * exterior loop.  Returns f5[n] (the full-sequence exterior loop energy),
   * or INF if fc is NULL.
   */
  if (fc) {
    int                       en, j, length, *f5, dangle_model, with_gquad, turn;
    vrna_param_t              *P;
    vrna_callback_hc_evaluate *evaluate;
    struct hc_ext_def_dat     hc_dat_local;
    struct sc_f5_dat          sc_wrapper;
    vrna_gr_aux_t             *grammar;

    length        = (int)fc->length;
    f5            = fc->matrices->f5;
    P             = fc->params;
    dangle_model  = P->model_details.dangles;
    with_gquad    = P->model_details.gquad;
    turn          = P->model_details.min_loop_size;
    grammar       = fc->aux_grammar;
    evaluate      = prepare_hc_ext_def(fc, &hc_dat_local);

    init_sc_f5(fc, &sc_wrapper);

    /* segments shorter than turn + 2 cannot contain a base pair:
     * only unpaired extensions (and grammar extensions) apply */
    f5[0] = 0;
    for (j = 1; j <= turn + 1; j++)
      f5[j] = reduce_f5_up(fc, j, evaluate, &hc_dat_local, &sc_wrapper);

    if ((grammar) && (grammar->cb_aux_f)) {
      for (j = 1; j <= turn + 1; j++) {
        en    = grammar->cb_aux_f(fc, 1, j, grammar->data);
        f5[j] = MIN2(f5[j], en);
      }
    }

    /*
     * duplicated code may be faster than conditions inside loop or even
     * using a function pointer ;)
     */
    switch (dangle_model) {
      case 2:   /* dangling ends on both sides of every stem */
        for (j = turn + 2; j <= length; j++) {
          /* extend previous solution(s) by adding an unpaired region */
          f5[j] = reduce_f5_up(fc, j, evaluate, &hc_dat_local, &sc_wrapper);

          /* decompose into exterior loop part followed by a stem */
          en    = decompose_f5_ext_stem_d2(fc, j, evaluate, &hc_dat_local, &sc_wrapper);
          f5[j] = MIN2(f5[j], en);

          if (with_gquad) {
            en    = add_f5_gquad(fc, j, evaluate, &hc_dat_local, &sc_wrapper);
            f5[j] = MIN2(f5[j], en);
          }

          if ((grammar) && (grammar->cb_aux_f)) {
            en    = grammar->cb_aux_f(fc, 1, j, grammar->data);
            f5[j] = MIN2(f5[j], en);
          }
        }
        break;

      case 0:   /* no dangling end contributions at all */
        for (j = turn + 2; j <= length; j++) {
          /* extend previous solution(s) by adding an unpaired region */
          f5[j] = reduce_f5_up(fc, j, evaluate, &hc_dat_local, &sc_wrapper);

          /* decompose into exterior loop part followed by a stem */
          en    = decompose_f5_ext_stem_d0(fc, j, evaluate, &hc_dat_local, &sc_wrapper);
          f5[j] = MIN2(f5[j], en);

          if (with_gquad) {
            en    = add_f5_gquad(fc, j, evaluate, &hc_dat_local, &sc_wrapper);
            f5[j] = MIN2(f5[j], en);
          }

          if ((grammar) && (grammar->cb_aux_f)) {
            en    = grammar->cb_aux_f(fc, 1, j, grammar->data);
            f5[j] = MIN2(f5[j], en);
          }
        }
        break;

      default:  /* odd dangle models: each stem may use each dangle variant */
        for (j = turn + 2; j <= length; j++) {
          /* extend previous solution(s) by adding an unpaired region */
          f5[j] = reduce_f5_up(fc, j, evaluate, &hc_dat_local, &sc_wrapper);

          en    = decompose_f5_ext_stem_d1(fc, j, evaluate, &hc_dat_local, &sc_wrapper);
          f5[j] = MIN2(f5[j], en);

          if (with_gquad) {
            en    = add_f5_gquad(fc, j, evaluate, &hc_dat_local, &sc_wrapper);
            f5[j] = MIN2(f5[j], en);
          }

          if ((grammar) && (grammar->cb_aux_f)) {
            en    = grammar->cb_aux_f(fc, 1, j, grammar->data);
            f5[j] = MIN2(f5[j], en);
          }
        }
        break;
    }

    free_sc_f5(&sc_wrapper);

    return f5[length];
  }

  return INF;
}
333
334
335 PUBLIC int
vrna_E_ext_loop_3(vrna_fold_compound_t * fc,int i)336 vrna_E_ext_loop_3(vrna_fold_compound_t *fc,
337 int i)
338 {
339 if (fc) {
340 int e, en, dangle_model, with_gquad;
341 vrna_param_t *P;
342 vrna_md_t *md;
343 vrna_callback_hc_evaluate *evaluate;
344 struct hc_ext_def_dat hc_dat_local;
345 struct sc_f3_dat sc_wrapper;
346
347 e = INF;
348
349 P = fc->params;
350 md = &(P->model_details);
351 dangle_model = md->dangles;
352 with_gquad = md->gquad;
353 evaluate = prepare_hc_ext_def_window(fc, &hc_dat_local);
354
355 init_sc_f3(fc, i, &sc_wrapper);
356
357 /* first case: i stays unpaired */
358 e = reduce_f3_up(fc, i, evaluate, &hc_dat_local, &sc_wrapper);
359
360 /* decompose into stem followed by exterior loop part */
361 switch (dangle_model) {
362 case 0:
363 en = decompose_f3_ext_stem_d0(fc, i, evaluate, &hc_dat_local, &sc_wrapper);
364 e = MIN2(e, en);
365 break;
366
367 case 2:
368 en = decompose_f3_ext_stem_d2(fc, i, evaluate, &hc_dat_local, &sc_wrapper);
369 e = MIN2(e, en);
370 break;
371
372 default:
373 en = decompose_f3_ext_stem_d1(fc, i, evaluate, &hc_dat_local, &sc_wrapper);
374 e = MIN2(e, en);
375 break;
376 }
377
378 if (with_gquad) {
379 en = add_f3_gquad(fc, i, evaluate, &hc_dat_local, &sc_wrapper);
380 e = MIN2(e, en);
381 }
382
383 free_sc_f3(&sc_wrapper);
384
385 return e;
386 }
387
388 return INF;
389 }
390
391
392 PUBLIC int
vrna_E_ext_stem(unsigned int type,int n5d,int n3d,vrna_param_t * p)393 vrna_E_ext_stem(unsigned int type,
394 int n5d,
395 int n3d,
396 vrna_param_t *p)
397 {
398 int energy = 0;
399
400 if (n5d >= 0 && n3d >= 0)
401 energy += p->mismatchExt[type][n5d][n3d];
402 else if (n5d >= 0)
403 energy += p->dangle5[type][n5d];
404 else if (n3d >= 0)
405 energy += p->dangle3[type][n3d];
406
407 if (type > 2)
408 energy += p->TerminalAU;
409
410 return energy;
411 }
412
413
PUBLIC int
vrna_E_ext_loop(vrna_fold_compound_t  *fc,
                int                   i,
                int                   j)
{
  /*
   * Energy of the base pair (i, j) when it closes a stem in the exterior
   * loop, honoring hard/soft constraints and the dangle model.
   * For odd dangle models, the alternatives with one unpaired position on
   * either side ((i, j-1) and (i+1, j)) are considered as well.
   * NOTE(review): fc is dereferenced unchecked; unlike vrna_E_ext_loop_5/_3
   * there is no NULL guard here — callers must pass a valid fold compound.
   */
  char                      *ptype;
  short                     *S;
  unsigned int              type;
  int                       ij, en, e, *idx;
  vrna_param_t              *P;
  vrna_md_t                 *md;
  vrna_sc_t                 *sc;
  vrna_callback_hc_evaluate *evaluate;
  struct hc_ext_def_dat     hc_dat_local;

  S         = fc->sequence_encoding;
  idx       = fc->jindx;
  ptype     = fc->ptype;
  P         = fc->params;
  md        = &(P->model_details);
  sc        = fc->sc;
  evaluate  = prepare_hc_ext_def(fc, &hc_dat_local);

  e     = INF;
  ij    = idx[j] + i;
  type  = vrna_get_ptype(ij, ptype);

  if (evaluate(i, j, i, j, VRNA_DECOMP_EXT_STEM, &hc_dat_local)) {
    switch (md->dangles) {
      case 2:
        /* both-side dangles; assumes S carries sentinel positions at
         * i - 1 and j + 1 (standard vrna encoding padding) — TODO confirm */
        e = vrna_E_ext_stem(type, S[i - 1], S[j + 1], P);
        break;

      case 0:
        /* fall through */

      default:
        e = vrna_E_ext_stem(type, -1, -1, P);
        break;
    }
    if (sc)
      if (sc->f)
        e += sc->f(i, j, i, j, VRNA_DECOMP_EXT_STEM, sc->data);
  }

  /* odd dangle models: additionally try leaving one terminal nucleotide
   * unpaired on either side of the stem */
  if (md->dangles % 2) {
    /* variant 1: stem (i, j-1), nucleotide j dangles on the 3' side */
    ij = idx[j - 1] + i;
    if (evaluate(i, j, i, j - 1, VRNA_DECOMP_EXT_STEM, &hc_dat_local)) {
      type = vrna_get_ptype(ij, ptype);

      en = vrna_E_ext_stem(type, -1, S[j], P);

      if (sc)
        if (sc->f)
          en += sc->f(i, j, i, j - 1, VRNA_DECOMP_EXT_STEM, sc->data);

      e = MIN2(e, en);
    }

    /* variant 2: stem (i+1, j), nucleotide i dangles on the 5' side */
    ij = idx[j] + i + 1;
    if (evaluate(i, j, i + 1, j, VRNA_DECOMP_EXT_STEM, &hc_dat_local)) {
      type = vrna_get_ptype(ij, ptype);

      en = vrna_E_ext_stem(type, S[i], -1, P);

      if (sc)
        if (sc->f)
          en += sc->f(i, j, i + 1, j, VRNA_DECOMP_EXT_STEM, sc->data);

      e = MIN2(e, en);
    }
  }

  return e;
}
489
490
491 /*
492 #####################################
493 # BEGIN OF STATIC HELPER FUNCTIONS #
494 #####################################
495 */
496 PRIVATE INLINE int
decompose_f5_ext_stem_d0(vrna_fold_compound_t * fc,int j,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f5_dat * sc_wrapper)497 decompose_f5_ext_stem_d0(vrna_fold_compound_t *fc,
498 int j,
499 vrna_callback_hc_evaluate *evaluate,
500 struct hc_ext_def_dat *hc_dat_local,
501 struct sc_f5_dat *sc_wrapper)
502 {
503 int e, *stems;
504
505 stems = get_stem_contributions_d0(fc, j, evaluate, hc_dat_local, sc_wrapper);
506
507 /* 1st case, actual decompostion */
508 e = decompose_f5_ext_stem(fc, j, stems);
509
510 /* 2nd case, reduce to single stem */
511 e = MIN2(e, stems[1]);
512
513 free(stems);
514
515 return e;
516 }
517
518
519 PRIVATE INLINE int
decompose_f3_ext_stem_d0(vrna_fold_compound_t * fc,int i,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f3_dat * sc_wrapper)520 decompose_f3_ext_stem_d0(vrna_fold_compound_t *fc,
521 int i,
522 vrna_callback_hc_evaluate *evaluate,
523 struct hc_ext_def_dat *hc_dat_local,
524 struct sc_f3_dat *sc_wrapper)
525 {
526 int e, *stems, maxdist, length;
527
528 length = (int)fc->length;
529 maxdist = fc->window_size;
530
531 stems = f3_get_stem_contributions_d0(fc, i, evaluate, hc_dat_local, sc_wrapper);
532
533 /* 1st case, actual decompostion */
534 e = decompose_f3_ext_stem(fc, i, MIN2(length - 1, i + maxdist), stems);
535
536 /* 2nd case, reduce to single stem */
537 if (length <= i + maxdist)
538 e = MIN2(e, stems[length]);
539
540 stems += i;
541 free(stems);
542
543 return e;
544 }
545
546
547 PRIVATE INLINE int
decompose_f5_ext_stem_d2(vrna_fold_compound_t * fc,int j,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f5_dat * sc_wrapper)548 decompose_f5_ext_stem_d2(vrna_fold_compound_t *fc,
549 int j,
550 vrna_callback_hc_evaluate *evaluate,
551 struct hc_ext_def_dat *hc_dat_local,
552 struct sc_f5_dat *sc_wrapper)
553 {
554 int e, *stems;
555
556 stems = get_stem_contributions_d2(fc, j, evaluate, hc_dat_local, sc_wrapper);
557
558 /* 1st case, actual decompostion */
559 e = decompose_f5_ext_stem(fc, j, stems);
560
561 /* 2nd case, reduce to single stem */
562 e = MIN2(e, stems[1]);
563
564 free(stems);
565
566 return e;
567 }
568
569
570 PRIVATE INLINE int
decompose_f3_ext_stem_d2(vrna_fold_compound_t * fc,int i,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f3_dat * sc_wrapper)571 decompose_f3_ext_stem_d2(vrna_fold_compound_t *fc,
572 int i,
573 vrna_callback_hc_evaluate *evaluate,
574 struct hc_ext_def_dat *hc_dat_local,
575 struct sc_f3_dat *sc_wrapper)
576 {
577 int e, *stems, maxdist, length;
578
579 length = (int)fc->length;
580 maxdist = fc->window_size;
581 stems = f3_get_stem_contributions_d2(fc, i, evaluate, hc_dat_local, sc_wrapper);
582
583 /* 1st case, actual decompostion */
584 e = decompose_f3_ext_stem(fc, i, MIN2(length - 1, i + maxdist), stems);
585
586 /* 2nd case, reduce to single stem */
587 if (length <= i + maxdist)
588 e = MIN2(e, stems[length]);
589
590 stems += i;
591 free(stems);
592
593 return e;
594 }
595
596
597 PRIVATE INLINE int
decompose_f5_ext_stem_d1(vrna_fold_compound_t * fc,int j,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f5_dat * sc_wrapper)598 decompose_f5_ext_stem_d1(vrna_fold_compound_t *fc,
599 int j,
600 vrna_callback_hc_evaluate *evaluate,
601 struct hc_ext_def_dat *hc_dat_local,
602 struct sc_f5_dat *sc_wrapper)
603 {
604 int e, ee, *stems;
605
606 e = INF;
607
608 /* A) without dangling end contributions */
609
610 /* 1st case, actual decompostion */
611 stems = get_stem_contributions_d0(fc, j, evaluate, hc_dat_local, sc_wrapper);
612
613 ee = decompose_f5_ext_stem(fc, j, stems);
614
615 /* 2nd case, reduce to single stem */
616 ee = MIN2(ee, stems[1]);
617
618 free(stems);
619
620 e = MIN2(e, ee);
621
622 /* B) with dangling end contribution on 5' side of stem */
623 stems = f5_get_stem_contributions_d5(fc, j, evaluate, hc_dat_local, sc_wrapper);
624
625 /* 1st case, actual decompostion */
626 ee = decompose_f5_ext_stem(fc, j, stems);
627
628 /* 2nd case, reduce to single stem */
629 ee = MIN2(ee, stems[1]);
630
631 free(stems);
632
633 e = MIN2(e, ee);
634
635 /* C) with dangling end contribution on 3' side of stem */
636 stems = f5_get_stem_contributions_d3(fc, j, evaluate, hc_dat_local, sc_wrapper);
637
638 /* 1st case, actual decompostion */
639 ee = decompose_f5_ext_stem(fc, j, stems);
640
641 /* 2nd case, reduce to single stem */
642 ee = MIN2(ee, stems[1]);
643
644 free(stems);
645
646 e = MIN2(e, ee);
647
648 /* D) with dangling end contribution on both sides of stem */
649 stems = f5_get_stem_contributions_d53(fc, j, evaluate, hc_dat_local, sc_wrapper);
650
651 /* 1st case, actual decompostion */
652 ee = decompose_f5_ext_stem(fc, j, stems);
653
654 /* 2nd case, reduce to single stem */
655 ee = MIN2(ee, stems[1]);
656
657 free(stems);
658
659 e = MIN2(e, ee);
660
661 return e;
662 }
663
664
665 PRIVATE INLINE int
decompose_f3_ext_stem_d1(vrna_fold_compound_t * fc,int i,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f3_dat * sc_wrapper)666 decompose_f3_ext_stem_d1(vrna_fold_compound_t *fc,
667 int i,
668 vrna_callback_hc_evaluate *evaluate,
669 struct hc_ext_def_dat *hc_dat_local,
670 struct sc_f3_dat *sc_wrapper)
671 {
672 int length, e, ee, *stems, maxdist;
673
674 length = (int)fc->length;
675 maxdist = fc->window_size;
676 e = INF;
677
678 /* A) without dangling end contributions */
679
680 /* 1st case, actual decompostion */
681 stems = f3_get_stem_contributions_d0(fc, i, evaluate, hc_dat_local, sc_wrapper);
682
683 ee = decompose_f3_ext_stem(fc, i, MIN2(length - 1, i + maxdist), stems);
684
685 /* 2nd case, reduce to single stem */
686 if (length <= i + maxdist)
687 ee = MIN2(ee, stems[length]);
688
689 stems += i;
690 free(stems);
691
692 e = MIN2(e, ee);
693
694 /* B) with dangling end contribution on 3' side of stem */
695 stems = f3_get_stem_contributions_d3(fc, i, evaluate, hc_dat_local, sc_wrapper);
696
697 /* 1st case, actual decompostion */
698 ee = decompose_f3_ext_stem(fc, i, MIN2(length - 1, i + maxdist + 1), stems);
699
700 /* 2nd case, reduce to single stem */
701 if (length <= i + maxdist)
702 ee = MIN2(ee, stems[length]);
703
704 stems += i;
705 free(stems);
706
707 e = MIN2(e, ee);
708
709 /* C) with dangling end contribution on 5' side of stem */
710 stems = f3_get_stem_contributions_d5(fc, i, evaluate, hc_dat_local, sc_wrapper);
711
712 /* 1st case, actual decompostion */
713 ee = decompose_f3_ext_stem(fc, i, MIN2(length - 1, i + maxdist + 1), stems);
714
715 /* 2nd case, reduce to single stem */
716 if (length <= i + maxdist)
717 ee = MIN2(ee, stems[length]);
718
719 stems += i;
720 free(stems);
721
722 e = MIN2(e, ee);
723
724 /* D) with dangling end contribution on both sides of stem */
725 stems = f3_get_stem_contributions_d53(fc, i, evaluate, hc_dat_local, sc_wrapper);
726
727 /* 1st case, actual decompostion */
728 ee = decompose_f3_ext_stem(fc, i, MIN2(length - 1, i + maxdist + 1), stems);
729
730 /* 2nd case, reduce to single stem */
731 if (length <= i + maxdist)
732 ee = MIN2(ee, stems[length]);
733
734 stems += i;
735 free(stems);
736
737 e = MIN2(e, ee);
738
739 return e;
740 }
741
742
743 /*
744 * extend f5 by adding an unpaired nucleotide or an unstructured domain
745 * to the 3' end
746 */
PRIVATE INLINE int
reduce_f5_up(vrna_fold_compound_t       *fc,
             int                        j,
             vrna_callback_hc_evaluate  *evaluate,
             struct hc_ext_def_dat      *hc_dat_local,
             struct sc_f5_dat           *sc_wrapper)
{
  /*
   * Best energy for f5[j] when position j (or a ligand/motif ending at j)
   * stays outside any stem: either extend f5[j-1] by one unpaired
   * nucleotide, or extend f5[j-u] by a bound unstructured domain motif
   * of size u ending at j.  Returns INF if no such extension is allowed.
   */
  int       u, k, e, en, *f5;
  vrna_ud_t *domains_up;
  sc_f5_cb  *sc_red_ext;

  f5          = fc->matrices->f5;
  domains_up  = fc->domains_up;
  e           = INF;

  sc_red_ext = sc_wrapper->red_ext;

  /* check for 3' extension with one unpaired nucleotide */
  if (f5[j - 1] != INF) {
    if (evaluate(1, j, 1, j - 1, VRNA_DECOMP_EXT_EXT, hc_dat_local)) {
      e = f5[j - 1];

      if (sc_red_ext)
        e += sc_red_ext(j, 1, j - 1, sc_wrapper);
    }
  }

  /* check for 3' extension with an unstructured domain (ligand) motif
   * occupying positions [j-u+1 .. j] */
  if ((domains_up) && (domains_up->energy_cb)) {
    for (k = 0; k < domains_up->uniq_motif_count; k++) {
      u = domains_up->uniq_motif_size[k];
      /* j - u >= 0 keeps the f5 access in range (f5[0] == 0 is valid) */
      if ((j - u >= 0) && (f5[j - u] != INF)) {
        if (evaluate(1, j, 1, j - u, VRNA_DECOMP_EXT_EXT, hc_dat_local)) {
          en = f5[j - u] +
               domains_up->energy_cb(fc,
                                     j - u + 1,
                                     j,
                                     VRNA_UNSTRUCTURED_DOMAIN_EXT_LOOP | VRNA_UNSTRUCTURED_DOMAIN_MOTIF,
                                     domains_up->data);

          if (sc_red_ext)
            en += sc_red_ext(j, 1, j - u, sc_wrapper);

          e = MIN2(e, en);
        }
      }
    }
  }

  return e;
}
797
798
PRIVATE INLINE int
reduce_f3_up(vrna_fold_compound_t       *fc,
             int                        i,
             vrna_callback_hc_evaluate  *evaluate,
             struct hc_ext_def_dat      *hc_dat_local,
             struct sc_f3_dat           *sc_wrapper)
{
  /*
   * 3'-fill counterpart of reduce_f5_up(): best energy for f3[i] when
   * position i stays unpaired, or when a bound unstructured domain motif
   * of size u occupies [i .. i+u-1].  Returns INF if no extension is allowed.
   */
  int       u, k, length, e, en, *f3;
  vrna_ud_t *domains_up;
  sc_f3_cb  *sc_red_ext;

  length      = (int)fc->length;
  f3          = fc->matrices->f3_local;
  domains_up  = fc->domains_up;
  e           = INF;

  sc_red_ext = sc_wrapper->red_ext;

  /* check for 5' extension with one unpaired nucleotide */
  if (f3[i + 1] != INF) {
    if (evaluate(i, length, i + 1, length, VRNA_DECOMP_EXT_EXT, hc_dat_local)) {
      e = f3[i + 1];

      if (sc_red_ext)
        e += sc_red_ext(i, i + 1, length, sc_wrapper);
    }
  }

  /* check for 5' extension with an unstructured domain (ligand) motif
   * occupying positions [i .. i+u-1] */
  if ((domains_up) && (domains_up->energy_cb)) {
    for (k = 0; k < domains_up->uniq_motif_count; k++) {
      u = domains_up->uniq_motif_size[k];
      if ((i + u - 1 <= length) && (f3[i + u] != INF)) {
        /* NOTE(review): the hard-constraint call uses i + u - 1 as the start
         * of the remainder while the soft-constraint call below uses i + u;
         * cf. reduce_f5_up(), which consistently uses j - u — confirm this
         * asymmetry is intended */
        if (evaluate(i, length, i + u - 1, length, VRNA_DECOMP_EXT_EXT, hc_dat_local)) {
          en = f3[i + u] +
               domains_up->energy_cb(fc,
                                     i,
                                     i + u - 1,
                                     VRNA_UNSTRUCTURED_DOMAIN_EXT_LOOP | VRNA_UNSTRUCTURED_DOMAIN_MOTIF,
                                     domains_up->data);

          if (sc_red_ext)
            en += sc_red_ext(i, i + u, length, sc_wrapper);

          e = MIN2(e, en);
        }
      }
    }
  }

  return e;
}
850
851
/*
 * Return a freshly allocated array 'stems' where stems[i] (2 <= i <= j-turn-1)
 * is the energy of a stem [i..j] (no dangling ends) suitable for combination
 * with f5[i-1]; stems[1] is the energy of a single stem spanning [1..j].
 * Disallowed or impossible entries are INF.  Caller frees the array.
 */
PRIVATE INLINE int *
get_stem_contributions_d0(vrna_fold_compound_t      *fc,
                          int                       j,
                          vrna_callback_hc_evaluate *evaluate,
                          struct hc_ext_def_dat     *hc_dat_local,
                          struct sc_f5_dat          *sc_wrapper)
{
  char          *ptype;
  short         **S;
  unsigned int  s, n_seq, type;
  int           i, ij, *indx, turn, *c, *stems;
  vrna_param_t  *P;
  vrna_md_t     *md;

  sc_f5_cb *sc_spl_stem;
  sc_f5_cb *sc_red_stem;

  stems = (int *)vrna_alloc(sizeof(int) * j);

  P     = fc->params;
  md    = &(P->model_details);
  indx  = fc->jindx;
  c     = fc->matrices->c;
  turn  = md->min_loop_size;
  /* ij indexes pair (i, j) in the triangular c matrix; decremented in
   * lock-step as i decreases */
  ij    = indx[j] + j - turn - 1;
  ptype = (fc->type == VRNA_FC_TYPE_SINGLE) ? fc->ptype : NULL;
  n_seq = (fc->type == VRNA_FC_TYPE_SINGLE) ? 1 : fc->n_seq;
  S     = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S;

  sc_spl_stem = sc_wrapper->decomp_stem;
  sc_red_stem = sc_wrapper->red_stem;

  switch (fc->type) {
    case VRNA_FC_TYPE_SINGLE:
      for (i = j - turn - 1; i > 1; i--, ij--) {
        stems[i] = INF;

        if ((c[ij] != INF) &&
            (evaluate(1, j, i - 1, i, VRNA_DECOMP_EXT_EXT_STEM, hc_dat_local))) {
          stems[i]  = c[ij];
          type      = vrna_get_ptype(ij, ptype);
          stems[i]  += vrna_E_ext_stem(type, -1, -1, P);
        }
      }
      break;

    case VRNA_FC_TYPE_COMPARATIVE:
      /* alignment mode: stem energy is summed over all sequences */
      for (i = j - turn - 1; i > 1; i--, ij--) {
        stems[i] = INF;
        if ((c[ij] != INF) &&
            (evaluate(1, j, i - 1, i, VRNA_DECOMP_EXT_EXT_STEM, hc_dat_local))) {
          stems[i] = c[ij];

          for (s = 0; s < n_seq; s++) {
            type      = vrna_get_ptype_md(S[s][i], S[s][j], md);
            stems[i]  += vrna_E_ext_stem(type, -1, -1, P);
          }
        }
      }
      break;
  }

  /* add soft-constraint contributions for the split positions */
  if (sc_spl_stem)
    for (i = j - turn - 1; i > 1; i--)
      if (stems[i] != INF)
        stems[i] += sc_spl_stem(j, i - 1, i, sc_wrapper);

  /* special case: one stem covering the whole segment [1..j] */
  stems[1]  = INF;
  ij        = indx[j] + 1;

  if ((c[ij] != INF) &&
      (evaluate(1, j, 1, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
    stems[1] = c[ij];

    switch (fc->type) {
      case VRNA_FC_TYPE_SINGLE:
        type      = vrna_get_ptype(ij, ptype);
        stems[1]  += vrna_E_ext_stem(type, -1, -1, P);
        break;

      case VRNA_FC_TYPE_COMPARATIVE:
        for (s = 0; s < n_seq; s++) {
          type      = vrna_get_ptype_md(S[s][1], S[s][j], md);
          stems[1]  += vrna_E_ext_stem(type, -1, -1, P);
        }
        break;
    }

    if (sc_red_stem)
      stems[1] += sc_red_stem(j, 1, j, sc_wrapper);
  }

  return stems;
}
946
947
948 PRIVATE INLINE int *
f3_get_stem_contributions_d0(vrna_fold_compound_t * fc,int i,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f3_dat * sc_wrapper)949 f3_get_stem_contributions_d0(vrna_fold_compound_t *fc,
950 int i,
951 vrna_callback_hc_evaluate *evaluate,
952 struct hc_ext_def_dat *hc_dat_local,
953 struct sc_f3_dat *sc_wrapper)
954 {
955 char **ptype;
956 short **S, *si;
957 unsigned int s, n_seq, type, length;
958 int energy, j, max_j, turn, *c, *stems, maxdist;
959 vrna_param_t *P;
960 vrna_md_t *md;
961
962 #ifdef VRNA_WITH_SVM
963 int zsc_pre_filter;
964 vrna_zsc_dat_t zsc_data;
965 #endif
966
967 sc_f3_cb *sc_spl_stem;
968 sc_f3_cb *sc_red_stem;
969
970 length = fc->length;
971 maxdist = fc->window_size;
972 P = fc->params;
973 md = &(P->model_details);
974 c = fc->matrices->c_local[i];
975 c -= i;
976 turn = md->min_loop_size;
977 si = NULL;
978 ptype = (fc->type == VRNA_FC_TYPE_SINGLE) ? fc->ptype_local : NULL;
979 n_seq = (fc->type == VRNA_FC_TYPE_SINGLE) ? 1 : fc->n_seq;
980 S = (fc->type == VRNA_FC_TYPE_SINGLE) ? NULL : fc->S;
981 #ifdef VRNA_WITH_SVM
982 zsc_data = fc->zscore_data;
983 zsc_pre_filter = ((zsc_data) &&
984 (zsc_data->filter_on) &&
985 (zsc_data->pre_filter)) ? 1 : 0;
986 #endif
987 stems = (int *)vrna_alloc(sizeof(int) * (maxdist + 6));
988 stems -= i;
989
990 sc_spl_stem = sc_wrapper->decomp_stem;
991 sc_red_stem = sc_wrapper->red_stem;
992
993 max_j = MIN2(length - 1, i + maxdist);
994
995 #ifdef VRNA_WITH_SVM
996 /* re-set pointer */
997 if (zsc_pre_filter) {
998 zsc_data->current_z += zsc_data->current_i;
999 /* initialize */
1000 memset(zsc_data->current_z, 0, sizeof(double) * (maxdist + 2));
1001 /* move pointer for convenience */
1002 zsc_data->current_i = i;
1003 zsc_data->current_z -= zsc_data->current_i;
1004 }
1005
1006 #endif
1007
1008 switch (fc->type) {
1009 case VRNA_FC_TYPE_SINGLE:
1010 for (j = i + turn + 1; j <= max_j; j++) {
1011 stems[j] = INF;
1012 if ((c[j] != INF) &&
1013 (evaluate(i, length, j, j + 1, VRNA_DECOMP_EXT_STEM_EXT, hc_dat_local))) {
1014 type = vrna_get_ptype_window(i, j, ptype);
1015 stems[j] = c[j] +
1016 vrna_E_ext_stem(type, -1, -1, P);
1017 }
1018 }
1019
1020 #ifdef VRNA_WITH_SVM
1021 /* if necessary, remove those stems where the z-score threshold is not satisfied */
1022 if (zsc_pre_filter) {
1023 for (j = i + turn + 1; j <= max_j; j++) {
1024 if (stems[j] != INF) {
1025 zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
1026 if (zsc_data->current_z[j] > zsc_data->min_z)
1027 stems[j] = INF;
1028 }
1029 }
1030 }
1031
1032 #endif
1033
1034 break;
1035
1036 case VRNA_FC_TYPE_COMPARATIVE:
1037 si = (short *)vrna_alloc(sizeof(short) * n_seq);
1038
1039 for (s = 0; s < n_seq; s++)
1040 si[s] = S[s][i];
1041
1042 for (j = i + turn + 1; j <= max_j; j++) {
1043 stems[j] = INF;
1044 if ((c[j] != INF) &&
1045 (evaluate(i, length, j, j + 1, VRNA_DECOMP_EXT_STEM_EXT, hc_dat_local))) {
1046 energy = c[j];
1047 for (s = 0; s < n_seq; s++) {
1048 type = vrna_get_ptype_md(si[s], S[s][j], md);
1049 energy += vrna_E_ext_stem(type, -1, -1, P);
1050 }
1051 stems[j] = energy;
1052 }
1053 }
1054 break;
1055 }
1056
1057
1058 if (sc_spl_stem)
1059 for (j = i + turn + 1; j <= max_j; j++)
1060 if (stems[j] != INF)
1061 stems[j] += sc_spl_stem(i, j, j + 1, sc_wrapper);
1062
1063 if (length <= i + maxdist) {
1064 j = length;
1065 stems[j] = INF;
1066
1067 if ((c[j] != INF) &&
1068 (evaluate(i, j, i, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
1069 energy = c[j];
1070
1071 switch (fc->type) {
1072 case VRNA_FC_TYPE_SINGLE:
1073 type = vrna_get_ptype_window(i, j, ptype);
1074 energy += vrna_E_ext_stem(type, -1, -1, P);
1075
1076 break;
1077
1078 case VRNA_FC_TYPE_COMPARATIVE:
1079 for (s = 0; s < n_seq; s++) {
1080 type = vrna_get_ptype_md(si[s], S[s][j], md);
1081 energy += vrna_E_ext_stem(type, -1, -1, P);
1082 }
1083
1084 break;
1085 }
1086
1087 #ifdef VRNA_WITH_SVM
1088 /* if necessary, remove those stems where the z-score threshold is not satisfied */
1089 if ((fc->type == VRNA_FC_TYPE_SINGLE) &&
1090 (zsc_pre_filter) &&
1091 (energy != INF)) {
1092 zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
1093 if (zsc_data->current_z[j] > zsc_data->min_z)
1094 energy = INF;
1095 }
1096
1097 #endif
1098
1099 if ((sc_red_stem) &&
1100 (energy != INF))
1101 energy += sc_red_stem(i, i, j, sc_wrapper);
1102
1103 stems[j] = energy;
1104 }
1105 } else {
1106 /*
1107 * make sure we do not take (i + maxdist + 1) into account when
1108 * decomposing for odd dangle models
1109 */
1110 stems[i + maxdist + 1] = INF;
1111 }
1112
1113 free(si);
1114
1115 return stems;
1116 }
1117
1118
/*
 * Like get_stem_contributions_d0(), but with dangling-end model 2:
 * every stem [i..j] receives mismatch/dangle contributions from the
 * unpaired neighbors i-1 and j+1 (when they exist).  Returns a freshly
 * allocated array; caller frees it.
 */
PRIVATE INLINE int *
get_stem_contributions_d2(vrna_fold_compound_t      *fc,
                          int                       j,
                          vrna_callback_hc_evaluate *evaluate,
                          struct hc_ext_def_dat     *hc_dat_local,
                          struct sc_f5_dat          *sc_wrapper)
{
  char          *ptype;
  short         *S, sj1, *si1, **SS, **S5, **S3, *s3j, *sj;
  unsigned int  s, n_seq, **a2s, type;
  int           n, i, ij, *indx, turn, *c, *stems, mm5;
  vrna_param_t  *P;
  vrna_md_t     *md;

  sc_f5_cb *sc_spl_stem;
  sc_f5_cb *sc_red_stem;

  stems = (int *)vrna_alloc(sizeof(int) * j);

  n     = (int)fc->length;
  P     = fc->params;
  md    = &(P->model_details);
  indx  = fc->jindx;
  c     = fc->matrices->c;
  turn  = md->min_loop_size;
  /* ij indexes pair (i, j); decremented in lock-step as i decreases */
  ij    = indx[j] + j - turn - 1;

  sc_spl_stem = sc_wrapper->decomp_stem;
  sc_red_stem = sc_wrapper->red_stem;

  switch (fc->type) {
    case VRNA_FC_TYPE_SINGLE:
      S     = fc->sequence_encoding;
      ptype = fc->ptype;
      /* si1 tracks the 5' mismatch S[i-1]; sj1 is the 3' mismatch S[j+1]
       * or -1 at the sequence end */
      si1   = S + j - turn - 2;
      sj1   = (j < n) ? S[j + 1] : -1;

      for (i = j - turn - 1; i > 1; i--, ij--, si1--) {
        stems[i] = INF;
        if ((c[ij] != INF) &&
            (evaluate(1, j, i - 1, i, VRNA_DECOMP_EXT_EXT_STEM, hc_dat_local))) {
          type      = vrna_get_ptype(ij, ptype);
          stems[i]  = c[ij] +
                      vrna_E_ext_stem(type, *si1, sj1, P);
        }
      }

      if (sc_spl_stem)
        for (i = j - turn - 1; i > 1; i--)
          if (stems[i] != INF)
            stems[i] += sc_spl_stem(j, i - 1, i, sc_wrapper);

      /* special case: one stem covering [1..j] — no 5' mismatch possible */
      stems[1]  = INF;
      ij        = indx[j] + 1;

      if ((c[ij] != INF) && (evaluate(1, j, 1, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
        type      = vrna_get_ptype(ij, ptype);
        stems[1]  = c[ij] +
                    vrna_E_ext_stem(type, -1, sj1, P);

        if (sc_red_stem)
          stems[1] += sc_red_stem(j, 1, j, sc_wrapper);
      }

      break;

    case VRNA_FC_TYPE_COMPARATIVE:
      n_seq = fc->n_seq;
      SS    = fc->S;
      S5    = fc->S5;
      S3    = fc->S3;
      a2s   = fc->a2s;

      /* pre-compute S3[s][j - 1] */
      s3j = (short *)vrna_alloc(sizeof(short) * n_seq);
      sj  = (short *)vrna_alloc(sizeof(short) * n_seq);
      for (s = 0; s < n_seq; s++) {
        /* 3' mismatch only exists if j is not the last column of sequence s */
        s3j[s]  = (a2s[s][j] < a2s[s][n]) ? S3[s][j] : -1;
        sj[s]   = SS[s][j];
      }

      for (i = j - turn - 1; i > 1; i--, ij--) {
        stems[i] = INF;
        if ((c[ij] != INF) &&
            (evaluate(1, j, i - 1, i, VRNA_DECOMP_EXT_EXT_STEM, hc_dat_local))) {
          stems[i] = c[ij];
          for (s = 0; s < n_seq; s++) {
            type      = vrna_get_ptype_md(SS[s][i], sj[s], md);
            /* 5' mismatch only exists if i is not the first column of sequence s */
            mm5       = (a2s[s][i] > 1) ? S5[s][i] : -1;
            stems[i]  += vrna_E_ext_stem(type, mm5, s3j[s], P);
          }
        }
      }

      if (sc_spl_stem)
        for (i = j - turn - 1; i > 1; i--)
          if (stems[i] != INF)
            stems[i] += sc_spl_stem(j, i - 1, i, sc_wrapper);

      /* special case: one stem covering [1..j] — no 5' mismatch possible */
      stems[1]  = INF;
      ij        = indx[j] + 1;

      if ((c[ij] != INF) && (evaluate(1, j, 1, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
        stems[1] = c[ij];

        for (s = 0; s < n_seq; s++) {
          type      = vrna_get_ptype_md(SS[s][1], sj[s], md);
          stems[1]  += vrna_E_ext_stem(type, -1, s3j[s], P);
        }

        if (sc_red_stem)
          stems[1] += sc_red_stem(j, 1, j, sc_wrapper);
      }

      free(s3j);
      free(sj);

      break;
  }

  return stems;
}
1241
1242
PRIVATE INLINE int *
f3_get_stem_contributions_d2(vrna_fold_compound_t       *fc,
                             int                        i,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f3_dat           *sc_wrapper)
{
  /*
   *  Sliding-window (f3) counterpart of get_stem_contributions_d2():
   *  collect energies for splits f3[i] -> C[i,j] + f3[j+1] under dangle
   *  model d2.  The returned array is offset so it can be indexed by the
   *  absolute position j (caller must undo the offset before free()'ing).
   *  With VRNA_WITH_SVM, stems may additionally be discarded by the
   *  z-score pre-filter.
   */
  char                  **ptype;
  short                 **S, **S5, **S3, *S1, si1, sj1, *s5i1, *si;
  unsigned int          s, n_seq, **a2s, type;
  int                   n, i_, energy, j, max_j, turn, *c, *stems, maxdist;
  vrna_param_t          *P;
  vrna_md_t             *md;

#ifdef VRNA_WITH_SVM
  int                   zsc_pre_filter;
  vrna_zsc_dat_t        zsc_data;
#endif

  sc_f3_cb              *sc_spl_stem;
  sc_f3_cb              *sc_red_stem;

  unsigned int          length;

  length  = fc->length;
  maxdist = fc->window_size;
  P       = fc->params;
  md      = &(P->model_details);
  /* c_local[i] is row i of the local (window) C matrix; shift the pointer
   * so that c[j] addresses C[i, j] by absolute column index */
  c       = fc->matrices->c_local[i];
  c       -= i;
  turn    = md->min_loop_size;
#ifdef VRNA_WITH_SVM
  zsc_data        = fc->zscore_data;
  zsc_pre_filter  = ((zsc_data) &&
                     (zsc_data->filter_on) &&
                     (zsc_data->pre_filter)) ? 1 : 0;
#endif

  /* allocate window-sized array and shift it for absolute-position indexing */
  stems = (int *)vrna_alloc(sizeof(int) * (maxdist + 6));
  stems -= i;

  sc_spl_stem = sc_wrapper->decomp_stem;
  sc_red_stem = sc_wrapper->red_stem;

#ifdef VRNA_WITH_SVM
  /* re-base the per-window z-score array onto the current start position i */
  if (zsc_pre_filter) {
    zsc_data->current_z += zsc_data->current_i; /* undo previous offset */
    /* initialize */
    memset(zsc_data->current_z, 0, sizeof(double) * (maxdist + 2));
    /* move pointer for convenience */
    zsc_data->current_i = i;
    zsc_data->current_z -= zsc_data->current_i;
  }

#endif

  switch (fc->type) {
    case VRNA_FC_TYPE_SINGLE:
      ptype = fc->ptype_local;
      S1    = fc->sequence_encoding;
      /* 5' mismatch of the stem at i; none if i is the first nucleotide */
      si1   = (i > 1) ? S1[i - 1] : -1;
      max_j = MIN2((int)length - 1, i + maxdist);

      for (j = i + turn + 1; j <= max_j; j++) {
        stems[j] = INF;
        if ((c[j] != INF) &&
            (evaluate(i, length, j, j + 1, VRNA_DECOMP_EXT_STEM_EXT, hc_dat_local))) {
          type      = vrna_get_ptype_window(i, j, ptype);
          sj1       = S1[j + 1];
          stems[j]  = c[j] +
                      vrna_E_ext_stem(type, si1, sj1, P);
        }
      }

#ifdef VRNA_WITH_SVM
      /* if necessary, remove those stems where the z-score threshold is not satisfied */
      if (zsc_pre_filter) {
        for (j = i + turn + 1; j <= max_j; j++) {
          if (stems[j] != INF) {
            zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
            if (zsc_data->current_z[j] > zsc_data->min_z)
              stems[j] = INF;
          }
        }
      }

#endif

      if (sc_spl_stem)
        for (j = i + turn + 1; j <= max_j; j++)
          if (stems[j] != INF)
            stems[j] += sc_spl_stem(i, j, j + 1, sc_wrapper);

      /* special case: stem [i, n] reaching the end of the sequence (no 3' split) */
      if (length <= (unsigned int)(i + maxdist)) {
        j         = (int)length;
        stems[j]  = INF;

        if ((c[j] != INF) && (evaluate(i, j, i, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
          type = vrna_get_ptype_window(i, j, ptype);

          stems[j] = c[j] +
                     vrna_E_ext_stem(type, si1, -1, P);

#ifdef VRNA_WITH_SVM
          if ((zsc_pre_filter) &&
              (stems[j] != INF)) {
            zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
            if (zsc_data->current_z[j] > zsc_data->min_z)
              stems[j] = INF;
          }

#endif

          if ((sc_red_stem) &&
              (stems[j] != INF))
            stems[j] += sc_red_stem(i, i, j, sc_wrapper);
        }
      }

      break;

    case VRNA_FC_TYPE_COMPARATIVE:
      n_seq = fc->n_seq;
      S     = fc->S;
      S5    = fc->S5;
      S3    = fc->S3;
      a2s   = fc->a2s;
      max_j = MIN2((int)length - 1, i + maxdist);

      /* pre-compute, per sequence, the 5' mismatch of i and the encoding of i */
      s5i1  = (short *)vrna_alloc(sizeof(short) * n_seq);
      si    = (short *)vrna_alloc(sizeof(short) * n_seq);
      for (s = 0; s < n_seq; s++) {
        s5i1[s] = (a2s[s][i] > 1) ? S5[s][i] : -1;
        si[s]   = S[s][i];
      }

      for (j = i + turn + 1; j <= max_j; j++) {
        stems[j] = INF;
        if ((c[j] != INF) &&
            (evaluate(i, length, j, j + 1, VRNA_DECOMP_EXT_STEM_EXT, hc_dat_local))) {
          energy = c[j];
          /* accumulate the exterior-stem contribution of every sequence */
          for (s = 0; s < n_seq; s++) {
            type    = vrna_get_ptype_md(si[s], S[s][j], md);
            sj1     = (a2s[s][j] < a2s[s][length]) ? S3[s][j] : -1;
            energy  += vrna_E_ext_stem(type, s5i1[s], sj1, P);
          }
          stems[j] = energy;
        }
      }

      if (sc_spl_stem)
        for (j = i + turn + 1; j <= max_j; j++)
          if (stems[j] != INF)
            stems[j] += sc_spl_stem(i, j, j + 1, sc_wrapper);

      /* special case: stem [i, n] reaching the end of the alignment */
      if (length <= (unsigned int)(i + maxdist)) {
        j         = (int)length;
        stems[j]  = INF;

        if ((c[j] != INF) && (evaluate(i, j, i, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
          energy = c[j];
          for (s = 0; s < n_seq; s++) {
            type    = vrna_get_ptype_md(si[s], S[s][j], md);
            energy  += vrna_E_ext_stem(type, s5i1[s], -1, P);
          }

          if (sc_red_stem)
            energy += sc_red_stem(i, i, j, sc_wrapper);

          stems[j] = energy;
        }
      }

      free(s5i1);
      free(si);

      break;
  }

  return stems;
}
1423
1424
1425 PRIVATE INLINE int *
f5_get_stem_contributions_d5(vrna_fold_compound_t * fc,int j,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f5_dat * sc_wrapper)1426 f5_get_stem_contributions_d5(vrna_fold_compound_t *fc,
1427 int j,
1428 vrna_callback_hc_evaluate *evaluate,
1429 struct hc_ext_def_dat *hc_dat_local,
1430 struct sc_f5_dat *sc_wrapper)
1431 {
1432 char *ptype;
1433 short *S, *si1, **SS, **S5, *sj;
1434 unsigned int s, n_seq, **a2s, type;
1435 int i, ij, *indx, turn, *c, *stems, mm5;
1436 vrna_param_t *P;
1437 vrna_md_t *md;
1438
1439 sc_f5_cb *sc_spl_stem;
1440 sc_f5_cb *sc_red_stem;
1441
1442 stems = (int *)vrna_alloc(sizeof(int) * j);
1443
1444 P = fc->params;
1445 md = &(P->model_details);
1446 indx = fc->jindx;
1447 c = fc->matrices->c;
1448 turn = md->min_loop_size;
1449 ij = indx[j] + j - turn;
1450
1451 sc_spl_stem = sc_wrapper->decomp_stem;
1452 sc_red_stem = sc_wrapper->red_stem;
1453
1454 switch (fc->type) {
1455 case VRNA_FC_TYPE_SINGLE:
1456 S = fc->sequence_encoding;
1457 ptype = fc->ptype;
1458 si1 = S + j - turn - 1;
1459
1460 for (i = j - turn - 1; i > 1; i--, ij--, si1--) {
1461 stems[i] = INF;
1462 if ((c[ij] != INF) &&
1463 (evaluate(1, j, i - 1, i + 1, VRNA_DECOMP_EXT_EXT_STEM, hc_dat_local))) {
1464 type = vrna_get_ptype(ij, ptype);
1465 stems[i] = c[ij] +
1466 vrna_E_ext_stem(type, *si1, -1, P);
1467 }
1468 }
1469
1470 if (sc_spl_stem)
1471 for (i = j - turn - 1; i > 1; i--)
1472 if (stems[i] != INF)
1473 stems[i] += sc_spl_stem(j, i - 1, i + 1, sc_wrapper);
1474
1475 stems[1] = INF;
1476 ij = indx[j] + 2;
1477
1478 if ((c[ij] != INF) && (evaluate(1, j, 2, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
1479 type = vrna_get_ptype(ij, ptype);
1480 stems[1] = c[ij] +
1481 vrna_E_ext_stem(type, S[1], -1, P);
1482
1483 if (sc_red_stem)
1484 stems[1] += sc_red_stem(j, 2, j, sc_wrapper);
1485 }
1486
1487 break;
1488
1489 case VRNA_FC_TYPE_COMPARATIVE:
1490 n_seq = fc->n_seq;
1491 SS = fc->S;
1492 S5 = fc->S5;
1493 a2s = fc->a2s;
1494
1495 sj = (short *)vrna_alloc(sizeof(short) * n_seq);
1496 for (s = 0; s < n_seq; s++)
1497 sj[s] = SS[s][j];
1498
1499 for (i = j - turn - 1; i > 1; i--, ij--) {
1500 stems[i] = INF;
1501 if ((c[ij] != INF) &&
1502 (evaluate(1, j, i - 1, i + 1, VRNA_DECOMP_EXT_EXT_STEM, hc_dat_local))) {
1503 stems[i] = c[ij];
1504 for (s = 0; s < n_seq; s++) {
1505 type = vrna_get_ptype_md(SS[s][i + 1], sj[s], md);
1506 mm5 = (a2s[s][i + 1] > 1) ? S5[s][i + 1] : -1;
1507 stems[i] = vrna_E_ext_stem(type, mm5, -1, P);
1508 }
1509 }
1510 }
1511
1512 if (sc_spl_stem)
1513 for (i = j - turn - 1; i > 1; i--)
1514 if (stems[i] != INF)
1515 stems[i] += sc_spl_stem(j, i - 1, i + 1, sc_wrapper);
1516
1517 stems[1] = INF;
1518 ij = indx[j] + 2;
1519
1520 if ((c[ij] != INF) && (evaluate(1, j, 2, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
1521 stems[1] = c[ij];
1522 for (s = 0; s < n_seq; s++) {
1523 type = vrna_get_ptype_md(SS[s][2], sj[s], md);
1524 mm5 = (a2s[s][2] > 1) ? S5[s][2] : -1;
1525 stems[i] = vrna_E_ext_stem(type, mm5, -1, P);
1526 }
1527
1528 if (sc_red_stem)
1529 stems[1] += sc_red_stem(j, 2, j, sc_wrapper);
1530 }
1531
1532 free(sj);
1533
1534 break;
1535 }
1536
1537 return stems;
1538 }
1539
1540
1541 PRIVATE INLINE int *
f3_get_stem_contributions_d3(vrna_fold_compound_t * fc,int i,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f3_dat * sc_wrapper)1542 f3_get_stem_contributions_d3(vrna_fold_compound_t *fc,
1543 int i,
1544 vrna_callback_hc_evaluate *evaluate,
1545 struct hc_ext_def_dat *hc_dat_local,
1546 struct sc_f3_dat *sc_wrapper)
1547 {
1548 char **ptype;
1549 short *S1, **S, **S3, sj1, *si;
1550 unsigned int s, n_seq, **a2s, type;
1551 int energy, j, max_j, turn, *c, *stems, length, maxdist;
1552 vrna_param_t *P;
1553 vrna_md_t *md;
1554
1555 #ifdef VRNA_WITH_SVM
1556 int zsc_pre_filter;
1557 vrna_zsc_dat_t zsc_data;
1558 #endif
1559
1560 sc_f3_cb *sc_spl_stem;
1561 sc_f3_cb *sc_red_stem;
1562
1563 length = (int)fc->length;
1564 maxdist = fc->window_size;
1565 P = fc->params;
1566 md = &(P->model_details);
1567 c = fc->matrices->c_local[i];
1568 c -= i;
1569 turn = md->min_loop_size;
1570 #ifdef VRNA_WITH_SVM
1571 zsc_data = fc->zscore_data;
1572 zsc_pre_filter = ((zsc_data) &&
1573 (zsc_data->filter_on) &&
1574 (zsc_data->pre_filter)) ? 1 : 0;
1575 #endif
1576
1577 stems = (int *)vrna_alloc(sizeof(int) * (maxdist + 6));
1578 stems -= i;
1579
1580 sc_spl_stem = sc_wrapper->decomp_stem;
1581 sc_red_stem = sc_wrapper->red_stem;
1582
1583 #ifdef VRNA_WITH_SVM
1584 /* re-set pointer */
1585 if (zsc_pre_filter) {
1586 zsc_data->current_z += zsc_data->current_i;
1587 /* initialize */
1588 memset(zsc_data->current_z, 0, sizeof(double) * (maxdist + 2));
1589 /* move pointer for convenience */
1590 zsc_data->current_i = i;
1591 zsc_data->current_z -= zsc_data->current_i;
1592 }
1593
1594 #endif
1595
1596 switch (fc->type) {
1597 case VRNA_FC_TYPE_SINGLE:
1598 S1 = fc->sequence_encoding;
1599 ptype = fc->ptype_local;
1600 max_j = MIN2(length - 1, i + maxdist + 1);
1601
1602 for (j = i + turn + 1; j <= max_j; j++) {
1603 stems[j] = INF;
1604 if ((c[j - 1] != INF) &&
1605 (evaluate(i, length, j - 1, j + 1, VRNA_DECOMP_EXT_STEM_EXT, hc_dat_local))) {
1606 type = vrna_get_ptype_window(i, j - 1, ptype);
1607 stems[j] = c[j - 1] +
1608 vrna_E_ext_stem(type, -1, S1[j], P);
1609 }
1610 }
1611
1612 #ifdef VRNA_WITH_SVM
1613 /* if necessary, remove those stems where the z-score threshold is not satisfied */
1614 if (zsc_pre_filter) {
1615 for (j = i + turn + 1; j <= max_j; j++) {
1616 if (stems[j] != INF) {
1617 zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
1618 if (zsc_data->current_z[j] > zsc_data->min_z)
1619 stems[j] = INF;
1620 }
1621 }
1622 }
1623
1624 #endif
1625
1626 if (sc_spl_stem)
1627 for (j = i + turn + 1; j <= max_j; j++)
1628 if (stems[j] != INF)
1629 stems[j] += sc_spl_stem(i, j - 1, j + 1, sc_wrapper);
1630
1631 if (length <= i + maxdist) {
1632 j = length;
1633
1634 if ((c[j - 1] != INF) &&
1635 (evaluate(i, j, i, j - 1, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
1636 type = vrna_get_ptype_window(i, j - 1, ptype);
1637 stems[j] = c[j - 1] +
1638 vrna_E_ext_stem(type, -1, S1[j], P);
1639
1640 #ifdef VRNA_WITH_SVM
1641 /* if necessary, remove those stems where the z-score threshold is not satisfied */
1642 if ((zsc_pre_filter) &&
1643 (stems[j] != INF)) {
1644 zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
1645 if (zsc_data->current_z[j] > zsc_data->min_z)
1646 stems[j] = INF;
1647 }
1648
1649 #endif
1650 if ((sc_red_stem) &&
1651 (stems[j] != INF))
1652 stems[j] += sc_red_stem(i, i, j - 1, sc_wrapper);
1653 }
1654 }
1655
1656 break;
1657
1658 case VRNA_FC_TYPE_COMPARATIVE:
1659 n_seq = fc->n_seq;
1660 S = fc->S;
1661 S3 = fc->S3;
1662 a2s = fc->a2s;
1663 max_j = MIN2(length - 1, i + maxdist + 1);
1664
1665 si = (short *)vrna_alloc(sizeof(short) * n_seq);
1666 for (s = 0; s < n_seq; s++)
1667 si[s] = S[s][i];
1668
1669 for (j = i + turn + 1; j <= max_j; j++) {
1670 stems[j] = INF;
1671 if ((c[j - 1] != INF) &&
1672 (evaluate(i, length, j - 1, j + 1, VRNA_DECOMP_EXT_STEM_EXT, hc_dat_local))) {
1673 energy = c[j - 1];
1674 for (s = 0; s < n_seq; s++) {
1675 type = vrna_get_ptype_md(si[s], S[s][j - 1], md);
1676 sj1 = (a2s[s][j - 1] < a2s[s][length]) ? S3[s][j - 1] : -1;
1677 energy += vrna_E_ext_stem(type, -1, sj1, P);
1678 }
1679 stems[j] = energy;
1680 }
1681 }
1682
1683 if (sc_spl_stem)
1684 for (j = i + turn + 1; j <= max_j; j++)
1685 if (stems[j] != INF)
1686 stems[j] += sc_spl_stem(i, j - 1, j + 1, sc_wrapper);
1687
1688 if (length <= i + maxdist) {
1689 j = length;
1690
1691 if ((c[j - 1] != INF) &&
1692 (evaluate(i, j, i, j - 1, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
1693 energy = c[j - 1];
1694 for (s = 0; s < n_seq; s++) {
1695 type = vrna_get_ptype_md(si[s], S[s][j - 1], md);
1696 sj1 = (a2s[s][j - 1] < a2s[s][length]) ? S3[s][j - 1] : -1;
1697 energy += vrna_E_ext_stem(type, -1, sj1, P);
1698 }
1699
1700 if (sc_red_stem)
1701 energy += sc_red_stem(i, i, j - 1, sc_wrapper);
1702
1703 stems[j] = energy;
1704 }
1705 }
1706
1707 free(si);
1708
1709 break;
1710 }
1711
1712 return stems;
1713 }
1714
1715
PRIVATE INLINE int *
f5_get_stem_contributions_d3(vrna_fold_compound_t       *fc,
                             int                        j,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f5_dat           *sc_wrapper)
{
  /*
   *  Collect exterior-loop stem energies for splits f5[j] -> f5[i-1] + C[i,j-1]
   *  with a 3' dangle at position j (dangle model d3; no 5' mismatch, hence the
   *  -1 passed as 5' argument throughout).
   *
   *  Returns a freshly allocated array (caller frees) where stems[i] holds the
   *  energy of stem [i, j-1] plus the dangle at j, or INF if forbidden.
   *  stems[1] covers the stem [1, j-1] with dangle at j.
   */
  char          *ptype;
  short         *S, sj1, **SS, **S3, *s3j1, *ssj1;
  unsigned int  n, s, n_seq, **a2s, type;
  int           i, ij, *indx, turn, *c, *stems;
  vrna_param_t  *P;
  vrna_md_t     *md;

  sc_f5_cb      *sc_spl_stem;
  sc_f5_cb      *sc_red_stem;

  stems = (int *)vrna_alloc(sizeof(int) * j);

  n     = fc->length;
  P     = fc->params;
  md    = &(P->model_details);
  indx  = fc->jindx;
  c     = fc->matrices->c;
  turn  = P->model_details.min_loop_size;
  /* ij tracks the pair (i, j - 1); starts at i = j - turn - 1 */
  ij    = indx[j - 1] + j - turn - 1;

  /* note: decompositions here use the "stem1" soft-constraint callback
   * (stem plus one unpaired 3' nucleotide) */
  sc_spl_stem = sc_wrapper->decomp_stem1;
  sc_red_stem = sc_wrapper->red_stem;

  switch (fc->type) {
    case VRNA_FC_TYPE_SINGLE:
      S     = fc->sequence_encoding;
      ptype = fc->ptype;
      /* the dangling 3' nucleotide is always position j */
      sj1   = S[j];

      for (i = j - turn - 1; i > 1; i--, ij--) {
        stems[i] = INF;
        if ((c[ij] != INF) &&
            (evaluate(1, j, i - 1, i, VRNA_DECOMP_EXT_EXT_STEM1, hc_dat_local))) {
          type      = vrna_get_ptype(ij, ptype);
          stems[i]  = c[ij] +
                      vrna_E_ext_stem(type, -1, sj1, P);
        }
      }

      /* apply soft constraint contributions in a separate pass */
      if (sc_spl_stem)
        for (i = j - turn - 1; i > 1; i--)
          if (stems[i] != INF)
            stems[i] += sc_spl_stem(j, i - 1, i, sc_wrapper);

      /* special case: stem [1, j - 1] with dangle at j */
      stems[1]  = INF;
      ij        = indx[j - 1] + 1;

      if ((c[ij] != INF) && (evaluate(1, j, 1, j - 1, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
        type      = vrna_get_ptype(ij, ptype);
        stems[1]  = c[ij] +
                    vrna_E_ext_stem(type, -1, sj1, P);

        if (sc_red_stem)
          stems[1] += sc_red_stem(j, 1, j - 1, sc_wrapper);
      }

      break;

    case VRNA_FC_TYPE_COMPARATIVE:
      n_seq = fc->n_seq;
      SS    = fc->S;
      S3    = fc->S3;
      a2s   = fc->a2s;

      /* pre-compute, per sequence, the 3' mismatch of j - 1 and its encoding */
      s3j1  = (short *)vrna_alloc(sizeof(short) * n_seq);
      ssj1  = (short *)vrna_alloc(sizeof(short) * n_seq);
      for (s = 0; s < n_seq; s++) {
        /* no 3' mismatch if j - 1 aligns to the last position of sequence s */
        s3j1[s] = (a2s[s][j - 1] < a2s[s][n]) ? S3[s][j - 1] : -1;
        ssj1[s] = SS[s][j - 1];
      }

      for (i = j - turn - 1; i > 1; i--, ij--) {
        stems[i] = INF;
        if ((c[ij] != INF) &&
            (evaluate(1, j, i - 1, i, VRNA_DECOMP_EXT_EXT_STEM1, hc_dat_local))) {
          stems[i] = c[ij];
          /* accumulate the exterior-stem contribution of every sequence */
          for (s = 0; s < n_seq; s++) {
            type      = vrna_get_ptype_md(SS[s][i], ssj1[s], md);
            stems[i]  += vrna_E_ext_stem(type, -1, s3j1[s], P);
          }
        }
      }

      if (sc_spl_stem)
        for (i = j - turn - 1; i > 1; i--)
          if (stems[i] != INF)
            stems[i] += sc_spl_stem(j, i - 1, i, sc_wrapper);

      /* special case: stem [1, j - 1] with dangle at j */
      stems[1]  = INF;
      ij        = indx[j - 1] + 1;

      if ((c[ij] != INF) && (evaluate(1, j, 1, j - 1, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
        stems[1] = c[ij];

        for (s = 0; s < n_seq; s++) {
          type      = vrna_get_ptype_md(SS[s][1], ssj1[s], md);
          stems[1]  += vrna_E_ext_stem(type, -1, s3j1[s], P);
        }

        if (sc_red_stem)
          stems[1] += sc_red_stem(j, 1, j - 1, sc_wrapper);
      }

      free(s3j1);
      free(ssj1);

      break;
  }

  return stems;
}
1835
1836
PRIVATE INLINE int *
f3_get_stem_contributions_d5(vrna_fold_compound_t       *fc,
                             int                        i,
                             vrna_callback_hc_evaluate  *evaluate,
                             struct hc_ext_def_dat      *hc_dat_local,
                             struct sc_f3_dat           *sc_wrapper)
{
  /*
   *  Sliding-window (f3) stem collection for splits f3[i] -> C[i+1,j] + f3[j+1]
   *  with a 5' dangle at position i (dangle model d5).  The returned array is
   *  offset for absolute-position indexing by j (caller must undo the offset
   *  before free()'ing).  With VRNA_WITH_SVM, stems may additionally be
   *  discarded by the z-score pre-filter.
   */
  char            **ptype;
  short           *S1, **S, **S5, *s5i1, si, *si1;
  unsigned int    s, n_seq, **a2s, type;
  int             energy, j, max_j, turn, *c, *stems, length, maxdist;
  vrna_param_t    *P;
  vrna_md_t       *md;

#ifdef VRNA_WITH_SVM
  int             zsc_pre_filter;
  vrna_zsc_dat_t  zsc_data;
#endif

  sc_f3_cb        *sc_spl_stem;
  sc_f3_cb        *sc_red_stem;

  length  = (int)fc->length;
  maxdist = fc->window_size;
  P       = fc->params;
  md      = &(P->model_details);
  /* the stem starts at i + 1, so use row i + 1 of the local C matrix and
   * shift the pointer for absolute-column indexing c[j] == C[i + 1, j] */
  c       = fc->matrices->c_local[i + 1];
  c       -= i + 1;
  turn    = P->model_details.min_loop_size;
#ifdef VRNA_WITH_SVM
  zsc_data        = fc->zscore_data;
  zsc_pre_filter  = ((zsc_data) &&
                     (zsc_data->filter_on) &&
                     (zsc_data->pre_filter)) ? 1 : 0;
#endif

  /* allocate window-sized array and shift it for absolute-position indexing */
  stems = (int *)vrna_alloc(sizeof(int) * (maxdist + 6));
  stems -= i;

  /* decompositions here use the "stem1" soft-constraint callback */
  sc_spl_stem = sc_wrapper->decomp_stem1;
  sc_red_stem = sc_wrapper->red_stem;

#ifdef VRNA_WITH_SVM
  /* re-base the per-window z-score array onto the current start position i */
  if (zsc_pre_filter) {
    zsc_data->current_z += zsc_data->current_i;
    /* initialize */
    memset(zsc_data->current_z, 0, sizeof(double) * (maxdist + 2));
    /* move pointer for convenience */
    zsc_data->current_i = i;
    zsc_data->current_z -= zsc_data->current_i;
  }

#endif

  switch (fc->type) {
    case VRNA_FC_TYPE_SINGLE:
      S1    = fc->sequence_encoding;
      ptype = fc->ptype_local;
      /* the dangling 5' nucleotide is always position i */
      si    = S1[i];
      max_j = MIN2(length - 1, i + maxdist + 1);

      for (j = i + turn + 1; j <= max_j; j++) {
        stems[j] = INF;
        if ((c[j] != INF) &&
            (evaluate(i, length, j, j + 1, VRNA_DECOMP_EXT_STEM_EXT1, hc_dat_local))) {
          type      = vrna_get_ptype_window(i + 1, j, ptype);
          stems[j]  = c[j] +
                      vrna_E_ext_stem(type, si, -1, P);
        }
      }

#ifdef VRNA_WITH_SVM
      /* if necessary, remove those stems where the z-score threshold is not satisfied */
      if (zsc_pre_filter) {
        for (j = i + turn + 1; j <= max_j; j++) {
          if (stems[j] != INF) {
            zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
            if (zsc_data->current_z[j] > zsc_data->min_z)
              stems[j] = INF;
          }
        }
      }

#endif

      if (sc_spl_stem)
        for (j = i + turn + 1; j <= max_j; j++)
          if (stems[j] != INF)
            stems[j] += sc_spl_stem(i, j, j + 1, sc_wrapper);

      /* special case: stem [i + 1, n] reaching the end of the sequence */
      if (length <= i + maxdist) {
        j         = length;
        stems[j]  = INF;

        if ((c[j] != INF) &&
            (evaluate(i, j, i + 1, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
          type      = vrna_get_ptype_window(i + 1, j, ptype);
          stems[j]  = c[j] +
                      vrna_E_ext_stem(type, si, -1, P);

#ifdef VRNA_WITH_SVM
          /* if necessary, remove those stems where the z-score threshold is not satisfied */
          if ((zsc_pre_filter) &&
              (stems[j] != INF)) {
            zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
            if (zsc_data->current_z[j] > zsc_data->min_z)
              stems[j] = INF;
          }

#endif

          if ((sc_red_stem) &&
              (stems[j] != INF))
            stems[j] += sc_red_stem(i, i + 1, j, sc_wrapper);
        }
      }

      break;

    case VRNA_FC_TYPE_COMPARATIVE:
      n_seq = fc->n_seq;
      S     = fc->S;
      S5    = fc->S5;
      a2s   = fc->a2s;
      max_j = MIN2(length - 1, i + maxdist + 1);

      /* pre-compute, per sequence, the 5' mismatch of i + 1 and its encoding */
      s5i1  = (short *)vrna_alloc(sizeof(short) * n_seq);
      si1   = (short *)vrna_alloc(sizeof(short) * n_seq);
      for (s = 0; s < n_seq; s++) {
        s5i1[s] = (a2s[s][i + 1] > 1) ? S5[s][i + 1] : -1;
        si1[s]  = S[s][i + 1];
      }

      for (j = i + turn + 1; j <= max_j; j++) {
        stems[j] = INF;
        if ((c[j] != INF) &&
            (evaluate(i, length, j, j + 1, VRNA_DECOMP_EXT_STEM_EXT1, hc_dat_local))) {
          energy = c[j];
          /* accumulate the exterior-stem contribution of every sequence */
          for (s = 0; s < n_seq; s++) {
            type    = vrna_get_ptype_md(si1[s], S[s][j], md);
            energy  += vrna_E_ext_stem(type, s5i1[s], -1, P);
          }
          stems[j] = energy;
        }
      }

      if (sc_spl_stem)
        for (j = i + turn + 1; j <= max_j; j++)
          if (stems[j] != INF)
            stems[j] += sc_spl_stem(i, j, j + 1, sc_wrapper);

      /* special case: stem [i + 1, n] reaching the end of the alignment */
      if (length <= i + maxdist) {
        j         = length;
        stems[j]  = INF;

        if ((c[j] != INF) &&
            (evaluate(i, j, i + 1, j, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
          energy = c[j];
          for (s = 0; s < n_seq; s++) {
            type    = vrna_get_ptype_md(si1[s], S[s][j], md);
            energy  += vrna_E_ext_stem(type, s5i1[s], -1, P);
          }

          if (sc_red_stem)
            energy += sc_red_stem(i, i + 1, j, sc_wrapper);

          stems[j] = energy;
        }
      }

      free(s5i1);
      free(si1);

      break;
  }

  return stems;
}
2017
2018
2019 PRIVATE INLINE int *
f5_get_stem_contributions_d53(vrna_fold_compound_t * fc,int j,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f5_dat * sc_wrapper)2020 f5_get_stem_contributions_d53(vrna_fold_compound_t *fc,
2021 int j,
2022 vrna_callback_hc_evaluate *evaluate,
2023 struct hc_ext_def_dat *hc_dat_local,
2024 struct sc_f5_dat *sc_wrapper)
2025 {
2026 char *ptype;
2027 short *S, *si1, sj1, **SS, **S5, **S3, *s3j1, *ssj1;
2028 unsigned int n, s, n_seq, **a2s, type;
2029 int i, ij, *indx, turn, *c, *stems;
2030 vrna_param_t *P;
2031 vrna_md_t *md;
2032
2033 sc_f5_cb *sc_spl_stem;
2034 sc_f5_cb *sc_red_stem;
2035
2036 stems = (int *)vrna_alloc(sizeof(int) * j);
2037
2038 n = fc->length;
2039 P = fc->params;
2040 md = &(P->model_details);
2041 indx = fc->jindx;
2042 c = fc->matrices->c;
2043 turn = md->min_loop_size;
2044 ij = indx[j - 1] + j - turn;
2045
2046 sc_spl_stem = sc_wrapper->decomp_stem1;
2047 sc_red_stem = sc_wrapper->red_stem;
2048
2049 switch (fc->type) {
2050 case VRNA_FC_TYPE_SINGLE:
2051 S = fc->sequence_encoding;
2052 ptype = fc->ptype;
2053 sj1 = S[j];
2054 si1 = S + j - turn - 1;
2055
2056 for (i = j - turn - 1; i > 1; i--, ij--, si1--) {
2057 stems[i] = INF;
2058 if ((c[ij] != INF) &&
2059 (evaluate(1, j, i - 1, i + 1, VRNA_DECOMP_EXT_EXT_STEM1, hc_dat_local))) {
2060 type = vrna_get_ptype(ij, ptype);
2061 stems[i] = c[ij] +
2062 vrna_E_ext_stem(type, *si1, sj1, P);
2063 }
2064 }
2065
2066 if (sc_spl_stem)
2067 for (i = j - turn - 1; i > 1; i--)
2068 if (stems[i] != INF)
2069 stems[i] += sc_spl_stem(j, i - 1, i + 1, sc_wrapper);
2070
2071 stems[1] = INF;
2072 ij = indx[j - 1] + 2;
2073
2074 if ((c[ij] != INF) && (evaluate(1, j, 2, j - 1, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
2075 type = vrna_get_ptype(ij, ptype);
2076 stems[1] = c[ij] +
2077 vrna_E_ext_stem(type, S[1], sj1, P);
2078
2079 if (sc_red_stem)
2080 stems[1] += sc_red_stem(j, 2, j - 1, sc_wrapper);
2081 }
2082
2083 break;
2084
2085 case VRNA_FC_TYPE_COMPARATIVE:
2086 n_seq = fc->n_seq;
2087 SS = fc->S;
2088 S5 = fc->S5;
2089 S3 = fc->S3;
2090 a2s = fc->a2s;
2091
2092 /* pre-compute S3[s][j - 1] */
2093 s3j1 = (short *)vrna_alloc(sizeof(short) * n_seq);
2094 ssj1 = (short *)vrna_alloc(sizeof(short) * n_seq);
2095 for (s = 0; s < n_seq; s++) {
2096 s3j1[s] = (a2s[s][j - 1] < a2s[s][n]) ? S3[s][j - 1] : -1;
2097 ssj1[s] = SS[s][j - 1];
2098 }
2099
2100 for (i = j - turn - 1; i > 1; i--, ij--, si1--) {
2101 stems[i] = INF;
2102 if ((c[ij] != INF) &&
2103 (evaluate(1, j, i - 1, i + 1, VRNA_DECOMP_EXT_EXT_STEM1, hc_dat_local))) {
2104 stems[i] = c[ij];
2105 for (s = 0; s < n_seq; s++) {
2106 type = vrna_get_ptype_md(SS[s][i + 1], ssj1[s], md);
2107 stems[i] += vrna_E_ext_stem(type, (a2s[s][i + 1] > 1) ? S5[s][i + 1] : -1, s3j1[s], P);
2108 }
2109 }
2110 }
2111
2112 if (sc_spl_stem)
2113 for (i = j - turn - 1; i > 1; i--)
2114 if (stems[i] != INF)
2115 stems[i] += sc_spl_stem(j, i - 1, i + 1, sc_wrapper);
2116
2117 stems[1] = INF;
2118 ij = indx[j - 1] + 2;
2119
2120 if ((c[ij] != INF) && (evaluate(1, j, 2, j - 1, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
2121 stems[1] = c[ij];
2122 for (s = 0; s < n_seq; s++) {
2123 type = vrna_get_ptype_md(SS[s][2], ssj1[s], md);
2124 stems[1] += vrna_E_ext_stem(type, (a2s[s][2] > 1) ? S5[s][2] : -1, s3j1[s], P);
2125 }
2126
2127 if (sc_red_stem)
2128 stems[1] += sc_red_stem(j, 2, j - 1, sc_wrapper);
2129 }
2130
2131 free(s3j1);
2132 free(ssj1);
2133
2134 break;
2135 }
2136
2137 return stems;
2138 }
2139
2140
2141 PRIVATE INLINE int *
f3_get_stem_contributions_d53(vrna_fold_compound_t * fc,int i,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f3_dat * sc_wrapper)2142 f3_get_stem_contributions_d53(vrna_fold_compound_t *fc,
2143 int i,
2144 vrna_callback_hc_evaluate *evaluate,
2145 struct hc_ext_def_dat *hc_dat_local,
2146 struct sc_f3_dat *sc_wrapper)
2147 {
2148 char **ptype;
2149 short *S1, **S, **S5, **S3, *s5i1, si1, sj1, *ssi1;
2150 unsigned int s, n_seq, **a2s, type;
2151 int energy, j, max_j, turn, *c, *stems, length, maxdist;
2152 vrna_param_t *P;
2153 vrna_md_t *md;
2154
2155 #ifdef VRNA_WITH_SVM
2156 int zsc_pre_filter;
2157 vrna_zsc_dat_t zsc_data;
2158 #endif
2159
2160 sc_f3_cb *sc_spl_stem;
2161 sc_f3_cb *sc_red_stem;
2162
2163 length = (int)fc->length;
2164 maxdist = fc->window_size;
2165 P = fc->params;
2166 md = &(P->model_details);
2167 c = fc->matrices->c_local[i + 1];
2168 c -= i + 1;
2169 turn = md->min_loop_size;
2170 #ifdef VRNA_WITH_SVM
2171 zsc_data = fc->zscore_data;
2172 zsc_pre_filter = ((zsc_data) &&
2173 (zsc_data->filter_on) &&
2174 (zsc_data->pre_filter)) ? 1 : 0;
2175 #endif
2176
2177 stems = (int *)vrna_alloc(sizeof(int) * (maxdist + 6));
2178 stems -= i;
2179
2180 sc_spl_stem = sc_wrapper->decomp_stem1;
2181 sc_red_stem = sc_wrapper->red_stem;
2182
2183 #ifdef VRNA_WITH_SVM
2184 /* re-set pointer */
2185 if (zsc_pre_filter) {
2186 zsc_data->current_z += zsc_data->current_i;
2187 /* initialize */
2188 memset(zsc_data->current_z, 0, sizeof(double) * (maxdist + 2));
2189 /* move pointer for convenience */
2190 zsc_data->current_i = i;
2191 zsc_data->current_z -= zsc_data->current_i;
2192 }
2193
2194 #endif
2195
2196 switch (fc->type) {
2197 case VRNA_FC_TYPE_SINGLE:
2198 S1 = fc->sequence_encoding;
2199 ptype = fc->ptype_local;
2200 si1 = S1[i];
2201 max_j = MIN2(length - 1, i + maxdist + 1);
2202
2203 for (j = i + turn + 1; j <= max_j; j++) {
2204 stems[j] = INF;
2205 if ((c[j - 1] != INF) &&
2206 (evaluate(i, length, j - 1, j + 1, VRNA_DECOMP_EXT_STEM_EXT1, hc_dat_local))) {
2207 type = vrna_get_ptype_window(i + 1, j - 1, ptype);
2208 stems[j] = c[j - 1] +
2209 vrna_E_ext_stem(type, si1, S1[j], P);
2210 }
2211 }
2212
2213 #ifdef VRNA_WITH_SVM
2214 /* if necessary, remove those stems where the z-score threshold is not satisfied */
2215 if (zsc_pre_filter) {
2216 for (j = i + turn + 1; j <= max_j; j++) {
2217 if (stems[j] != INF) {
2218 zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
2219 if (zsc_data->current_z[j] > zsc_data->min_z)
2220 stems[j] = INF;
2221 }
2222 }
2223 }
2224
2225 #endif
2226
2227 if (sc_spl_stem)
2228 for (j = i + turn + 1; j <= max_j; j++)
2229 if (stems[j] != INF)
2230 stems[j] += sc_spl_stem(i, j - 1, j + 1, sc_wrapper);
2231
2232 if (length <= i + maxdist) {
2233 j = length;
2234 if ((c[j - 1] != INF) &&
2235 (evaluate(i, length, i + 1, j - 1, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
2236 type = vrna_get_ptype_window(i + 1, j - 1, ptype);
2237 stems[j] = c[j - 1] +
2238 vrna_E_ext_stem(type, si1, S1[j], P);
2239
2240 #ifdef VRNA_WITH_SVM
2241 /* if necessary, remove those stems where the z-score threshold is not satisfied */
2242 if ((zsc_pre_filter) &&
2243 (stems[j] != INF)) {
2244 zsc_data->current_z[j] = vrna_zsc_compute(fc, i, j, stems[j]);
2245 if (zsc_data->current_z[j] > zsc_data->min_z)
2246 stems[j] = INF;
2247 }
2248
2249 #endif
2250
2251 if ((sc_red_stem) &&
2252 (stems[j] != INF))
2253 stems[j] += sc_red_stem(i, i + 1, j - 1, sc_wrapper);
2254 }
2255 }
2256
2257 break;
2258
2259 case VRNA_FC_TYPE_COMPARATIVE:
2260 n_seq = fc->n_seq;
2261 S = fc->S;
2262 S5 = fc->S5;
2263 S3 = fc->S3;
2264 a2s = fc->a2s;
2265 max_j = MIN2(length - 1, i + maxdist + 1);
2266
2267 s5i1 = (short *)vrna_alloc(sizeof(short) * n_seq);
2268 ssi1 = (short *)vrna_alloc(sizeof(short) * n_seq);
2269 for (s = 0; s < n_seq; s++) {
2270 s5i1[s] = (a2s[s][i + 1] > 1) ? S5[s][i + 1] : -1;
2271 ssi1[s] = S[s][i + 1];
2272 }
2273
2274 for (j = i + turn + 1; j <= max_j; j++) {
2275 stems[j] = INF;
2276 if ((c[j - 1] != INF) &&
2277 (evaluate(i, length, j - 1, j + 1, VRNA_DECOMP_EXT_STEM_EXT1, hc_dat_local))) {
2278 energy = c[j - 1];
2279 for (s = 0; s < n_seq; s++) {
2280 type = vrna_get_ptype_md(ssi1[s], S[s][j - 1], md);
2281 sj1 = (a2s[s][j - 1] < a2s[s][length]) ? S3[s][j - 1] : -1;
2282 energy += vrna_E_ext_stem(type, s5i1[s], sj1, P);
2283 }
2284 stems[j] = energy;
2285 }
2286 }
2287
2288 if (sc_spl_stem)
2289 for (j = i + turn + 1; j <= max_j; j++)
2290 if (stems[j] != INF)
2291 stems[j] += sc_spl_stem(i, j - 1, j + 1, sc_wrapper);
2292
2293 if (length <= i + maxdist) {
2294 j = length;
2295 if ((c[j - 1] != INF) &&
2296 (evaluate(i, length, i + 1, j - 1, VRNA_DECOMP_EXT_STEM, hc_dat_local))) {
2297 energy = c[j - 1];
2298 for (s = 0; s < n_seq; s++) {
2299 type = vrna_get_ptype_md(ssi1[s], S[s][j - 1], md);
2300 sj1 = (a2s[s][j - 1] < a2s[s][length]) ? S3[s][j - 1] : -1;
2301 energy += vrna_E_ext_stem(type, s5i1[s], sj1, P);
2302 }
2303
2304 if (sc_red_stem)
2305 energy += sc_red_stem(i, i + 1, j - 1, sc_wrapper);
2306
2307 stems[j] = energy;
2308 }
2309 }
2310
2311 free(s5i1);
2312 free(ssi1);
2313
2314 break;
2315 }
2316
2317 return stems;
2318 }
2319
2320
2321 PRIVATE INLINE int
add_f5_gquad(vrna_fold_compound_t * fc,int j,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f5_dat * sc_wrapper)2322 add_f5_gquad(vrna_fold_compound_t *fc,
2323 int j,
2324 vrna_callback_hc_evaluate *evaluate,
2325 struct hc_ext_def_dat *hc_dat_local,
2326 struct sc_f5_dat *sc_wrapper)
2327 {
2328 int e, i, ij, *indx, turn, *f5, *ggg;
2329
2330 indx = fc->jindx;
2331 f5 = fc->matrices->f5;
2332 ggg = fc->matrices->ggg;
2333 turn = fc->params->model_details.min_loop_size;
2334 ij = indx[j] + j - turn - 1;
2335 e = INF;
2336
2337 for (i = j - turn - 1; i > 1; i--, ij--)
2338 if ((f5[i - 1] != INF) && (ggg[ij] != INF))
2339 e = MIN2(e, f5[i - 1] + ggg[ij]);
2340
2341 ij = indx[j] + 1;
2342 e = MIN2(e, ggg[ij]);
2343
2344 return e;
2345 }
2346
2347
2348 PRIVATE INLINE int
add_f3_gquad(vrna_fold_compound_t * fc,int i,vrna_callback_hc_evaluate * evaluate,struct hc_ext_def_dat * hc_dat_local,struct sc_f3_dat * sc_wrapper)2349 add_f3_gquad(vrna_fold_compound_t *fc,
2350 int i,
2351 vrna_callback_hc_evaluate *evaluate,
2352 struct hc_ext_def_dat *hc_dat_local,
2353 struct sc_f3_dat *sc_wrapper)
2354 {
2355 int e, j, length, turn, *f3, *ggg, maxdist;
2356
2357 length = (int)fc->length;
2358 maxdist = fc->window_size;
2359 f3 = fc->matrices->f3_local;
2360 ggg = fc->matrices->ggg_local[i];
2361 turn = fc->params->model_details.min_loop_size;
2362 e = INF;
2363
2364 for (j = i + turn + 1; (j < length) && (j <= i + maxdist); j++)
2365 if ((f3[j + 1] != INF) && (ggg[j - i] != INF))
2366 e = MIN2(e, f3[j + 1] + ggg[j - i]);
2367
2368 if (length <= i + maxdist)
2369 e = MIN2(e, ggg[length - i]);
2370
2371 return e;
2372 }
2373
2374
2375 PRIVATE INLINE int
decompose_f5_ext_stem(vrna_fold_compound_t * fc,int j,int * stems)2376 decompose_f5_ext_stem(vrna_fold_compound_t *fc,
2377 int j,
2378 int *stems)
2379 {
2380 int e, *f5, turn;
2381
2382 f5 = fc->matrices->f5;
2383 turn = fc->params->model_details.min_loop_size;
2384 e = INF;
2385
2386 const int count = j - turn;
2387
2388 e = vrna_fun_zip_add_min(f5 + 1, stems + 2, count - 2);
2389
2390 return e;
2391 }
2392
2393
2394 PRIVATE INLINE int
decompose_f3_ext_stem(vrna_fold_compound_t * fc,int i,int max_j,int * stems)2395 decompose_f3_ext_stem(vrna_fold_compound_t *fc,
2396 int i,
2397 int max_j,
2398 int *stems)
2399 {
2400 int e, *f3, turn, count;
2401
2402 f3 = fc->matrices->f3_local;
2403 turn = fc->params->model_details.min_loop_size;
2404 count = max_j - i - turn;
2405 e = INF;
2406
2407 /* modular decomposition */
2408 e = vrna_fun_zip_add_min(stems + i + turn + 1, f3 + i + turn + 2, count);
2409
2410 return e;
2411 }
2412
2413
2414 /*
2415 *###########################################
2416 *# deprecated functions below #
2417 *###########################################
2418 */
2419
2420 #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
2421
2422 PUBLIC int
E_Stem(int type,int si1,int sj1,int extLoop,vrna_param_t * P)2423 E_Stem(int type,
2424 int si1,
2425 int sj1,
2426 int extLoop,
2427 vrna_param_t *P)
2428 {
2429 int energy = 0;
2430 int d5 = (si1 >= 0) ? P->dangle5[type][si1] : 0;
2431 int d3 = (sj1 >= 0) ? P->dangle3[type][sj1] : 0;
2432
2433 if (type > 2)
2434 energy += P->TerminalAU;
2435
2436 if (si1 >= 0 && sj1 >= 0)
2437 energy += (extLoop) ? P->mismatchExt[type][si1][sj1] : P->mismatchM[type][si1][sj1];
2438 else
2439 energy += d5 + d3;
2440
2441 if (!extLoop)
2442 energy += P->MLintern[type];
2443
2444 return energy;
2445 }
2446
2447
2448 PUBLIC int
E_ExtLoop(int type,int si1,int sj1,vrna_param_t * P)2449 E_ExtLoop(int type,
2450 int si1,
2451 int sj1,
2452 vrna_param_t *P)
2453 {
2454 int energy = 0;
2455
2456 if (si1 >= 0 && sj1 >= 0)
2457 energy += P->mismatchExt[type][si1][sj1];
2458 else if (si1 >= 0)
2459 energy += P->dangle5[type][si1];
2460 else if (sj1 >= 0)
2461 energy += P->dangle3[type][sj1];
2462
2463 if (type > 2)
2464 energy += P->TerminalAU;
2465
2466 return energy;
2467 }
2468
2469
2470 #endif
2471