1 /** \file dp_matrices.c **/
2 
3 /*
4  *                Dynamic Programming matrix related functions
5  *
6  *                This file contains everything necessary to
7  *                obtain and destroy data structures representing
8  *                dynamic programming (DP) matrices used in the folding
9  *                recurrences throughout the ViennaRNA package
10  *
11  *                (c) Ronny Lorenz
12  *
13  *                ViennaRNA package
14  */
15 
16 #ifdef HAVE_CONFIG_H
17 #include "config.h"
18 #endif
19 
20 #include <stdlib.h>
21 #include <math.h>
22 
23 #include "ViennaRNA/datastructures/basic.h"
24 #include "ViennaRNA/model.h"
25 #include "ViennaRNA/utils/basic.h"
26 #include "ViennaRNA/gquad.h"
27 #include "ViennaRNA/dp_matrices.h"
28 
29 /*
30  #################################
31  # PRIVATE MACROS                #
32  #################################
33  */
34 
35 /* the definitions below indicate which arrays should be allocated upon retrieval of a matrices data structure */
36 #define ALLOC_NOTHING     0
37 #define ALLOC_F           1
38 #define ALLOC_F5          2
39 #define ALLOC_F3          4
40 #define ALLOC_FC          8
41 #define ALLOC_C           16
42 #define ALLOC_FML         32
43 #define ALLOC_PROBS       256
44 #define ALLOC_AUX         512
45 
46 #define ALLOC_CIRC        1024
47 #define ALLOC_HYBRID      2048
48 #define ALLOC_UNIQ        4096
49 
50 
51 #define ALLOC_MFE_DEFAULT         (ALLOC_F5 | ALLOC_C | ALLOC_FML)
52 #define ALLOC_MFE_LOCAL           (ALLOC_F3 | ALLOC_C | ALLOC_FML)
53 
54 #define ALLOC_PF_WO_PROBS         (ALLOC_F | ALLOC_C | ALLOC_FML)
55 #define ALLOC_PF_DEFAULT          (ALLOC_PF_WO_PROBS | ALLOC_PROBS | ALLOC_AUX)
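
/*
 * Example (illustrative): for a circular RNA partition function computation
 * with base pair probabilities, get_mx_alloc_vector() below assembles
 *
 *    ALLOC_PF_DEFAULT | ALLOC_CIRC | ALLOC_UNIQ
 *
 * i.e. the q/qb/qm arrays plus probabilities and auxiliary arrays, the qm2
 * array for the circular decomposition, and the qm1 array for unique
 * multibranch loop decomposition (uniq_ML is switched on automatically for
 * circular folding).
 */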
56 
57 /*
58  #################################
59  # GLOBAL VARIABLES              #
60  #################################
61  */
62 
63 /*
64  #################################
65  # PRIVATE VARIABLES             #
66  #################################
67  */
68 
69 /*
70  #################################
71  # PRIVATE FUNCTION DECLARATIONS #
72  #################################
73  */
74 PRIVATE unsigned int    get_mx_alloc_vector(vrna_md_t       *md_p,
75                                             vrna_mx_type_e  type,
76                                             unsigned int    options);
77 
78 
79 PRIVATE unsigned int    get_mx_mfe_alloc_vector_current(vrna_mx_mfe_t   *mx,
80                                                         vrna_mx_type_e  mx_type);
81 
82 
83 PRIVATE unsigned int    get_mx_pf_alloc_vector_current(vrna_mx_pf_t   *mx,
84                                                        vrna_mx_type_e mx_type);
85 
86 
87 PRIVATE void            mfe_matrices_alloc_default(vrna_mx_mfe_t  *vars,
88                                                    unsigned int   m,
89                                                    unsigned int   alloc_vector);
90 
91 
92 PRIVATE void            mfe_matrices_free_default(vrna_mx_mfe_t *self);
93 
94 
95 PRIVATE void            mfe_matrices_alloc_window(vrna_mx_mfe_t *vars,
96                                                   unsigned int  m,
97                                                   unsigned int  alloc_vector);
98 
99 
100 PRIVATE void            mfe_matrices_free_window(vrna_mx_mfe_t  *self,
101                                                  unsigned int   length,
102                                                  unsigned int   window_size);
103 
104 
105 PRIVATE void            mfe_matrices_alloc_2Dfold(vrna_mx_mfe_t *vars,
106                                                   unsigned int  m,
107                                                   unsigned int  alloc_vector);
108 
109 
110 PRIVATE void            mfe_matrices_free_2Dfold(vrna_mx_mfe_t  *self,
111                                                  unsigned int   length,
112                                                  int            min_loop_size,
113                                                  int            *indx);
114 
115 
116 PRIVATE void            pf_matrices_alloc_default(vrna_mx_pf_t  *vars,
117                                                   unsigned int  m,
118                                                   unsigned int  alloc_vector);
119 
120 
121 PRIVATE void            pf_matrices_free_default(vrna_mx_pf_t *self);
122 
123 
124 PRIVATE void            pf_matrices_alloc_window(vrna_mx_pf_t *vars,
125                                                  unsigned int m,
126                                                  unsigned int alloc_vector);
127 
128 
129 PRIVATE void            pf_matrices_free_window(vrna_mx_pf_t  *self,
130                                                 unsigned int  length,
131                                                 unsigned int  window_size);
132 
133 
134 PRIVATE void            pf_matrices_alloc_2Dfold(vrna_mx_pf_t *vars,
135                                                  unsigned int m,
136                                                  unsigned int alloc_vector);
137 
138 
139 PRIVATE void            pf_matrices_free_2Dfold(vrna_mx_pf_t  *self,
140                                                 unsigned int  length,
141                                                 int           turn,
142                                                 int           *indx,
143                                                 int           *jindx);
144 
145 
146 PRIVATE vrna_mx_mfe_t *get_mfe_matrices_alloc(unsigned int    n,
147                                               unsigned int    m,
148                                               vrna_mx_type_e  type,
149                                               unsigned int    alloc_vector);
150 
151 
152 PRIVATE vrna_mx_pf_t *get_pf_matrices_alloc(unsigned int    n,
153                                             unsigned int    m,
154                                             vrna_mx_type_e  type,
155                                             unsigned int    alloc_vector);
156 
157 
158 PRIVATE int
159 add_pf_matrices(vrna_fold_compound_t  *vc,
160                 vrna_mx_type_e        type,
161                 unsigned int          alloc_vector);
162 
163 
164 PRIVATE int
165 add_mfe_matrices(vrna_fold_compound_t *vc,
166                  vrna_mx_type_e       type,
167                  unsigned int         alloc_vector);
168 
169 
170 /*
171  #################################
172  # BEGIN OF FUNCTION DEFINITIONS #
173  #################################
174  */
175 PUBLIC void
176 vrna_mx_mfe_free(vrna_fold_compound_t *vc)
177 {
178   if (vc) {
179     vrna_mx_mfe_t *self = vc->matrices;
180     if (self) {
181       switch (self->type) {
182         case VRNA_MX_DEFAULT:
183           mfe_matrices_free_default(self);
184           break;
185 
186         case VRNA_MX_WINDOW:
187           mfe_matrices_free_window(self, vc->length, vc->window_size);
188           break;
189 
190         case VRNA_MX_2DFOLD:
191           mfe_matrices_free_2Dfold(self, vc->length, vc->params->model_details.min_loop_size, vc->iindx);
192           break;
193 
194         default:                /* do nothing */
195           break;
196       }
197       free(self);
198       vc->matrices = NULL;
199     }
200   }
201 }
202 
203 
204 PUBLIC void
205 vrna_mx_pf_free(vrna_fold_compound_t *vc)
206 {
207   if (vc) {
208     vrna_mx_pf_t *self = vc->exp_matrices;
209     if (self) {
210       switch (self->type) {
211         case VRNA_MX_DEFAULT:
212           pf_matrices_free_default(self);
213           break;
214 
215         case VRNA_MX_WINDOW:
216           pf_matrices_free_window(self, vc->length, vc->window_size);
217           break;
218 
219         case VRNA_MX_2DFOLD:
220           pf_matrices_free_2Dfold(self, vc->length, vc->exp_params->model_details.min_loop_size, vc->iindx, vc->jindx);
221           break;
222 
223         default:                /* do nothing */
224           break;
225       }
226 
227       free(self->expMLbase);
228       free(self->scale);
229 
230       free(self);
231       vc->exp_matrices = NULL;
232     }
233   }
234 }
235 
236 
237 PUBLIC int
238 vrna_mx_add(vrna_fold_compound_t  *vc,
239             vrna_mx_type_e        mx_type,
240             unsigned int          options)
241 {
242   int ret;
243 
244   ret = 1;
245 
246   if (options & VRNA_OPTION_MFE)
247     ret &= vrna_mx_mfe_add(vc, mx_type, options);
248 
249   if (options & VRNA_OPTION_PF)
250     ret &= vrna_mx_pf_add(vc, mx_type, options);
251 
252   return ret;
253 }
254 
255 
256 PUBLIC int
257 vrna_mx_mfe_add(vrna_fold_compound_t  *vc,
258                 vrna_mx_type_e        mx_type,
259                 unsigned int          options)
260 {
261   unsigned int mx_alloc_vector;
262 
263   if (vc->params) {
264     options |= VRNA_OPTION_MFE;
265     if (vc->strands > 1)
266       options |= VRNA_OPTION_HYBRID;
267 
268     mx_alloc_vector = get_mx_alloc_vector(&(vc->params->model_details),
269                                           mx_type,
270                                           options);
271     vrna_mx_mfe_free(vc);
272     return add_mfe_matrices(vc, mx_type, mx_alloc_vector);
273   }
274 
275   return 0;
276 }
277 
278 
279 PUBLIC int
280 vrna_mx_pf_add(vrna_fold_compound_t *vc,
281                vrna_mx_type_e       mx_type,
282                unsigned int         options)
283 {
284   unsigned int mx_alloc_vector;
285 
286   if (vc->exp_params) {
287     mx_alloc_vector = get_mx_alloc_vector(&(vc->exp_params->model_details),
288                                           mx_type,
289                                           options | VRNA_OPTION_PF);
290     vrna_mx_pf_free(vc);
291     return add_pf_matrices(vc, mx_type, mx_alloc_vector);
292   }
293 
294   return 0;
295 }
296 
297 
298 PUBLIC int
299 vrna_mx_prepare(vrna_fold_compound_t  *vc,
300                 unsigned int          options)
301 {
302   int             ret, realloc;
303   unsigned int    mx_alloc_vector, mx_alloc_vector_current;
304   vrna_mx_type_e  mx_type;
305 
306   ret = 1;
307 
308   if (vc) {
309     /*  check whether we have the correct DP matrices attached, and if there is
310      *  enough memory allocated
311      */
312     if (options & VRNA_OPTION_MFE) {
313       /* prepare for MFE computation */
314       if (options & VRNA_OPTION_WINDOW) /* Windowing approach, a.k.a. locally optimal */
315         mx_type = VRNA_MX_WINDOW;
316       else                              /* default is regular MFE */
317         mx_type = VRNA_MX_DEFAULT;
318 
319       if (vc->strands > 1)
320         options |= VRNA_OPTION_HYBRID;
321 
322       realloc = 0;
323 
324       if (!vc->matrices || (vc->matrices->type != mx_type) || (vc->matrices->length < vc->length)) {
325         realloc = 1;
326       } else {
327         mx_alloc_vector =
328           get_mx_alloc_vector(&(vc->params->model_details), mx_type, options);
329         mx_alloc_vector_current = get_mx_mfe_alloc_vector_current(vc->matrices, mx_type);
330         if ((mx_alloc_vector & mx_alloc_vector_current) != mx_alloc_vector)
331           realloc = 1;
332       }
333 
334       if (realloc) /* add DP matrices if they are missing or too small */
335         ret &= vrna_mx_mfe_add(vc, mx_type, options);
336     }
337 
338     if (options & VRNA_OPTION_PF) {
339       /* prepare for partition function computations */
340       if (!vc->exp_params) /* return failure if exp_params data is not present */
341         return 0;
342 
343       if (options & VRNA_OPTION_WINDOW) /* Windowing approach, a.k.a. locally optimal */
344         mx_type = VRNA_MX_WINDOW;
345       else                              /* default is the regular partition function */
346         mx_type = VRNA_MX_DEFAULT;
347 
348       if (vc->strands > 1)
349         options |= VRNA_OPTION_HYBRID;
350 
351       realloc = 0;
352 
353       /*  check whether the DP matrices are present and sufficiently large */
354       if (!vc->exp_matrices || (vc->exp_matrices->type != mx_type) ||
355           (vc->exp_matrices->length < vc->length)) {
356         realloc = 1;
357       } else {
358         mx_alloc_vector = get_mx_alloc_vector(&(vc->exp_params->model_details),
359                                               mx_type,
360                                               options);
361         mx_alloc_vector_current = get_mx_pf_alloc_vector_current(vc->exp_matrices, mx_type);
362         if ((mx_alloc_vector & mx_alloc_vector_current) != mx_alloc_vector)
363           realloc = 1;
364       }
365 
366       if (realloc) /* add DP matrices if they are missing or too small */
367         ret &= vrna_mx_pf_add(vc, mx_type, options);
368 
369 #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
370       else   /* re-compute pf_scale and MLbase contributions (for RNAup)*/
371         vrna_exp_params_rescale(vc, NULL);
372 
373 #endif
374     }
375   } else {
376     ret = 0;
377   }
378 
379   return ret;
380 }
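
/*
 * Usage sketch (illustrative only; vrna_fold_compound() and
 * VRNA_OPTION_DEFAULT are assumed from the public ViennaRNA API):
 *
 *    vrna_fold_compound_t *fc = vrna_fold_compound(sequence, NULL, VRNA_OPTION_DEFAULT);
 *
 *    if (vrna_mx_prepare(fc, VRNA_OPTION_MFE)) {
 *      ...   fc->matrices now provides f5/c/fML arrays large enough for fc->length
 *    }
 *
 * vrna_mx_prepare() only (re-)allocates when the attached matrices are
 * missing, of the wrong type, or too small for the current sequence length.
 */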
381 
382 
383 /*
384  #####################################
385  # BEGIN OF STATIC HELPER FUNCTIONS  #
386  #####################################
387  */
388 PRIVATE unsigned int
389 get_mx_mfe_alloc_vector_current(vrna_mx_mfe_t   *mx,
390                                 vrna_mx_type_e  mx_type)
391 {
392   unsigned int mx_alloc_vector = ALLOC_NOTHING;
393 
394   if (mx) {
395     switch (mx_type) {
396       case VRNA_MX_DEFAULT:
397         if (mx->f5)
398           mx_alloc_vector |= ALLOC_F5;
399 
400         if (mx->f3)
401           mx_alloc_vector |= ALLOC_F3;
402 
403         if (mx->fc)
404           mx_alloc_vector |= ALLOC_HYBRID;
405 
406         if (mx->c)
407           mx_alloc_vector |= ALLOC_C;
408 
409         if (mx->fML)
410           mx_alloc_vector |= ALLOC_FML;
411 
412         if (mx->fM1)
413           mx_alloc_vector |= ALLOC_UNIQ;
414 
415         if (mx->fM2)
416           mx_alloc_vector |= ALLOC_CIRC;
417 
418         break;
419 
420       default:
421         break;
422     }
423   }
424 
425   return mx_alloc_vector;
426 }
427 
428 
429 PRIVATE unsigned int
430 get_mx_pf_alloc_vector_current(vrna_mx_pf_t   *mx,
431                                vrna_mx_type_e mx_type)
432 {
433   unsigned int mx_alloc_vector = ALLOC_NOTHING;
434 
435   if (mx) {
436     switch (mx_type) {
437       case VRNA_MX_DEFAULT:
438         if (mx->q)
439           mx_alloc_vector |= ALLOC_F;
440 
441         if (mx->qb)
442           mx_alloc_vector |= ALLOC_C;
443 
444         if (mx->qm)
445           mx_alloc_vector |= ALLOC_FML;
446 
447         if (mx->qm1)
448           mx_alloc_vector |= ALLOC_UNIQ;
449 
450         if (mx->qm2)
451           mx_alloc_vector |= ALLOC_CIRC;
452 
453         if (mx->probs)
454           mx_alloc_vector |= ALLOC_PROBS;
455 
456         if (mx->q1k && mx->qln)
457           mx_alloc_vector |= ALLOC_AUX;
458 
459         break;
460 
461       default:
462         break;
463     }
464   }
465 
466   return mx_alloc_vector;
467 }
468 
469 
470 PRIVATE int
471 add_pf_matrices(vrna_fold_compound_t  *vc,
472                 vrna_mx_type_e        mx_type,
473                 unsigned int          alloc_vector)
474 {
475   if (vc) {
476     switch (mx_type) {
477       case VRNA_MX_WINDOW:
478         vc->exp_matrices = get_pf_matrices_alloc(vc->length,
479                                                  vc->window_size,
480                                                  mx_type,
481                                                  alloc_vector);
482         break;
483       default:
484         vc->exp_matrices = get_pf_matrices_alloc(vc->length,
485                                                  vc->length,
486                                                  mx_type,
487                                                  alloc_vector);
488         break;
489     }
490 
491     if (!vc->exp_matrices)
492       return 0;
493 
494     if (vc->exp_params->model_details.gquad) {
495       switch (vc->type) {
496         case VRNA_FC_TYPE_SINGLE:
497           vc->exp_matrices->G = NULL;
498           /* can't do that here, since scale[] is not filled yet :(
499            * vc->exp_matrices->G = get_gquad_pf_matrix(vc->sequence_encoding2, vc->exp_matrices->scale, vc->exp_params);
500            */
501           break;
502         default:                    /* do nothing */
503           break;
504       }
505     }
506 
507     vrna_exp_params_rescale(vc, NULL);
508   }
509 
510   return 1;
511 }
512 
513 
514 PRIVATE int
515 add_mfe_matrices(vrna_fold_compound_t *vc,
516                  vrna_mx_type_e       mx_type,
517                  unsigned int         alloc_vector)
518 {
519   if (vc) {
520     switch (mx_type) {
521       case VRNA_MX_WINDOW:
522         vc->matrices = get_mfe_matrices_alloc(vc->length, vc->window_size, mx_type, alloc_vector);
523         break;
524       default:
525         vc->matrices = get_mfe_matrices_alloc(vc->length, vc->length, mx_type, alloc_vector);
526         break;
527     }
528 
529     if (!vc->matrices)
530       return 0;
531 
532     if (vc->params->model_details.gquad) {
533       switch (vc->type) {
534         case VRNA_FC_TYPE_SINGLE:
535           switch (mx_type) {
536             case VRNA_MX_WINDOW:                              /* do nothing, since we handle memory somewhere else */
537               break;
538             default:
539               vc->matrices->ggg = get_gquad_matrix(vc->sequence_encoding2, vc->params);
540               break;
541           }
542           break;
543         case VRNA_FC_TYPE_COMPARATIVE:
544           switch (mx_type) {
545             case VRNA_MX_WINDOW:                              /* do nothing, since we handle memory somewhere else */
546               break;
547             default:
548               vc->matrices->ggg = get_gquad_ali_matrix(vc->length,
549                                                        vc->S_cons,
550                                                        vc->S,
551                                                        vc->a2s,
552                                                        vc->n_seq,
553                                                        vc->params);
554               break;
555           }
556           break;
557         default:                      /* do nothing */
558           break;
559       }
560     }
561   }
562 
563   return 1;
564 }
565 
566 
567 PRIVATE vrna_mx_mfe_t *
568 get_mfe_matrices_alloc(unsigned int   n,
569                        unsigned int   m,
570                        vrna_mx_type_e type,
571                        unsigned int   alloc_vector)
572 {
573   vrna_mx_mfe_t *vars;
574 
575   if ((m > 0) && (n >= ((unsigned int)INT_MAX) / m)) {
576     vrna_message_warning("get_mfe_matrices_alloc: "
577                          "sequence length %u exceeds addressable range",
578                          n);
579     return NULL;
580   }
581 
582   vars          = (vrna_mx_mfe_t *)vrna_alloc(sizeof(vrna_mx_mfe_t));
583   vars->length  = n;
584   vars->type    = type;
585 
586   switch (type) {
587     case VRNA_MX_DEFAULT:
588       mfe_matrices_alloc_default(vars, m, alloc_vector);
589       break;
590 
591     case VRNA_MX_WINDOW:
592       mfe_matrices_alloc_window(vars, m, alloc_vector);
593       break;
594 
595     case VRNA_MX_2DFOLD:
596       mfe_matrices_alloc_2Dfold(vars, m, alloc_vector);
597       break;
598 
599     default:                /* do nothing */
600       break;
601   }
602 
603   return vars;
604 }
605 
606 
607 PRIVATE vrna_mx_pf_t *
608 get_pf_matrices_alloc(unsigned int    n,
609                       unsigned int    m,
610                       vrna_mx_type_e  type,
611                       unsigned int    alloc_vector)
612 {
613   unsigned int  lin_size;
614   vrna_mx_pf_t  *vars;
615 
616   if ((m > 0) && (n >= ((unsigned int)INT_MAX) / m)) {
617     vrna_message_warning("get_pf_matrices_alloc: "
618                          "sequence length %u exceeds addressable range",
619                          n);
620     return NULL;
621   }
622 
623   lin_size      = n + 2;
624   vars          = (vrna_mx_pf_t *)vrna_alloc(sizeof(vrna_mx_pf_t));
625   vars->length  = n;
626   vars->type    = type;
627 
628 
629   switch (type) {
630     case VRNA_MX_DEFAULT:
631       pf_matrices_alloc_default(vars, n, alloc_vector);
632       break;
633 
634     case VRNA_MX_WINDOW:
635       pf_matrices_alloc_window(vars, m, alloc_vector);
636       break;
637 
638     case VRNA_MX_2DFOLD:
639       pf_matrices_alloc_2Dfold(vars, n, alloc_vector);
640       break;
641 
642     default:                /* do nothing */
643       break;
644   }
645 
646   /*
647    *  always allocate the helper arrays for unpaired nucleotides in
648    *  multibranch loops (expMLbase) and for scaling (scale)
649    */
650   vars->scale     = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * lin_size);
651   vars->expMLbase = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * lin_size);
652 
653   return vars;
654 }
655 
656 
657 PRIVATE unsigned int
658 get_mx_alloc_vector(vrna_md_t       *md_p,
659                     vrna_mx_type_e  mx_type,
660                     unsigned int    options)
661 {
662   unsigned int v;
663 
664   v = ALLOC_NOTHING;
665 
666   /* default MFE matrices ? */
667   if (options & VRNA_OPTION_MFE)
668     v |= (mx_type == VRNA_MX_WINDOW) ? ALLOC_MFE_LOCAL : ALLOC_MFE_DEFAULT;
669 
670   /* default PF matrices ? */
671   if (options & VRNA_OPTION_PF)
672     v |= (md_p->compute_bpp) ? ALLOC_PF_DEFAULT : ALLOC_PF_WO_PROBS;
673 
674   if (options & VRNA_OPTION_HYBRID)
675     v |= ALLOC_HYBRID;
676 
677   /* matrices for circular folding ? */
678   if (md_p->circ) {
679     md_p->uniq_ML = 1; /* we need unique ML arrays for circular folding */
680     v             |= ALLOC_CIRC;
681   }
682 
683   /* unique ML decomposition ? */
684   if (md_p->uniq_ML)
685     v |= ALLOC_UNIQ;
686 
687   return v;
688 }
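
/*
 * Example (illustrative): for a two-strand (co-folding) MFE computation with
 * default model settings, the function above yields
 *
 *    ALLOC_MFE_DEFAULT | ALLOC_HYBRID
 *
 * so mfe_matrices_alloc_default() allocates f5, c, fML and the additional
 * fc array for the two-strand case.
 */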
689 
690 
691 PRIVATE void
692 mfe_matrices_alloc_default(vrna_mx_mfe_t  *vars,
693                            unsigned int   m,
694                            unsigned int   alloc_vector)
695 {
696   unsigned int n, size, lin_size;
697 
698   n         = vars->length;
699   size      = ((n + 1) * (m + 2)) / 2;
700   lin_size  = n + 2;
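
  /*
   * Note: 'size' covers the triangular (i <= j) index space of the pair
   * matrices (c, fML, fM1), while 'lin_size' covers the linear arrays
   * indexed by sequence position (f5, f3, fc, fM2).
   */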
701 
702   vars->f5  = NULL;
703   vars->f3  = NULL;
704   vars->fc  = NULL;
705   vars->c   = NULL;
706   vars->fML = NULL;
707   vars->fM1 = NULL;
708   vars->fM2 = NULL;
709   vars->ggg = NULL;
710 
711   if (alloc_vector & ALLOC_F5)
712     vars->f5 = (int *)vrna_alloc(sizeof(int) * lin_size);
713 
714   if (alloc_vector & ALLOC_F3)
715     vars->f3 = (int *)vrna_alloc(sizeof(int) * lin_size);
716 
717   if (alloc_vector & ALLOC_HYBRID)
718     vars->fc = (int *)vrna_alloc(sizeof(int) * lin_size);
719 
720   if (alloc_vector & ALLOC_C)
721     vars->c = (int *)vrna_alloc(sizeof(int) * size);
722 
723   if (alloc_vector & ALLOC_FML)
724     vars->fML = (int *)vrna_alloc(sizeof(int) * size);
725 
726   if (alloc_vector & ALLOC_UNIQ)
727     vars->fM1 = (int *)vrna_alloc(sizeof(int) * size);
728 
729   if (alloc_vector & ALLOC_CIRC)
730     vars->fM2 = (int *)vrna_alloc(sizeof(int) * lin_size);
731 
732   /* setting exterior loop energies for circular case to INF is always safe */
733   vars->FcH = vars->FcI = vars->FcM = vars->Fc = INF;
734 }
735 
736 
737 PRIVATE void
738 mfe_matrices_free_default(vrna_mx_mfe_t *self)
739 {
740   free(self->f5);
741   free(self->f3);
742   free(self->fc);
743   free(self->c);
744   free(self->fML);
745   free(self->fM1);
746   free(self->fM2);
747   free(self->ggg);
748 }
749 
750 
751 PRIVATE void
752 mfe_matrices_alloc_window(vrna_mx_mfe_t *vars,
753                           unsigned int  m,
754                           unsigned int  alloc_vector)
755 {
756   unsigned int n, lin_size;
757 
758   n         = vars->length;
759   lin_size  = n + 2;
760 
761   vars->f3_local  = NULL;
762   vars->c_local   = NULL;
763   vars->fML_local = NULL;
764   vars->ggg_local = NULL;
765 
766   if (alloc_vector & ALLOC_F3)
767     vars->f3_local = (int *)vrna_alloc(sizeof(int) * lin_size);
768 
769   if (alloc_vector & ALLOC_C)
770     vars->c_local = (int **)vrna_alloc(sizeof(int *) * lin_size);
771 
772   if (alloc_vector & ALLOC_FML)
773     vars->fML_local = (int **)vrna_alloc(sizeof(int *) * lin_size);
774 }
775 
776 
777 PRIVATE void
778 mfe_matrices_free_window(vrna_mx_mfe_t  *self,
779                          unsigned int   length,
780                          unsigned int   window_size)
781 {
782   free(self->c_local);
783   free(self->fML_local);
784   free(self->ggg_local);
785   free(self->f3_local);
786 }
787 
788 
789 PRIVATE void
790 mfe_matrices_alloc_2Dfold(vrna_mx_mfe_t *vars,
791                           unsigned int  m,
792                           unsigned int  alloc_vector)
793 {
794   unsigned int n, i, size, lin_size;
795 
796   n         = vars->length;
797   size      = ((n + 1) * (m + 2)) / 2;
798   lin_size  = n + 2;
799 
800   vars->E_F5      = NULL;
801   vars->l_min_F5  = NULL;
802   vars->l_max_F5  = NULL;
803   vars->k_min_F5  = NULL;
804   vars->k_max_F5  = NULL;
805   vars->E_F5_rem  = NULL;
806 
807   vars->E_F3      = NULL;
808   vars->l_min_F3  = NULL;
809   vars->l_max_F3  = NULL;
810   vars->k_min_F3  = NULL;
811   vars->k_max_F3  = NULL;
812   vars->E_F3_rem  = NULL;
813 
814   vars->E_C     = NULL;
815   vars->l_min_C = NULL;
816   vars->l_max_C = NULL;
817   vars->k_min_C = NULL;
818   vars->k_max_C = NULL;
819   vars->E_C_rem = NULL;
820 
821   vars->E_M     = NULL;
822   vars->l_min_M = NULL;
823   vars->l_max_M = NULL;
824   vars->k_min_M = NULL;
825   vars->k_max_M = NULL;
826   vars->E_M_rem = NULL;
827 
828   vars->E_M1      = NULL;
829   vars->l_min_M1  = NULL;
830   vars->l_max_M1  = NULL;
831   vars->k_min_M1  = NULL;
832   vars->k_max_M1  = NULL;
833   vars->E_M1_rem  = NULL;
834 
835   vars->E_M2      = NULL;
836   vars->l_min_M2  = NULL;
837   vars->l_max_M2  = NULL;
838   vars->k_min_M2  = NULL;
839   vars->k_max_M2  = NULL;
840   vars->E_M2_rem  = NULL;
841 
842   /* circular (exterior loop) matrices start empty; the remainder energies below are set to INF, which is always safe */
843   vars->E_Fc      = NULL;
844   vars->E_FcH     = NULL;
845   vars->E_FcI     = NULL;
846   vars->E_FcM     = NULL;
847   vars->E_Fc_rem  = INF;
848   vars->E_FcH_rem = INF;
849   vars->E_FcI_rem = INF;
850   vars->E_FcM_rem = INF;
851 
852   if (alloc_vector & ALLOC_F5) {
853     vars->E_F5      = (int ***)vrna_alloc(sizeof(int **) * lin_size);
854     vars->l_min_F5  = (int **)vrna_alloc(sizeof(int *) * lin_size);
855     vars->l_max_F5  = (int **)vrna_alloc(sizeof(int *) * lin_size);
856     vars->k_min_F5  = (int *)vrna_alloc(sizeof(int) * lin_size);
857     vars->k_max_F5  = (int *)vrna_alloc(sizeof(int) * lin_size);
858     vars->E_F5_rem  = (int *)vrna_alloc(sizeof(int) * lin_size);
859     for (i = 0; i <= n; i++)
860       vars->E_F5_rem[i] = INF;
861   }
862 
863   if (alloc_vector & ALLOC_F3) {
864     vars->E_F3      = (int ***)vrna_alloc(sizeof(int **) * lin_size);
865     vars->l_min_F3  = (int **)vrna_alloc(sizeof(int *) * lin_size);
866     vars->l_max_F3  = (int **)vrna_alloc(sizeof(int *) * lin_size);
867     vars->k_min_F3  = (int *)vrna_alloc(sizeof(int) * lin_size);
868     vars->k_max_F3  = (int *)vrna_alloc(sizeof(int) * lin_size);
869     vars->E_F3_rem  = (int *)vrna_alloc(sizeof(int) * lin_size);
870     for (i = 0; i <= n; i++)
871       vars->E_F3_rem[i] = INF;
872   }
873 
874   if (alloc_vector & ALLOC_C) {
875     vars->E_C     = (int ***)vrna_alloc(sizeof(int **) * size);
876     vars->l_min_C = (int **)vrna_alloc(sizeof(int *) * size);
877     vars->l_max_C = (int **)vrna_alloc(sizeof(int *) * size);
878     vars->k_min_C = (int *)vrna_alloc(sizeof(int) * size);
879     vars->k_max_C = (int *)vrna_alloc(sizeof(int) * size);
880     vars->E_C_rem = (int *)vrna_alloc(sizeof(int) * size);
881     for (i = 0; i < size; i++)
882       vars->E_C_rem[i] = INF;
883   }
884 
885   if (alloc_vector & ALLOC_FML) {
886     vars->E_M     = (int ***)vrna_alloc(sizeof(int **) * size);
887     vars->l_min_M = (int **)vrna_alloc(sizeof(int *) * size);
888     vars->l_max_M = (int **)vrna_alloc(sizeof(int *) * size);
889     vars->k_min_M = (int *)vrna_alloc(sizeof(int) * size);
890     vars->k_max_M = (int *)vrna_alloc(sizeof(int) * size);
891     vars->E_M_rem = (int *)vrna_alloc(sizeof(int) * size);
892     for (i = 0; i < size; i++)
893       vars->E_M_rem[i] = INF;
894   }
895 
896   if (alloc_vector & ALLOC_UNIQ) {
897     vars->E_M1      = (int ***)vrna_alloc(sizeof(int **) * size);
898     vars->l_min_M1  = (int **)vrna_alloc(sizeof(int *) * size);
899     vars->l_max_M1  = (int **)vrna_alloc(sizeof(int *) * size);
900     vars->k_min_M1  = (int *)vrna_alloc(sizeof(int) * size);
901     vars->k_max_M1  = (int *)vrna_alloc(sizeof(int) * size);
902     vars->E_M1_rem  = (int *)vrna_alloc(sizeof(int) * size);
903     for (i = 0; i < size; i++)
904       vars->E_M1_rem[i] = INF;
905   }
906 
907   if (alloc_vector & ALLOC_CIRC) {
908     vars->E_M2      = (int ***)vrna_alloc(sizeof(int **) * lin_size);
909     vars->l_min_M2  = (int **)vrna_alloc(sizeof(int *) * lin_size);
910     vars->l_max_M2  = (int **)vrna_alloc(sizeof(int *) * lin_size);
911     vars->k_min_M2  = (int *)vrna_alloc(sizeof(int) * lin_size);
912     vars->k_max_M2  = (int *)vrna_alloc(sizeof(int) * lin_size);
913     vars->E_M2_rem  = (int *)vrna_alloc(sizeof(int) * lin_size);
914     for (i = 0; i <= n; i++)
915       vars->E_M2_rem[i] = INF;
916   }
917 
918 #ifdef COUNT_STATES
919   vars->N_C   = (unsigned long ***)vrna_alloc(sizeof(unsigned long **) * size);
920   vars->N_F5  = (unsigned long ***)vrna_alloc(sizeof(unsigned long **) * lin_size);
921   vars->N_M   = (unsigned long ***)vrna_alloc(sizeof(unsigned long **) * size);
922   vars->N_M1  = (unsigned long ***)vrna_alloc(sizeof(unsigned long **) * size);
923 #endif
924 }
925 
926 
927 PRIVATE void
928 mfe_matrices_free_2Dfold(vrna_mx_mfe_t  *self,
929                          unsigned int   length,
930                          int            turn,
931                          int            *indx)
932 {
933   unsigned int  i, j, ij;
934   int           cnt1;
935 
936   /* This will be some fun... */
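
  /*
   * Note on the pattern below: the k/l-indexed arrays were allocated
   * elsewhere with their base pointers shifted (by k_min, and by l_min/2 for
   * the innermost dimension) so that they can be indexed by the actual k/l
   * values. Before each free(), the corresponding offset is added back to
   * recover the pointer originally returned by the allocator.
   */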
937 #ifdef COUNT_STATES
938   if (self->N_F5 != NULL) {
939     for (i = 1; i <= length; i++) {
940       if (!self->N_F5[i])
941         continue;
942 
943       for (cnt1 = self->k_min_F5[i]; cnt1 <= self->k_max_F5[i]; cnt1++)
944         if (self->l_min_F5[i][cnt1] < INF) {
945           self->N_F5[i][cnt1] += self->l_min_F5[i][cnt1] / 2;
946           free(self->N_F5[i][cnt1]);
947         }
948 
949       if (self->k_min_F5[i] < INF) {
950         self->N_F5[i] += self->k_min_F5[i];
951         free(self->N_F5[i]);
952       }
953     }
954     free(self->N_F5);
955   }
956 
957 #endif
958 
959   if (self->E_F5 != NULL) {
960     for (i = 1; i <= length; i++) {
961       if (!self->E_F5[i])
962         continue;
963 
964       for (cnt1 = self->k_min_F5[i]; cnt1 <= self->k_max_F5[i]; cnt1++)
965         if (self->l_min_F5[i][cnt1] < INF) {
966           self->E_F5[i][cnt1] += self->l_min_F5[i][cnt1] / 2;
967           free(self->E_F5[i][cnt1]);
968         }
969 
970       if (self->k_min_F5[i] < INF) {
971         self->E_F5[i] += self->k_min_F5[i];
972         free(self->E_F5[i]);
973         self->l_min_F5[i] += self->k_min_F5[i];
974         self->l_max_F5[i] += self->k_min_F5[i];
975         free(self->l_min_F5[i]);
976         free(self->l_max_F5[i]);
977       }
978     }
979     free(self->E_F5);
980     free(self->l_min_F5);
981     free(self->l_max_F5);
982     free(self->k_min_F5);
983     free(self->k_max_F5);
984   }
985 
986   if (self->E_F3 != NULL) {
987     for (i = 1; i <= length; i++) {
988       if (!self->E_F3[i])
989         continue;
990 
991       for (cnt1 = self->k_min_F3[i]; cnt1 <= self->k_max_F3[i]; cnt1++)
992         if (self->l_min_F3[i][cnt1] < INF) {
993           self->E_F3[i][cnt1] += self->l_min_F3[i][cnt1] / 2;
994           free(self->E_F3[i][cnt1]);
995         }
996 
997       if (self->k_min_F3[i] < INF) {
998         self->E_F3[i] += self->k_min_F3[i];
999         free(self->E_F3[i]);
1000         self->l_min_F3[i] += self->k_min_F3[i];
1001         self->l_max_F3[i] += self->k_min_F3[i];
1002         free(self->l_min_F3[i]);
1003         free(self->l_max_F3[i]);
1004       }
1005     }
1006     free(self->E_F3);
1007     free(self->l_min_F3);
1008     free(self->l_max_F3);
1009     free(self->k_min_F3);
1010     free(self->k_max_F3);
1011   }
1012 
1013 #ifdef COUNT_STATES
1014   if (self->N_C != NULL) {
1015     for (i = 1; i < length; i++) {
1016       for (j = i; j <= length; j++) {
1017         ij = indx[i] - j;
1018         if (!self->N_C[ij])
1019           continue;
1020 
1021         for (cnt1 = self->k_min_C[ij]; cnt1 <= self->k_max_C[ij]; cnt1++)
1022           if (self->l_min_C[ij][cnt1] < INF) {
1023             self->N_C[ij][cnt1] += self->l_min_C[ij][cnt1] / 2;
1024             free(self->N_C[ij][cnt1]);
1025           }
1026 
1027         if (self->k_min_C[ij] < INF) {
1028           self->N_C[ij] += self->k_min_C[ij];
1029           free(self->N_C[ij]);
1030         }
1031       }
1032     }
1033     free(self->N_C);
1034   }
1035 
1036 #endif
1037 
1038   if (self->E_C != NULL) {
1039     for (i = 1; i < length; i++) {
1040       for (j = i; j <= length; j++) {
1041         ij = indx[i] - j;
1042         if (!self->E_C[ij])
1043           continue;
1044 
1045         for (cnt1 = self->k_min_C[ij]; cnt1 <= self->k_max_C[ij]; cnt1++)
1046           if (self->l_min_C[ij][cnt1] < INF) {
1047             self->E_C[ij][cnt1] += self->l_min_C[ij][cnt1] / 2;
1048             free(self->E_C[ij][cnt1]);
1049           }
1050 
1051         if (self->k_min_C[ij] < INF) {
1052           self->E_C[ij] += self->k_min_C[ij];
1053           free(self->E_C[ij]);
1054           self->l_min_C[ij] += self->k_min_C[ij];
1055           self->l_max_C[ij] += self->k_min_C[ij];
1056           free(self->l_min_C[ij]);
1057           free(self->l_max_C[ij]);
1058         }
1059       }
1060     }
1061     free(self->E_C);
1062     free(self->l_min_C);
1063     free(self->l_max_C);
1064     free(self->k_min_C);
1065     free(self->k_max_C);
1066   }
1067 
1068 #ifdef COUNT_STATES
1069   if (self->N_M != NULL) {
1070     for (i = 1; i < length; i++) {
1071       for (j = i; j <= length; j++) {
1072         ij = indx[i] - j;
1073         if (!self->N_M[ij])
1074           continue;
1075 
1076         for (cnt1 = self->k_min_M[ij]; cnt1 <= self->k_max_M[ij]; cnt1++)
1077           if (self->l_min_M[ij][cnt1] < INF) {
1078             self->N_M[ij][cnt1] += self->l_min_M[ij][cnt1] / 2;
1079             free(self->N_M[ij][cnt1]);
1080           }
1081 
1082         if (self->k_min_M[ij] < INF) {
1083           self->N_M[ij] += self->k_min_M[ij];
1084           free(self->N_M[ij]);
1085         }
1086       }
1087     }
1088     free(self->N_M);
1089   }
1090 
1091 #endif
1092 
1093   if (self->E_M != NULL) {
1094     for (i = 1; i < length; i++) {
1095       for (j = i; j <= length; j++) {
1096         ij = indx[i] - j;
1097         if (!self->E_M[ij])
1098           continue;
1099 
1100         for (cnt1 = self->k_min_M[ij]; cnt1 <= self->k_max_M[ij]; cnt1++)
1101           if (self->l_min_M[ij][cnt1] < INF) {
1102             self->E_M[ij][cnt1] += self->l_min_M[ij][cnt1] / 2;
1103             free(self->E_M[ij][cnt1]);
1104           }
1105 
1106         if (self->k_min_M[ij] < INF) {
1107           self->E_M[ij] += self->k_min_M[ij];
1108           free(self->E_M[ij]);
1109           self->l_min_M[ij] += self->k_min_M[ij];
1110           self->l_max_M[ij] += self->k_min_M[ij];
1111           free(self->l_min_M[ij]);
1112           free(self->l_max_M[ij]);
1113         }
1114       }
1115     }
1116     free(self->E_M);
1117     free(self->l_min_M);
1118     free(self->l_max_M);
1119     free(self->k_min_M);
1120     free(self->k_max_M);
1121   }
1122 
1123 #ifdef COUNT_STATES
1124   if (self->N_M1 != NULL) {
1125     for (i = 1; i < length; i++) {
1126       for (j = i; j <= length; j++) {
1127         ij = indx[i] - j;
1128         if (!self->N_M1[ij])
1129           continue;
1130 
1131         for (cnt1 = self->k_min_M1[ij]; cnt1 <= self->k_max_M1[ij]; cnt1++)
1132           if (self->l_min_M1[ij][cnt1] < INF) {
1133             self->N_M1[ij][cnt1] += self->l_min_M1[ij][cnt1] / 2;
1134             free(self->N_M1[ij][cnt1]);
1135           }
1136 
1137         if (self->k_min_M1[ij] < INF) {
1138           self->N_M1[ij] += self->k_min_M1[ij];
1139           free(self->N_M1[ij]);
1140         }
1141       }
1142     }
1143     free(self->N_M1);
1144   }
1145 
1146 #endif
1147 
1148   if (self->E_M1 != NULL) {
1149     for (i = 1; i < length; i++) {
1150       for (j = i; j <= length; j++) {
1151         ij = indx[i] - j;
1152         if (!self->E_M1[ij])
1153           continue;
1154 
1155         for (cnt1 = self->k_min_M1[ij]; cnt1 <= self->k_max_M1[ij]; cnt1++)
1156           if (self->l_min_M1[ij][cnt1] < INF) {
1157             self->E_M1[ij][cnt1] += self->l_min_M1[ij][cnt1] / 2;
1158             free(self->E_M1[ij][cnt1]);
1159           }
1160 
1161         if (self->k_min_M1[ij] < INF) {
1162           self->E_M1[ij] += self->k_min_M1[ij];
1163           free(self->E_M1[ij]);
1164           self->l_min_M1[ij]  += self->k_min_M1[ij];
1165           self->l_max_M1[ij]  += self->k_min_M1[ij];
1166           free(self->l_min_M1[ij]);
1167           free(self->l_max_M1[ij]);
1168         }
1169       }
1170     }
1171     free(self->E_M1);
1172     free(self->l_min_M1);
1173     free(self->l_max_M1);
1174     free(self->k_min_M1);
1175     free(self->k_max_M1);
1176   }
1177 
1178   if (self->E_M2 != NULL) {
1179     for (i = 1; i < length - turn - 1; i++) {
1180       if (!self->E_M2[i])
1181         continue;
1182 
1183       for (cnt1 = self->k_min_M2[i]; cnt1 <= self->k_max_M2[i]; cnt1++)
1184         if (self->l_min_M2[i][cnt1] < INF) {
1185           self->E_M2[i][cnt1] += self->l_min_M2[i][cnt1] / 2;
1186           free(self->E_M2[i][cnt1]);
1187         }
1188 
1189       if (self->k_min_M2[i] < INF) {
1190         self->E_M2[i] += self->k_min_M2[i];
1191         free(self->E_M2[i]);
1192         self->l_min_M2[i] += self->k_min_M2[i];
1193         self->l_max_M2[i] += self->k_min_M2[i];
1194         free(self->l_min_M2[i]);
1195         free(self->l_max_M2[i]);
1196       }
1197     }
1198     free(self->E_M2);
1199     free(self->l_min_M2);
1200     free(self->l_max_M2);
1201     free(self->k_min_M2);
1202     free(self->k_max_M2);
1203   }
1204 
1205   if (self->E_Fc != NULL) {
1206     for (cnt1 = self->k_min_Fc; cnt1 <= self->k_max_Fc; cnt1++)
1207       if (self->l_min_Fc[cnt1] < INF) {
1208         self->E_Fc[cnt1] += self->l_min_Fc[cnt1] / 2;
1209         free(self->E_Fc[cnt1]);
1210       }
1211 
1212     if (self->k_min_Fc < INF) {
1213       self->E_Fc += self->k_min_Fc;
1214       free(self->E_Fc);
1215       self->l_min_Fc  += self->k_min_Fc;
1216       self->l_max_Fc  += self->k_min_Fc;
1217       free(self->l_min_Fc);
1218       free(self->l_max_Fc);
1219     }
1220   }
1221 
1222   if (self->E_FcI != NULL) {
1223     for (cnt1 = self->k_min_FcI; cnt1 <= self->k_max_FcI; cnt1++)
1224       if (self->l_min_FcI[cnt1] < INF) {
1225         self->E_FcI[cnt1] += self->l_min_FcI[cnt1] / 2;
1226         free(self->E_FcI[cnt1]);
1227       }
1228 
1229     if (self->k_min_FcI < INF) {
1230       self->E_FcI += self->k_min_FcI;
1231       free(self->E_FcI);
1232       self->l_min_FcI += self->k_min_FcI;
1233       self->l_max_FcI += self->k_min_FcI;
1234       free(self->l_min_FcI);
1235       free(self->l_max_FcI);
1236     }
1237   }
1238 
1239   if (self->E_FcH != NULL) {
1240     for (cnt1 = self->k_min_FcH; cnt1 <= self->k_max_FcH; cnt1++)
1241       if (self->l_min_FcH[cnt1] < INF) {
1242         self->E_FcH[cnt1] += self->l_min_FcH[cnt1] / 2;
1243         free(self->E_FcH[cnt1]);
1244       }
1245 
1246     if (self->k_min_FcH < INF) {
1247       self->E_FcH += self->k_min_FcH;
1248       free(self->E_FcH);
1249       self->l_min_FcH += self->k_min_FcH;
1250       self->l_max_FcH += self->k_min_FcH;
1251       free(self->l_min_FcH);
1252       free(self->l_max_FcH);
1253     }
1254   }
1255 
1256   if (self->E_FcM != NULL) {
1257     for (cnt1 = self->k_min_FcM; cnt1 <= self->k_max_FcM; cnt1++)
1258       if (self->l_min_FcM[cnt1] < INF) {
1259         self->E_FcM[cnt1] += self->l_min_FcM[cnt1] / 2;
1260         free(self->E_FcM[cnt1]);
1261       }
1262 
1263     if (self->k_min_FcM < INF) {
1264       self->E_FcM += self->k_min_FcM;
1265       free(self->E_FcM);
1266       self->l_min_FcM += self->k_min_FcM;
1267       self->l_max_FcM += self->k_min_FcM;
1268       free(self->l_min_FcM);
1269       free(self->l_max_FcM);
1270     }
1271   }
1272 
1273   free(self->E_F5_rem);
1274   free(self->E_F3_rem);
1275   free(self->E_C_rem);
1276   free(self->E_M_rem);
1277   free(self->E_M1_rem);
1278   free(self->E_M2_rem);
1279 }
1280 
1281 
1282 PRIVATE void
1283 pf_matrices_alloc_default(vrna_mx_pf_t  *vars,
1284                           unsigned int  m,
1285                           unsigned int  alloc_vector)
1286 {
1287   unsigned int n, size, lin_size;
1288 
1289   n         = vars->length;
1290   size      = ((n + 1) * (n + 2)) / 2;
1291   lin_size  = n + 2;
1292 
1293   vars->q     = NULL;
1294   vars->qb    = NULL;
1295   vars->qm    = NULL;
1296   vars->qm1   = NULL;
1297   vars->qm2   = NULL;
1298   vars->probs = NULL;
1299   vars->q1k   = NULL;
1300   vars->qln   = NULL;
1301 
1302   if (alloc_vector & ALLOC_F)
1303     vars->q = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1304 
1305   if (alloc_vector & ALLOC_C)
1306     vars->qb = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1307 
1308   if (alloc_vector & ALLOC_FML)
1309     vars->qm = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1310 
1311   if (alloc_vector & ALLOC_UNIQ)
1312     vars->qm1 = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1313 
1314   if (alloc_vector & ALLOC_CIRC)
1315     vars->qm2 = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * lin_size);
1316 
1317   if (alloc_vector & ALLOC_PROBS)
1318     vars->probs = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1319 
1320   if (alloc_vector & ALLOC_AUX) {
1321     vars->q1k = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * lin_size);
1322     vars->qln = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * lin_size);
1323   }
1324 }
1325 
1326 
1327 PRIVATE void
1328 pf_matrices_free_default(vrna_mx_pf_t *self)
1329 {
1330   free(self->q);
1331   free(self->qb);
1332   free(self->qm);
1333   free(self->qm1);
1334   free(self->qm2);
1335   free(self->probs);
1336   free(self->G);
1337   free(self->q1k);
1338   free(self->qln);
1339 }
1340 
1341 
1342 PRIVATE void
1343 pf_matrices_alloc_window(vrna_mx_pf_t *vars,
1344                          unsigned int m,
1345                          unsigned int alloc_vector)
1346 {
1347   unsigned int n, lin_size;
1348 
1349   n         = vars->length;
1350   lin_size  = n + 2;
1351 
1352   vars->q_local   = NULL;
1353   vars->qb_local  = NULL;
1354   vars->qm_local  = NULL;
1355   vars->qm2_local = NULL;
1356   vars->pR        = NULL;
1357   vars->QI5       = NULL;
1358   vars->q2l       = NULL;
1359   vars->qmb       = NULL;
1360   vars->G_local   = NULL;
1361 
1362   if (alloc_vector & ALLOC_F)
1363     vars->q_local = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * lin_size);
1364 
1365   if (alloc_vector & ALLOC_C)
1366     vars->qb_local = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * lin_size);
1367 
1368   if (alloc_vector & ALLOC_FML)
1369     vars->qm_local = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * lin_size);
1370 
1371   vars->pR = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * lin_size);
1372 
1373   if (alloc_vector & ALLOC_PROBS) {
1374     vars->QI5       = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * lin_size);
1375     vars->qmb       = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * lin_size);
1376     vars->qm2_local = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * lin_size);
1377     vars->q2l       = (FLT_OR_DBL **)vrna_alloc(sizeof(FLT_OR_DBL *) * lin_size);
1378   }
1379 }
1380 
1381 
1382 PRIVATE void
1383 pf_matrices_free_window(vrna_mx_pf_t  *self,
1384                         unsigned int  length,
1385                         unsigned int  window_size)
1386 {
1387   free(self->q_local);
1388   free(self->qb_local);
1389   free(self->qm_local);
1390   free(self->qm2_local);
1391   free(self->pR);
1392   free(self->QI5);
1393   free(self->q2l);
1394   free(self->qmb);
1395   free(self->G_local);
1396 }
1397 
1398 
1399 PRIVATE void
1400 pf_matrices_alloc_2Dfold(vrna_mx_pf_t *vars,
1401                          unsigned int m,
1402                          unsigned int alloc_vector)
1403 {
1404   unsigned int n, size, lin_size;
1405 
1406   n         = vars->length;
1407   size      = ((n + 1) * (n + 2)) / 2;
1408   lin_size  = n + 2;
1409 
1410   vars->Q       = NULL;
1411   vars->l_min_Q = NULL;
1412   vars->l_max_Q = NULL;
1413   vars->k_min_Q = NULL;
1414   vars->k_max_Q = NULL;
1415   vars->Q_rem   = NULL;
1416 
1417   vars->Q_B       = NULL;
1418   vars->l_min_Q_B = NULL;
1419   vars->l_max_Q_B = NULL;
1420   vars->k_min_Q_B = NULL;
1421   vars->k_max_Q_B = NULL;
1422   vars->Q_B_rem   = NULL;
1423 
1424   vars->Q_M       = NULL;
1425   vars->l_min_Q_M = NULL;
1426   vars->l_max_Q_M = NULL;
1427   vars->k_min_Q_M = NULL;
1428   vars->k_max_Q_M = NULL;
1429   vars->Q_M_rem   = NULL;
1430 
1431   vars->Q_M1        = NULL;
1432   vars->l_min_Q_M1  = NULL;
1433   vars->l_max_Q_M1  = NULL;
1434   vars->k_min_Q_M1  = NULL;
1435   vars->k_max_Q_M1  = NULL;
1436   vars->Q_M1_rem    = NULL;
1437 
1438   vars->Q_M2        = NULL;
1439   vars->l_min_Q_M2  = NULL;
1440   vars->l_max_Q_M2  = NULL;
1441   vars->k_min_Q_M2  = NULL;
1442   vars->k_max_Q_M2  = NULL;
1443   vars->Q_M2_rem    = NULL;
1444 
1445   vars->Q_c       = NULL;
1446   vars->Q_cH      = NULL;
1447   vars->Q_cI      = NULL;
1448   vars->Q_cM      = NULL;
1449   vars->Q_c_rem   = 0.;
1450   vars->Q_cH_rem  = 0.;
1451   vars->Q_cI_rem  = 0.;
1452   vars->Q_cM_rem  = 0.;
1453 
1454   if (alloc_vector & ALLOC_F) {
1455     vars->Q       = (FLT_OR_DBL ***)vrna_alloc(sizeof(FLT_OR_DBL **) * size);
1456     vars->l_min_Q = (int **)vrna_alloc(sizeof(int *) * size);
1457     vars->l_max_Q = (int **)vrna_alloc(sizeof(int *) * size);
1458     vars->k_min_Q = (int *)vrna_alloc(sizeof(int) * size);
1459     vars->k_max_Q = (int *)vrna_alloc(sizeof(int) * size);
1460     vars->Q_rem   = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1461   }
1462 
1463   if (alloc_vector & ALLOC_C) {
1464     vars->Q_B       = (FLT_OR_DBL ***)vrna_alloc(sizeof(FLT_OR_DBL **) * size);
1465     vars->l_min_Q_B = (int **)vrna_alloc(sizeof(int *) * size);
1466     vars->l_max_Q_B = (int **)vrna_alloc(sizeof(int *) * size);
1467     vars->k_min_Q_B = (int *)vrna_alloc(sizeof(int) * size);
1468     vars->k_max_Q_B = (int *)vrna_alloc(sizeof(int) * size);
1469     vars->Q_B_rem   = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1470   }
1471 
1472   if (alloc_vector & ALLOC_FML) {
1473     vars->Q_M       = (FLT_OR_DBL ***)vrna_alloc(sizeof(FLT_OR_DBL **) * size);
1474     vars->l_min_Q_M = (int **)vrna_alloc(sizeof(int *) * size);
1475     vars->l_max_Q_M = (int **)vrna_alloc(sizeof(int *) * size);
1476     vars->k_min_Q_M = (int *)vrna_alloc(sizeof(int) * size);
1477     vars->k_max_Q_M = (int *)vrna_alloc(sizeof(int) * size);
1478     vars->Q_M_rem   = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1479   }
1480 
1481   if (alloc_vector & ALLOC_UNIQ) {
1482     vars->Q_M1        = (FLT_OR_DBL ***)vrna_alloc(sizeof(FLT_OR_DBL **) * size);
1483     vars->l_min_Q_M1  = (int **)vrna_alloc(sizeof(int *) * size);
1484     vars->l_max_Q_M1  = (int **)vrna_alloc(sizeof(int *) * size);
1485     vars->k_min_Q_M1  = (int *)vrna_alloc(sizeof(int) * size);
1486     vars->k_max_Q_M1  = (int *)vrna_alloc(sizeof(int) * size);
1487     vars->Q_M1_rem    = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * size);
1488   }
1489 
1490   if (alloc_vector & ALLOC_CIRC) {
1491     vars->Q_M2        = (FLT_OR_DBL ***)vrna_alloc(sizeof(FLT_OR_DBL **) * lin_size);
1492     vars->l_min_Q_M2  = (int **)vrna_alloc(sizeof(int *) * lin_size);
1493     vars->l_max_Q_M2  = (int **)vrna_alloc(sizeof(int *) * lin_size);
1494     vars->k_min_Q_M2  = (int *)vrna_alloc(sizeof(int) * lin_size);
1495     vars->k_max_Q_M2  = (int *)vrna_alloc(sizeof(int) * lin_size);
1496     vars->Q_M2_rem    = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * lin_size);
1497   }
1498 }
1499 
1500 
1501 PRIVATE void
1502 pf_matrices_free_2Dfold(vrna_mx_pf_t  *self,
1503                         unsigned int  length,
1504                         int           turn,
1505                         int           *indx,
1506                         int           *jindx)
1507 {
1508   unsigned int  i, j, ij;
1509   int           cnt1;
1510 
1511   /* This will be some fun... */
1512   if (self->Q != NULL) {
1513     for (i = 1; i <= length; i++) {
1514       for (j = i; j <= length; j++) {
1515         ij = indx[i] - j;
1516         if (!self->Q[ij])
1517           continue;
1518 
1519         for (cnt1 = self->k_min_Q[ij]; cnt1 <= self->k_max_Q[ij]; cnt1++)
1520           if (self->l_min_Q[ij][cnt1] < INF) {
1521             self->Q[ij][cnt1] += self->l_min_Q[ij][cnt1] / 2;
1522             free(self->Q[ij][cnt1]);
1523           }
1524 
1525         if (self->k_min_Q[ij] < INF) {
1526           self->Q[ij] += self->k_min_Q[ij];
1527           free(self->Q[ij]);
1528           self->l_min_Q[ij] += self->k_min_Q[ij];
1529           self->l_max_Q[ij] += self->k_min_Q[ij];
1530           free(self->l_min_Q[ij]);
1531           free(self->l_max_Q[ij]);
1532         }
1533       }
1534     }
1535   }
1536 
1537   free(self->Q);
1538   free(self->l_min_Q);
1539   free(self->l_max_Q);
1540   free(self->k_min_Q);
1541   free(self->k_max_Q);
1542 
1543   if (self->Q_B != NULL) {
1544     for (i = 1; i < length; i++) {
1545       for (j = i; j <= length; j++) {
1546         ij = indx[i] - j;
1547         if (!self->Q_B[ij])
1548           continue;
1549 
1550         for (cnt1 = self->k_min_Q_B[ij]; cnt1 <= self->k_max_Q_B[ij]; cnt1++)
1551           if (self->l_min_Q_B[ij][cnt1] < INF) {
1552             self->Q_B[ij][cnt1] += self->l_min_Q_B[ij][cnt1] / 2;
1553             free(self->Q_B[ij][cnt1]);
1554           }
1555 
1556         if (self->k_min_Q_B[ij] < INF) {
1557           self->Q_B[ij] += self->k_min_Q_B[ij];
1558           free(self->Q_B[ij]);
1559           self->l_min_Q_B[ij] += self->k_min_Q_B[ij];
1560           self->l_max_Q_B[ij] += self->k_min_Q_B[ij];
1561           free(self->l_min_Q_B[ij]);
1562           free(self->l_max_Q_B[ij]);
1563         }
1564       }
1565     }
1566   }
1567 
1568   free(self->Q_B);
1569   free(self->l_min_Q_B);
1570   free(self->l_max_Q_B);
1571   free(self->k_min_Q_B);
1572   free(self->k_max_Q_B);
1573 
1574   if (self->Q_M != NULL) {
1575     for (i = 1; i < length; i++) {
1576       for (j = i; j <= length; j++) {
1577         ij = indx[i] - j;
1578         if (!self->Q_M[ij])
1579           continue;
1580 
1581         for (cnt1 = self->k_min_Q_M[ij]; cnt1 <= self->k_max_Q_M[ij]; cnt1++)
1582           if (self->l_min_Q_M[ij][cnt1] < INF) {
1583             self->Q_M[ij][cnt1] += self->l_min_Q_M[ij][cnt1] / 2;
1584             free(self->Q_M[ij][cnt1]);
1585           }
1586 
1587         if (self->k_min_Q_M[ij] < INF) {
1588           self->Q_M[ij] += self->k_min_Q_M[ij];
1589           free(self->Q_M[ij]);
1590           self->l_min_Q_M[ij] += self->k_min_Q_M[ij];
1591           self->l_max_Q_M[ij] += self->k_min_Q_M[ij];
1592           free(self->l_min_Q_M[ij]);
1593           free(self->l_max_Q_M[ij]);
1594         }
1595       }
1596     }
1597   }
1598 
1599   free(self->Q_M);
1600   free(self->l_min_Q_M);
1601   free(self->l_max_Q_M);
1602   free(self->k_min_Q_M);
1603   free(self->k_max_Q_M);
1604 
1605   if (self->Q_M1 != NULL) {
1606     for (i = 1; i < length; i++) {
1607       for (j = i; j <= length; j++) {
1608         ij = jindx[j] + i;
1609         if (!self->Q_M1[ij])
1610           continue;
1611 
1612         for (cnt1 = self->k_min_Q_M1[ij]; cnt1 <= self->k_max_Q_M1[ij]; cnt1++)
1613           if (self->l_min_Q_M1[ij][cnt1] < INF) {
1614             self->Q_M1[ij][cnt1] += self->l_min_Q_M1[ij][cnt1] / 2;
1615             free(self->Q_M1[ij][cnt1]);
1616           }
1617 
1618         if (self->k_min_Q_M1[ij] < INF) {
1619           self->Q_M1[ij] += self->k_min_Q_M1[ij];
1620           free(self->Q_M1[ij]);
1621           self->l_min_Q_M1[ij]  += self->k_min_Q_M1[ij];
1622           self->l_max_Q_M1[ij]  += self->k_min_Q_M1[ij];
1623           free(self->l_min_Q_M1[ij]);
1624           free(self->l_max_Q_M1[ij]);
1625         }
1626       }
1627     }
1628   }
1629 
1630   free(self->Q_M1);
1631   free(self->l_min_Q_M1);
1632   free(self->l_max_Q_M1);
1633   free(self->k_min_Q_M1);
1634   free(self->k_max_Q_M1);
1635 
1636   if (self->Q_M2 != NULL) {
1637     for (i = 1; i < length - turn - 1; i++) {
1638       if (!self->Q_M2[i])
1639         continue;
1640 
1641       for (cnt1 = self->k_min_Q_M2[i]; cnt1 <= self->k_max_Q_M2[i]; cnt1++)
1642         if (self->l_min_Q_M2[i][cnt1] < INF) {
1643           self->Q_M2[i][cnt1] += self->l_min_Q_M2[i][cnt1] / 2;
1644           free(self->Q_M2[i][cnt1]);
1645         }
1646 
1647       if (self->k_min_Q_M2[i] < INF) {
1648         self->Q_M2[i] += self->k_min_Q_M2[i];
1649         free(self->Q_M2[i]);
1650         self->l_min_Q_M2[i] += self->k_min_Q_M2[i];
1651         self->l_max_Q_M2[i] += self->k_min_Q_M2[i];
1652         free(self->l_min_Q_M2[i]);
1653         free(self->l_max_Q_M2[i]);
1654       }
1655     }
1656   }
1657 
1658   free(self->Q_M2);
1659   free(self->l_min_Q_M2);
1660   free(self->l_max_Q_M2);
1661   free(self->k_min_Q_M2);
1662   free(self->k_max_Q_M2);
1663 
1664   if (self->Q_c != NULL) {
1665     for (cnt1 = self->k_min_Q_c; cnt1 <= self->k_max_Q_c; cnt1++)
1666       if (self->l_min_Q_c[cnt1] < INF) {
1667         self->Q_c[cnt1] += self->l_min_Q_c[cnt1] / 2;
1668         free(self->Q_c[cnt1]);
1669       }
1670 
1671     if (self->k_min_Q_c < INF) {
1672       self->Q_c += self->k_min_Q_c;
1673       free(self->Q_c);
1674       self->l_min_Q_c += self->k_min_Q_c;
1675       self->l_max_Q_c += self->k_min_Q_c;
1676       free(self->l_min_Q_c);
1677       free(self->l_max_Q_c);
1678     }
1679   }
1680 
1681   if (self->Q_cI != NULL) {
1682     for (cnt1 = self->k_min_Q_cI; cnt1 <= self->k_max_Q_cI; cnt1++)
1683       if (self->l_min_Q_cI[cnt1] < INF) {
1684         self->Q_cI[cnt1] += self->l_min_Q_cI[cnt1] / 2;
1685         free(self->Q_cI[cnt1]);
1686       }
1687 
1688     if (self->k_min_Q_cI < INF) {
1689       self->Q_cI += self->k_min_Q_cI;
1690       free(self->Q_cI);
1691       self->l_min_Q_cI  += self->k_min_Q_cI;
1692       self->l_max_Q_cI  += self->k_min_Q_cI;
1693       free(self->l_min_Q_cI);
1694       free(self->l_max_Q_cI);
1695     }
1696   }
1697 
1698   if (self->Q_cH != NULL) {
1699     for (cnt1 = self->k_min_Q_cH; cnt1 <= self->k_max_Q_cH; cnt1++)
1700       if (self->l_min_Q_cH[cnt1] < INF) {
1701         self->Q_cH[cnt1] += self->l_min_Q_cH[cnt1] / 2;
1702         free(self->Q_cH[cnt1]);
1703       }
1704 
1705     if (self->k_min_Q_cH < INF) {
1706       self->Q_cH += self->k_min_Q_cH;
1707       free(self->Q_cH);
1708       self->l_min_Q_cH  += self->k_min_Q_cH;
1709       self->l_max_Q_cH  += self->k_min_Q_cH;
1710       free(self->l_min_Q_cH);
1711       free(self->l_max_Q_cH);
1712     }
1713   }
1714 
1715   if (self->Q_cM != NULL) {
1716     for (cnt1 = self->k_min_Q_cM; cnt1 <= self->k_max_Q_cM; cnt1++)
1717       if (self->l_min_Q_cM[cnt1] < INF) {
1718         self->Q_cM[cnt1] += self->l_min_Q_cM[cnt1] / 2;
1719         free(self->Q_cM[cnt1]);
1720       }
1721 
1722     if (self->k_min_Q_cM < INF) {
1723       self->Q_cM += self->k_min_Q_cM;
1724       free(self->Q_cM);
1725       self->l_min_Q_cM  += self->k_min_Q_cM;
1726       self->l_max_Q_cM  += self->k_min_Q_cM;
1727       free(self->l_min_Q_cM);
1728       free(self->l_max_Q_cM);
1729     }
1730   }
1731 
1732   free(self->Q_rem);
1733   free(self->Q_B_rem);
1734   free(self->Q_M_rem);
1735   free(self->Q_M1_rem);
1736   free(self->Q_M2_rem);
1737 }
1738