1 #include <stdlib.h>
2 #include <string.h>
3 #include <stdio.h>
4 #include <assert.h>
5 #include <limits.h>
6 #include <math.h>
7 #ifdef HAVE_PTHREAD
8 #include <pthread.h>
9 #endif
10 
11 #include "kstring.h"
12 #include "bwamem.h"
13 #include "bntseq.h"
14 #include "ksw.h"
15 #include "kvec.h"
16 #include "ksort.h"
17 #include "utils.h"
18 
19 #ifdef USE_MALLOC_WRAPPERS
20 #  include "malloc_wrap.h"
21 #endif
22 
/* Theory on probability and scoring *ungapped* alignment
 *
 * s'(a,b) = log[P(b|a)/P(b)] = log[4P(b|a)], assuming uniform base distribution
 * s'(a,a) = log(4), s'(a,b) = log(4e/3), where e is the error rate
 *
 * Scale s'(a,b) to s(a,a) s.t. s(a,a)=x. Then s(a,b) = x*s'(a,b)/log(4), or conversely: s'(a,b)=s(a,b)*log(4)/x
 *
 * If the matching score is x and mismatch penalty is -y, we can compute error rate e:
 *   e = .75 * exp[-log(4) * y/x]
 *
 * log P(seq) = \sum_i log P(b_i|a_i) = \sum_i {s'(a,b) - log(4)}
 *   = \sum_i { s(a,b)*log(4)/x - log(4) } = log(4) * (S/x - l)
 *
 * where S=\sum_i s(a,b) is the alignment score. Converting to the phred scale:
 *   Q(seq) = -10/log(10) * log P(seq) = 10*log(4)/log(10) * (l - S/x) = 6.02 * (l - S/x)
 *
 *
 * Gap open (zero gap): q' = log[P(gap-open)], r' = log[P(gap-ext)] (see Durbin et al. (1998) Section 4.1)
 * Then q = x*log[P(gap-open)]/log(4), r = x*log[P(gap-ext)]/log(4)
 *
 * When there are gaps, l should be the length of alignment matches (i.e. the M operator in CIGAR)
 */
45 
46 static const bntseq_t *global_bns = 0; // for debugging only
47 
mem_opt_init()48 mem_opt_t *mem_opt_init()
49 {
50 	mem_opt_t *o;
51 	o = calloc(1, sizeof(mem_opt_t));
52 	o->flag = 0;
53 	o->a = 1; o->b = 4;
54 	o->o_del = o->o_ins = 6;
55 	o->e_del = o->e_ins = 1;
56 	o->w = 100;
57 	o->T = 30;
58 	o->zdrop = 100;
59 	o->pen_unpaired = 17;
60 	o->pen_clip5 = o->pen_clip3 = 5;
61 
62 	o->max_mem_intv = 20;
63 
64 	o->min_seed_len = 19;
65 	o->split_width = 10;
66 	o->max_occ = 500;
67 	o->max_chain_gap = 10000;
68 	o->max_ins = 10000;
69 	o->mask_level = 0.50;
70 	o->drop_ratio = 0.50;
71 	o->XA_drop_ratio = 0.80;
72 	o->split_factor = 1.5;
73 	o->chunk_size = 10000000;
74 	o->n_threads = 1;
75 	o->max_XA_hits = 5;
76 	o->max_XA_hits_alt = 200;
77 	o->max_matesw = 50;
78 	o->mask_level_redun = 0.95;
79 	o->min_chain_weight = 0;
80 	o->max_chain_extend = 1<<30;
81 	o->mapQ_coef_len = 50; o->mapQ_coef_fac = log(o->mapQ_coef_len);
82 	bwa_fill_scmat(o->a, o->b, o->mat);
83 	return o;
84 }
85 
/***************************
 * Collecting SA intervals *
 ***************************/
89 
90 #define intv_lt(a, b) ((a).info < (b).info)
91 KSORT_INIT(mem_intv, bwtintv_t, intv_lt)
92 
// Reusable per-thread scratch space for seed collection.
// mem: all collected seed intervals; mem1: per-call output of bwt_smem1();
// tmpv[2]: internal work vectors passed to bwt_smem1().
typedef struct {
	bwtintv_v mem, mem1, *tmpv[2];
} smem_aux_t;
96 
smem_aux_init()97 static smem_aux_t *smem_aux_init()
98 {
99 	smem_aux_t *a;
100 	a = calloc(1, sizeof(smem_aux_t));
101 	a->tmpv[0] = calloc(1, sizeof(bwtintv_v));
102 	a->tmpv[1] = calloc(1, sizeof(bwtintv_v));
103 	return a;
104 }
105 
static void smem_aux_destroy(smem_aux_t *a)
{ // release a scratch buffer created by smem_aux_init(), including the kvec backing arrays
	int i;
	for (i = 0; i < 2; ++i) {
		free(a->tmpv[i]->a);
		free(a->tmpv[i]);
	}
	free(a->mem.a);
	free(a->mem1.a);
	free(a);
}
113 
// Collect all seed SA intervals for one query into a->mem, in three passes,
// then sort them by ->info (i.e. by query start, then end).
// ->info packs the query interval as qbeg<<32|qend; ->x[2] is the SA interval
// size, i.e. the number of occurrences in the reference.
static void mem_collect_intv(const mem_opt_t *opt, const bwt_t *bwt, int len, const uint8_t *seq, smem_aux_t *a)
{
	int i, k, x = 0, old_n;
	int start_width = 1;
	int split_len = (int)(opt->min_seed_len * opt->split_factor + .499); // SMEMs at least this long are re-seeded in pass 2
	a->mem.n = 0;
	// first pass: find all SMEMs
	while (x < len) {
		if (seq[x] < 4) { // skip ambiguous bases (encoded as 4)
			x = bwt_smem1(bwt, len, seq, x, start_width, &a->mem1, a->tmpv); // returns the next position to start from
			for (i = 0; i < a->mem1.n; ++i) {
				bwtintv_t *p = &a->mem1.a[i];
				int slen = (uint32_t)p->info - (p->info>>32); // seed length
				if (slen >= opt->min_seed_len)
					kv_push(bwtintv_t, a->mem, *p);
			}
		} else ++x;
	}
	// second pass: find MEMs inside a long SMEM
	// (only long, low-occurrence SMEMs are re-seeded, restarting from their midpoint
	//  with a minimum interval size of x[2]+1 so strictly more-repetitive matches are found)
	old_n = a->mem.n;
	for (k = 0; k < old_n; ++k) {
		bwtintv_t *p = &a->mem.a[k];
		int start = p->info>>32, end = (int32_t)p->info;
		if (end - start < split_len || p->x[2] > opt->split_width) continue;
		bwt_smem1(bwt, len, seq, (start + end)>>1, p->x[2]+1, &a->mem1, a->tmpv);
		for (i = 0; i < a->mem1.n; ++i)
			if ((uint32_t)a->mem1.a[i].info - (a->mem1.a[i].info>>32) >= opt->min_seed_len)
				kv_push(bwtintv_t, a->mem, a->mem1.a[i]);
	}
	// third pass: LAST-like — longest exact match with at most max_mem_intv occurrences
	if (opt->max_mem_intv > 0) {
		x = 0;
		while (x < len) {
			if (seq[x] < 4) {
				if (1) {
					bwtintv_t m;
					x = bwt_seed_strategy1(bwt, len, seq, x, opt->min_seed_len, opt->max_mem_intv, &m);
					if (m.x[2] > 0) kv_push(bwtintv_t, a->mem, m); // x[2]==0 means no qualifying seed here
				} else { // for now, we never come to this block which is slower
					x = bwt_smem1a(bwt, len, seq, x, start_width, opt->max_mem_intv, &a->mem1, a->tmpv);
					for (i = 0; i < a->mem1.n; ++i)
						kv_push(bwtintv_t, a->mem, a->mem1.a[i]);
				}
			} else ++x;
		}
	}
	// sort by ->info so seeds come out ordered along the query
	ks_introsort(mem_intv, a->mem.n, a->mem.a);
}
163 
164 /************
165  * Chaining *
166  ************/
167 
typedef struct {
	int64_t rbeg;       // start on the forward-reverse concatenated reference
	int32_t qbeg, len;  // start on the query and seed length
	int score;          // SW re-score from mem_seed_sw(), or len*opt->a
} mem_seed_t; // unaligned memory
173 
typedef struct {
	int n, m, first, rid;        // n/m: used/allocated seeds; first: index of the first chain shadowed by this one (-1 if none); rid: reference id
	uint32_t w:29, kept:2, is_alt:1; // w: chain weight; kept: filter state set by mem_chain_flt(); is_alt: chain is on an ALT contig
	float frac_rep;              // fraction of the query covered by repetitive seeds
	int64_t pos;                 // reference position used as the B-tree key in mem_chain()
	mem_seed_t *seeds;           // seed array, owned by the chain
} mem_chain_t;
181 
typedef struct { size_t n, m; mem_chain_t *a;  } mem_chain_v; // kvec-style vector of chains
183 
184 #include "kbtree.h"
185 
186 #define chain_cmp(a, b) (((b).pos < (a).pos) - ((a).pos < (b).pos))
KBTREE_INIT(chn,mem_chain_t,chain_cmp)187 KBTREE_INIT(chn, mem_chain_t, chain_cmp)
188 
189 // return 1 if the seed is merged into the chain
// return 1 if the seed is merged into the chain
// (i.e. *p is either contained in chain *c or appended to it); return 0 if the
// caller should start a new chain for this seed.
static int test_and_merge(const mem_opt_t *opt, int64_t l_pac, mem_chain_t *c, const mem_seed_t *p, int seed_rid)
{
	int64_t qend, rend, x, y;
	const mem_seed_t *last = &c->seeds[c->n-1]; // chains grow left-to-right, so this is the right-most seed
	qend = last->qbeg + last->len;
	rend = last->rbeg + last->len;
	if (seed_rid != c->rid) return 0; // different chr; request a new chain
	if (p->qbeg >= c->seeds[0].qbeg && p->qbeg + p->len <= qend && p->rbeg >= c->seeds[0].rbeg && p->rbeg + p->len <= rend)
		return 1; // contained seed; do nothing
	if ((last->rbeg < l_pac || c->seeds[0].rbeg < l_pac) && p->rbeg >= l_pac) return 0; // don't chain if on different strand
	x = p->qbeg - last->qbeg; // always non-negative (seeds are processed in query order)
	y = p->rbeg - last->rbeg;
	// accept if the seed is downstream on the reference, within band width w of the
	// chain diagonal, and within max_chain_gap of the last seed on both sequences
	if (y >= 0 && x - y <= opt->w && y - x <= opt->w && x - last->len < opt->max_chain_gap && y - last->len < opt->max_chain_gap) { // grow the chain
		if (c->n == c->m) { // seed array full; double its capacity
			c->m <<= 1;
			// NOTE(review): `p = realloc(p, ...)` loses the old pointer on OOM failure;
			// presumably relies on an aborting allocator wrapper (USE_MALLOC_WRAPPERS) — confirm
			c->seeds = realloc(c->seeds, c->m * sizeof(mem_seed_t));
		}
		c->seeds[c->n++] = *p;
		return 1;
	}
	return 0; // request to add a new chain
}
212 
mem_chain_weight(const mem_chain_t * c)213 int mem_chain_weight(const mem_chain_t *c)
214 {
215 	int64_t end;
216 	int j, w = 0, tmp;
217 	for (j = 0, end = 0; j < c->n; ++j) {
218 		const mem_seed_t *s = &c->seeds[j];
219 		if (s->qbeg >= end) w += s->len;
220 		else if (s->qbeg + s->len > end) w += s->qbeg + s->len - end;
221 		end = end > s->qbeg + s->len? end : s->qbeg + s->len;
222 	}
223 	tmp = w; w = 0;
224 	for (j = 0, end = 0; j < c->n; ++j) {
225 		const mem_seed_t *s = &c->seeds[j];
226 		if (s->rbeg >= end) w += s->len;
227 		else if (s->rbeg + s->len > end) w += s->rbeg + s->len - end;
228 		end = end > s->rbeg + s->len? end : s->rbeg + s->len;
229 	}
230 	w = w < tmp? w : tmp;
231 	return w < 1<<30? w : (1<<30)-1;
232 }
233 
void mem_print_chain(const bntseq_t *bns, mem_chain_v *chn)
{ // debugging helper: print every chain and each of its seeds in reference coordinates
	int ci, si;
	for (ci = 0; ci < chn->n; ++ci) {
		mem_chain_t *c = &chn->a[ci];
		err_printf("* Found CHAIN(%d): n=%d; weight=%d", ci, c->n, mem_chain_weight(c));
		for (si = 0; si < c->n; ++si) {
			const mem_seed_t *s = &c->seeds[si];
			int is_rev;
			bwtint_t pos = bns_depos(bns, s->rbeg, &is_rev);
			if (is_rev) pos -= s->len - 1; // report the leftmost forward-strand base of the seed
			err_printf("\t%d;%d;%d,%ld(%s:%c%ld)", s->score, s->len, s->qbeg, (long)s->rbeg, bns->anns[c->rid].name, "+-"[is_rev], (long)(pos - bns->anns[c->rid].offset) + 1);
		}
		err_putchar('\n');
	}
}
250 
// Find seeds for one query and greedily group them into chains.
// Seeds are inserted into a B-tree keyed on reference position so each new
// seed is tested against its closest existing chain. Returns a vector of
// chains; each chain owns its seed array. buf, if non-NULL, is a reusable
// smem_aux_t scratch buffer (from smem_aux_init()).
mem_chain_v mem_chain(const mem_opt_t *opt, const bwt_t *bwt, const bntseq_t *bns, int len, const uint8_t *seq, void *buf)
{
	int i, b, e, l_rep;
	int64_t l_pac = bns->l_pac;
	mem_chain_v chain;
	kbtree_t(chn) *tree;
	smem_aux_t *aux;

	kv_init(chain);
	if (len < opt->min_seed_len) return chain; // if the query is shorter than the seed length, no match
	tree = kb_init(chn, KB_DEFAULT_SIZE);

	aux = buf? (smem_aux_t*)buf : smem_aux_init();
	mem_collect_intv(opt, bwt, len, seq, aux);
	for (i = 0, b = e = l_rep = 0; i < aux->mem.n; ++i) { // compute frac_rep
		// l_rep = total query length covered by high-occurrence (repetitive) seeds,
		// accumulated by merging their query intervals ([b,e) is the current merged interval)
		bwtintv_t *p = &aux->mem.a[i];
		int sb = (p->info>>32), se = (uint32_t)p->info;
		if (p->x[2] <= opt->max_occ) continue; // only repetitive seeds count
		if (sb > e) l_rep += e - b, b = sb, e = se; // disjoint: flush the merged interval and start a new one
		else e = e > se? e : se; // overlapping: extend the merged interval
	}
	l_rep += e - b; // flush the last interval
	for (i = 0; i < aux->mem.n; ++i) {
		bwtintv_t *p = &aux->mem.a[i];
		int step, count, slen = (uint32_t)p->info - (p->info>>32); // seed length
		int64_t k;
		// if (slen < opt->min_seed_len) continue; // ignore if too short or too repetitive
		step = p->x[2] > opt->max_occ? p->x[2] / opt->max_occ : 1; // subsample at most max_occ occurrences of repetitive seeds
		for (k = count = 0; k < p->x[2] && count < opt->max_occ; k += step, ++count) {
			mem_chain_t tmp, *lower, *upper;
			mem_seed_t s;
			int rid, to_add = 0;
			s.rbeg = tmp.pos = bwt_sa(bwt, p->x[0] + k); // this is the base coordinate in the forward-reverse reference
			s.qbeg = p->info>>32;
			s.score= s.len = slen;
			rid = bns_intv2rid(bns, s.rbeg, s.rbeg + s.len);
			if (rid < 0) continue; // bridging multiple reference sequences or the forward-reverse boundary; TODO: split the seed; don't discard it!!!
			if (kb_size(tree)) {
				kb_intervalp(chn, tree, &tmp, &lower, &upper); // find the closest chain
				if (!lower || !test_and_merge(opt, l_pac, lower, &s, rid)) to_add = 1;
			} else to_add = 1;
			if (to_add) { // add the seed as a new chain
				tmp.n = 1; tmp.m = 4;
				tmp.seeds = calloc(tmp.m, sizeof(mem_seed_t));
				tmp.seeds[0] = s;
				tmp.rid = rid;
				tmp.is_alt = !!bns->anns[rid].is_alt;
				kb_putp(chn, tree, &tmp);
			}
		}
	}
	if (buf == 0) smem_aux_destroy(aux); // only free scratch we allocated ourselves

	kv_resize(mem_chain_t, chain, kb_size(tree));

	// flatten the B-tree into the output vector, in reference-position order
	#define traverse_func(p_) (chain.a[chain.n++] = *(p_))
	__kb_traverse(mem_chain_t, tree, traverse_func);
	#undef traverse_func

	for (i = 0; i < chain.n; ++i) chain.a[i].frac_rep = (float)l_rep / len; // every chain gets the same per-read value
	if (bwa_verbose >= 4) printf("* fraction of repetitive seeds: %.3f\n", (float)l_rep / len);

	kb_destroy(chn, tree);
	return chain;
}
316 
317 /********************
318  * Filtering chains *
319  ********************/
320 
321 #define chn_beg(ch) ((ch).seeds->qbeg)
322 #define chn_end(ch) ((ch).seeds[(ch).n-1].qbeg + (ch).seeds[(ch).n-1].len)
323 
324 #define flt_lt(a, b) ((a).w > (b).w)
KSORT_INIT(mem_flt,mem_chain_t,flt_lt)325 KSORT_INIT(mem_flt, mem_chain_t, flt_lt)
326 
// Filter chains in place: drop light-weight chains and chains heavily shadowed
// by better-scoring overlapping chains. Returns the number of chains kept.
// On return, each kept chain's .kept field is: 3 = kept, no large overlap with
// a better chain; 2 = kept despite a large overlap; 1 = shadowed but retained
// (used for mapQ); dropped chains are freed.
int mem_chain_flt(const mem_opt_t *opt, int n_chn, mem_chain_t *a)
{
	int i, k;
	kvec_t(int) chains = {0,0,0}; // this keeps int indices of the non-overlapping chains
	if (n_chn == 0) return 0; // no need to filter
	// compute the weight of each chain and drop chains with small weight
	for (i = k = 0; i < n_chn; ++i) {
		mem_chain_t *c = &a[i];
		c->first = -1; c->kept = 0;
		c->w = mem_chain_weight(c);
		if (c->w < opt->min_chain_weight) free(c->seeds);
		else a[k++] = *c;
	}
	n_chn = k;
	ks_introsort(mem_flt, n_chn, a); // sort by decreasing weight
	// pairwise chain comparisons
	a[0].kept = 3; // the heaviest chain is always kept
	kv_push(int, chains, 0);
	for (i = 1; i < n_chn; ++i) {
		int large_ovlp = 0;
		for (k = 0; k < chains.n; ++k) { // compare chain i against every chain kept so far
			int j = chains.a[k];
			int b_max = chn_beg(a[j]) > chn_beg(a[i])? chn_beg(a[j]) : chn_beg(a[i]);
			int e_min = chn_end(a[j]) < chn_end(a[i])? chn_end(a[j]) : chn_end(a[i]);
			if (e_min > b_max && (!a[j].is_alt || a[i].is_alt)) { // have overlap; don't consider ovlp where the kept chain is ALT while the current chain is primary
				int li = chn_end(a[i]) - chn_beg(a[i]);
				int lj = chn_end(a[j]) - chn_beg(a[j]);
				int min_l = li < lj? li : lj;
				if (e_min - b_max >= min_l * opt->mask_level && min_l < opt->max_chain_gap) { // significant overlap
					large_ovlp = 1;
					if (a[j].first < 0) a[j].first = i; // keep the first shadowed hit s.t. mapq can be more accurate
					if (a[i].w < a[j].w * opt->drop_ratio && a[j].w - a[i].w >= opt->min_seed_len<<1)
						break; // chain i is much lighter than the overlapping kept chain; drop it
				}
			}
		}
		if (k == chains.n) { // not dropped by any kept chain
			kv_push(int, chains, i);
			a[i].kept = large_ovlp? 2 : 3;
		}
	}
	for (i = 0; i < chains.n; ++i) { // retain the first chain shadowed by each kept chain
		mem_chain_t *c = &a[chains.a[i]];
		if (c->first >= 0) a[c->first].kept = 1;
	}
	free(chains.a);
	for (i = k = 0; i < n_chn; ++i) { // don't extend more than opt->max_chain_extend .kept=1/2 chains
		if (a[i].kept == 0 || a[i].kept == 3) continue;
		if (++k >= opt->max_chain_extend) break;
	}
	for (; i < n_chn; ++i) // demote the .kept=1/2 chains past the extension cap
		if (a[i].kept < 3) a[i].kept = 0;
	for (i = k = 0; i < n_chn; ++i) { // free discarded chains
		mem_chain_t *c = &a[i];
		if (c->kept == 0) free(c->seeds);
		else a[k++] = a[i];
	}
	return k;
}
386 
387 /******************************
388  * De-overlap single-end hits *
389  ******************************/
390 
391 #define alnreg_slt2(a, b) ((a).re < (b).re)
KSORT_INIT(mem_ars2,mem_alnreg_t,alnreg_slt2)392 KSORT_INIT(mem_ars2, mem_alnreg_t, alnreg_slt2)
393 
394 #define alnreg_slt(a, b) ((a).score > (b).score || ((a).score == (b).score && ((a).rb < (b).rb || ((a).rb == (b).rb && (a).qb < (b).qb))))
395 KSORT_INIT(mem_ars, mem_alnreg_t, alnreg_slt)
396 
397 #define alnreg_hlt(a, b)  ((a).score > (b).score || ((a).score == (b).score && ((a).is_alt < (b).is_alt || ((a).is_alt == (b).is_alt && (a).hash < (b).hash))))
398 KSORT_INIT(mem_ars_hash, mem_alnreg_t, alnreg_hlt)
399 
400 #define alnreg_hlt2(a, b) ((a).is_alt < (b).is_alt || ((a).is_alt == (b).is_alt && ((a).score > (b).score || ((a).score == (b).score && (a).hash < (b).hash))))
401 KSORT_INIT(mem_ars_hash2, mem_alnreg_t, alnreg_hlt2)
402 
403 #define PATCH_MAX_R_BW 0.05f
404 #define PATCH_MIN_SC_RATIO 0.90f
405 
// Test whether two colinear hits a (left) and b (right, same rid, a->rb <= b->rb)
// can be merged into a single alignment. Returns the score of the merged global
// alignment (>0) and writes the band width used to *_w on success; returns 0 if
// the hits should not be merged.
int mem_patch_reg(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, uint8_t *query, const mem_alnreg_t *a, const mem_alnreg_t *b, int *_w)
{
	int w, score, q_s, r_s;
	double r;
	if (bns == 0 || pac == 0 || query == 0) return 0;
	assert(a->rid == b->rid && a->rb <= b->rb);
	if (a->rb < bns->l_pac && b->rb >= bns->l_pac) return 0; // on different strands
	if (a->qb >= b->qb || a->qe >= b->qe || a->re >= b->re) return 0; // not colinear
	w = (a->re - b->rb) - (a->qe - b->qb); // required bandwidth
	w = w > 0? w : -w; // l = abs(l)
	r = (double)(a->re - b->rb) / (b->re - a->rb) - (double)(a->qe - b->qb) / (b->qe - a->qb); // relative bandwidth
	r = r > 0.? r : -r; // r = fabs(r)
	if (bwa_verbose >= 4)
		printf("* potential hit merge between [%d,%d)<=>[%ld,%ld) and [%d,%d)<=>[%ld,%ld), @ %s; w=%d, r=%.4g\n",
			   a->qb, a->qe, (long)a->rb, (long)a->re, b->qb, b->qe, (long)b->rb, (long)b->re, bns->anns[a->rid].name, w, r);
	if (a->re < b->rb || a->qe < b->qb) { // no overlap on query or on ref
		if (w > opt->w<<1 || r >= PATCH_MAX_R_BW) return 0; // the bandwidth or the relative bandwidth is too large
	} else if (w > opt->w<<2 || r >= PATCH_MAX_R_BW*2) return 0; // more permissive if overlapping on both ref and query
	// global alignment over the combined span [a->qb,b->qe) <=> [a->rb,b->re)
	w += a->w + b->w;
	w = w < opt->w<<2? w : opt->w<<2;
	if (bwa_verbose >= 4) printf("* test potential hit merge with global alignment; w=%d\n", w);
	bwa_gen_cigar2(opt->mat, opt->o_del, opt->e_del, opt->o_ins, opt->e_ins, w, bns->l_pac, pac, b->qe - a->qb, query + a->qb, a->rb, b->re, &score, 0, 0);
	q_s = (int)((double)(b->qe - a->qb) / ((b->qe - b->qb) + (a->qe - a->qb)) * (b->score + a->score) + .499); // predicted score from query
	r_s = (int)((double)(b->re - a->rb) / ((b->re - b->rb) + (a->re - a->rb)) * (b->score + a->score) + .499); // predicted score from ref
	if (bwa_verbose >= 4) printf("* score=%d;(%d,%d)\n", score, q_s, r_s);
	if ((double)score / (q_s > r_s? q_s : r_s) < PATCH_MIN_SC_RATIO) return 0; // merged score falls too far below the prediction; reject
	*_w = w;
	return score;
}
436 
// Sort hits, remove redundant/identical ones, and merge ("patch") colinear
// neighbouring hits via mem_patch_reg(). Works in place on a[0..n-1] and
// returns the new number of hits. A hit is marked excluded by setting qe==qb.
int mem_sort_dedup_patch(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, uint8_t *query, int n, mem_alnreg_t *a)
{
	int m, i, j;
	if (n <= 1) return n;
	ks_introsort(mem_ars2, n, a); // sort by the END position, not START!
	for (i = 0; i < n; ++i) a[i].n_comp = 1; // each hit starts as its own component
	for (i = 1; i < n; ++i) {
		mem_alnreg_t *p = &a[i];
		if (p->rid != a[i-1].rid || p->rb >= a[i-1].re + opt->max_chain_gap) continue; // then no need to go into the loop below
		for (j = i - 1; j >= 0 && p->rid == a[j].rid && p->rb < a[j].re + opt->max_chain_gap; --j) {
			mem_alnreg_t *q = &a[j];
			int64_t or, oq, mr, mq;
			int score, w;
			if (q->qe == q->qb) continue; // a[j] has been excluded
			or = q->re - p->rb; // overlap length on the reference
			oq = q->qb < p->qb? q->qe - p->qb : p->qe - q->qb; // overlap length on the query
			mr = q->re - q->rb < p->re - p->rb? q->re - q->rb : p->re - p->rb; // min ref len in alignment
			mq = q->qe - q->qb < p->qe - p->qb? q->qe - q->qb : p->qe - p->qb; // min qry len in alignment
			if (or > opt->mask_level_redun * mr && oq > opt->mask_level_redun * mq) { // one of the hits is redundant
				if (p->score < q->score) {
					p->qe = p->qb; // exclude the lower-scoring hit p
					break;
				} else q->qe = q->qb; // exclude q instead
			} else if (q->rb < p->rb && (score = mem_patch_reg(opt, bns, pac, query, q, p, &w)) > 0) { // then merge q into p
				p->n_comp += q->n_comp + 1;
				p->seedcov = p->seedcov > q->seedcov? p->seedcov : q->seedcov;
				p->sub = p->sub > q->sub? p->sub : q->sub;
				p->csub = p->csub > q->csub? p->csub : q->csub;
				p->qb = q->qb, p->rb = q->rb; // p now spans both hits
				p->truesc = p->score = score;
				p->w = w;
				q->qb = q->qe; // exclude q, now absorbed into p
			}
		}
	}
	for (i = 0, m = 0; i < n; ++i) // exclude identical hits
		if (a[i].qe > a[i].qb) { // compact, keeping only non-excluded hits
			if (m != i) a[m++] = a[i];
			else ++m;
		}
	n = m;
	ks_introsort(mem_ars, n, a); // sort by score, then by position
	for (i = 1; i < n; ++i) { // mark identical hits
		if (a[i].score == a[i-1].score && a[i].rb == a[i-1].rb && a[i].qb == a[i-1].qb)
			a[i].qe = a[i].qb;
	}
	for (i = 1, m = 1; i < n; ++i) // exclude identical hits
		if (a[i].qe > a[i].qb) {
			if (m != i) a[m++] = a[i];
			else ++m;
		}
	return m;
}
490 
491 typedef kvec_t(int) int_v;
492 
// Mark each hit that significantly overlaps a better-scoring hit as secondary
// to it (a[] must already be sorted by decreasing score); indices of primary
// hits are collected in *z. Also records, per primary hit, the best shadowed
// score (.sub) and the count of close suboptimal hits (.sub_n) for mapQ.
static void mem_mark_primary_se_core(const mem_opt_t *opt, int n, mem_alnreg_t *a, int_v *z)
{ // similar to the loop in mem_chain_flt()
	int i, k, tmp;
	tmp = opt->a + opt->b;
	tmp = opt->o_del + opt->e_del > tmp? opt->o_del + opt->e_del : tmp;
	tmp = opt->o_ins + opt->e_ins > tmp? opt->o_ins + opt->e_ins : tmp; // tmp = max penalty of a single difference
	z->n = 0;
	kv_push(int, *z, 0); // the top-scoring hit is always primary
	for (i = 1; i < n; ++i) {
		for (k = 0; k < z->n; ++k) {
			int j = z->a[k];
			int b_max = a[j].qb > a[i].qb? a[j].qb : a[i].qb;
			int e_min = a[j].qe < a[i].qe? a[j].qe : a[i].qe;
			if (e_min > b_max) { // have overlap
				int min_l = a[i].qe - a[i].qb < a[j].qe - a[j].qb? a[i].qe - a[i].qb : a[j].qe - a[j].qb;
				if (e_min - b_max >= min_l * opt->mask_level) { // significant overlap
					if (a[j].sub == 0) a[j].sub = a[i].score; // best score among hits shadowed by j
					if (a[j].score - a[i].score <= tmp && (a[j].is_alt || !a[i].is_alt))
						++a[j].sub_n; // a within-one-difference suboptimal hit
					break;
				}
			}
		}
		if (k == z->n) kv_push(int, *z, i); // no significant overlap with any primary hit; i is primary too
		else a[i].secondary = z->a[k];
	}
}
520 
// Mark primary/secondary relations among all hits of one read, handling ALT
// contigs: primary marking is redone over non-ALT hits only when both ALT and
// non-ALT hits are present. Returns n_pri, the number of non-ALT hits.
int mem_mark_primary_se(const mem_opt_t *opt, int n, mem_alnreg_t *a, int64_t id)
{
	int i, n_pri;
	int_v z = {0,0,0};
	if (n == 0) return 0;
	for (i = n_pri = 0; i < n; ++i) {
		a[i].sub = a[i].alt_sc = 0, a[i].secondary = a[i].secondary_all = -1, a[i].hash = hash_64(id+i); // hash gives a reproducible tie-break
		if (!a[i].is_alt) ++n_pri;
	}
	ks_introsort(mem_ars_hash, n, a); // sort by score (ties broken by is_alt, then hash)
	mem_mark_primary_se_core(opt, n, a, &z);
	for (i = 0; i < n; ++i) {
		mem_alnreg_t *p = &a[i];
		p->secondary_all = i; // keep the rank in the first round
		if (!p->is_alt && p->secondary >= 0 && a[p->secondary].is_alt)
			p->alt_sc = a[p->secondary].score; // score of the ALT hit this primary hit is secondary to
	}
	if (n_pri >= 0 && n_pri < n) { // i.e. there is at least one ALT hit (n_pri >= 0 always holds)
		kv_resize(int, z, n);
		if (n_pri > 0) ks_introsort(mem_ars_hash2, n, a); // move non-ALT hits to the front
		for (i = 0; i < n; ++i) z.a[a[i].secondary_all] = i; // map first-round rank -> new rank
		for (i = 0; i < n; ++i) {
			if (a[i].secondary >= 0) {
				a[i].secondary_all = z.a[a[i].secondary]; // remap the secondary link to the new ordering
				if (a[i].is_alt) a[i].secondary = INT_MAX;
			} else a[i].secondary_all = -1;
		}
		if (n_pri > 0) { // mark primary for hits to the primary assembly only
			for (i = 0; i < n_pri; ++i) a[i].sub = 0, a[i].secondary = -1;
			mem_mark_primary_se_core(opt, n_pri, a, &z);
		}
	} else {
		for (i = 0; i < n; ++i)
			a[i].secondary_all = a[i].secondary;
	}
	free(z.a);
	return n_pri;
}
559 
560 /*********************************
561  * Test if a seed is good enough *
562  *********************************/
563 
564 #define MEM_SHORT_EXT 50
565 #define MEM_SHORT_LEN 200
566 
567 #define MEM_HSP_COEF 1.1f
568 #define MEM_MINSC_COEF 5.5f
569 #define MEM_SEEDSW_COEF 0.05f
570 
// Re-score a short seed with Smith-Waterman over a small window around it.
// Returns the SW score, or -1 if SW was skipped because the seed (or the
// extended window) is long enough to be trusted as-is.
int mem_seed_sw(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, int l_query, const uint8_t *query, const mem_seed_t *s)
{
	int qb, qe, rid;
	int64_t rb, re, mid, l_pac = bns->l_pac;
	uint8_t *rseq = 0;
	kswr_t x;

	if (s->len >= MEM_SHORT_LEN) return -1; // the seed is longer than the max-extend; no need to do SW
	qb = s->qbeg, qe = s->qbeg + s->len;
	rb = s->rbeg, re = s->rbeg + s->len;
	mid = (rb + re) >> 1;
	// extend the window by MEM_SHORT_EXT on each side, clamped to the sequences
	qb -= MEM_SHORT_EXT; qb = qb > 0? qb : 0;
	qe += MEM_SHORT_EXT; qe = qe < l_query? qe : l_query;
	rb -= MEM_SHORT_EXT; rb = rb > 0? rb : 0;
	re += MEM_SHORT_EXT; re = re < l_pac<<1? re : l_pac<<1;
	if (rb < l_pac && l_pac < re) { // window crosses the forward-reverse boundary; keep the side containing the seed midpoint
		if (mid < l_pac) re = l_pac;
		else rb = l_pac;
	}
	if (qe - qb >= MEM_SHORT_LEN || re - rb >= MEM_SHORT_LEN) return -1; // the seed seems good enough; no need to do SW

	rseq = bns_fetch_seq(bns, pac, &rb, mid, &re, &rid);
	x = ksw_align2(qe - qb, (uint8_t*)query + qb, re - rb, rseq, 5, opt->mat, opt->o_del, opt->e_del, opt->o_ins, opt->e_ins, KSW_XSTART, 0);
	free(rseq);
	return x.score;
}
597 
void mem_flt_chained_seeds(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, int l_query, const uint8_t *query, int n_chn, mem_chain_t *a)
{ // re-score each chained seed with SW and drop seeds scoring below the HSP threshold; only applied to sufficiently long reads
	double min_l = opt->min_chain_weight? MEM_HSP_COEF * opt->min_chain_weight : MEM_MINSC_COEF * log(l_query);
	int ci, si, kept, min_HSP_score = (int)(opt->a * min_l + .499);
	if (min_l > MEM_SEEDSW_COEF * l_query) return; // don't run the following for short reads
	for (ci = 0; ci < n_chn; ++ci) {
		mem_chain_t *c = &a[ci];
		for (si = kept = 0; si < c->n; ++si) {
			mem_seed_t *s = &c->seeds[si];
			s->score = mem_seed_sw(opt, bns, pac, l_query, query, s);
			if (s->score >= min_HSP_score || s->score < 0) { // keep: good SW score, or SW was skipped (-1)
				if (s->score < 0) s->score = s->len * opt->a; // skipped seeds get the maximal possible score
				c->seeds[kept++] = *s;
			}
		}
		c->n = kept; // seeds past this index are discarded
	}
}
616 
617 /****************************************
618  * Construct the alignment from a chain *
619  ****************************************/
620 
cal_max_gap(const mem_opt_t * opt,int qlen)621 static inline int cal_max_gap(const mem_opt_t *opt, int qlen)
622 {
623 	int l_del = (int)((double)(qlen * opt->a - opt->o_del) / opt->e_del + 1.);
624 	int l_ins = (int)((double)(qlen * opt->a - opt->o_ins) / opt->e_ins + 1.);
625 	int l = l_del > l_ins? l_del : l_ins;
626 	l = l > 1? l : 1;
627 	return l < opt->w<<1? l : opt->w<<1;
628 }
629 
630 #define MAX_BAND_TRY  2
631 
// Turn one chain into one or more gapped alignments by extending its seeds
// with banded SW (ksw_extend2) to the left and right. Seeds are processed in
// decreasing score order; a seed (almost) contained in an alignment already
// produced is skipped unless an overlapping sibling seed suggests it could
// yield a different alignment. New alignments are appended to *av.
void mem_chain2aln(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, int l_query, const uint8_t *query, const mem_chain_t *c, mem_alnreg_v *av)
{
	int i, k, rid, max_off[2], aw[2]; // aw: actual bandwidth used in extension
	int64_t l_pac = bns->l_pac, rmax[2], tmp, max = 0;
	const mem_seed_t *s;
	uint8_t *rseq = 0;
	uint64_t *srt;

	if (c->n == 0) return;
	// get the max possible span (reference window any extension could reach)
	rmax[0] = l_pac<<1; rmax[1] = 0;
	for (i = 0; i < c->n; ++i) {
		int64_t b, e;
		const mem_seed_t *t = &c->seeds[i];
		b = t->rbeg - (t->qbeg + cal_max_gap(opt, t->qbeg));
		e = t->rbeg + t->len + ((l_query - t->qbeg - t->len) + cal_max_gap(opt, l_query - t->qbeg - t->len));
		rmax[0] = rmax[0] < b? rmax[0] : b;
		rmax[1] = rmax[1] > e? rmax[1] : e;
		if (t->len > max) max = t->len;
	}
	rmax[0] = rmax[0] > 0? rmax[0] : 0;
	rmax[1] = rmax[1] < l_pac<<1? rmax[1] : l_pac<<1;
	if (rmax[0] < l_pac && l_pac < rmax[1]) { // crossing the forward-reverse boundary; then choose one side
		if (c->seeds[0].rbeg < l_pac) rmax[1] = l_pac; // this works because all seeds are guaranteed to be on the same strand
		else rmax[0] = l_pac;
	}
	// retrieve the reference sequence
	rseq = bns_fetch_seq(bns, pac, &rmax[0], c->seeds[0].rbeg, &rmax[1], &rid);
	assert(c->rid == rid);

	// sort seed indices by score so the best seed is extended first (processed from the back)
	srt = malloc(c->n * 8);
	for (i = 0; i < c->n; ++i)
		srt[i] = (uint64_t)c->seeds[i].score<<32 | i;
	ks_introsort_64(c->n, srt);

	for (k = c->n - 1; k >= 0; --k) {
		mem_alnreg_t *a;
		s = &c->seeds[(uint32_t)srt[k]];

		for (i = 0; i < av->n; ++i) { // test whether extension has been made before
			mem_alnreg_t *p = &av->a[i];
			int64_t rd;
			int qd, w, max_gap;
			if (s->rbeg < p->rb || s->rbeg + s->len > p->re || s->qbeg < p->qb || s->qbeg + s->len > p->qe) continue; // not fully contained
			if (s->len - p->seedlen0 > .1 * l_query) continue; // this seed may give a better alignment
			// qd: distance ahead of the seed on query; rd: on reference
			qd = s->qbeg - p->qb; rd = s->rbeg - p->rb;
			max_gap = cal_max_gap(opt, qd < rd? qd : rd); // the maximal gap allowed in regions ahead of the seed
			w = max_gap < p->w? max_gap : p->w; // bounded by the band width
			if (qd - rd < w && rd - qd < w) break; // the seed is "around" a previous hit
			// similar to the previous four lines, but this time we look at the region behind
			qd = p->qe - (s->qbeg + s->len); rd = p->re - (s->rbeg + s->len);
			max_gap = cal_max_gap(opt, qd < rd? qd : rd);
			w = max_gap < p->w? max_gap : p->w;
			if (qd - rd < w && rd - qd < w) break;
		}
		if (i < av->n) { // the seed is (almost) contained in an existing alignment; further testing is needed to confirm it is not leading to a different aln
			if (bwa_verbose >= 4)
				printf("** Seed(%d) [%ld;%ld,%ld] is almost contained in an existing alignment [%d,%d) <=> [%ld,%ld)\n",
					   k, (long)s->len, (long)s->qbeg, (long)s->rbeg, av->a[i].qb, av->a[i].qe, (long)av->a[i].rb, (long)av->a[i].re);
			for (i = k + 1; i < c->n; ++i) { // check overlapping seeds in the same chain
				const mem_seed_t *t;
				if (srt[i] == 0) continue; // seed already processed and marked as skipped
				t = &c->seeds[(uint32_t)srt[i]];
				if (t->len < s->len * .95) continue; // only check overlapping if t is long enough; TODO: more efficient by early stopping
				if (s->qbeg <= t->qbeg && s->qbeg + s->len - t->qbeg >= s->len>>2 && t->qbeg - s->qbeg != t->rbeg - s->rbeg) break; // t overlaps s on a different diagonal
				if (t->qbeg <= s->qbeg && t->qbeg + t->len - s->qbeg >= s->len>>2 && s->qbeg - t->qbeg != s->rbeg - t->rbeg) break;
			}
			if (i == c->n) { // no overlapping seeds; then skip extension
				srt[k] = 0; // mark that seed extension has not been performed
				continue;
			}
			if (bwa_verbose >= 4)
				printf("** Seed(%d) might lead to a different alignment even though it is contained. Extension will be performed.\n", k);
		}

		a = kv_pushp(mem_alnreg_t, *av);
		memset(a, 0, sizeof(mem_alnreg_t));
		a->w = aw[0] = aw[1] = opt->w;
		a->score = a->truesc = -1;
		a->rid = c->rid;

		if (bwa_verbose >= 4) err_printf("** ---> Extending from seed(%d) [%ld;%ld,%ld] @ %s <---\n", k, (long)s->len, (long)s->qbeg, (long)s->rbeg, bns->anns[c->rid].name);
		if (s->qbeg) { // left extension
			uint8_t *rs, *qs;
			int qle, tle, gtle, gscore;
			// reverse the query prefix and the reference prefix so left extension becomes right extension
			qs = malloc(s->qbeg);
			for (i = 0; i < s->qbeg; ++i) qs[i] = query[s->qbeg - 1 - i];
			tmp = s->rbeg - rmax[0];
			rs = malloc(tmp);
			for (i = 0; i < tmp; ++i) rs[i] = rseq[tmp - 1 - i];
			for (i = 0; i < MAX_BAND_TRY; ++i) { // retry with a doubled band if the alignment hit the band boundary
				int prev = a->score;
				aw[0] = opt->w << i;
				if (bwa_verbose >= 4) {
					int j;
					printf("*** Left ref:   "); for (j = 0; j < tmp; ++j) putchar("ACGTN"[(int)rs[j]]); putchar('\n');
					printf("*** Left query: "); for (j = 0; j < s->qbeg; ++j) putchar("ACGTN"[(int)qs[j]]); putchar('\n');
				}
				a->score = ksw_extend2(s->qbeg, qs, tmp, rs, 5, opt->mat, opt->o_del, opt->e_del, opt->o_ins, opt->e_ins, aw[0], opt->pen_clip5, opt->zdrop, s->len * opt->a, &qle, &tle, &gtle, &gscore, &max_off[0]);
				if (bwa_verbose >= 4) { printf("*** Left extension: prev_score=%d; score=%d; bandwidth=%d; max_off_diagonal_dist=%d\n", prev, a->score, aw[0], max_off[0]); fflush(stdout); }
				if (a->score == prev || max_off[0] < (aw[0]>>1) + (aw[0]>>2)) break;
			}
			// check whether we prefer to reach the end of the query
			if (gscore <= 0 || gscore <= a->score - opt->pen_clip5) { // local extension
				a->qb = s->qbeg - qle, a->rb = s->rbeg - tle;
				a->truesc = a->score;
			} else { // to-end extension
				a->qb = 0, a->rb = s->rbeg - gtle;
				a->truesc = gscore;
			}
			free(qs); free(rs);
		} else a->score = a->truesc = s->len * opt->a, a->qb = 0, a->rb = s->rbeg; // seed starts at the query start; nothing to extend

		if (s->qbeg + s->len != l_query) { // right extension
			int qle, tle, qe, re, gtle, gscore, sc0 = a->score;
			qe = s->qbeg + s->len;
			re = s->rbeg + s->len - rmax[0]; // offset into the fetched reference window
			assert(re >= 0);
			for (i = 0; i < MAX_BAND_TRY; ++i) { // same band-doubling retry as on the left
				int prev = a->score;
				aw[1] = opt->w << i;
				if (bwa_verbose >= 4) {
					int j;
					printf("*** Right ref:   "); for (j = 0; j < rmax[1] - rmax[0] - re; ++j) putchar("ACGTN"[(int)rseq[re+j]]); putchar('\n');
					printf("*** Right query: "); for (j = 0; j < l_query - qe; ++j) putchar("ACGTN"[(int)query[qe+j]]); putchar('\n');
				}
				a->score = ksw_extend2(l_query - qe, query + qe, rmax[1] - rmax[0] - re, rseq + re, 5, opt->mat, opt->o_del, opt->e_del, opt->o_ins, opt->e_ins, aw[1], opt->pen_clip3, opt->zdrop, sc0, &qle, &tle, &gtle, &gscore, &max_off[1]);
				if (bwa_verbose >= 4) { printf("*** Right extension: prev_score=%d; score=%d; bandwidth=%d; max_off_diagonal_dist=%d\n", prev, a->score, aw[1], max_off[1]); fflush(stdout); }
				if (a->score == prev || max_off[1] < (aw[1]>>1) + (aw[1]>>2)) break;
			}
			// similar to the above
			if (gscore <= 0 || gscore <= a->score - opt->pen_clip3) { // local extension
				a->qe = qe + qle, a->re = rmax[0] + re + tle;
				a->truesc += a->score - sc0;
			} else { // to-end extension
				a->qe = l_query, a->re = rmax[0] + re + gtle;
				a->truesc += gscore - sc0;
			}
		} else a->qe = l_query, a->re = s->rbeg + s->len; // seed ends at the query end; nothing to extend
		if (bwa_verbose >= 4) printf("*** Added alignment region: [%d,%d) <=> [%ld,%ld); score=%d; {left,right}_bandwidth={%d,%d}\n", a->qb, a->qe, (long)a->rb, (long)a->re, a->score, aw[0], aw[1]);

		// compute seedcov
		for (i = 0, a->seedcov = 0; i < c->n; ++i) {
			const mem_seed_t *t = &c->seeds[i];
			if (t->qbeg >= a->qb && t->qbeg + t->len <= a->qe && t->rbeg >= a->rb && t->rbeg + t->len <= a->re) // seed fully contained
				a->seedcov += t->len; // this is not very accurate, but for approx. mapQ, this is good enough
		}
		a->w = aw[0] > aw[1]? aw[0] : aw[1];
		a->seedlen0 = s->len; // length of the seed that initiated this alignment

		a->frac_rep = c->frac_rep;
	}
	free(srt); free(rseq);
}
787 
788 /*****************************
789  * Basic hit->SAM conversion *
790  *****************************/
791 
static inline int infer_bw(int l1, int l2, int score, int a, int q, int r)
{
	// Infer the band width required for a global alignment to reproduce a
	// local alignment of the given score (a: match score; q/r: gap open/ext).
	int min_l = l1 < l2? l1 : l2;
	int diff = l1 > l2? l1 - l2 : l2 - l1;
	// Equal lengths with a near-maximal score cannot contain gaps: keeping the
	// lengths equal would require at least two gaps.
	if (l1 == l2 && l1 * a - score < (q + r - a)<<1) return 0;
	int w = (int)((double)(min_l * a - score - q) / r + 2.);
	// The band must at least span the length difference.
	return w < diff? diff : w;
}
800 
static inline int get_rlen(int n_cigar, const uint32_t *cigar)
{
	// Reference length consumed by a CIGAR: the sum of the lengths of ops that
	// advance the reference, i.e. M (op 0) and D (op 2).
	int len = 0;
	int k;
	for (k = 0; k < n_cigar; ++k) {
		uint32_t c = cigar[k]; // length<<4 | op
		int op = c & 0xf;
		if (op == 0 || op == 2) len += c >> 4;
	}
	return len;
}
811 
static inline void add_cigar(const mem_opt_t *opt, mem_aln_t *p, kstring_t *str, int which)
{
	// Append the CIGAR string of p to str, or '*' if the record carries a
	// coordinate but no alignment (e.g. a position copied from the mate).
	int i;
	if (p->n_cigar == 0) {
		kputc('*', str);
		return;
	}
	for (i = 0; i < p->n_cigar; ++i) {
		int op = p->cigar[i]&0xf;
		// Unless soft-clipping all (-Y) or an ALT hit: the primary record
		// (which == 0) keeps soft clips, supplementary records get hard clips.
		if (!(opt->flag&MEM_F_SOFTCLIP) && !p->is_alt && (op == 3 || op == 4))
			op = which? 4 : 3;
		kputw(p->cigar[i]>>4, str);
		kputc("MIDSH"[op], str);
	}
}
824 
// Convert one alignment (list[which]) of read s into a SAM line appended to str.
// n/list: all hits retained for this read (scanned for the SA tag); which: index
// of the hit to print; m_: mate alignment, or NULL for single-end. The function
// works on local copies of list[which] and *m_, so the caller's records are
// never modified even though flags/positions get patched below.
void mem_aln2sam(const mem_opt_t *opt, const bntseq_t *bns, kstring_t *str, bseq1_t *s, int n, const mem_aln_t *list, int which, const mem_aln_t *m_)
{
	int i, l_name;
	mem_aln_t ptmp = list[which], *p = &ptmp, mtmp, *m = 0; // make a copy of the alignment to convert

	if (m_) mtmp = *m_, m = &mtmp;
	// set flag
	p->flag |= m? 0x1 : 0; // is paired in sequencing
	p->flag |= p->rid < 0? 0x4 : 0; // is mapped
	p->flag |= m && m->rid < 0? 0x8 : 0; // is mate mapped
	if (p->rid < 0 && m && m->rid >= 0) // copy mate to alignment
		p->rid = m->rid, p->pos = m->pos, p->is_rev = m->is_rev, p->n_cigar = 0;
	if (m && m->rid < 0 && p->rid >= 0) // copy alignment to mate
		m->rid = p->rid, m->pos = p->pos, m->is_rev = p->is_rev, m->n_cigar = 0;
	p->flag |= p->is_rev? 0x10 : 0; // is on the reverse strand
	p->flag |= m && m->is_rev? 0x20 : 0; // is mate on the reverse strand

	// print up to CIGAR
	l_name = strlen(s->name);
	ks_resize(str, str->l + s->l_seq + l_name + (s->qual? s->l_seq : 0) + 20); // rough upper bound on growth
	kputsn(s->name, l_name, str); kputc('\t', str); // QNAME
	kputw((p->flag&0xffff) | (p->flag&0x10000? 0x100 : 0), str); kputc('\t', str); // FLAG; 0x10000 (set in mem_reg2sam under -M) prints as secondary (0x100)
	if (p->rid >= 0) { // with coordinate
		kputs(bns->anns[p->rid].name, str); kputc('\t', str); // RNAME
		kputl(p->pos + 1, str); kputc('\t', str); // POS (SAM is 1-based)
		kputw(p->mapq, str); kputc('\t', str); // MAPQ
		add_cigar(opt, p, str, which);
	} else kputsn("*\t0\t0\t*", 7, str); // without coordinate
	kputc('\t', str);

	// print the mate position if applicable
	if (m && m->rid >= 0) {
		if (p->rid == m->rid) kputc('=', str);
		else kputs(bns->anns[m->rid].name, str);
		kputc('\t', str);
		kputl(m->pos + 1, str); kputc('\t', str);
		if (p->rid == m->rid) { // TLEN: signed outermost-to-outermost distance
			int64_t p0 = p->pos + (p->is_rev? get_rlen(p->n_cigar, p->cigar) - 1 : 0);
			int64_t p1 = m->pos + (m->is_rev? get_rlen(m->n_cigar, m->cigar) - 1 : 0);
			if (m->n_cigar == 0 || p->n_cigar == 0) kputc('0', str); // either end unaligned: TLEN undefined
			else kputl(-(p0 - p1 + (p0 > p1? 1 : p0 < p1? -1 : 0)), str);
		} else kputc('0', str);
	} else kputsn("*\t0\t0", 5, str);
	kputc('\t', str);

	// print SEQ and QUAL
	if (p->flag & 0x100) { // for secondary alignments, don't write SEQ and QUAL
		kputsn("*\t*", 3, str);
	} else if (!p->is_rev) { // the forward strand
		int i, qb = 0, qe = s->l_seq;
		if (p->n_cigar && which && !(opt->flag&MEM_F_SOFTCLIP) && !p->is_alt) { // have cigar && not the primary alignment && not softclip all
			// trim the clipped ends, since add_cigar printed them as hard clips
			if ((p->cigar[0]&0xf) == 4 || (p->cigar[0]&0xf) == 3) qb += p->cigar[0]>>4;
			if ((p->cigar[p->n_cigar-1]&0xf) == 4 || (p->cigar[p->n_cigar-1]&0xf) == 3) qe -= p->cigar[p->n_cigar-1]>>4;
		}
		ks_resize(str, str->l + (qe - qb) + 1);
		for (i = qb; i < qe; ++i) str->s[str->l++] = "ACGTN"[(int)s->seq[i]];
		kputc('\t', str);
		if (s->qual) { // print qual
			ks_resize(str, str->l + (qe - qb) + 1);
			for (i = qb; i < qe; ++i) str->s[str->l++] = s->qual[i];
			str->s[str->l] = 0;
		} else kputc('*', str);
	} else { // the reverse strand: emit the reverse complement
		int i, qb = 0, qe = s->l_seq;
		if (p->n_cigar && which && !(opt->flag&MEM_F_SOFTCLIP) && !p->is_alt) {
			if ((p->cigar[0]&0xf) == 4 || (p->cigar[0]&0xf) == 3) qe -= p->cigar[0]>>4;
			if ((p->cigar[p->n_cigar-1]&0xf) == 4 || (p->cigar[p->n_cigar-1]&0xf) == 3) qb += p->cigar[p->n_cigar-1]>>4;
		}
		ks_resize(str, str->l + (qe - qb) + 1);
		for (i = qe-1; i >= qb; --i) str->s[str->l++] = "TGCAN"[(int)s->seq[i]];
		kputc('\t', str);
		if (s->qual) { // print qual, reversed to match SEQ
			ks_resize(str, str->l + (qe - qb) + 1);
			for (i = qe-1; i >= qb; --i) str->s[str->l++] = s->qual[i];
			str->s[str->l] = 0;
		} else kputc('*', str);
	}

	// print optional tags
	if (p->n_cigar) {
		kputsn("\tNM:i:", 6, str); kputw(p->NM, str);
		kputsn("\tMD:Z:", 6, str); kputs((char*)(p->cigar + p->n_cigar), str); // MD string is stored right after the CIGAR ops (see mem_reg2aln)
	}
	if (m && m->n_cigar) { kputsn("\tMC:Z:", 6, str); add_cigar(opt, m, str, which); } // mate CIGAR
	if (p->score >= 0) { kputsn("\tAS:i:", 6, str); kputw(p->score, str); } // alignment score
	if (p->sub >= 0) { kputsn("\tXS:i:", 6, str); kputw(p->sub, str); } // sub-optimal score
	if (bwa_rg_id[0]) { kputsn("\tRG:Z:", 6, str); kputs(bwa_rg_id, str); } // read group
	if (!(p->flag & 0x100)) { // not multi-hit
		for (i = 0; i < n; ++i)
			if (i != which && !(list[i].flag&0x100)) break;
		if (i < n) { // there are other primary hits; output them
			kputsn("\tSA:Z:", 6, str);
			for (i = 0; i < n; ++i) {
				const mem_aln_t *r = &list[i];
				int k;
				if (i == which || (r->flag&0x100)) continue; // proceed if: 1) different from the current; 2) not shadowed multi hit
				kputs(bns->anns[r->rid].name, str); kputc(',', str);
				kputl(r->pos+1, str); kputc(',', str);
				kputc("+-"[r->is_rev], str); kputc(',', str);
				for (k = 0; k < r->n_cigar; ++k) {
					kputw(r->cigar[k]>>4, str); kputc("MIDSH"[r->cigar[k]&0xf], str);
				}
				kputc(',', str); kputw(r->mapq, str);
				kputc(',', str); kputw(r->NM, str);
				kputc(';', str);
			}
		}
		if (p->alt_sc > 0) // score of the best overlapping ALT-contig hit
			ksprintf(str, "\tpa:f:%.3f", (double)p->score / p->alt_sc);
	}
	if (p->XA) { kputsn("\tXA:Z:", 6, str); kputs(p->XA, str); } // alternative hits
	if (s->comment) { kputc('\t', str); kputs(s->comment, str); } // pass through the FASTQ comment
	if ((opt->flag&MEM_F_REF_HDR) && p->rid >= 0 && bns->anns[p->rid].anno != 0 && bns->anns[p->rid].anno[0] != 0) {
		int tmp;
		kputsn("\tXR:Z:", 6, str); // reference-sequence annotation
		tmp = str->l;
		kputs(bns->anns[p->rid].anno, str);
		for (i = tmp; i < str->l; ++i) // replace TAB in the comment to SPACE
			if (str->s[i] == '\t') str->s[i] = ' ';
	}
	kputc('\n', str);
}
947 
948 /************************
949  * Integrated interface *
950  ************************/
951 
mem_approx_mapq_se(const mem_opt_t * opt,const mem_alnreg_t * a)952 int mem_approx_mapq_se(const mem_opt_t *opt, const mem_alnreg_t *a)
953 {
954 	int mapq, l, sub = a->sub? a->sub : opt->min_seed_len * opt->a;
955 	double identity;
956 	sub = a->csub > sub? a->csub : sub;
957 	if (sub >= a->score) return 0;
958 	l = a->qe - a->qb > a->re - a->rb? a->qe - a->qb : a->re - a->rb;
959 	identity = 1. - (double)(l * opt->a - a->score) / (opt->a + opt->b) / l;
960 	if (a->score == 0) {
961 		mapq = 0;
962 	} else if (opt->mapQ_coef_len > 0) {
963 		double tmp;
964 		tmp = l < opt->mapQ_coef_len? 1. : opt->mapQ_coef_fac / log(l);
965 		tmp *= identity * identity;
966 		mapq = (int)(6.02 * (a->score - sub) / opt->a * tmp * tmp + .499);
967 	} else {
968 		mapq = (int)(MEM_MAPQ_COEF * (1. - (double)sub / a->score) * log(a->seedcov) + .499);
969 		mapq = identity < 0.95? (int)(mapq * identity * identity + .499) : mapq;
970 	}
971 	if (a->sub_n > 0) mapq -= (int)(4.343 * log(a->sub_n+1) + .499);
972 	if (mapq > 60) mapq = 60;
973 	if (mapq < 0) mapq = 0;
974 	mapq = (int)(mapq * (1. - a->frac_rep) + .499);
975 	return mapq;
976 }
977 
void mem_reorder_primary5(int T, mem_alnreg_v *a)
{
	// Move the primary hit that starts leftmost on the query (5'-most) to the
	// front of the array, keeping secondary/secondary_all indices consistent.
	// Only hits that are primary (secondary < 0), non-ALT and score >= T count.
	int k, n_pri = 0, best_qb = INT_MAX, best_k = -1;
	mem_alnreg_t swap;
	for (k = 0; k < a->n; ++k) {
		mem_alnreg_t *p = &a->a[k];
		if (p->secondary < 0 && !p->is_alt && p->score >= T) {
			++n_pri;
			if (p->qb < best_qb) best_qb = p->qb, best_k = k;
		}
	}
	if (n_pri <= 1) return; // at most one qualifying primary: nothing to do
	assert(a->a[0].secondary < 0);
	if (best_k == 0) return; // the 5'-most primary is already first
	swap = a->a[0], a->a[0] = a->a[best_k], a->a[best_k] = swap;
	for (k = 1; k < a->n; ++k) { // indices 0 and best_k exchanged roles above
		mem_alnreg_t *p = &a->a[k];
		if (p->secondary == 0) p->secondary = best_k;
		else if (p->secondary == best_k) p->secondary = 0;
		if (p->secondary_all == 0) p->secondary_all = best_k;
		else if (p->secondary_all == best_k) p->secondary_all = 0;
	}
}
1001 
1002 // TODO (future plan): group hits into a uint64_t[] array. This will be cleaner and more flexible
// TODO (future plan): group hits into a uint64_t[] array. This will be cleaner and more flexible
// Convert the retained alignment regions of read s into SAM records, stored in
// s->sam (heap-allocated; ownership passes to the read). extra_flag is OR-ed
// into every record's FLAG; m is the mate alignment or NULL. When no region
// reaches opt->T, a single unmapped record is emitted instead.
void mem_reg2sam(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, bseq1_t *s, mem_alnreg_v *a, int extra_flag, const mem_aln_t *m)
{
	extern char **mem_gen_alt(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, mem_alnreg_v *a, int l_query, const char *query);
	kstring_t str;
	kvec_t(mem_aln_t) aa;
	int k, l;
	char **XA = 0; // per-region XA tag strings, parallel to a->a; freed below

	if (!(opt->flag & MEM_F_ALL))
		XA = mem_gen_alt(opt, bns, pac, a, s->l_seq, s->seq);
	kv_init(aa);
	str.l = str.m = 0; str.s = 0;
	for (k = l = 0; k < a->n; ++k) { // l counts records accepted so far
		mem_alnreg_t *p = &a->a[k];
		mem_aln_t *q;
		if (p->score < opt->T) continue; // below the output threshold
		if (p->secondary >= 0 && (p->is_alt || !(opt->flag&MEM_F_ALL))) continue;
		if (p->secondary >= 0 && p->secondary < INT_MAX && p->score < a->a[p->secondary].score * opt->drop_ratio) continue; // too weak relative to its primary
		q = kv_pushp(mem_aln_t, aa);
		*q = mem_reg2aln(opt, bns, pac, s->l_seq, s->seq, p);
		assert(q->rid >= 0); // this should not happen with the new code
		q->XA = XA? XA[k] : 0; // borrowed pointer; freed via XA at the end
		q->flag |= extra_flag; // flag secondary
		if (p->secondary >= 0) q->sub = -1; // don't output sub-optimal score
		if (l && p->secondary < 0) // if supplementary
			q->flag |= (opt->flag&MEM_F_NO_MULTI)? 0x10000 : 0x800;
		if (!(opt->flag & MEM_F_KEEP_SUPP_MAPQ) && l && !p->is_alt && q->mapq > aa.a[0].mapq)
			q->mapq = aa.a[0].mapq; // lower mapq for supplementary mappings, unless -5 or -q is applied
		++l;
	}
	if (aa.n == 0) { // no alignments good enough; then write an unaligned record
		mem_aln_t t;
		t = mem_reg2aln(opt, bns, pac, s->l_seq, s->seq, 0);
		t.flag |= extra_flag;
		mem_aln2sam(opt, bns, &str, s, 1, &t, 0, m);
	} else {
		for (k = 0; k < aa.n; ++k)
			mem_aln2sam(opt, bns, &str, s, aa.n, aa.a, k, m);
		for (k = 0; k < aa.n; ++k) free(aa.a[k].cigar);
		free(aa.a);
	}
	s->sam = str.s; // hand the SAM text to the read
	if (XA) {
		for (k = 0; k < a->n; ++k) free(XA[k]);
		free(XA);
	}
}
1050 
// Align a single read end-to-end: seed, chain, filter, and extend chains into
// alignment regions. seq is converted to nt4 encoding IN PLACE. buf is the
// per-thread scratch space (smem_aux_t, passed through to mem_chain). The
// returned vector's .a array is owned by the caller.
mem_alnreg_v mem_align1_core(const mem_opt_t *opt, const bwt_t *bwt, const bntseq_t *bns, const uint8_t *pac, int l_seq, char *seq, void *buf)
{
	int i;
	mem_chain_v chn;
	mem_alnreg_v regs;

	for (i = 0; i < l_seq; ++i) // convert to 2-bit encoding if we have not done so
		seq[i] = seq[i] < 4? seq[i] : nst_nt4_table[(int)seq[i]];

	chn = mem_chain(opt, bwt, bns, l_seq, (uint8_t*)seq, buf);
	chn.n = mem_chain_flt(opt, chn.n, chn.a); // drop weak/contained chains
	mem_flt_chained_seeds(opt, bns, pac, l_seq, (uint8_t*)seq, chn.n, chn.a);
	if (bwa_verbose >= 4) mem_print_chain(bns, &chn);

	kv_init(regs);
	for (i = 0; i < chn.n; ++i) { // extend each chain into alignment regions
		mem_chain_t *p = &chn.a[i];
		if (bwa_verbose >= 4) err_printf("* ---> Processing chain(%d) <---\n", i);
		mem_chain2aln(opt, bns, pac, l_seq, (uint8_t*)seq, p, &regs);
		free(chn.a[i].seeds);
	}
	free(chn.a);
	regs.n = mem_sort_dedup_patch(opt, bns, pac, (uint8_t*)seq, regs.n, regs.a); // merge/remove duplicated regions
	if (bwa_verbose >= 4) {
		err_printf("* %ld chains remain after removing duplicated chains\n", regs.n);
		for (i = 0; i < regs.n; ++i) {
			mem_alnreg_t *p = &regs.a[i];
			printf("** %d, [%d,%d) <=> [%ld,%ld)\n", p->score, p->qb, p->qe, (long)p->rb, (long)p->re);
		}
	}
	for (i = 0; i < regs.n; ++i) { // mark regions that hit ALT contigs
		mem_alnreg_t *p = &regs.a[i];
		if (p->rid >= 0 && bns->anns[p->rid].is_alt)
			p->is_alt = 1;
	}
	return regs;
}
1088 
// Convert one alignment region into a mem_aln_t with final position, MAPQ and
// CIGAR. The CIGAR is regenerated by banded global alignment (bwa_gen_cigar2);
// the returned a.cigar buffer also holds the MD string right after the ops and
// must be freed by the caller. ar == NULL (or negative coordinates) yields an
// unmapped record.
mem_aln_t mem_reg2aln(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, int l_query, const char *query_, const mem_alnreg_t *ar)
{
	mem_aln_t a;
	int i, w2, tmp, qb, qe, NM, score, is_rev, last_sc = -(1<<30), l_MD;
	int64_t pos, rb, re;
	uint8_t *query;

	memset(&a, 0, sizeof(mem_aln_t));
	if (ar == 0 || ar->rb < 0 || ar->re < 0) { // generate an unmapped record
		a.rid = -1; a.pos = -1; a.flag |= 0x4;
		return a;
	}
	qb = ar->qb, qe = ar->qe;
	rb = ar->rb, re = ar->re;
	query = malloc(l_query);
	for (i = 0; i < l_query; ++i) // convert to the nt4 encoding
		query[i] = query_[i] < 5? query_[i] : nst_nt4_table[(int)query_[i]];
	a.mapq = ar->secondary < 0? mem_approx_mapq_se(opt, ar) : 0; // secondary hits get MAPQ 0
	if (ar->secondary >= 0) a.flag |= 0x100; // secondary alignment
	tmp = infer_bw(qe - qb, re - rb, ar->truesc, opt->a, opt->o_del, opt->e_del);
	w2  = infer_bw(qe - qb, re - rb, ar->truesc, opt->a, opt->o_ins, opt->e_ins);
	w2 = w2 > tmp? w2 : tmp; // the band must accommodate both deletions and insertions
	if (bwa_verbose >= 4) printf("* Band width: inferred=%d, cmd_opt=%d, alnreg=%d\n", w2, opt->w, ar->w);
	if (w2 > opt->w) w2 = w2 < ar->w? w2 : ar->w;
	i = 0; a.cigar = 0;
	do { // regenerate the CIGAR, doubling the band until the global score matches the local one
		free(a.cigar);
		w2 = w2 < opt->w<<2? w2 : opt->w<<2; // cap at 4x the requested band width
		a.cigar = bwa_gen_cigar2(opt->mat, opt->o_del, opt->e_del, opt->o_ins, opt->e_ins, w2, bns->l_pac, pac, qe - qb, (uint8_t*)&query[qb], rb, re, &score, &a.n_cigar, &NM);
		if (bwa_verbose >= 4) printf("* Final alignment: w2=%d, global_sc=%d, local_sc=%d\n", w2, score, ar->truesc);
		if (score == last_sc || w2 == opt->w<<2) break; // it is possible that global alignment and local alignment give different scores
		last_sc = score;
		w2 <<= 1;
	} while (++i < 3 && score < ar->truesc - opt->a);
	l_MD = strlen((char*)(a.cigar + a.n_cigar)) + 1; // MD string is stored right after the CIGAR ops
	a.NM = NM;
	pos = bns_depos(bns, rb < bns->l_pac? rb : re - 1, &is_rev);
	a.is_rev = is_rev;
	if (a.n_cigar > 0) { // squeeze out leading or trailing deletions
		if ((a.cigar[0]&0xf) == 2) { // leading deletion: shift POS forward instead
			pos += a.cigar[0]>>4;
			--a.n_cigar;
			memmove(a.cigar, a.cigar + 1, a.n_cigar * 4 + l_MD);
		} else if ((a.cigar[a.n_cigar-1]&0xf) == 2) { // trailing deletion: simply drop it
			--a.n_cigar;
			memmove(a.cigar + a.n_cigar, a.cigar + a.n_cigar + 1, l_MD); // MD needs to be moved accordingly
		}
	}
	if (qb != 0 || qe != l_query) { // add clipping to CIGAR
		int clip5, clip3;
		clip5 = is_rev? l_query - qe : qb; // clip lengths swap ends on the reverse strand
		clip3 = is_rev? qb : l_query - qe;
		a.cigar = realloc(a.cigar, 4 * (a.n_cigar + 2) + l_MD);
		if (clip5) {
			memmove(a.cigar+1, a.cigar, a.n_cigar * 4 + l_MD); // make room for 5'-end clipping
			a.cigar[0] = clip5<<4 | 3; // op 3 == soft clip ('S')
			++a.n_cigar;
		}
		if (clip3) {
			memmove(a.cigar + a.n_cigar + 1, a.cigar + a.n_cigar, l_MD); // make room for 3'-end clipping
			a.cigar[a.n_cigar++] = clip3<<4 | 3;
		}
	}
	a.rid = bns_pos2rid(bns, pos);
	assert(a.rid == ar->rid);
	a.pos = pos - bns->anns[a.rid].offset; // position relative to the start of the reference sequence
	a.score = ar->score; a.sub = ar->sub > ar->csub? ar->sub : ar->csub;
	a.is_alt = ar->is_alt; a.alt_sc = ar->alt_sc;
	free(query);
	return a;
}
1160 
// Shared state for the kt_for worker callbacks: read-only inputs plus
// per-read and per-thread slots indexed by worker1/worker2 below.
typedef struct {
	const mem_opt_t *opt;    // mapping parameters
	const bwt_t *bwt;        // FM-index
	const bntseq_t *bns;     // reference annotations
	const uint8_t *pac;      // packed reference sequence
	const mem_pestat_t *pes; // insert-size stats, 4 orientations (see mem_process_seqs)
	smem_aux_t **aux;        // per-thread scratch, one entry per thread
	bseq1_t *seqs;           // reads of the current batch
	mem_alnreg_v *regs;      // per-read alignment regions, filled by worker1
	int64_t n_processed;     // reads processed before this batch (base for pair IDs)
} worker_t;
1172 
worker1(void * data,int i,int tid)1173 static void worker1(void *data, int i, int tid)
1174 {
1175 	worker_t *w = (worker_t*)data;
1176 	if (!(w->opt->flag&MEM_F_PE)) {
1177 		if (bwa_verbose >= 4) printf("=====> Processing read '%s' <=====\n", w->seqs[i].name);
1178 		w->regs[i] = mem_align1_core(w->opt, w->bwt, w->bns, w->pac, w->seqs[i].l_seq, w->seqs[i].seq, w->aux[tid]);
1179 	} else {
1180 		if (bwa_verbose >= 4) printf("=====> Processing read '%s'/1 <=====\n", w->seqs[i<<1|0].name);
1181 		w->regs[i<<1|0] = mem_align1_core(w->opt, w->bwt, w->bns, w->pac, w->seqs[i<<1|0].l_seq, w->seqs[i<<1|0].seq, w->aux[tid]);
1182 		if (bwa_verbose >= 4) printf("=====> Processing read '%s'/2 <=====\n", w->seqs[i<<1|1].name);
1183 		w->regs[i<<1|1] = mem_align1_core(w->opt, w->bwt, w->bns, w->pac, w->seqs[i<<1|1].l_seq, w->seqs[i<<1|1].seq, w->aux[tid]);
1184 	}
1185 }
1186 
worker2(void * data,int i,int tid)1187 static void worker2(void *data, int i, int tid)
1188 {
1189 	extern int mem_sam_pe(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, const mem_pestat_t pes[4], uint64_t id, bseq1_t s[2], mem_alnreg_v a[2]);
1190 	extern void mem_reg2ovlp(const mem_opt_t *opt, const bntseq_t *bns, const uint8_t *pac, bseq1_t *s, mem_alnreg_v *a);
1191 	worker_t *w = (worker_t*)data;
1192 	if (!(w->opt->flag&MEM_F_PE)) {
1193 		if (bwa_verbose >= 4) printf("=====> Finalizing read '%s' <=====\n", w->seqs[i].name);
1194 		mem_mark_primary_se(w->opt, w->regs[i].n, w->regs[i].a, w->n_processed + i);
1195 		if (w->opt->flag & MEM_F_PRIMARY5) mem_reorder_primary5(w->opt->T, &w->regs[i]);
1196 		mem_reg2sam(w->opt, w->bns, w->pac, &w->seqs[i], &w->regs[i], 0, 0);
1197 		free(w->regs[i].a);
1198 	} else {
1199 		if (bwa_verbose >= 4) printf("=====> Finalizing read pair '%s' <=====\n", w->seqs[i<<1|0].name);
1200 		mem_sam_pe(w->opt, w->bns, w->pac, w->pes, (w->n_processed>>1) + i, &w->seqs[i<<1], &w->regs[i<<1]);
1201 		free(w->regs[i<<1|0].a); free(w->regs[i<<1|1].a);
1202 	}
1203 }
1204 
// Map a batch of n reads (n/2 pairs in PE mode): worker1 collects alignment
// regions for every read, the insert-size distribution is taken from pes0 or
// inferred from the data, then worker2 finalizes each read into seqs[].sam.
// n_processed: reads completed before this batch (used for stable pair IDs).
void mem_process_seqs(const mem_opt_t *opt, const bwt_t *bwt, const bntseq_t *bns, const uint8_t *pac, int64_t n_processed, int n, bseq1_t *seqs, const mem_pestat_t *pes0)
{
	extern void kt_for(int n_threads, void (*func)(void*,int,int), void *data, int n);
	worker_t w;
	mem_pestat_t pes[4];
	double ctime, rtime;
	int i;

	ctime = cputime(); rtime = realtime();
	global_bns = bns;
	w.regs = malloc(n * sizeof(mem_alnreg_v));
	w.opt = opt; w.bwt = bwt; w.bns = bns; w.pac = pac;
	w.seqs = seqs; w.n_processed = n_processed;
	w.pes = &pes[0];
	// w.aux is an array of opt->n_threads POINTERS; size by the pointer type.
	// (The previous sizeof(smem_aux_t) over-allocated by the full struct size.)
	w.aux = malloc(opt->n_threads * sizeof(smem_aux_t*));
	for (i = 0; i < opt->n_threads; ++i)
		w.aux[i] = smem_aux_init(); // per-thread scratch for mem_align1_core
	kt_for(opt->n_threads, worker1, &w, (opt->flag&MEM_F_PE)? n>>1 : n); // find mapping positions
	for (i = 0; i < opt->n_threads; ++i)
		smem_aux_destroy(w.aux[i]);
	free(w.aux);
	if (opt->flag&MEM_F_PE) { // infer insert sizes if not provided
		if (pes0) memcpy(pes, pes0, 4 * sizeof(mem_pestat_t)); // if pes0 != NULL, set the insert-size distribution as pes0
		else mem_pestat(opt, bns->l_pac, n, w.regs, pes); // otherwise, infer the insert size distribution from data
	}
	kt_for(opt->n_threads, worker2, &w, (opt->flag&MEM_F_PE)? n>>1 : n); // generate alignment
	free(w.regs);
	if (bwa_verbose >= 3)
		fprintf(stderr, "[M::%s] Processed %d reads in %.3f CPU sec, %.3f real sec\n", __func__, n, cputime() - ctime, realtime() - rtime);
}
1235