1 /* @(#)paranoia.c	1.50 19/04/03 J. Schilling from cdparanoia-III-alpha9.8 */
2 #include <schily/mconfig.h>
3 #ifndef lint
4 static	UConst char sccsid[] =
5 "@(#)paranoia.c	1.50 19/04/03 J. Schilling from cdparanoia-III-alpha9.8";
6 
7 #endif
8 /*
9  * CopyPolicy: GNU Lesser General Public License v2.1 applies
10  * Copyright (C) 1997-2001,2008 by Monty (xiphmont@mit.edu)
11  * Copyright (C) 2002-2019 by J. Schilling
12  *
13  * Toplevel file for the paranoia abstraction over the cdda lib
14  *
15  */
16 
17 /* immediate todo:: */
18 /* Allow disabling of root fixups? */
19 
20 /*
21  * Dupe bytes are creeping into cases that require greater overlap
22  * than a single fragment can provide.  We need to check against a
23  * larger area* (+/-32 sectors of root?) to better eliminate
24  *  dupes. Of course this leads to other problems... Is it actually a
25  *  practically solvable problem?
26  */
27 /* Bimodal overlap distributions break us. */
28 /* scratch detection/tolerance not implemented yet */
29 
30 /*
31  * Da new shtick: verification now a two-step assymetric process.
32  *
33  * A single 'verified/reconstructed' data segment cache, and then the
34  * multiple fragment cache
35  *
36  * verify a newly read block against previous blocks; do it only this
37  * once. We maintain a list of 'verified sections' from these matches.
38  *
39  * We then glom these verified areas into a new data buffer.
40  * Defragmentation fixups are allowed here alone.
41  *
42  * We also now track where read boundaries actually happened; do not
43  * verify across matching boundaries.
44  */
45 
46 /*
47  * Silence.  "It's BAAAAAAaaack."
48  *
49  * audio is now treated as great continents of values floating on a
50  * mantle of molten silence.  Silence is not handled by basic
51  * verification at all; we simply anchor sections of nonzero audio to a
52  * position and fill in everything else as silence.  We also note the
53  * audio that interfaces with silence; an edge must be 'wet'.
54  */
55 
56 /*
57  * Let's translate the above vivid metaphor into something a mere mortal
58  * can understand:
59  *
60  * Non-silent audio is "solid."  Silent audio is "wet" and fluid.  The reason
61  * to treat silence as fluid is that if there's a long enough span of
62  * silence, we can't reliably detect jitter or dropped samples within that
63  * span (since all silence looks alike).  Non-silent audio, on the other
64  * hand, is distinctive and can be reliably reassembled.
65  *
66  * So we treat long spans of silence specially.  We only consider an edge
67  * of a non-silent region ("continent" or "island") to be "wet" if it borders
68  * a long span of silence.  Short spans of silence are merely damp and can
69  * be reliably placed within a continent.
70  *
71  * We position ("anchor") the non-silent regions somewhat arbitrarily (since
72  * they may be jittered and we have no way to verify their exact position),
73  * and fill the intervening space with silence.
74  *
75  * See i_silence_match() for the gory details.
76  */
77 
78 #include <schily/alloca.h>
79 #include <schily/stdlib.h>
80 #include <schily/unistd.h>
81 #include <schily/standard.h>
82 #include <schily/utypes.h>
83 #include <schily/stdio.h>
84 #include <schily/string.h>
85 #include "p_block.h"
86 #include "cdda_paranoia.h"
87 #include "overlap.h"
88 #include "gap.h"
89 #include "isort.h"
90 #include "pmalloc.h"
91 
92 /*
93  * used by: i_iterate_stage2() / i_stage2_each()
94  */
95 typedef struct sync_result {
96 	long		offset;
97 	long		begin;
98 	long		end;
99 } sync_result;
100 
101 struct c2errs {
102 	long	c2errs;		/* # of reads with C2 errors */
103 	long	c2bytes;	/* # of bytes with C2 errors */
104 	long	c2secs;		/* # of sectorss with C2 errors */
105 	long	c2maxerrs;	/* Max. # of C2 errors per sector */
106 };
107 
108 LOCAL	inline long	re		__PR((root_block * root));
109 LOCAL	inline long	rb		__PR((root_block * root));
110 LOCAL	inline long	rs		__PR((root_block * root));
111 LOCAL	inline Int16_t	*rv		__PR((root_block * root));
112 LOCAL	inline long i_paranoia_overlap	__PR((Int16_t * buffA, Int16_t * buffB,
113 						long offsetA, long offsetB,
114 						long sizeA, long sizeB,
115 						long *ret_begin, long *ret_end));
116 LOCAL	inline long i_paranoia_overlap2	__PR((Int16_t * buffA, Int16_t * buffB,
117 						Uchar *flagsA, Uchar *flagsB,
118 						long offsetA, long offsetB,
119 						long sizeA, long sizeB,
120 						long *ret_begin, long *ret_end));
121 LOCAL	inline long do_const_sync	__PR((c_block * A,
122 						sort_info * B, Uchar *flagB,
123 						long posA, long posB,
124 						long *begin, long *end, long *offset));
125 LOCAL	inline long try_sort_sync	__PR((cdrom_paranoia * p,
126 						sort_info * A, Uchar *Aflags,
127 						c_block * B,
128 						long post, long *begin, long *end,
129 						long *offset, void (*callback) (long, int)));
130 LOCAL	inline void stage1_matched	__PR((c_block * old, c_block * new,
131 						long matchbegin, long matchend,
132 						long matchoffset, void (*callback) (long, int)));
133 LOCAL	long	i_iterate_stage1	__PR((cdrom_paranoia * p, c_block * old, c_block * new,
134 						void (*callback) (long, int)));
135 LOCAL	long	i_stage1		__PR((cdrom_paranoia * p, c_block * new,
136 						void (*callback) (long, int)));
137 LOCAL	long	i_iterate_stage2	__PR((cdrom_paranoia * p, v_fragment * v,
138 						sync_result * r, void (*callback) (long, int)));
139 LOCAL	void	i_silence_test		__PR((root_block * root));
140 LOCAL	long	i_silence_match		__PR((root_block * root, v_fragment * v,
141 						void (*callback) (long, int)));
142 LOCAL	long	i_stage2_each		__PR((root_block * root, v_fragment * v,
143 						void (*callback) (long, int)));
144 LOCAL	int	i_init_root		__PR((root_block * root, v_fragment * v, long begin,
145 						void (*callback) (long, int)));
146 LOCAL	int	vsort			__PR((const void *a, const void *b));
147 LOCAL	int	i_stage2		__PR((cdrom_paranoia * p, long beginword, long endword,
148 						void (*callback) (long, int)));
149 LOCAL	void	i_end_case		__PR((cdrom_paranoia * p, long endword,
150 						void (*callback) (long, int)));
151 LOCAL	void	verify_skip_case	__PR((cdrom_paranoia * p, void (*callback) (long, int)));
152 EXPORT	void	paranoia_free		__PR((cdrom_paranoia * p));
153 EXPORT	void	paranoia_modeset	__PR((cdrom_paranoia * p, int enable));
154 EXPORT	long	paranoia_seek		__PR((cdrom_paranoia * p, long seek, int mode));
155 LOCAL	void	c2_audiocopy		__PR((void *to, void *from, int nsectors));
156 LOCAL	int	bits			__PR((int c));
157 LOCAL	void	c2_errcopy		__PR((void *to, void *from, int nsectors, struct c2errs *errp));
158 c_block		*i_read_c_block		__PR((cdrom_paranoia * p, long beginword, long endword,
159 						void (*callback) (long, int)));
160 EXPORT	Int16_t	*paranoia_read		__PR((cdrom_paranoia * p, void (*callback) (long, int)));
161 EXPORT	Int16_t	*paranoia_read_limited	__PR((cdrom_paranoia * p, void (*callback) (long, int),
162 						int max_retries));
163 EXPORT	void	paranoia_overlapset	__PR((cdrom_paranoia * p, long overlap));
164 
165 /*
166  * Return end offset of current block.
167  * End offset counts in multiples of samples (16 bits).
168  */
169 LOCAL inline long
re(root)170 re(root)
171 	root_block	*root;
172 {
173 	if (!root)
174 		return (-1);
175 	if (!root->vector)
176 		return (-1);
177 	return (ce(root->vector));
178 }
179 
180 /*
181  * Return begin offset of current block.
182  * Begin offset counts in multiples of samples (16 bits).
183  */
184 LOCAL inline long
rb(root)185 rb(root)
186 	root_block	*root;
187 {
188 	if (!root)
189 		return (-1);
190 	if (!root->vector)
191 		return (-1);
192 	return (cb(root->vector));
193 }
194 
195 /*
196  * Return size of current block.
197  * Size counts in multiples of samples (16 bits).
198  */
199 LOCAL inline long
rs(root)200 rs(root)
201 	root_block	*root;
202 {
203 	if (!root)
204 		return (-1);
205 	if (!root->vector)
206 		return (-1);
207 	return (cs(root->vector));
208 }
209 
210 /*
211  * Return data vector from current block.
212  */
213 LOCAL inline Int16_t *
rv(root)214 rv(root)
215 	root_block	*root;
216 {
217 	if (!root)
218 		return (NULL);
219 	if (!root->vector)
220 		return (NULL);
221 	return (cv(root->vector));
222 }
223 
224 #define	rc(r)	((r)->vector)
225 
226 /*
227  * Flags indicating the status of a read samples.
228  *
229  * Imagine the below enumeration values are #defines to be used in a
230  * bitmask rather than distinct values of an enum.
231  *
232  * The variable part of the declaration is trickery to force the enum
233  * symbol values to be recorded in debug symbol tables. They are used
234  * to allow one refer to the enumeration value names in a debugger
235  * and in debugger expressions.
236  */
237 #define	FLAGS_EDGE	0x1	/**< first/last N words of frame	   */
238 #define	FLAGS_UNREAD	0x2	/**< unread, hence missing and unmatchable */
239 #define	FLAGS_VERIFIED	0x4	/**< block read and verified		   */
240 #define	FLAGS_C2	0x8	/**< sample with C2 error		   */
241 
242 /*
243  * matching and analysis code
244  */
245 
246 /*
247  * i_paranoia_overlap() (internal)
248  *
249  * This function is called when buffA[offsetA] == buffB[offsetB].  This
250  * function searches backward and forward to see how many consecutive
251  * samples also match.
252  *
253  * This function is called by do_const_sync() when we're not doing any
254  * verification.  Its more complicated sibling is i_paranoia_overlap2.
255  *
256  * This function returns the number of consecutive matching samples.
257  * If (ret_begin) or (ret_end) are not NULL, it fills them with the
258  * offsets of the first and last matching samples in A.
259  */
260 LOCAL inline long
i_paranoia_overlap(buffA,buffB,offsetA,offsetB,sizeA,sizeB,ret_begin,ret_end)261 i_paranoia_overlap(buffA, buffB, offsetA, offsetB, sizeA, sizeB, ret_begin, ret_end)
262 	Int16_t	*buffA;
263 	Int16_t	*buffB;
264 	long	offsetA;
265 	long	offsetB;
266 	long	sizeA;
267 	long	sizeB;
268 	long	*ret_begin;
269 	long	*ret_end;
270 {
271 	long	beginA = offsetA;
272 	long	endA = offsetA;
273 	long	beginB = offsetB;
274 	long	endB = offsetB;
275 
276 	/*
277 	 * Scan backward to extend the matching run in that direction.
278 	 */
279 	for (; beginA >= 0 && beginB >= 0; beginA--, beginB--)
280 		if (buffA[beginA] != buffB[beginB])
281 			break;
282 	beginA++;
283 	beginB++;
284 
285 	/*
286 	 * Scan forward to extend the matching run in that direction.
287 	 */
288 	for (; endA < sizeA && endB < sizeB; endA++, endB++)
289 		if (buffA[endA] != buffB[endB])
290 			break;
291 
292 	/*
293 	 * Return the result of our search.
294 	 */
295 	if (ret_begin)
296 		*ret_begin = beginA;
297 	if (ret_end)
298 		*ret_end = endA;
299 	return (endA - beginA);
300 }
301 
302 /*
303  * i_paranoia_overlap2() (internal)
304  *
305  * This function is called when buffA[offsetA] == buffB[offsetB].  This
306  * function searches backward and forward to see how many consecutive
307  * samples also match.
308  *
309  * This function is called by do_const_sync() when we're verifying the
310  * data coming off the CD.  Its less complicated sibling is
311  * i_paranoia_overlap, which is a good place to look to see the simplest
312  * outline of how this function works.
313  *
314  * This function returns the number of consecutive matching samples.
315  * If (ret_begin) or (ret_end) are not NULL, it fills them with the
316  * offsets of the first and last matching samples in A.
317  */
318 LOCAL inline long
i_paranoia_overlap2(buffA,buffB,flagsA,flagsB,offsetA,offsetB,sizeA,sizeB,ret_begin,ret_end)319 i_paranoia_overlap2(buffA, buffB, flagsA, flagsB, offsetA, offsetB, sizeA, sizeB, ret_begin, ret_end)
320 	Int16_t	*buffA;
321 	Int16_t	*buffB;
322 	Uchar	*flagsA;
323 	Uchar	*flagsB;
324 	long	offsetA;
325 	long	offsetB;
326 	long	sizeA;
327 	long	sizeB;
328 	long	*ret_begin;
329 	long	*ret_end;
330 {
331 	long		beginA = offsetA;
332 	long		endA = offsetA;
333 	long		beginB = offsetB;
334 	long		endB = offsetB;
335 
336 	/*
337 	 * Scan backward to extend the matching run in that direction.
338 	 */
339 	for (; beginA >= 0 && beginB >= 0; beginA--, beginB--) {
340 		if (buffA[beginA] != buffB[beginB])
341 			break;
342 		/*
343 		 * don't allow matching across matching sector boundaries. The
344 		 * liklihood of the drive skipping identically in two
345 		 * different reads with the same sector read boundary is actually
346 		 * relatively very high compared to the liklihood of it skipping
347 		 * when one read is continuous across the boundary and other was
348 		 * discontinuous.
349 		 * Stop if both samples were at the edges of a low-level read.
350 		 */
351 		if ((flagsA[beginA] & flagsB[beginB] & FLAGS_EDGE)) {
352 			beginA--;
353 			beginB--;
354 			break;
355 		}
356 		/*
357 		 * don't allow matching through known missing data
358 		 */
359 		if ((flagsA[beginA] & FLAGS_UNREAD) || (flagsB[beginB] & FLAGS_UNREAD))
360 			break;
361 	}
362 	beginA++;
363 	beginB++;
364 
365 	/*
366 	 * Scan forward to extend the matching run in that direction.
367 	 */
368 	for (; endA < sizeA && endB < sizeB; endA++, endB++) {
369 		if (buffA[endA] != buffB[endB])
370 			break;
371 		/*
372 		 * don't allow matching across matching sector boundaries
373 		 */
374 		if ((flagsA[endA] & flagsB[endB] & FLAGS_EDGE) && endA != beginA) {
375 			break;
376 		}
377 		/*
378 		 * don't allow matching through known missing data
379 		 * Stop if both samples were at the edges of a low-level read.
380 		 */
381 		if ((flagsA[endA] & FLAGS_UNREAD) || (flagsB[endB] & FLAGS_UNREAD))
382 			break;
383 	}
384 
385 	/*
386 	 * Return the result of our search.
387 	 */
388 	if (ret_begin)
389 		*ret_begin = beginA;
390 	if (ret_end)
391 		*ret_end = endA;
392 	return (endA - beginA);
393 }
394 
395 /*
396  * do_const_sync() (internal)
397  *
398  * This function is called when samples A[posA] == B[posB].  It tries to
399  * build a matching run from that point, looking forward and backward to
400  * see how many consecutive samples match.  Since the starting samples
401  * might only be coincidentally identical, we only consider the run to
402  * be a true match if it's longer than MIN_WORDS_SEARCH.
403  *
404  * This function returns the length of the run if a matching run was found,
405  * or 0 otherwise.  If a matching run was found, (begin) and (end) are set
406  * to the absolute positions of the beginning and ending samples of the
407  * run in A, and (offset) is set to the jitter between the c_blocks.
408  * (I.e., offset indicates the distance between what A considers sample N
409  * on the CD and what B considers sample N.)
410  */
411 LOCAL inline long
do_const_sync(A,B,flagB,posA,posB,begin,end,offset)412 do_const_sync(A, B, flagB, posA, posB, begin, end, offset)
413 	c_block		*A;
414 	sort_info	*B;
415 	Uchar		*flagB;
416 	long		posA;
417 	long		posB;
418 	long		*begin;
419 	long		*end;
420 	long		*offset;
421 {
422 	Uchar		*flagA = A->flags;
423 	long		ret = 0;
424 
425 	/*
426 	 * If we're doing any verification whatsoever, we have flags in stage
427 	 * 1, and will take them into account.  Otherwise (e.g. in stage 2),
428 	 * we just do the simple equality test for samples on both sides of
429 	 * the initial match.
430 	 */
431 	if (flagB == NULL)
432 		ret = i_paranoia_overlap(cv(A), iv(B), posA, posB,
433 			cs(A), is(B), begin, end);
434 	else if ((flagB[posB] & FLAGS_UNREAD) == 0)
435 		ret = i_paranoia_overlap2(cv(A), iv(B), flagA, flagB, posA, posB, cs(A),
436 			is(B), begin, end);
437 
438 	/*
439 	 * Small matching runs could just be coincidental.  We only consider this
440 	 * a real match if it's long enough.
441 	 */
442 	if (ret > MIN_WORDS_SEARCH) {
443 
444 		*offset = (posA + cb(A)) - (posB + ib(B));
445 
446 		/*
447 		 * Note that try_sort_sync()'s swaps A & B when it calls this function,
448 		 * so while we adjust begin & end to be relative to A here, that means
449 		 * it's relative to B in try_sort_sync().
450 		 */
451 		*begin += cb(A);
452 		*end += cb(A);
453 		return (ret);
454 	}
455 	return (0);
456 }
457 
458 /*
459  * try_sort_sync() (internal)
460  *
461  * Starting from the sample in B with the absolute position (post), look
462  * for a matching run in A.  This search will look in A for a first
463  * matching sample within (p->dynoverlap) samples around (post).  If it
464  * finds one, it will then determine how many consecutive samples match
465  * both A and B from that point, looking backwards and forwards.  If
466  * this search produces a matching run longer than MIN_WORDS_SEARCH, we
467  * consider it a match.
468  *
469  * When used by stage 1, the "post" is planted with respect to the old
470  * c_block being compare to the new c_block.  In stage 2, the "post" is
471  * planted with respect to the verified root.
472  *
473  * This function returns 1 if a match is found and 0 if not.  When a match
474  * is found, (begin) and (end) are set to the boundaries of the run, and
475  * (offset) is set to the difference in position of the run in A and B.
476  * (begin) and (end) are the absolute positions of the samples in
477  * B.  (offset) transforms A to B's frame of reference.  I.e., an offset of
478  * 2 would mean that A's absolute 3 is equivalent to B's 5.
479  *
480  * post is w.r.t. B.  in stage one, we post from old.  In stage 2 we
481  * post from root. Begin, end, offset count from B's frame of
482  * reference
483  */
484 LOCAL inline long
try_sort_sync(p,A,Aflags,B,post,begin,end,offset,callback)485 try_sort_sync(p, A, Aflags, B, post, begin, end, offset, callback)
486 	cdrom_paranoia	*p;
487 	sort_info	*A;
488 	Uchar		*Aflags;
489 	c_block		*B;
490 	long		post;
491 	long		*begin;
492 	long		*end;
493 	long		*offset;
494 	void		(*callback) __PR((long, int));
495 {
496 
497 	long		dynoverlap = p->dynoverlap;
498 	sort_link	*ptr = NULL;
499 	Uchar		*Bflags = B->flags;
500 
501 	/*
502 	 * block flag matches FLAGS_UNREAD (unmatchable)
503 	 */
504 	if (Bflags == NULL || (Bflags[post - cb(B)] & FLAGS_UNREAD) == 0) {
505 		/*
506 		 * always try absolute offset zero first!
507 		 */
508 		{
509 			long		zeropos = post - ib(A);
510 
511 			if (zeropos >= 0 && zeropos < is(A)) {
512 				/*
513 				 * Before we bother with the search for a matching samples,
514 				 * we check the simple case.  If there's no jitter at all
515 				 * (i.e. the absolute positions of A's and B's samples are
516 				 * consistent), A's sample at (post) should be identical
517 				 * to B's sample at the same position.
518 				 */
519 				if (cv(B)[post - cb(B)] == iv(A)[zeropos]) {
520 					/*
521 					 * The first sample matched, now try to grow the matching run
522 					 * in both directions.  We only consider it a match if more
523 					 * than MIN_WORDS_SEARCH consecutive samples match.
524 					 */
525 					if (do_const_sync(B, A, Aflags,
526 							post - cb(B), zeropos,
527 							begin, end, offset)) {
528 
529 						offset_add_value(p, &(p->stage1), *offset, callback);
530 
531 						return (1);
532 					}
533 				}
534 			}
535 		}
536 	} else
537 		return (0);
538 
539 	/*
540 	 * If the samples with the same absolute position didn't match, it's
541 	 * either a bad sample, or the two c_blocks are jittered with respect
542 	 * to each other.  Now we search through A for samples that do have
543 	 * the same value as B's post.  The search looks from first to last
544 	 * occurrence witin (dynoverlap) samples of (post).
545 	 */
546 	ptr = sort_getmatch(A, post - ib(A), dynoverlap, cv(B)[post - cb(B)]);
547 
548 	while (ptr) {
549 		/*
550 		 * We've found a matching sample, so try to grow the matching run in
551 		 * both directions.  If we find a long enough run (longer than
552 		 * MIN_WORDS_SEARCH), we've found a match.
553 		 */
554 		if (do_const_sync(B, A, Aflags,
555 				post - cb(B), ipos(A, ptr),
556 				begin, end, offset)) {
557 			offset_add_value(p, &(p->stage1), *offset, callback);
558 			return (1);
559 		}
560 		/*
561 		 * The matching sample was just a fluke -- there weren't enough adjacent
562 		 * samples that matched to consider a matching run.  So now we check
563 		 * for the next occurrence of that value in A.
564 		 */
565 		ptr = sort_nextmatch(A, ptr);
566 	}
567 
568 	/*
569 	 * We didn't find any matches.
570 	 */
571 	*begin = -1;
572 	*end = -1;
573 	*offset = -1;
574 	return (0);
575 }
576 
577 /*
578  * STAGE 1 MATCHING
579  */
580 /* Top level of the first stage matcher */
581 
582 /*
583  * We match each analysis point of new to the preexisting blocks
584  * recursively.  We can also optionally maintain a list of fragments of
585  * the preexisting block that didn't match anything, and match them back
586  * afterward.
587  */
588 #define	OVERLAP_ADJ	(MIN_WORDS_OVERLAP/2-1)
589 
590 /*
591  * stage1_matched() (internal)
592  *
593  * This function is called whenever stage 1 verification finds two identical
594  * runs of samples from different reads.  The runs must be more than
595  * MIN_WORDS_SEARCH samples long.  They may be jittered (i.e. their absolute
596  * positions on the CD may not match due to inaccurate seeking) with respect
597  * to each other, but they have been verified to have no dropped samples
598  * within them.
599  *
600  * This function provides feedback via the callback mechanism and marks the
601  * runs as verified.  The details of the marking are somehwat subtle and
602  * are described near the relevant code.
603  *
604  * Subsequent portions of the stage 1 code will build a verified fragment
605  * from this run.  The verified fragment will eventually be merged
606  * into the verified root (and its absolute position determined) in
607  * stage 2.
608  */
609 LOCAL inline void
stage1_matched(old,new,matchbegin,matchend,matchoffset,callback)610 stage1_matched(old, new, matchbegin, matchend, matchoffset, callback)
611 	c_block	*old;
612 	c_block	*new;
613 	long	matchbegin;
614 	long	matchend;
615 	long	matchoffset;
616 	void	(*callback) __PR((long, int));
617 {
618 	long		i;
619 	long		oldadjbegin = matchbegin - cb(old);
620 	long		oldadjend = matchend - cb(old);
621 	long		newadjbegin = matchbegin - matchoffset - cb(new);
622 	long		newadjend = matchend - matchoffset - cb(new);
623 
624 	/*
625 	 * Provide feedback via the callback about the samples we've just
626 	 * verified.
627 	 *
628 	 * "???: How can matchbegin ever be < cb(old)?"
629 	 *   Sorry, old bulletproofing habit.  I often use <= to mean "not >"
630 	 *   --Monty
631 	 *
632 	 * "???: Why do edge samples get logged only when there's jitter
633 	 * between the matched runs (matchoffset != 0)?"
634 	 *   FIXUP_EDGE is actually logging a jitter event, not a rift--
635 	 *   a rift is FIXUP_ATOM --Monty
636 	 */
637 	if (matchbegin - matchoffset <= cb(new) ||
638 		matchbegin <= cb(old) ||
639 		(new->flags[newadjbegin] & FLAGS_EDGE) ||
640 		(old->flags[oldadjbegin] & FLAGS_EDGE)) {
641 		if (matchoffset)
642 			if (callback)
643 				(*callback) (matchbegin, PARANOIA_CB_FIXUP_EDGE);
644 	} else if (callback)
645 		(*callback) (matchbegin, PARANOIA_CB_FIXUP_ATOM);
646 
647 	if (matchend - matchoffset >= ce(new) ||
648 		(new->flags[newadjend] & FLAGS_EDGE) ||
649 		matchend >= ce(old) ||
650 		(old->flags[oldadjend] & FLAGS_EDGE)) {
651 		if (matchoffset)
652 			if (callback)
653 				(*callback) (matchend, PARANOIA_CB_FIXUP_EDGE);
654 	} else if (callback)
655 		(*callback) (matchend, PARANOIA_CB_FIXUP_ATOM);
656 
657 	/* BEGIN CSTYLED */
658 	/*
659 	 * Mark verified samples as "verified," but trim the verified region
660 	 * by OVERLAP_ADJ samples on each side.  There are several significant
661 	 * implications of this trimming:
662 	 *
663 	 * 1) Why we trim at all:  We have to trim to distinguish between two
664 	 * adjacent verified runs and one long verified run.  We encounter this
665 	 * situation when samples have been dropped:
666 	 *
667 	 *   matched portion of read 1 ....)(.... matched portion of read 1
668 	 *       read 2 adjacent run  .....)(..... read 2 adjacent run
669 	 *                                 ||
670 	 *                      dropped samples in read 2
671 	 *
672 	 * So at this point, the fact that we have two adjacent runs means
673 	 * that we have not yet verified that the two runs really are adjacent.
674 	 * (In fact, just the opposite:  there are two runs because they were
675 	 * matched by separate runs, indicating that some samples didn't match
676 	 * across the length of read 2.)
677 	 *
678 	 * If we verify that they are actually adjacent (e.g. if the two runs
679 	 * are simply a result of matching runs from different reads, not from
680 	 * dropped samples), we will indeed mark them as one long merged run.
681 	 *
682 	 * 2) Why we trim by this amount: We want to ensure that when we
683 	 * verify the relationship between these two runs, we do so with
684 	 * an overlapping fragment at least OVERLAP samples long.  Following
685 	 * from the above example:
686 	 *
687 	 *                (..... matched portion of read 3 .....)
688 	 *       read 2 adjacent run  .....)(..... read 2 adjacent run
689 	 *
690 	 * Assuming there were no dropped samples between the adjacent runs,
691 	 * the matching portion of read 3 will need to be at least OVERLAP
692 	 * samples long to mark the two runs as one long verified run.
693 	 * If there were dropped samples, read 3 wouldn't match across the
694 	 * two runs, proving our caution worthwhile.
695 	 *
696 	 * 3) Why we partially discard the work we've done:  We don't.
697 	 * When subsequently creating verified fragments from this run,
698 	 * we compensate for this trimming.  Thus the verified fragment will
699 	 * contain the full length of verified samples.  Only the c_blocks
700 	 * will reflect this trimming.
701 	 *
702 	 * Mark the verification flags.  Don't mark the first or last OVERLAP/2
703 	 * elements so that overlapping fragments have to overlap by OVERLAP to
704 	 * actually merge. We also remove elements from the sort such that
705 	 * later sorts do not have to sift through already matched data
706 	 */
707 	/* END CSTYLED */
708 	newadjbegin += OVERLAP_ADJ;
709 	newadjend -= OVERLAP_ADJ;
710 	for (i = newadjbegin; i < newadjend; i++)
711 		new->flags[i] |= FLAGS_VERIFIED;	/* mark verified */
712 
713 	oldadjbegin += OVERLAP_ADJ;
714 	oldadjend -= OVERLAP_ADJ;
715 	for (i = oldadjbegin; i < oldadjend; i++)
716 		old->flags[i] |= FLAGS_VERIFIED;	/* mark verified */
717 
718 }
719 
720 #define	CB_NULL		(void (*) __PR((long, int)))0
721 
722 /*
723  * i_iterate_stage1 (internal)
724  *
725  * This function is called by i_stage1() to compare newly read samples with
726  * previously read samples, searching for contiguous runs of identical
727  * samples.  Matching runs indicate that at least two reads of the CD
728  * returned identical data, with no dropped samples in that run.
729  * The runs may be jittered (i.e. their absolute positions on the CD may
730  * not be accurate due to inaccurate seeking) at this point.  Their
731  * positions will be determined in stage 2.
732  *
733  * This function compares the new c_block (which has been indexed in
734  * p->sortcache) to a previous c_block.  It is called for each previous
735  * c_block.  It searches for runs of identical samples longer than
736  * MIN_WORDS_SEARCH.  Samples in matched runs are marked as verified.
737  *
738  * Subsequent stage 1 code builds verified fragments from the runs of
739  * verified samples.  These fragments are merged into the verified root
740  * in stage 2.
741  *
742  * This function returns the number of distinct runs verified in the new
743  * c_block when compared against this old c_block.
744  */
745 LOCAL long
i_iterate_stage1(p,old,new,callback)746 i_iterate_stage1(p, old, new, callback)
747 	cdrom_paranoia	*p;
748 	c_block		*old;
749 	c_block		*new;
750 	void		(*callback) __PR((long, int));
751 {
752 
753 	long		matchbegin = -1;
754 	long		matchend = -1;
755 	long		matchoffset;
756 
757 	/*
758 	 * "???: Why do we limit our search only to the samples with overlapping
759 	 * absolute positions?  It could be because it eliminates some further
760 	 * bounds checking."
761 	 *  Short answer is yes --Monty
762 	 *
763 	 * "Why do we "no longer try to spread the ... search" as mentioned
764 	 * below?"
765 	 * The search is normally much faster without the spread,
766 	 * even in heavy jitter.  Dynoverlap tends to be a bigger deal in
767 	 * stage 2. --Monty
768 	 */
769 	/*
770 	 * we no longer try to spread the stage one search area by dynoverlap
771 	 */
772 	long		searchend = min(ce(old), ce(new));
773 	long		searchbegin = max(cb(old), cb(new));
774 	long		searchsize = searchend - searchbegin;
775 	sort_info	*i = p->sortcache;
776 	long		ret = 0;
777 	long		j;
778 
779 	long		tried = 0;
780 	long		matched = 0;
781 
782 	if (searchsize <= 0)
783 		return (0);
784 
785 	/*
786 	 * match return values are in terms of the new vector, not old
787 	 * "???: Why 23?" Odd, prime number --Monty
788 	 */
789 	for (j = searchbegin; j < searchend; j += 23) {
790 		/*
791 		 * Skip past any samples verified in previous comparisons to
792 		 * other old c_blocks.  Also, obviously, don't bother verifying
793 		 * unread/unmatchable samples.
794 		 */
795 		if ((new->flags[j - cb(new)] & (FLAGS_VERIFIED|FLAGS_UNREAD)) == 0) {
796 			tried++;
797 			/*
798 			 * Starting from the sample in the old c_block with the absolute
799 			 * position j, look for a matching run in the new c_block.  This
800 			 * search will look a certain distance around j, and if successful
801 			 * will extend the matching run as far backward and forward as
802 			 * it can.
803 			 *
804 			 * The search will only return 1 if it finds a matching run long
805 			 * enough to be deemed significant.
806 			 */
807 			if (try_sort_sync(p, i, new->flags, old, j, &matchbegin, &matchend, &matchoffset,
808 					callback) == 1) {
809 
810 				matched += matchend - matchbegin;
811 
812 				/*
813 				 * purely cosmetic: if we're matching zeros,
814 				 * don't use the callback because they will
815 				 * appear to be all skewed
816 				 */
817 				{
818 					long		jj = matchbegin - cb(old);
819 					long		end = matchend - cb(old);
820 
821 					for (; jj < end; jj++)
822 						if (cv(old)[jj] != 0)
823 							break;
824 
825 					/*
826 					 * Mark the matched samples in both c_blocks as verified.
827 					 * In reality, not all the samples are marked.  See
828 					 * stage1_matched() for details.
829 					 */
830 					if (jj < end) {
831 						stage1_matched(old, new, matchbegin, matchend, matchoffset, callback);
832 					} else {
833 						stage1_matched(old, new, matchbegin, matchend, matchoffset, CB_NULL);
834 					}
835 				}
836 				ret++;
837 
838 				/*
839 				 * Skip past this verified run to look for more matches.
840 				 */
841 				if (matchend - 1 > j)
842 					j = matchend - 1;
843 			}
844 		}
845 	}
846 #ifdef NOISY
847 	fprintf(stderr, "iterate_stage1: search area=%ld[%ld-%ld] tried=%ld matched=%ld spans=%ld\n",
848 		searchsize, searchbegin, searchend, tried, matched, ret);
849 #endif
850 
851 	return (ret);
852 }
853 
854 /*
855  * i_stage1() (internal)
856  *
857  * Compare newly read samples against previously read samples, searching
858  * for contiguous runs of identical samples.  Matching runs indicate that
859  * at least two reads of the CD returned identical data, with no dropped
860  * samples in that run.  The runs may be jittered (i.e. their absolute
861  * positions on the CD may not be accurate due to inaccurate seeking) at
862  * this point.  Their positions will be determined in stage 2.
863  *
864  * This function compares a new c_block against all other c_blocks in memory,
865  * searching for sufficiently long runs of identical samples.  Since each
866  * c_block represents a separate call to read_c_block, this ensures that
867  * multiple reads have returned identical data.  (Additionally, read_c_block
868  * varies the reads so that multiple reads are unlikely to produce identical
869  * errors, so any matches between reads are considered verified.  See
870  * i_read_c_block for more details.)
871  *
872  * Each time we find such a  run (longer than MIN_WORDS_SEARCH), we mark
873  * the samples as "verified" in both c_blocks.  Runs of verified samples in
874  * the new c_block are promoted into verified fragments, which will later
875  * be merged into the verified root in stage 2.
876  *
877  * In reality, not all the verified samples are marked as "verified."
878  * See stage1_matched() for an explanation.
879  *
880  * This function returns the number of verified fragments created by the
881  * stage 1 matching.
882  */
883 LOCAL long
i_stage1(p,new,callback)884 i_stage1(p, new, callback)
885 	cdrom_paranoia	*p;
886 	c_block		*new;
887 	void		(*callback) __PR((long, int));
888 {
889 
890 	long		size = cs(new);
891 	c_block		*ptr = c_last(p);
892 	int		ret = 0;
893 	long		begin = 0;
894 	long		end;
895 
896 	/*
897 	 * We're going to be comparing the new c_block against the other
898 	 * c_blocks in memory.  Initialize the "sort cache" index to allow
899 	 * for fast searching through the new c_block.  (The index will
900 	 * actually be built the first time we search.)
901 	 */
902 	if (ptr)
903 		sort_setup(p->sortcache, cv(new), &cb(new), cs(new),
904 			cb(new), ce(new));
905 
906 	/*
907 	 * Iterate from oldest to newest c_block, comparing the new c_block
908 	 * to each, looking for a sufficiently long run of identical samples
909 	 * (longer than MIN_WORDS_SEARCH), which will be marked as "verified"
910 	 * in both c_blocks.
911 	 *
912 	 * Since the new c_block is already in the list (at the head), don't
913 	 * compare it against itself.
914 	 */
915 	while (ptr && ptr != new) {
916 
917 		if (callback)
918 			(*callback) (cb(new), PARANOIA_CB_VERIFY);
919 		i_iterate_stage1(p, ptr, new, callback);
920 
921 		ptr = c_prev(ptr);
922 	}
923 
924 	/*
925 	 * parse the verified areas of new into v_fragments
926 	 *
927 	 * Find each run of contiguous verified samples in the new c_block
928 	 * and create a verified fragment from each run.
929 	 */
930 	begin = 0;
931 	while (begin < size) {
932 		for (; begin < size; begin++)
933 			if (new->flags[begin] & FLAGS_VERIFIED)
934 				break;
935 		for (end = begin; end < size; end++)
936 			if ((new->flags[end] & FLAGS_VERIFIED) == 0)
937 				break;
938 		if (begin >= size)
939 			break;
940 
941 		ret++;
942 
943 		/*
944 		 * We create a new verified fragment from the contiguous run
945 		 * of verified samples.
946 		 *
947 		 * We expand the "verified" range by OVERLAP_ADJ on each side
948 		 * to compensate for trimming done to the verified range by
949 		 * stage1_matched().  The samples were actually verified, and
950 		 * hence belong in the verified fragment.  See stage1_matched()
951 		 * for an explanation of the trimming.
952 		 */
953 		new_v_fragment(p, new, cb(new) + max(0, begin - OVERLAP_ADJ),
954 			cb(new) + min(size, end + OVERLAP_ADJ),
955 			(end + OVERLAP_ADJ >= size && new->lastsector));
956 
957 		begin = end;
958 	}
959 
960 	/*
961 	 * Return the number of distinct verified fragments we found with
962 	 * stage 1 matching.
963 	 */
964 	return (ret);
965 }
966 
967 /*
968  * STAGE 2 MATCHING
969  */
970 /*
971  * reconcile v_fragments to root buffer.  Free if matched, fragment/fixup root
972  * if necessary
973  *
974  * do *not* match using zero posts
975  */
976 /*
977  * i_iterate_stage2 (internal)
978  *
979  * This function searches for a sufficiently long run of identical samples
980  * between the passed verified fragment and the verified root.  The search
981  * is similar to that performed by i_iterate_stage1.  Of course, what we do
982  * as a result of a match is different.
983  *
984  * Our search is slightly different in that we refuse to match silence to
985  * silence.  All silence looks alike, and it would result in too many false
986  * positives here, so we handle silence separately.
987  *
988  * Also, because we're trying to determine whether this fragment as a whole
989  * overlaps with the root at all, we narrow our search (since it should match
990  * immediately or not at all).  This is in contrast to stage 1, where we
991  * search the entire vector looking for all possible matches.
992  *
993  * This function returns 0 if no match was found (including failure to find
994  * one due to silence), or 1 if we found a match.
995  *
996  * When a match is found, the sync_result_t is set to the boundaries of
997  * matching run (begin/end, in terms of the root) and how far out of sync
998  * the fragment is from the canonical root (offset).  Note that this offset
999  * is opposite in sign from the notion of offset used by try_sort_sync()
1000  * and stage 1 generally.
1001  */
1002 LOCAL long
i_iterate_stage2(p,v,r,callback)1003 i_iterate_stage2(p, v, r, callback)
1004 	cdrom_paranoia	*p;
1005 	v_fragment	*v;
1006 	sync_result	*r;
1007 	void		(*callback) __PR((long, int));
1008 {
1009 	root_block	*root = &(p->root);
1010 	long		matchbegin = -1;
1011 	long		matchend = -1;
1012 	long		offset;
1013 	long		fbv;
1014 	long		fev;
1015 
1016 #ifdef NOISY
1017 	fprintf(stderr, "Stage 2 search: fbv=%ld fev=%ld\n", fb(v), fe(v));
1018 #endif
1019 
1020 	/*
1021 	 * Quickly check whether there could possibly be any overlap between
1022 	 * the verified fragment and the root.  Our search will allow up to
1023 	 * (p->dynoverlap) jitter between the two, so we expand the fragment
1024 	 * search area by p->dynoverlap on both sides and see if that expanded
1025 	 * area overlaps with the root.
1026 	 *
1027 	 * We could just as easily expand root's boundaries by p->dynoverlap
1028 	 * instead and achieve the same result.
1029 	 */
1030 	if (min(fe(v) + p->dynoverlap, re(root)) -
1031 		max(fb(v) - p->dynoverlap, rb(root)) <= 0)
1032 		return (0);
1033 
1034 	if (callback)
1035 		(*callback) (fb(v), PARANOIA_CB_VERIFY);
1036 
1037 	/*
1038 	 * We're going to try to match the fragment to the root while allowing
1039 	 * for p->dynoverlap jitter, so we'll actually be looking at samples
1040 	 * in the fragment whose position claims to be up to p->dynoverlap
1041 	 * outside the boundaries of the root.  But, of course, don't extend
1042 	 * past the edges of the fragment.
1043 	 */
1044 	fbv = max(fb(v), rb(root) - p->dynoverlap);
1045 
1046 	/*
1047 	 * Skip past leading zeroes in the fragment, and bail if there's nothing
1048 	 * but silence.  We handle silence later separately.
1049 	 */
1050 	while (fbv < fe(v) && fv(v)[fbv - fb(v)] == 0)
1051 		fbv++;
1052 	if (fbv == fe(v))
1053 		return (0);
1054 
1055 	/*
1056 	 * This is basically the same idea as the initial calculation for fbv
1057 	 * above.  Look at samples up to p->dynoverlap outside the boundaries
1058 	 * of the root, but don't extend past the edges of the fragment.
1059 	 *
1060 	 * However, we also limit the search to no more than 256 samples.
1061 	 * Unlike stage 1, we're not trying to find all possible matches within
1062 	 * two runs -- rather, we're trying to see if the fragment as a whole
1063 	 * overlaps with the root.  If we can't find a match within 256 samples,
1064 	 * there's probably no match to be found (because this fragment doesn't
1065 	 * overlap with the root).
1066 	 *
1067 	 * "??? Is this why?  Why 256?" 256 is simply a 'large enough number'. --Monty
1068 	 */
1069 	fev = min(min(fbv + 256, re(root) + p->dynoverlap), fe(v));
1070 
1071 	{
1072 		/*
1073 		 * Because we'll allow for up to (p->dynoverlap) jitter between the
1074 		 * fragment and the root, we expand the search area (fbv to fev) by
1075 		 * p->dynoverlap on both sides.  But, because we're iterating through
1076 		 * root, we need to constrain the search area not to extend beyond
1077 		 * the root's boundaries.
1078 		 */
1079 		long		searchend = min(fev + p->dynoverlap, re(root));
1080 		long		searchbegin = max(fbv - p->dynoverlap, rb(root));
1081 		sort_info	*i = p->sortcache;
1082 		long		j;
1083 
1084 		/*
1085 		 * Initialize the "sort cache" index to allow for fast searching
1086 		 * through the verified fragment between (fbv,fev).  (The index will
1087 		 * actually be built the first time we search.)
1088 		 */
1089 		sort_setup(i, fv(v), &fb(v), fs(v), fbv, fev);
1090 		for (j = searchbegin; j < searchend; j += 23) {
1091 			/*
1092 			 * Skip past silence in the root.  If there are just a few silent
1093 			 * samples, the effect is minimal.  The real reason we need this is
1094 			 * for large regions of silence.  All silence looks alike, so you
1095 			 * could false-positive "match" two runs of silence that are either
1096 			 * unrelated or ought to be jittered, and try_sort_sync can't
1097 			 * accurately determine jitter (offset) from silence.
1098 			 *
1099 			 * Therefore, we want to post on a non-zero sample.  If there's
1100 			 * nothing but silence left in the root, bail.  We don't want
1101 			 * to match it here.
1102 			 */
1103 			while (j < searchend && rv(root)[j - rb(root)] == 0)
1104 				j++;
1105 			if (j == searchend)
1106 				break;
1107 
1108 			/*
1109 			 * Starting from the (non-zero) sample in the root with the absolute
1110 			 * position j, look for a matching run in the verified fragment.  This
1111 			 * search will look a certain distance around j, and if successful
1112 			 * will extend the matching run as far backward and forward as
1113 			 * it can.
1114 			 *
1115 			 * The search will only return 1 if it finds a matching run long
1116 			 * enough to be deemed significant.  Note that the search is limited
1117 			 * by the boundaries given to sort_setup() above.
1118 			 *
1119 			 * Note also that flags aren't used in stage 2 (since neither verified
1120 			 * fragments nor the root have them).
1121 			 */
1122 			if (try_sort_sync(p, i, (Uchar *)0, rc(root), j,
1123 					&matchbegin, &matchend, &offset, callback)) {
1124 
1125 				/*
1126 				 * If we found a matching run, we return the results of our match.
1127 				 *
1128 				 * Note that we flip the sign of (offset) because try_sort_sync()
1129 				 * returns it in terms of the fragment (i.e. what we add
1130 				 * to the fragment's position to yield the corresponding position
1131 				 * in the root), but here we consider the root to be canonical,
1132 				 * and so our returned "offset" reflects how the fragment is offset
1133 				 * from the root.
1134 				 *
1135 				 * E.g.: If the fragment's sample 10 corresponds to root's 12,
1136 				 * try_sort_sync() would return 2.  But since root is canonical,
1137 				 * we say that the fragment is off by -2.
1138 				 */
1139 				r->begin = matchbegin;
1140 				r->end = matchend;
1141 				r->offset = -offset;
1142 				if (offset)
1143 					if (callback)
1144 						(*callback) (r->begin, PARANOIA_CB_FIXUP_EDGE);
1145 				return (1);
1146 			}
1147 		}
1148 	}
1149 	return (0);
1150 }
1151 
1152 /*
1153  * i_silence_test() (internal)
1154  *
1155  * If the entire root is silent, or there's enough trailing silence
1156  * to be significant (MIN_SILENCE_BOUNDARY samples), mark the beginning
1157  * of the silence and "light" the silence flag.  This flag will remain lit
1158  * until i_silence_match() appends some non-silent samples to the root.
1159  *
1160  * We do this because if there's a long enough span of silence, we can't
1161  * reliably detect jitter or dropped samples within that span.  See
1162  * i_silence_match() for details on how we recover from this situation.
1163  */
1164 LOCAL void
i_silence_test(root)1165 i_silence_test(root)
1166 	root_block	*root;
1167 {
1168 	Int16_t		*vec = rv(root);
1169 	long		end = re(root) - rb(root) - 1;
1170 	long		j;
1171 
1172 	/*
1173 	 * Look backward from the end of the root to find the first non-silent
1174 	 * sample.
1175 	 */
1176 	for (j = end - 1; j >= 0; j--)
1177 		if (vec[j] != 0)
1178 			break;
1179 
1180 	/*
1181 	 * If the entire root is silent, or there's enough trailing silence
1182 	 * to be significant, mark the beginning of the silence and "light"
1183 	 * the silence flag.
1184 	 */
1185 	if (j < 0 || end - j > MIN_SILENCE_BOUNDARY) {
1186 		if (j < 0)
1187 			j = 0;
1188 		root->silenceflag = 1;
1189 		root->silencebegin = rb(root) + j;
1190 		if (root->silencebegin < root->returnedlimit)
1191 			root->silencebegin = root->returnedlimit;
1192 	}
1193 }
1194 
1195 /*
1196  * i_silence_match() (internal)
1197  *
1198  * This function is merges verified fragments into the verified root in cases
1199  * where there is a problematic amount of silence (MIN_SILENCE_BOUNDARY
1200  * samples) at the end of the root.
1201  *
1202  * We need a special approach because if there's a long enough span of
1203  * silence, we can't reliably detect jitter or dropped samples within that
1204  * span (since all silence looks alike).
1205  *
1206  * Only fragments that begin with MIN_SILENCE_BOUNDARY samples are eligible
1207  * to be merged in this case.  Fragments that are too far beyond the edge
1208  * of the root to possibly overlap are also disregarded.
1209  *
1210  * Our first approach is to assume that such fragments have no jitter (since
1211  * we can't establish otherwise) and merge them.  However, if it's clear
1212  * that there must be jitter (i.e. because non-silent samples overlap when
1213  * we assume no jitter), we assume the fragment has the minimum possible
1214  * jitter and then merge it.
1215  *
1216  * This function extends silence fairly aggressively, so it must be called
1217  * with fragments in ascending order (beginning position) in case there are
1218  * small non-silent regions within the silence.
1219  */
1220 LOCAL long
i_silence_match(root,v,callback)1221 i_silence_match(root, v, callback)
1222 	root_block	*root;
1223 	v_fragment	*v;
1224 	void		(*callback) __PR((long, int));
1225 {
1226 
1227 	cdrom_paranoia	*p = v->p;
1228 	Int16_t	*vec = fv(v);
1229 	long		end = fs(v);
1230 	long		begin;
1231 	long		j;
1232 
1233 	/*
1234 	 * See how much leading silence this fragment has.  If there are fewer than
1235 	 * MIN_SILENCE_BOUNDARY leading silent samples, we don't do this special
1236 	 * silence matching.
1237 	 *
1238 	 * This fragment could actually belong here, but we can't be sure unless
1239 	 * it has enough silence on its leading edge.  This fragment will likely
1240 	 * stick around until we do successfully extend the root, at which point
1241 	 * it will be merged using the usual method.
1242 	 */
1243 	if (end < MIN_SILENCE_BOUNDARY)
1244 		return (0);
1245 	for (j = 0; j < end; j++)
1246 		if (vec[j] != 0)
1247 			break;
1248 	if (j < MIN_SILENCE_BOUNDARY)
1249 		return (0);
1250 
1251 	/*
1252 	 * Convert the offset of the first non-silent sample to an absolute
1253 	 * position.  For the time being, we will assume that this position
1254 	 * is accurate, with no jitter.
1255 	 */
1256 	j += fb(v);
1257 
1258 	/*
1259 	 * If this fragment is ahead of the root, see if that could just be due
1260 	 * to jitter (if it's within p->dynoverlap samples of the end of root).
1261 	 */
1262 	if (fb(v) >= re(root) && fb(v) - p->dynoverlap < re(root)) {
1263 		/*
1264 		 * This fragment is within jitter range of the root, so we extend the
1265 		 * root's silence so that it overlaps with this fragment.  At this point
1266 		 * we know that the fragment has at least MIN_SILENCE_BOUNDARY silent
1267 		 * samples at the beginning, so we overlap by that amount.
1268 		 *
1269 		 * XXX dynarrays are not needed here.
1270 		 */
1271 		long		addto = fb(v) + MIN_SILENCE_BOUNDARY - re(root);
1272 #ifdef	HAVE_DYN_ARRAYS
1273 		Int16_t		avec[addto];
1274 #else
1275 #ifdef	HAVE_ALLOCA
1276 		Int16_t		*avec = alloca(addto * sizeof (Int16_t));
1277 #else
1278 		Int16_t		*avec = _pmalloc(addto * sizeof (Int16_t));
1279 
1280 		DBG_MALLOC_MARK(avec);
1281 #endif
1282 #endif
1283 
1284 		memset(avec, 0, addto * sizeof (Int16_t));
1285 		c_append(rc(root), avec, addto);
1286 #ifndef	HAVE_DYN_ARRAYS
1287 #ifndef	HAVE_ALLOCA
1288 		_pfree(avec);
1289 #endif
1290 #endif
1291 	}
1292 
1293 	/*
1294 	 * Calculate the overlap of the root's trailing silence and the fragment's
1295 	 * leading silence.  (begin,end) are the boundaries of that overlap.
1296 	 */
1297 	begin = max(fb(v), root->silencebegin);
1298 	end = min(j, re(root));
1299 
1300 	/*
1301 	 * If there is an overlap, we assume that both the root and the fragment
1302 	 * are jitter-free (since there's no way for us to tell otherwise).
1303 	 */
1304 	if (begin < end) {
1305 		/*
1306 		 * If the fragment will extend the root, then we append it to the root.
1307 		 * Otherwise, no merging is necessary, as the fragment should already
1308 		 * be contained within the root.
1309 		 */
1310 		if (fe(v) > re(root)) {
1311 			long		voff = begin - fb(v);
1312 
1313 			/*
1314 			 * Truncate the overlapping silence from the end of the root.
1315 			 */
1316 			c_remove(rc(root), begin - rb(root), -1);
1317 			/*
1318 			 * Append the fragment to the root, starting from the point of overlap.
1319 			 */
1320 			c_append(rc(root), vec + voff, fs(v) - voff);
1321 		}
1322 		/*
1323 		 * Record the fact that we merged this fragment assuming zero jitter.
1324 		 */
1325 		offset_add_value(p, &p->stage2, 0, callback);
1326 
1327 	} else {
1328 		/*
1329 		 * We weren't able to merge the fragment assuming zero jitter.
1330 		 *
1331 		 * Check whether the fragment's leading silence ends before the root's
1332 		 * trailing silence begins.  If it does, we assume that the root is
1333 		 * jittered forward.
1334 		 */
1335 		if (j < begin) {
1336 			/*
1337 			 * We're going to append the non-silent samples of the fragment
1338 			 * to the root where its silence begins.
1339 			 *
1340 			 * "??? This seems to be a very strange approach.  At this point
1341 			 *  the root has a lot of trailing silence, and the fragment has
1342 			 *  the lot of leading silence.  This merge will drop the silence
1343 			 *  and just splice the non-silence together.
1344 			 *
1345 			 *  In theory, rift analysis will either confirm or fix this result.
1346 			 *  What circumstances motivated this approach?"
1347 			 *
1348 			 * This is an 'all bets are off' situation and we choose to make
1349 			 * the best guess we can, based on absolute position being
1350 			 * returned by the most recent reads.  There are drives that
1351 			 * will randomly lose what they're doing during a read and just
1352 			 * pad out the results with zeros and return no error.  This at
1353 			 * least has a shot of addressing that situation. --Monty
1354 			 *
1355 			 * Compute the amount of silence at the beginning of the fragment.
1356 			 */
1357 			long		voff = j - fb(v);
1358 
1359 			/*
1360 			 * If attaching the non-silent tail of the fragment to the end
1361 			 * of the non-silent portion of the root will extend the root,
1362 			 * then we'll append the samples to the root.  Otherwise, no
1363 			 * merging is necessary, as the fragment should already be contained
1364 			 * within the root.
1365 			 */
1366 			if (begin + fs(v) - voff > re(root)) {
1367 				/*
1368 				 * Truncate the trailing silence from the root.
1369 				 */
1370 				c_remove(rc(root), root->silencebegin - rb(root), -1);
1371 				/*
1372 				 * Append the non-silent tail of the fragment to the root.
1373 				 */
1374 				c_append(rc(root), vec + voff, fs(v) - voff);
1375 			}
1376 			/*
1377 			 * Record the fact that we merged this fragment assuming (end-begin)
1378 			 * jitter.
1379 			 */
1380 			offset_add_value(p, &p->stage2, end - begin, callback);
1381 		} else {
1382 			/*
1383 			 * We only get here if the fragment is past the end of the root,
1384 			 * which means it must be farther than (dynoverlap) away, due to our
1385 			 * root extension above.
1386 			 *
1387 			 * We weren't able to merge this fragment into the root after all.
1388 			 */
1389 			return (0);
1390 		}
1391 	}
1392 
1393 	/*
1394 	 * We only get here if we merged the fragment into the root.  Update
1395 	 * the root's silence flag.
1396 	 *
1397 	 * Note that this is the only place silenceflag is reset.  In other words,
1398 	 * once i_silence_test() lights the silence flag, it can only be reset
1399 	 * by i_silence_match().
1400 	 */
1401 	root->silenceflag = 0;
1402 	/*
1403 	 * Now see if the new, extended root ends in silence.
1404 	 */
1405 	i_silence_test(root);
1406 
1407 	/*
1408 	 * Since we merged the fragment, we can free it now.  But first we propagate
1409 	 * its lastsector flag.
1410 	 */
1411 	if (v->lastsector)
1412 		root->lastsector = 1;
1413 	free_v_fragment(v);
1414 	return (1);
1415 }
1416 
1417 /*
1418  * i_stage2_each (internal)
1419  *
1420  * This function (which is entirely too long) attempts to merge the passed
1421  * verified fragment into the verified root.
1422  *
1423  * First this function looks for a run of identical samples between
1424  * the root and the fragment.  If it finds a long enough run, it then
1425  * checks for "rifts" (see below) and fixes the root and/or fragment as
1426  * necessary.  Finally, if the fragment will extend the tail of the root,
1427  * we merge the fragment and extend the root.
1428  *
1429  * Most of the ugliness in this function has to do with handling "rifts",
1430  * which are points of disagreement between the root and the verified
1431  * fragment.  This can happen when a drive consistently drops a few samples
1432  * or stutters and repeats a few samples.  It has to be consistent enough
1433  * to result in a verified fragment (i.e. it happens twice), but inconsistent
1434  * enough (e.g. due to the jiggled reads) not to happen every time.
1435  *
1436  * This function returns 1 if the fragment was successfully merged into the
1437  * root, and 0 if not.
1438  */
1439 LOCAL long
i_stage2_each(root,v,callback)1440 i_stage2_each(root, v, callback)
1441 	root_block	*root;
1442 	v_fragment	*v;
1443 	void		(*callback) __PR((long, int));
1444 {
1445 
1446 	cdrom_paranoia *p;
1447 	long		dynoverlap;
1448 
1449 	/*
1450 	 * If this fragment has already been merged & freed, abort.
1451 	 */
1452 	if (!v || !v->one)
1453 		return (0);
1454 
1455 	p = v->p;
1456 	/*
1457 	 * "??? Why do we round down to an even dynoverlap?" Dynoverlap is
1458 	 * in samples, not stereo frames --Monty
1459 	 */
1460 	dynoverlap = p->dynoverlap / 2 * 2;
1461 
1462 	/*
1463 	 * If there's no verified root yet, abort.
1464 	 */
1465 	if (!rv(root)) {
1466 		return (0);
1467 	} else {
1468 		sync_result	r;
1469 
1470 		/*
1471 		 * Search for a sufficiently long run of identical samples between
1472 		 * the verified fragment and the verified root.  There's a little
1473 		 * bit of subtlety in the search when silence is involved.
1474 		 */
1475 		if (i_iterate_stage2(p, v, &r, callback)) {
1476 			/*
1477 			 * Convert the results of the search to be relative to the root.
1478 			 */
1479 			long		begin = r.begin - rb(root);
1480 			long		end = r.end - rb(root);
1481 
1482 			/*
1483 			 * Convert offset into a value that will transform a relative
1484 			 * position in the root to the corresponding relative position in
1485 			 * the fragment.  I.e., if offset = -2, then the sample at relative
1486 			 * position 2 in the root is at relative position 0 in the fragment.
1487 			 *
1488 			 * While a bit opaque, this does reduce the number of calculations
1489 			 * below.
1490 			 */
1491 			long		offset = r.begin + r.offset - fb(v) - begin;
1492 			long		temp;
1493 			c_block		*l = NULL;
1494 
1495 			/*
1496 			 * we have a match! We don't rematch off rift, we chase
1497 			 * the match all the way to both extremes doing rift
1498 			 * analysis.
1499 			 */
1500 #ifdef NOISY
1501 			fprintf(stderr, "Stage 2 match\n");
1502 #endif
1503 
1504 			/*
1505 			 * Convert offset into a value that will transform a relative
1506 			 * position in the root to the corresponding relative position in
1507 			 * the fragment.  I.e., if offset = -2, then the sample at relative
1508 			 * position 2 in the root is at relative position 0 in the fragment.
1509 			 *
1510 			 * While a bit opaque, this does reduce the number of calculations
1511 			 * below.
1512 			 */
1513 			/*
1514 			 * Now that we've found a sufficiently long run of identical samples
1515 			 * between the fragment and the root, we need to check for rifts.
1516 			 *
1517 			 * A "rift", as mentioned above, is a disagreement between the
1518 			 * fragment and the root.  When there's a rift, the matching run
1519 			 * found by i_iterate_stage2() will obviously stop where the root
1520 			 * and the fragment disagree.
1521 			 *
1522 			 * So we detect rifts by checking whether the matching run extends
1523 			 * to the ends of the fragment and root.  If the run does extend to
1524 			 * the ends of the fragment and root, then all overlapping samples
1525 			 * agreed, and there's no rift.  If, however, the matching run
1526 			 * stops with samples left over in both the root and the fragment,
1527 			 * that means the root and fragment disagreed at that point.
1528 			 * Leftover samples at the beginning of the match indicate a
1529 			 * leading rift, and leftover samples at the end of the match indicate
1530 			 * a trailing rift.
1531 			 *
1532 			 * Once we detect a rift, we attempt to fix it, depending on the
1533 			 * nature of the disagreement.  See i_analyze_rift_[rf] for details
1534 			 * on how we determine what kind of rift it is.  See below for
1535 			 * how we attempt to fix the rifts.
1536 			 */
1537 
1538 			/*
1539 			 * First, check for a leading rift, fix it if possible, and then
1540 			 * extend the match forward until either we hit the limit of the
1541 			 * overlapping samples, or until we encounter another leading rift.
1542 			 * Keep doing this until we hit the beginning of the overlap.
1543 			 *
1544 			 * Note that while we do fix up leading rifts, we don't extend
1545 			 * the root backward (earlier samples) -- only forward (later
1546 			 * samples).
1547 			 */
1548 
1549 			/*
1550 			 * If the beginning of the match didn't reach the beginning of
1551 			 * either the fragment or the root, we have a leading rift to be
1552 			 * examined.
1553 			 *
1554 			 * Remember that (begin) is the offset into the root, and (begin+offset)
1555 			 * is the equivalent offset into the fragment.  If neither one is at
1556 			 * zero, then they both have samples before the match, and hence a
1557 			 * rift.
1558 			 */
1559 			while ((begin + offset > 0 && begin > 0)) {
1560 				long		matchA = 0,
1561 						matchB = 0,
1562 						matchC = 0;
1563 				/*
1564 				 * (begin) is the offset into the root of the first matching sample,
1565 				 * (beginL) is the offset into the fragment of the first matching
1566 				 * sample.  These samples are at the edge of the rift.
1567 				 */
1568 				long		beginL = begin + offset;
1569 
1570 				/*
1571 				 * The first time we encounter a leading rift, allocate a
1572 				 * scratch copy of the verified fragment which we'll use if
1573 				 * we need to fix up the fragment before merging it into
1574 				 * the root.
1575 				 */
1576 				if (l == NULL) {
1577 					Int16_t	*buff = _pmalloc(fs(v) * sizeof (Int16_t));
1578 
1579 					DBG_MALLOC_MARK(buff);
1580 					l = c_alloc(buff, fb(v), fs(v));
1581 					DBG_MALLOC_MARK(l);
1582 					memcpy(buff, fv(v), fs(v) * sizeof (Int16_t));
1583 				}
1584 
1585 				/* BEGIN CSTYLED */
1586 				/*
1587 				 * Starting at the first mismatching sample, see how far back the
1588 				 * rift goes, and determine what kind of rift it is.  Note that
1589 				 * we're searching through the fixed up copy of the fragment.
1590 				 *
1591 				 * matchA  > 0 if there are samples missing from the root
1592 				 * matchA  < 0 if there are duplicate samples (stuttering) in the root
1593 				 * matchB  > 0 if there are samples missing from the fragment
1594 				 * matchB  < 0 if there are duplicate samples in the fragment
1595 				 * matchC != 0 if there's a section of garbage, after which
1596 				 *             the fragment and root agree and are in sync
1597 				 */
1598 				/* END CSTYLED */
1599 				i_analyze_rift_r(rv(root), cv(l),
1600 					rs(root), cs(l),
1601 					begin - 1, beginL - 1,
1602 					&matchA, &matchB, &matchC);
1603 
1604 #ifdef NOISY
1605 				fprintf(stderr, "matching rootR: matchA:%ld matchB:%ld matchC:%ld\n",
1606 					matchA, matchB, matchC);
1607 #endif
1608 				/*
1609 				 * "??? The root.returnedlimit checks below are presently a mystery."
1610 				 * Those are for the case where our backtracking wants to take
1611 				 * us to back before bytes we've already returned to the
1612 				 * application.  In short, it's a "we're screwed"
1613 				 * check. --Monty
1614 				 */
1615 				if (matchA) {
1616 					/*
1617 					 * There's a problem with the root
1618 					 */
1619 					if (matchA > 0) {
1620 						/*
1621 						 * There were (matchA) samples dropped from the root.  We'll add
1622 						 * them back from the fixed up fragment.
1623 						 */
1624 						if (callback)
1625 							(*callback) (begin + rb(root) - 1, PARANOIA_CB_FIXUP_DROPPED);
1626 						if (rb(root) + begin < p->root.returnedlimit)
1627 							break;
1628 						else {
1629 							/*
1630 							 * At the edge of the rift in the root, insert the missing
1631 							 * samples from the fixed up fragment.  They're the (matchA)
1632 							 * samples immediately preceding the edge of the rift in the
1633 							 * fragment.
1634 							 */
1635 							c_insert(rc(root), begin, cv(l) + beginL - matchA,
1636 								matchA);
1637 
1638 							/*
1639 							 * We just inserted (matchA) samples into the root, so update
1640 							 * our begin/end offsets accordingly.  Also adjust the
1641 							 * (offset) to compensate (since we use it to find samples in
1642 							 * the fragment, and the fragment hasn't changed).
1643 							 */
1644 							offset -= matchA;
1645 							begin += matchA;
1646 							end += matchA;
1647 						}
1648 					} else {
1649 						/*
1650 						 * There were (-matchA) duplicate samples (stuttering) in the
1651 						 * root.  We'll drop them.
1652 						 */
1653 						if (callback)
1654 							(*callback) (begin + rb(root) - 1, PARANOIA_CB_FIXUP_DUPED);
1655 						if (rb(root) + begin + matchA < p->root.returnedlimit)
1656 							break;
1657 						else {
1658 							/*
1659 							 * Remove the (-matchA) samples immediately preceding the
1660 							 * edge of the rift in the root.
1661 							 */
1662 							c_remove(rc(root), begin + matchA, -matchA);
1663 
1664 							/*
1665 							 * We just removed (-matchA) samples from the root, so update
1666 							 * our begin/end offsets accordingly.  Also adjust the offset
1667 							 * to compensate.  Remember that matchA < 0, so we're actually
1668 							 * subtracting from begin/end.
1669 							 */
1670 							offset -= matchA;
1671 							begin += matchA;
1672 							end += matchA;
1673 						}
1674 					}
1675 				} else if (matchB) {
1676 					/*
1677 					 * There's a problem with the fragment
1678 					 */
1679 					if (matchB > 0) {
1680 						/*
1681 						 * There were (matchB) samples dropped from the fragment.  We'll
1682 						 * add them back from the root.
1683 						 */
1684 						if (callback)
1685 							(*callback) (begin + rb(root) - 1, PARANOIA_CB_FIXUP_DROPPED);
1686 
1687 						/*
1688 						 * At the edge of the rift in the fragment, insert the missing
1689 						 * samples from the root.  They're the (matchB) samples
1690 						 * immediately preceding the edge of the rift in the root.
1691 						 * Note that we're fixing up the scratch copy of the fragment.
1692 						 */
1693 						c_insert(l, beginL, rv(root) + begin - matchB,
1694 							matchB);
1695 
1696 						/*
1697 						 * We just inserted (matchB) samples into the fixed up fragment,
1698 						 * so update (offset), since we use it to find samples in the
1699 						 * fragment based on the root's unchanged offsets.
1700 						 */
1701 						offset += matchB;
1702 					} else {
1703 						/*
1704 						 * There were (-matchB) duplicate samples (stuttering) in the
1705 						 * fixed up fragment.  We'll drop them.
1706 						 */
1707 						if (callback)
1708 							(*callback) (begin + rb(root) - 1, PARANOIA_CB_FIXUP_DUPED);
1709 
1710 						/*
1711 						 * Remove the (-matchB) samples immediately preceding the edge
1712 						 * of the rift in the fixed up fragment.
1713 						 */
1714 						c_remove(l, beginL + matchB, -matchB);
1715 
1716 						/*
1717 						 * We just removed (-matchB) samples from the fixed up fragment,
1718 						 * so update (offset), since we use it to find samples in the
1719 						 * fragment based on the root's unchanged offsets.
1720 						 */
1721 						offset += matchB;
1722 					}
1723 				} else if (matchC) {
1724 					/*
1725 					 * There are (matchC) samples that simply disagree between the
1726 					 * fragment and the root.  On the other side of the mismatch, the
1727 					 * fragment and root agree again.  We can't classify the mismatch
1728 					 * as either a stutter or dropped samples, and we have no way of
1729 					 * telling whether the fragment or the root is right.
1730 					 *
1731 					 * "The original comment indicated that we set "disagree"
1732 					 * flags in the root, but it seems to be historical."  The
1733 					 * disagree flags were from a time when we did interpolation
1734 					 * over samples we simply couldn't get to agree.  Yes,
1735 					 * historical functionality that didn;t work well. --Monty
1736 					 */
1737 					if (rb(root) + begin - matchC < p->root.returnedlimit)
1738 						break;
1739 
1740 					/*
1741 					 * Overwrite the mismatching (matchC) samples in root with the
1742 					 * samples from the fixed up fragment.
1743 					 *
1744 					 * "??? Do we think the fragment is more likely correct, is this
1745 					 * just arbitrary, or is there some other reason for overwriting
1746 					 * the root?"
1747 					 *   We think these samples are more likely to be correct --Monty
1748 					 */
1749 					c_overwrite(rc(root), begin - matchC,
1750 						cv(l) + beginL - matchC, matchC);
1751 
1752 				} else {
1753 					/*
1754 					 * We may have had a mismatch because we ran into leading silence.
1755 					 *
1756 					 * "??? To be studied: why would this cause a mismatch?
1757 					 * Neither i_analyze_rift_r nor i_iterate_stage2() nor
1758 					 * i_paranoia_overlap() appear to take silence into
1759 					 * consideration in this regard.  It could be due to our
1760 					 * skipping of silence when searching for a match."  Jitter
1761 					 * and or skipping in sections of silence could end up with
1762 					 * two sets of verified vectors that agree completely except
1763 					 * for the length of the silence.  Silence is a huge bugaboo
1764 					 * in general because there's no entropy within it to base
1765 					 * verification on. --Monty
1766 					 *
1767 					 * Since we don't extend the root in that direction, we don't
1768 					 * do anything, just move on to trailing rifts.
1769 					 */
1770 					/*
1771 					 * If the rift was too complex to fix (see i_analyze_rift_r),
1772 					 * we just stop and leave the leading edge where it is.
1773 					 */
1774 					/* RRR(*callback)(post,PARANOIA_CB_XXX); */
1775 					break;
1776 				}
1777 				/*
1778 				 * Recalculate the offset of the edge of the rift in the fixed
1779 				 * up fragment, in case it changed.
1780 				 *
1781 				 * "??? Why is this done here rather than in the (matchB) case above,
1782 				 * which should be the only time beginL will change."
1783 				 * Because there's no reason not to? --Monty
1784 				 */
1785 				beginL = begin + offset;
1786 
1787 				/*
1788 				 * Now that we've fixed up the root or fragment as necessary, see
1789 				 * how far we can extend the matching run.  This function is
1790 				 * overkill, as it tries to extend the matching run in both
1791 				 * directions (and rematches what we already matched), but it works.
1792 				 */
1793 				i_paranoia_overlap(rv(root), cv(l),
1794 					begin, beginL,
1795 					rs(root), cs(l),
1796 					&begin, &end);
1797 			}
1798 
1799 			/*
1800 			 * Second, check for a trailing rift, fix it if possible, and then
1801 			 * extend the match forward until either we hit the limit of the
1802 			 * overlapping samples, or until we encounter another trailing rift.
1803 			 * Keep doing this until we hit the end of the overlap.
1804 			 *
1805 			 * If the end of the match didn't reach the end of either the fragment
1806 			 * or the root, we have a trailing rift to be examined.
1807 			 *
1808 			 * Remember that (end) is the offset into the root, and (end+offset)
1809 			 * is the equivalent offset into the fragment.  If neither one is
1810 			 * at the end of the vector, then they both have samples after the
1811 			 * match, and hence a rift.
1812 			 *
1813 			 * (temp) is the size of the (potentially fixed-up) fragment.  If
1814 			 * there was a leading rift, (l) is the fixed up fragment, and
1815 			 * (offset) is now relative to it.
1816 			 */
1817 			temp = l ? cs(l) : fs(v);
1818 			while (end + offset < temp && end < rs(root)) {
1819 				long	matchA = 0,
1820 					matchB = 0,
1821 					matchC = 0;
1822 				/*
1823 				 * (begin) is the offset into the root of the first matching sample,
1824 				 * (beginL) is the offset into the fragment of the first matching
1825 				 * sample.  We know these samples match and will use these offsets
1826 				 * later when we try to extend the matching run.
1827 				 */
1828 				long	beginL = begin + offset;
1829 				/*
1830 				 * (end) is the offset into the root of the first mismatching sample
1831 				 * after the matching run, (endL) is the offset into the fragment of
1832 				 * the equivalent sample.  These samples are at the edge of the rift.
1833 				 */
1834 				long	endL = end + offset;
1835 
1836 				/*
1837 				 * The first time we encounter a rift, allocate a scratch copy of
1838 				 * the verified fragment which we'll use if we need to fix up the
1839 				 * fragment before merging it into the root.
1840 				 *
1841 				 * Note that if there was a leading rift, we'll already have
1842 				 * this (potentially fixed-up) scratch copy allocated.
1843 				 */
1844 				if (l == NULL) {
1845 					Int16_t	*buff = _pmalloc(fs(v) * sizeof (Int16_t));
1846 
1847 					DBG_MALLOC_MARK(buff);
1848 					l = c_alloc(buff, fb(v), fs(v));
1849 					DBG_MALLOC_MARK(l);
1850 					memcpy(buff, fv(v), fs(v) * sizeof (Int16_t));
1851 				}
1852 
1853 				/* BEGIN CSTYLED */
1854 				/*
1855 				 * Starting at the first mismatching sample, see how far forward the
1856 				 * rift goes, and determine what kind of rift it is.  Note that we're
1857 				 * searching through the fixed up copy of the fragment.
1858 				 *
1859 				 * matchA  > 0 if there are samples missing from the root
1860 				 * matchA  < 0 if there are duplicate samples (stuttering) in the root
1861 				 * matchB  > 0 if there are samples missing from the fragment
1862 				 * matchB  < 0 if there are duplicate samples in the fragment
1863 				 * matchC != 0 if there's a section of garbage, after which
1864 				 *             the fragment and root agree and are in sync
1865 				 */
1866 				/* END CSTYLED */
1867 				i_analyze_rift_f(rv(root), cv(l),
1868 					rs(root), cs(l),
1869 					end, endL,
1870 					&matchA, &matchB, &matchC);
1871 
1872 #ifdef NOISY
1873 				fprintf(stderr, "matching rootF: matchA:%ld matchB:%ld matchC:%ld\n",
1874 					matchA, matchB, matchC);
1875 #endif
1876 
1877 				if (matchA) {
1878 					/*
1879 					 * There's a problem with the root
1880 					 */
1881 					if (matchA > 0) {
1882 						/*
1883 						 * There were (matchA) samples dropped from the root.  We'll add
1884 						 * them back from the fixed up fragment.
1885 						 */
1886 						if (callback)
1887 							(*callback) (end + rb(root), PARANOIA_CB_FIXUP_DROPPED);
1888 						if (end + rb(root) < p->root.returnedlimit)
1889 							break;
1890 
1891 						/*
1892 						 * At the edge of the rift in the root, insert the missing
1893 						 * samples from the fixed up fragment.  They're the (matchA)
1894 						 * samples immediately preceding the edge of the rift in the
1895 						 * fragment.
1896 						 */
1897 						c_insert(rc(root), end, cv(l) + endL, matchA);
1898 
1899 						/*
1900 						 * Although we just inserted samples into the root, we did so
1901 						 * after (begin) and (end), so we needn't update those offsets.
1902 						 */
1903 					} else {
1904 						/*
1905 						 * There were (-matchA) duplicate samples (stuttering) in the
1906 						 * root.  We'll drop them.
1907 						 */
1908 						if (callback)
1909 							(*callback) (end + rb(root), PARANOIA_CB_FIXUP_DUPED);
1910 						if (end + rb(root) < p->root.returnedlimit)
1911 							break;
1912 
1913 						/*
1914 						 * Remove the (-matchA) samples immediately following the edge
1915 						 * of the rift in the root.
1916 						 */
1917 						c_remove(rc(root), end, -matchA);
1918 
1919 						/*
1920 						 * Although we just removed samples from the root, we did so
1921 						 * after (begin) and (end), so we needn't update those offsets.
1922 						 */
1923 					}
1924 				} else if (matchB) {
1925 					/*
1926 					 * There's a problem with the fragment
1927 					 */
1928 					if (matchB > 0) {
1929 						/*
1930 						 * There were (matchB) samples dropped from the fragment.  We'll
1931 						 * add them back from the root.
1932 						 */
1933 						if (callback)
1934 							(*callback) (end + rb(root), PARANOIA_CB_FIXUP_DROPPED);
1935 
1936 						/*
1937 						 * At the edge of the rift in the fragment, insert the missing
1938 						 * samples from the root.  They're the (matchB) samples
1939 						 * immediately following the dge of the rift in the root.
1940 						 * Note that we're fixing up the scratch copy of the fragment.
1941 						 */
1942 						c_insert(l, endL, rv(root) + end, matchB);
1943 
1944 						/*
1945 						 * Although we just inserted samples into the fragment, we did so
1946 						 * after (begin) and (end), so (offset) hasn't changed either.
1947 						 */
1948 					} else {
1949 						/*
1950 						 * There were (-matchB) duplicate samples (stuttering) in thea
1951 						 * fixed up fragment.  We'll drop them.
1952 						 */
1953 						if (callback)
1954 							(*callback) (end + rb(root), PARANOIA_CB_FIXUP_DUPED);
1955 
1956 						/*
1957 						 * Remove the (-matchB) samples immediately following the edge
1958 						 * of the rift in the fixed up fragment.
1959 						 */
1960 						c_remove(l, endL, -matchB);
1961 
1962 						/*
1963 						 * Although we just removed samples from the fragment, we did so
1964 						 * after (begin) and (end), so (offset) hasn't changed either.
1965 						 */
1966 					}
1967 				} else if (matchC) {
1968 					/*
1969 					 * There are (matchC) samples that simply disagree between the
1970 					 * fragment and the root.  On the other side of the mismatch, the
1971 					 * fragment and root agree again.  We can't classify the mismatch
1972 					 * as either a stutter or dropped samples, and we have no way of
1973 					 * telling whether the fragment or the root is right.
1974 					 *
1975 					 * The original comment indicated that we set "disagree" flags
1976 					 * in the root, but it seems to be historical.
1977 					 */
1978 					if (end + rb(root) < p->root.returnedlimit)
1979 						break;
1980 
1981 					/*
1982 					 * Overwrite the mismatching (matchC) samples in root with the
1983 					 * samples from the fixed up fragment.
1984 					 */
1985 					c_overwrite(rc(root), end, cv(l) + endL, matchC);
1986 				} else {
1987 					/*
1988 					 * We may have had a mismatch because we ran into trailing silence.
1989 					 *
1990 					 * At this point we have a trailing rift.  We check whether
1991 					 * one of the vectors (fragment or root) has trailing silence.
1992 					 */
1993 					analyze_rift_silence_f(rv(root), cv(l),
1994 						rs(root), cs(l),
1995 						end, endL,
1996 						&matchA, &matchB);
1997 					if (matchA) {
1998 						/*
1999 						 * The contents of the root's trailing rift are silence.  The
2000 						 * fragment's are not (otherwise there wouldn't be a rift).
2001 						 * We therefore assume that the root has garbage from this
2002 						 * point forward and truncate it.
2003 						 *
2004 						 * This will have the effect of eliminating the trailing
2005 						 * rift, causing the fragment's samples to be appended to
2006 						 * the root.
2007 						 *
2008 						 * Can only do this if we haven't
2009 						 * already returned data
2010 						 */
2011 						if (end + rb(root) >= p->root.returnedlimit) {
2012 							c_remove(rc(root), end, -1);
2013 						}
2014 					} else if (matchB) {
2015 						/*
2016 						 * The contents of the fragment's trailing rift are silence.
2017 						 * The root's are not (otherwise there wouldn't be a rift).
2018 						 * We therefore assume that the fragment has garbage from this
2019 						 * point forward.
2020 						 *
2021 						 * We needn't actually truncate the fragment, because the root
2022 						 * has already been fixed up from this fragment as much as
2023 						 * possible, and the truncated fragment wouldn't extend the
2024 						 * root.  Therefore, we can consider this (truncated) fragment
2025 						 * to be already merged into the root.  So we dispose of it and
2026 						 * return a success.
2027 						 */
2028 						if (l)
2029 							i_cblock_destructor(l);
2030 						free_v_fragment(v);
2031 						return (1);
2032 
2033 					} else {
2034 						/*
2035 						 * If the rift was too complex to fix (see i_analyze_rift_f),
2036 						 * we just stop and leave the trailing edge where it is.
2037 						 */
2038 						/* RRR(*callback)(post,PARANOIA_CB_XXX); */
2039 					}
2040 					break;
2041 				}
2042 
2043 				/*
2044 				 * Now that we've fixed up the root or fragment as necessary, see
2045 				 * how far we can extend the matching run.  This function is
2046 				 * overkill, as it tries to extend the matching run in both
2047 				 * directions (and rematches what we already matched), but it works.
2048 				 */
2049 				i_paranoia_overlap(rv(root), cv(l),
2050 					begin, beginL,
2051 					rs(root), cs(l),
2052 					(long *)0, &end);
2053 			}
2054 
2055 			/*
2056 			 * Third and finally, if the overlapping verified fragment extends
2057 			 * our range forward (later samples), we append ("glom") the new
2058 			 * samples to the end of the root.
2059 			 *
2060 			 * Note that while we did fix up leading rifts, we don't extend
2061 			 * the root backward (earlier samples) -- only forward (later
2062 			 * samples).
2063 			 *
2064 			 * This is generally fine, since the verified root is supposed to
2065 			 * slide from earlier samples to later samples across multiple calls
2066 			 * to paranoia_read().
2067 			 *
2068 			 * "??? But, is this actually right?  Because of this, we don't
2069 			 * extend the root to hold the earliest read sample, if we
2070 			 * happened to initialize the root with a later sample due to
2071 			 * jitter.  There are probably some ugly side effects from
2072 			 * extending the root backward, in the general case, but it may
2073 			 * not be so dire if we're near sample 0.  To be investigated."
2074 			 * In the begin case, any start position is arbitrary due to
2075 			 * inexact seeking.  Later, we can't back-extend the root as the
2076 			 * samples preceeding the beginning have already been returned
2077 			 * to the application! --Monty
2078 			 */
2079 			{
2080 				long	sizeA = rs(root);
2081 				long	sizeB;
2082 				long	vecbegin;
2083 				Int16_t	*vector;
2084 
2085 				/*
2086 				 * If there were any rifts, we'll use the fixed up fragment (l),
2087 				 * otherwise, we use the original fragment (v).
2088 				 */
2089 				if (l) {
2090 					sizeB = cs(l);
2091 					vector = cv(l);
2092 					vecbegin = cb(l);
2093 				} else {
2094 					sizeB = fs(v);
2095 					vector = fv(v);
2096 					vecbegin = fb(v);
2097 				}
2098 
2099 				/*
2100 				 * Convert the fragment-relative offset (sizeB) into an offset
2101 				 * relative to the root (A), and see if the offset is past the
2102 				 * end of the root (> sizeA).  If it is, this fragment will extend
2103 				 * our root.
2104 				 *
2105 				 * "??? Why do we check for v->lastsector separately?" Because
2106 				 * of the case where root extends *too* far; if we never get a
2107 				 * read that accidentally extends that far again, we could
2108 				 * hang and loop forever. --Monty
2109 				 */
2110 				if (sizeB - offset > sizeA || v->lastsector) {
2111 					if (v->lastsector) {
2112 						root->lastsector = 1;
2113 					}
2114 
2115 					/*
2116 					 * "??? Why would end be < sizeA? Why do we truncate root?"
2117 					 * Because it can happen (seeking is very very inexact) and
2118 					 * end of disk tends to be very problematic in terms of
2119 					 * stopping point.  We also generally believe more recent
2120 					 * information over previous information when they disagree
2121 					 * and both are 'verified'. --Monty
2122 					 */
2123 					if (end < sizeA)
2124 						c_remove(rc(root), end, -1);
2125 
2126 					/*
2127 					 * Extend the root with the samples from the end of the
2128 					 * (potentially fixed up) fragment.
2129 					 */
2130 					if (sizeB - offset - end)
2131 						c_append(rc(root), vector + end + offset,
2132 							sizeB - offset - end);
2133 
2134 					/*
2135 					 * Any time we update the root we need to check whether it ends
2136 					 * with a large span of silence.
2137 					 */
2138 					i_silence_test(root);
2139 
2140 					/*
2141 					 * Add the dynoverlap offset into our stage 2 statistics.
2142 					 *
2143 					 * Note that we convert our peculiar offset (which is in terms of
2144 					 * the relative positions of samples within each vector) back into
2145 					 * the actual offset between what A considers sample N and what B
2146 					 * considers sample N.
2147 					 *
2148 					 * We do this at the end of rift handling because any original
2149 					 * offset returned by i_iterate_stage2() might have been due to
2150 					 * dropped or duplicated samples.  Once we've fixed up the root
2151 					 * and the fragment, we have an offset which more reliably
2152 					 * indicates jitter.
2153 					 */
2154 					offset_add_value(p, &p->stage2, offset + vecbegin - rb(root), callback);
2155 				}
2156 			}
2157 			if (l)
2158 				i_cblock_destructor(l);
2159 			free_v_fragment(v);
2160 			return (1);
2161 
2162 		} else {
2163 			/*
2164 			 * We were unable to merge this fragment into the root.
2165 			 *
2166 			 * Check whether the fragment should have overlapped with the root,
2167 			 * even taking possible jitter into account.  (I.e., If the fragment
2168 			 * ends so far before the end of the root that even (dynoverlap)
2169 			 * samples of jitter couldn't push it beyond the end of the root,
2170 			 * it should have overlapped.)
2171 			 *
2172 			 * It is, however, possible that we failed to match using the normal
2173 			 * tests because we're dealing with silence, which we handle
2174 			 * separately.
2175 			 *
2176 			 * If the fragment should have overlapped, and we're not dealing
2177 			 * with the special silence case, we don't know what to make of
2178 			 * this fragment, and we just discard it.
2179 			 */
2180 			if (fe(v) + dynoverlap < re(root) && !root->silenceflag) {
2181 				/*
2182 				 * It *should* have matched.  No good; free it.
2183 				 */
2184 				free_v_fragment(v);
2185 			}
2186 			/*
2187 			 * otherwise, we likely want this for an upcoming match
2188 			 * we don't free the sort info (if it was collected)
2189 			 */
2190 			return (0);
2191 
2192 		}
2193 	}
2194 }
2195 
2196 LOCAL int
i_init_root(root,v,begin,callback)2197 i_init_root(root, v, begin, callback)
2198 	root_block	*root;
2199 	v_fragment	*v;
2200 	long		begin;
2201 	void		(*callback) __PR((long, int));
2202 {
2203 	if (fb(v) <= begin && fe(v) > begin) {
2204 
2205 		root->lastsector = v->lastsector;
2206 		root->returnedlimit = begin;
2207 
2208 		if (rv(root)) {
2209 			i_cblock_destructor(rc(root));
2210 			rc(root) = NULL;
2211 		}
2212 		{
2213 			Int16_t		*buff = _pmalloc(fs(v) * sizeof (Int16_t));
2214 
2215 			DBG_MALLOC_MARK(buff);
2216 			memcpy(buff, fv(v), fs(v) * sizeof (Int16_t));
2217 			root->vector = c_alloc(buff, fb(v), fs(v));
2218 			DBG_MALLOC_MARK(root->vector);
2219 		}
2220 
2221 		/*
2222 		 * Check whether the new root has a long span of trailing silence.
2223 		 */
2224 		i_silence_test(root);
2225 
2226 		return (1);
2227 	} else
2228 		return (0);
2229 }
2230 
2231 LOCAL int
vsort(a,b)2232 vsort(a, b)
2233 	const void	*a;
2234 	const void	*b;
2235 {
2236 	return ((*(v_fragment **) a)->begin - (*(v_fragment **) b)->begin);
2237 }
2238 
2239 /*
2240  * i_stage2 (internal)
2241  *
2242  * This function attempts to extend the verified root by merging verified
2243  * fragments into it.  It keeps extending the tail end of the root until
2244  * it runs out of matching fragments.  See i_stage2_each (and
2245  * i_iterate_stage2) for details of fragment matching and merging.
2246  *
2247  * This function is called by paranoia_read_limited when the verified root
2248  * doesn't contain sufficient data to satisfy the request for samples.
2249  * If this function fails to extend the verified root far enough (having
2250  * exhausted the currently available verified fragments), the caller
2251  * will then read the device again to try and establish more verified
2252  * fragments.
2253  *
2254  * We first try to merge all the fragments in ascending order using the
2255  * standard method (i_stage2_each()), and then we try to merge the
2256  * remaining fragments using silence matching (i_silence_match())
2257  * if the root has a long span of trailing silence.  See the initial
2258  * comments on silence and  i_silence_match() for an explanation of this
2259  * distinction.
2260  *
2261  * This function returns the number of verified fragments successfully
2262  * merged into the verified root.
2263  */
2264 LOCAL int
i_stage2(p,beginword,endword,callback)2265 i_stage2(p, beginword, endword, callback)
2266 	cdrom_paranoia	*p;
2267 	long		beginword;
2268 	long		endword;
2269 	void		(*callback) __PR((long, int));
2270 {
2271 
2272 	int		flag = 1;
2273 	int		ret = 0;
2274 	root_block	*root = &(p->root);
2275 
2276 #ifdef NOISY
2277 	fprintf(stderr, "Fragments:%ld\n", p->fragments->active);
2278 	fflush(stderr);
2279 #endif
2280 
2281 	/*
2282 	 * even when the 'silence flag' is lit, we try to do non-silence
2283 	 * matching in the event that there are still audio vectors with
2284 	 * content to be sunk before the silence
2285 	 */
2286 
2287 	/*
2288 	 * This flag is not the silence flag.  Rather, it indicates whether
2289 	 * we succeeded in adding a verified fragment to the verified root.
2290 	 * In short, we keep adding fragments until we no longer find a
2291 	 * match.
2292 	 */
2293 	while (flag) {
2294 		/*
2295 		 * Convert the linked list of verified fragments into an array,
2296 		 * to be sorted in order of beginning sample position
2297 		 */
2298 		v_fragment	*first = v_first(p);
2299 		long		active = p->fragments->active,
2300 				count = 0;
2301 #ifdef	HAVE_DYN_ARRAYS
2302 		v_fragment	*list[active];
2303 #else
2304 		v_fragment	**list = _pmalloc(active * sizeof (v_fragment *));
2305 
2306 		DBG_MALLOC_MARK(list);
2307 #endif
2308 
2309 		while (first) {
2310 			v_fragment	*next = v_next(first);
2311 
2312 			list[count++] = first;
2313 			first = next;
2314 		}
2315 
2316 		/*
2317 		 * Reset the flag so that if we don't match any fragments, we
2318 		 * stop looping.  Then, proceed only if there are any fragments
2319 		 * to match.
2320 		 */
2321 		flag = 0;
2322 		if (count) {
2323 			/*
2324 			 * Sort the array of verified fragments in order of beginning
2325 			 * sample position.
2326 			 */
2327 			qsort(list, active, sizeof (v_fragment *), vsort);
2328 
2329 			/*
2330 			 * We don't check for the silence flag yet, because even if the
2331 			 * verified root ends in silence (and thus the silence flag is set),
2332 			 * there may be a non-silent region at the beginning of the verified
2333 			 * root, into which we can merge the verified fragments.
2334 			 *
2335 			 * Iterate through the verified fragments, starting at the fragment
2336 			 * with the lowest beginning sample position.
2337 			 */
2338 			for (count = 0; count < active; count++) {
2339 				first = list[count];
2340 
2341 				/*
2342 				 * Make sure this fragment hasn't already been merged (and
2343 				 * thus freed).
2344 				 */
2345 				if (first->one) {
2346 					/*
2347 					 * If we don't have a verified root yet, just promote the first
2348 					 * fragment (with lowest beginning sample) to be the verified
2349 					 * root.
2350 					 *
2351 					 * "??? It seems that this could be fairly arbitrary if jitter
2352 					 * is an issue.  If we've verified two fragments allegedly
2353 					 * beginning at "0" (which are actually slightly offset due to
2354 					 * jitter), the root might not begin at the earliest read
2355 					 * sample.  Additionally, because subsequent fragments are
2356 					 * only merged at the tail end of the root, this situation
2357 					 * won't be fixed by merging the earlier samples.
2358 					 *
2359 					 * Practically, this ends up not being critical since most
2360 					 * drives insert some extra silent samples at the beginning
2361 					 * of the stream.  Missing a few of them doesn't cause any
2362 					 * real lost data.  But it is non-deterministic."
2363 					 *
2364 					 * On such a drive, the entire act of CDDA read is highly
2365 					 * nondeterministic.  All redbook says is +/- 75 sectors.
2366 					 * If you insist on the earliest possible sample, you can
2367 					 * get into a situation where the first read was far earlier
2368 					 * than all the others and no other read ever repeats the
2369 					 * early positioning. --Monty
2370 					 */
2371 					if (rv(root) == NULL) {
2372 						if (i_init_root(&(p->root), first, beginword, callback)) {
2373 							free_v_fragment(first);
2374 
2375 							/*
2376 							 * Consider this a merged fragment, so set the flag
2377 							 * to keep looping.
2378 							 */
2379 							flag = 1;
2380 							ret++;
2381 						}
2382 					} else {
2383 						/*
2384 						 * Try to merge this fragment with the verified root,
2385 						 * extending the tail of the root.
2386 						 */
2387 						if (i_stage2_each(root, first, callback)) {
2388 							/*
2389 							 * If we successfully merged the fragment, set the flag
2390 							 * to keep looping.
2391 							 */
2392 							ret++;
2393 							flag = 1;
2394 						}
2395 					}
2396 				}
2397 			}
2398 
2399 			/*
2400 			 * silence handling
2401 			 *
2402 			 * If the verified root ends in a long span of silence, iterate
2403 			 * through the remaining unmerged fragments to see if they can be
2404 			 * merged using our special silence matching.
2405 			 */
2406 			if (!flag && p->root.silenceflag) {
2407 				for (count = 0; count < active; count++) {
2408 					first = list[count];
2409 
2410 					/*
2411 					 * Make sure this fragment hasn't already been merged (and
2412 					 * thus freed).
2413 					 */
2414 					if (first->one) {
2415 						if (rv(root) != NULL) {
2416 							/*
2417 							 * Try to merge the fragment into the root.  This will only
2418 							 * succeed if the fragment overlaps and begins with sufficient
2419 							 * silence to be a presumed match.
2420 							 *
2421 							 * Note that the fragments must be passed to i_silence_match()
2422 							 * in ascending order, as they are here.
2423 							 */
2424 							if (i_silence_match(root, first, callback)) {
2425 								/*
2426 								 * If we successfully merged the fragment, set the flag
2427 								 * to keep looping.
2428 								 */
2429 								ret++;
2430 								flag = 1;
2431 							}
2432 						}
2433 					}
2434 				}
2435 			}
2436 		}
2437 #ifndef	HAVE_DYN_ARRAYS
2438 		_pfree(list);
2439 #endif
2440 
2441 		/*
2442 		 * If we were able to extend the verified root at all during this pass
2443 		 * through the loop, loop again to see if we can merge any remaining
2444 		 * fragments with the extended root.
2445 		 */
2446 	}
2447 
2448 	/*
2449 	 * Return the number of fragments we successfully merged into the
2450 	 * verified root.
2451 	 */
2452 	return (ret);
2453 }
2454 
2455 LOCAL void
i_end_case(p,endword,callback)2456 i_end_case(p, endword, callback)
2457 	cdrom_paranoia	*p;
2458 	long		endword;
2459 	void		(*callback) __PR((long, int));
2460 {
2461 
2462 	root_block	*root = &p->root;
2463 
2464 	/*
2465 	 * have an 'end' flag; if we've just read in the last sector in a
2466 	 * session, set the flag.  If we verify to the end of a fragment
2467 	 * which has the end flag set, we're done (set a done flag).
2468 	 * Pad zeroes to the end of the read
2469 	 */
2470 	if (root->lastsector == 0)
2471 		return;
2472 	if (endword < re(root))
2473 		return;
2474 
2475 	{
2476 		long		addto = endword - re(root);
2477 		char		*temp = _pcalloc(addto, sizeof (char) * 2);
2478 
2479 		DBG_MALLOC_MARK(temp);
2480 		c_append(rc(root), (void *) temp, addto);
2481 		_pfree(temp);
2482 
2483 		/*
2484 		 * trash da cache
2485 		 */
2486 		paranoia_resetcache(p);
2487 
2488 	}
2489 }
2490 
2491 /*
2492  * We want to add a sector. Look through the caches for something that
2493  * spans.  Also look at the flags on the c_block... if this is an
2494  * obliterated sector, get a bit of a chunk past the obliteration.
2495  *
2496  * Not terribly smart right now, actually.  We can probably find
2497  * *some* match with a cache block somewhere.  Take it and continue it
2498  * through the skip
2499  */
2500 LOCAL void
verify_skip_case(p,callback)2501 verify_skip_case(p, callback)
2502 	cdrom_paranoia	*p;
2503 	void		(*callback) __PR((long, int));
2504 {
2505 
2506 	root_block	*root = &(p->root);
2507 	c_block		*graft = NULL;
2508 	int		vflag = 0;
2509 	int		gend = 0;
2510 	long		post;
2511 
2512 #ifdef NOISY
2513 	fprintf(stderr, "\nskipping\n");
2514 #endif
2515 
2516 	if (rv(root) == NULL) {
2517 		post = 0;
2518 	} else {
2519 		post = re(root);
2520 	}
2521 	if (post == -1)
2522 		post = 0;
2523 
2524 	if (callback)
2525 		(*callback) (post, PARANOIA_CB_SKIP);
2526 
2527 	if (p->enable & PARANOIA_MODE_NEVERSKIP)
2528 		return;
2529 
2530 	/*
2531 	 * We want to add a sector.  Look for a c_block that spans,
2532 	 * preferrably a verified area
2533 	 */
2534 	{
2535 		c_block		*c = c_first(p);
2536 
2537 		while (c) {
2538 			long		cbegin = cb(c);
2539 			long		cend = ce(c);
2540 
2541 			if (cbegin <= post && cend > post) {
2542 				long		vend = post;
2543 
2544 				if (c->flags[post - cbegin] & FLAGS_VERIFIED) {
2545 					/*
2546 					 * verified area!
2547 					 */
2548 					while (vend < cend && (c->flags[vend - cbegin] & FLAGS_VERIFIED))
2549 						vend++;
2550 					if (!vflag || vend > vflag) {
2551 						graft = c;
2552 						gend = vend;
2553 					}
2554 					vflag = 1;
2555 				} else {
2556 					/*
2557 					 * not a verified area
2558 					 */
2559 					if (!vflag) {
2560 						while (vend < cend && (c->flags[vend - cbegin] & FLAGS_VERIFIED) == 0)
2561 							vend++;
2562 						if (graft == NULL || gend > vend) {
2563 							/*
2564 							 * smallest unverified area
2565 							 */
2566 							graft = c;
2567 							gend = vend;
2568 						}
2569 					}
2570 				}
2571 			}
2572 			c = c_next(c);
2573 		}
2574 
2575 		if (graft) {
2576 			long		cbegin = cb(graft);
2577 			long		cend = ce(graft);
2578 
2579 			while (gend < cend && (graft->flags[gend - cbegin] & FLAGS_VERIFIED))
2580 				gend++;
2581 			gend = min(gend + OVERLAP_ADJ, cend);
2582 
2583 			if (rv(root) == NULL) {
2584 				Int16_t	*buff = _pmalloc(cs(graft));
2585 
2586 				DBG_MALLOC_MARK(buff);
2587 				memcpy(buff, cv(graft), cs(graft));
2588 				rc(root) = c_alloc(buff, cb(graft), cs(graft));
2589 				DBG_MALLOC_MARK(rc(root));
2590 			} else {
2591 				c_append(rc(root), cv(graft) + post - cbegin,
2592 					gend - post);
2593 			}
2594 
2595 			root->returnedlimit = re(root);
2596 			return;
2597 		}
2598 	}
2599 
2600 	/*
2601 	 * No?  Fine.  Great.  Write in some zeroes :-P
2602 	 */
2603 	{
2604 		void	*temp = _pcalloc(CD_FRAMESIZE_RAW, sizeof (Int16_t));
2605 
2606 		DBG_MALLOC_MARK(temp);
2607 		if (rv(root) == NULL) {
2608 			rc(root) = c_alloc(temp, post, CD_FRAMESIZE_RAW);
2609 			DBG_MALLOC_MARK(rc(root));
2610 		} else {
2611 			c_append(rc(root), temp, CD_FRAMESIZE_RAW);
2612 			_pfree(temp);
2613 		}
2614 		root->returnedlimit = re(root);
2615 	}
2616 }
2617 
2618 /*
2619  * toplevel
2620  */
2621 EXPORT void
paranoia_free(p)2622 paranoia_free(p)
2623 	cdrom_paranoia	*p;
2624 {
2625 	paranoia_resetall(p);
2626 	sort_free(p->sortcache);
2627 	free_list(p->cache, 1);
2628 	free_list(p->fragments, 1);
2629 	_pfree(p);
2630 }
2631 
2632 EXPORT void
paranoia_modeset(p,enable)2633 paranoia_modeset(p, enable)
2634 	cdrom_paranoia	*p;
2635 	int		enable;
2636 {
2637 	p->enable = enable;
2638 	if (enable & PARANOIA_MODE_C2CHECK) {
2639 		p->sectsize = CD_C2SIZE_RAW;
2640 		p->sectwords = CD_C2SIZE_RAW/2;
2641 	} else {
2642 		p->sectsize = CD_FRAMESIZE_RAW;
2643 		p->sectwords = CD_FRAMESIZE_RAW/2;
2644 	}
2645 }
2646 
2647 EXPORT int
paranoia_modeget(p)2648 paranoia_modeget(p)
2649 	cdrom_paranoia	*p;
2650 {
2651 	return (p->enable);
2652 }
2653 
2654 EXPORT void
paranoia_set_readahead(p,readahead)2655 paranoia_set_readahead(p, readahead)
2656 	cdrom_paranoia	*p;
2657 	int		readahead;
2658 {
2659 	p->readahead = readahead;
2660 	sort_free(p->sortcache);
2661 	p->sortcache = sort_alloc(p->readahead * CD_FRAMEWORDS);
2662 	DBG_MALLOC_MARK(p->sortcache);
2663 }
2664 
2665 EXPORT int
paranoia_get_readahead(p)2666 paranoia_get_readahead(p)
2667 	cdrom_paranoia	*p;
2668 {
2669 	return (p->readahead);
2670 }
2671 
2672 EXPORT long
paranoia_seek(p,seek,mode)2673 paranoia_seek(p, seek, mode)
2674 	cdrom_paranoia	*p;
2675 	long		seek;
2676 	int		mode;
2677 {
2678 	long	sector;
2679 	long	ret;
2680 
2681 	switch (mode) {
2682 	case SEEK_SET:
2683 		sector = seek;
2684 		break;
2685 	case SEEK_END:
2686 		sector = p->d_disc_lastsector(p->d) + seek;
2687 		break;
2688 	default:
2689 		sector = p->cursor + seek;
2690 		break;
2691 	}
2692 
2693 	if (p->d_sector_gettrack(p->d, sector) == -1)
2694 		return (-1);
2695 
2696 	i_cblock_destructor(p->root.vector);
2697 	p->root.vector = NULL;
2698 	p->root.lastsector = 0;
2699 	p->root.returnedlimit = 0;
2700 
2701 	ret = p->cursor;
2702 	p->cursor = sector;
2703 
2704 	i_paranoia_firstlast(p);
2705 
2706 	/*
2707 	 * Evil hack to fix pregap patch for NEC drives! To be rooted out in a10
2708 	 */
2709 	p->current_firstsector = sector;
2710 
2711 	return (ret);
2712 }
2713 
2714 LOCAL void
c2_audiocopy(to,from,nsectors)2715 c2_audiocopy(to, from, nsectors)
2716 	void	*to;
2717 	void	*from;
2718 	int	nsectors;
2719 {
2720 	char	*tocp = to;
2721 	char	*fromcp = from;
2722 
2723 	while (--nsectors >= 0) {
2724 		memmove(tocp, fromcp, CD_FRAMESIZE_RAW);
2725 		tocp += CD_FRAMESIZE_RAW;
2726 		fromcp += CD_C2SIZE_RAW;
2727 	}
2728 }
2729 
2730 /*
2731  * Stolen from readcd(1)
2732  */
2733 LOCAL int
bits(c)2734 bits(c)
2735 	int	c;
2736 {
2737 	int	n = 0;
2738 
2739 	if (c & 0x01)
2740 		n++;
2741 	if (c & 0x02)
2742 		n++;
2743 	if (c & 0x04)
2744 		n++;
2745 	if (c & 0x08)
2746 		n++;
2747 	if (c & 0x10)
2748 		n++;
2749 	if (c & 0x20)
2750 		n++;
2751 	if (c & 0x40)
2752 		n++;
2753 	if (c & 0x80)
2754 		n++;
2755 	return (n);
2756 }
2757 
2758 LOCAL void
c2_errcopy(to,from,nsectors,errp)2759 c2_errcopy(to, from, nsectors, errp)
2760 	void	*to;
2761 	void	*from;
2762 	int	nsectors;
2763 	struct c2errs *errp;
2764 {
2765 	char	dummy[CD_C2PTR_RAW * 4];
2766 	char	*tocp = to;
2767 	char	*fromcp = from;
2768 	int	errs = 0;
2769 	UInt8_t *ep;
2770 	UInt8_t e;
2771 	int	i;
2772 
2773 	errp->c2errs = 0;
2774 	errp->c2bytes = 0;
2775 	errp->c2secs = 0;
2776 	errp->c2maxerrs = 0;
2777 
2778 	while (--nsectors >= 0) {
2779 		if (to == NULL)
2780 			tocp = dummy;
2781 		ep = (UInt8_t *)(fromcp + CD_FRAMESIZE_RAW);
2782 		for (i = CD_C2PTR_RAW; --i >= 0; tocp += 4) {
2783 			if ((e = *ep++) != 0) {
2784 				if (e & 0xC0)
2785 					tocp[0] |= FLAGS_C2;
2786 				if (e & 0x30)
2787 					tocp[1] |= FLAGS_C2;
2788 				if (e & 0x0C)
2789 					tocp[2] |= FLAGS_C2;
2790 				if (e & 0x03)
2791 					tocp[3] |= FLAGS_C2;
2792 				errs += bits(e);
2793 			}
2794 		}
2795 		if (errs > 0) {
2796 			errp->c2bytes += errs;
2797 			errp->c2secs++;
2798 			if (errs > errp->c2maxerrs)
2799 				errp->c2maxerrs = errs;
2800 			errs = 0;
2801 		}
2802 
2803 		fromcp += CD_C2SIZE_RAW;
2804 	}
2805 	if (errp->c2secs > 0)
2806 		errp->c2errs++;
2807 }
2808 
2809 /*
2810  * read_c_block() (internal)
2811  *
2812  * This funtion reads many (p->readahead) sectors, encompassing at least
2813  * the requested words.
2814  *
2815  * It returns a c_block which encapsulates these sectors' data and sector
2816  * number.  The sectors come come from multiple low-level read requests.
2817  *
2818  * This function reads many sectors in order to exhaust any caching on the
2819  * drive itself, as caching would simply return the same incorrect data
2820  * over and over.  Paranoia depends on truly re-reading portions of the
2821  * disc to make sure the reads are accurate and correct any inaccuracies.
2822  *
2823  * Which precise sectors are read varies ("jiggles") between calls to
2824  * read_c_block, to prevent consistent errors across multiple reads
2825  * from being misinterpreted as correct data.
2826  *
2827  * The size of each low-level read is determined by the underlying driver
2828  * (p->d->nsectors), which allows the driver to specify how many sectors
2829  * can be read in a single request.  Historically, the Linux kernel could
2830  * only read 8 sectors at a time, with likely dropped samples between each
2831  * read request.  Other operating systems may have different limitations.
2832  *
2833  * This function is called by paranoia_read_limited(), which breaks the
2834  * c_block of read data into runs of samples that are likely to be
2835  * contiguous, verifies them and stores them in verified fragments, and
2836  * eventually merges the fragments into the verified root.
2837  *
2838  * This function returns the last c_block read or NULL on error.
2839  */
2840 EXPORT c_block *
i_read_c_block(p,beginword,endword,callback)2841 i_read_c_block(p, beginword, endword, callback)
2842 	cdrom_paranoia	*p;
2843 	long		beginword;
2844 	long		endword;
2845 	void		(*callback) __PR((long, int));
2846 {
2847 	/*
2848 	 * why do it this way?  We need to read lots of sectors to kludge
2849 	 * around stupid read ahead buffers on cheap drives, as well as avoid
2850 	 * expensive back-seeking. We also want to 'jiggle' the start address
2851 	 * to try to break borderline drives more noticeably (and make broken
2852 	 * drives with unaddressable sectors behave more often).
2853 	 */
2854 	long		readat;
2855 	long		firstread;
2856 	long		totaltoread = p->readahead;
2857 	long		sectatonce = p->nsectors;
2858 	long		driftcomp = (float) p->dyndrift / CD_FRAMEWORDS + .5;
2859 	c_block		*new = NULL;
2860 	root_block	*root = &p->root;
2861 	Int16_t		*buffer = NULL;
2862 	void		*bufbase = NULL;
2863 	Uchar		*flags = NULL;
2864 	long		fsize = 0;
2865 	long		sofar;
2866 	long		dynoverlap = (p->dynoverlap + CD_FRAMEWORDS - 1) / CD_FRAMEWORDS;
2867 	long		anyflag = 0;
2868 	int		reduce = 0;
2869 static	int		pagesize = -1;
2870 #define	valign(x, a)	(((char *)(x)) + ((a) - 1 - ((((UIntptr_t)(x))-1)%(a))))
2871 
2872 	/*
2873 	 * Calculate the first sector to read.  This calculation takes
2874 	 * into account the need to jitter the starting point of the read
2875 	 * to reveal consistent errors as well as the low reliability of
2876 	 * the edge words of a read.
2877 	 */
2878 
2879 	/*
2880 	 * What is the first sector to read?  want some pre-buffer if we're not
2881 	 * at the extreme beginning of the disc
2882 	 */
2883 	if (p->enable & (PARANOIA_MODE_VERIFY | PARANOIA_MODE_OVERLAP)) {
2884 
2885 		/*
2886 		 * we want to jitter the read alignment boundary
2887 		 */
2888 		long	target;
2889 
2890 		if (rv(root) == NULL || rb(root) > beginword)
2891 			target = p->cursor - dynoverlap;
2892 		else
2893 			target = re(root) / (CD_FRAMEWORDS) - dynoverlap;
2894 
2895 		if (target + MIN_SECTOR_BACKUP > p->lastread && target <= p->lastread)
2896 			target = p->lastread - MIN_SECTOR_BACKUP;
2897 
2898 		/*
2899 		 * we want to jitter the read alignment boundary, as some
2900 		 * drives, beginning from a specific point, will tend to
2901 		 * lose bytes between sectors in the same place.  Also, as
2902 		 * our vectors are being made up of multiple reads, we want
2903 		 * the overlap boundaries to move....
2904 		 */
2905 		readat = (target & (~((long) JIGGLE_MODULO - 1))) + p->jitter;
2906 		if (readat > target)
2907 			readat -= JIGGLE_MODULO;
2908 		p->jitter++;
2909 		if (p->jitter >= JIGGLE_MODULO)
2910 			p->jitter = 0;
2911 
2912 	} else {
2913 		readat = p->cursor;
2914 	}
2915 
2916 	readat += driftcomp;
2917 
2918 	/*
2919 	 * Create a new, empty c_block and add it to the head of the
2920 	 * list of c_blocks in memory.  It will be empty until the end of
2921 	 * this subroutine.
2922 	 */
2923 	if (p->enable & (PARANOIA_MODE_OVERLAP | PARANOIA_MODE_VERIFY)) {
2924 		flags = _pcalloc(totaltoread * CD_FRAMEWORDS, 1);
2925 		DBG_MALLOC_MARK(flags);
2926 		fsize = totaltoread * CD_FRAMEWORDS;
2927 		new = new_c_block(p);
2928 		recover_cache(p);
2929 	} else {
2930 		/*
2931 		 * in the case of root it's just the buffer
2932 		 */
2933 		paranoia_resetall(p);
2934 		new = new_c_block(p);
2935 	}
2936 
2937 	/*
2938 	 * Do not use valloc() as valloc() in glibc is buggy and returns memory
2939 	 * that cannot be passed to free().
2940 	 */
2941 	if (pagesize < 0) {
2942 #ifdef	_SC_PAGESIZE
2943 		pagesize = sysconf(_SC_PAGESIZE);
2944 #else
2945 		pagesize = getpagesize();
2946 #endif
2947 		if (pagesize < 0)
2948 			pagesize = 4096;	/* Just a guess */
2949 	}
2950 	reduce = pagesize / p->sectsize;
2951 	bufbase = _pmalloc(totaltoread * p->sectsize + pagesize);
2952 	DBG_MALLOC_MARK(bufbase);
2953 	buffer = (Int16_t *)valign(bufbase, pagesize);
2954 	sofar = 0;
2955 	firstread = -1;
2956 
2957 	/*
2958 	 * Issue each of the low-level reads; the optimal read size is
2959 	 * approximately the cachemodel's cdrom cache size.  The only reason
2960 	 * to read less would be memory considerations.
2961 	 *
2962 	 * p->readahead   = total number of sectors to read
2963 	 * p->d->nsectors = number of sectors to read per request
2964 	 */
2965 	/*
2966 	 * actual read loop
2967 	 */
2968 	while (sofar < totaltoread) {
2969 		long	secread = sectatonce;	/* number of sectors to read this request */
2970 		long	adjread = readat;	/* first sector to read for this request */
2971 		long	thisread;		/* how many sectors were read this request */
2972 
2973 		/*
2974 		 * don't under/overflow the audio session
2975 		 */
2976 		if (adjread < p->current_firstsector) {
2977 			secread -= p->current_firstsector - adjread;
2978 			adjread = p->current_firstsector;
2979 		}
2980 		if (adjread + secread - 1 > p->current_lastsector)
2981 			secread = p->current_lastsector - adjread + 1;
2982 
2983 		if (sofar + secread > totaltoread)
2984 			secread = totaltoread - sofar;
2985 
2986 		/*
2987 		 * If we are inside the buffer, the transfers are no longer
2988 		 * page aligned. Reduce the transfer size to avoid problems.
2989 		 * Such problems are definitely known to appear on FreeBSD.
2990 		 */
2991 		if ((sofar > 0) && (secread > (sectatonce - reduce)))
2992 			secread = sectatonce - reduce;
2993 
2994 		if (secread > 0) {
2995 			struct c2errs	c2errs;
2996 
2997 			/*
2998 			 * We only evaluate it when the PARANOIA_MODE_C2CHECK
2999 			 * flag is present, but let us be nice.
3000 			 */
3001 			c2errs.c2errs = c2errs.c2bytes = c2errs.c2secs =
3002 					c2errs.c2maxerrs = 0;
3003 
3004 			if (firstread < 0)
3005 				firstread = adjread;
3006 
3007 			/*
3008 			 * Issue the low-level read to the driver.
3009 			 *
3010 			 * If the low-level read returned too few sectors, pad the result
3011 			 * with null data and mark it as invalid (FLAGS_UNREAD).  We pad
3012 			 * because we're going to be appending further reads to the current
3013 			 * c_block.
3014 			 *
3015 			 * "???: Why not re-read?  It might be to keep you from getting
3016 			 * hung up on a bad sector.  Or it might be to avoid
3017 			 * interrupting the streaming as much as possible."
3018 			 *
3019 			 * There are drives on which you will never get a full read in
3020 			 * some positions.  They always abort out early due to firmware
3021 			 * boundary cases.  Reread will cause exactly the same thing to
3022 			 * happen again.  NEC MultiSpeed 4x is one such drive. In these
3023 			 * cases, you take what part of the read you know is good, and
3024 			 * you get substantially better performance. --Monty
3025 			 */
3026 			if ((thisread = p->d_read(p->d, buffer + sofar * p->sectwords, adjread,
3027 						secread)) < secread) {
3028 
3029 				if (thisread < 0)
3030 					thisread = 0;
3031 
3032 				/*
3033 				 * Uhhh... right.  Make something up. But
3034 				 * don't make us seek backward!
3035 				 */
3036 				if (callback)
3037 					(*callback) ((adjread + thisread) * CD_FRAMEWORDS, PARANOIA_CB_READERR);
3038 				memset(buffer + (sofar + thisread) * CD_FRAMEWORDS, 0,
3039 					CD_FRAMESIZE_RAW * (secread - thisread));
3040 				if (flags)
3041 					memset(flags + (sofar + thisread) * CD_FRAMEWORDS, FLAGS_UNREAD,
3042 						CD_FRAMEWORDS * (secread - thisread));
3043 			}
3044 			if (thisread != 0)
3045 				anyflag = 1;
3046 
3047 			/* BEGIN CSTYLED */
3048 			/*
3049 			 * Because samples are likely to be dropped between read requests,
3050 			 * mark the samples near the the boundaries of the read requests
3051 			 * as suspicious (FLAGS_EDGE).  This means that any span of samples
3052 			 * against which these adjacent read requests are compared must
3053 			 * overlap beyond the edges and into the more trustworthy data.
3054 			 * Such overlapping spans are accordingly at least MIN_WORDS_OVERLAP
3055 			 * words long (and naturally longer if any samples were dropped
3056 			 * between the read requests).
3057 			 *
3058 			 *          (EEEEE...overlapping span...EEEEE)
3059 			 * (read 1 ...........EEEEE)   (EEEEE...... read 2 ......EEEEE) ...
3060 			 *         dropped samples --^
3061 			 */
3062 			/* END CSTYLED */
3063 			if (flags && sofar != 0) {
3064 				/*
3065 				 * Don't verify across overlaps that are too
3066 				 * close to one another
3067 				 */
3068 				int	i = 0;
3069 
3070 				for (i = -MIN_WORDS_OVERLAP / 2; i < MIN_WORDS_OVERLAP / 2; i++)
3071 					flags[sofar * CD_FRAMEWORDS + i] |= FLAGS_EDGE;
3072 			}
3073 			if (flags && p->enable & PARANOIA_MODE_C2CHECK) {
3074 				c2_errcopy(flags + sofar * CD_FRAMEWORDS,
3075 					buffer + sofar * p->sectwords, thisread, &c2errs);
3076 			}
3077 			p->lastread = adjread + secread;
3078 
3079 			if (adjread + secread - 1 == p->current_lastsector)
3080 				new->lastsector = -1;
3081 
3082 			if (callback) {
3083 				(*callback) (thisread, PARANOIA_CB_SECS);
3084 				if (p->enable & PARANOIA_MODE_C2CHECK) {
3085 					if (c2errs.c2errs > 0)
3086 						(*callback) (adjread * CD_FRAMEWORDS, PARANOIA_CB_C2ERR);
3087 					if (c2errs.c2bytes > 0)
3088 						(*callback) (c2errs.c2bytes, PARANOIA_CB_C2BYTES);
3089 					if (c2errs.c2secs > 0)
3090 						(*callback) (c2errs.c2secs, PARANOIA_CB_C2SECS);
3091 					if (c2errs.c2maxerrs > 0)
3092 						(*callback) (c2errs.c2maxerrs, PARANOIA_CB_C2MAXERRS);
3093 				}
3094 				(*callback) ((adjread + secread - 1) * CD_FRAMEWORDS, PARANOIA_CB_READ);
3095 			}
3096 
3097 			sofar += secread;
3098 			readat = adjread + secread;
3099 		} else if (readat < p->current_firstsector) {
3100 			readat += sectatonce;
3101 						/*
3102 						 * due to being before the
3103 						 * readable area
3104 						 */
3105 		} else {
3106 			break;	/* due to being past the readable area */
3107 		}
3108 
3109 	/*
3110 	 * Keep issuing read requests until we've read enough sectors to
3111 	 * exhaust the drive's cache.
3112 	 */
3113 	}
3114 
3115 	/*
3116 	 * If we managed to read any sectors at all (anyflag), fill in the
3117 	 * previously allocated c_block with the read data.  Otherwise, free
3118 	 * our buffers, dispose of the c_block, and return NULL.
3119 	 */
3120 	if (anyflag) {
3121 		new->vector = _pmalloc(totaltoread * CD_FRAMESIZE_RAW);
3122 		DBG_MALLOC_MARK(new->vector);
3123 		if (p->enable & PARANOIA_MODE_C2CHECK) {
3124 			c2_audiocopy(new->vector, buffer, totaltoread);
3125 		} else {
3126 			memcpy(new->vector, buffer, totaltoread * CD_FRAMESIZE_RAW);
3127 		}
3128 		_pfree(bufbase);
3129 
3130 		new->begin = firstread * CD_FRAMEWORDS - p->dyndrift;
3131 		new->size = sofar * CD_FRAMEWORDS;
3132 		new->flags = flags;
3133 		new->fsize = fsize;
3134 	} else {
3135 		if (new)
3136 			free_c_block(new);
3137 		if (bufbase)
3138 			_pfree(bufbase);
3139 		if (flags)
3140 			_pfree(flags);
3141 		new = NULL;
3142 		bufbase = NULL;
3143 		flags = NULL;
3144 	}
3145 	return (new);
3146 }
3147 
3148 /*
3149  * paranoia_read(), paranoia_read_limited()
3150  *
3151  * These functions "read" the next sector of audio data and returns
3152  * a pointer to a full sector of verified samples (2352 bytes).
3153  *
3154  * The returned buffer is *not* to be freed by the caller.  It will
3155  * persist only until the next call to paranoia_read() for this p
3156  */
3157 EXPORT Int16_t *
paranoia_read(p,callback)3158 paranoia_read(p, callback)
3159 	cdrom_paranoia	*p;
3160 	void		(*callback) __PR((long, int));
3161 {
3162 	return (paranoia_read_limited(p, callback, 20));
3163 }
3164 
3165 /*
3166  * I added max_retry functionality this way in order to avoid breaking any
3167  * old apps using the nerw libs.  cdparanoia 9.8 will need the updated libs,
3168  * but nothing else will require it.
3169  */
3170 EXPORT Int16_t *
paranoia_read_limited(p,callback,max_retries)3171 paranoia_read_limited(p, callback, max_retries)
3172 	cdrom_paranoia	*p;
3173 	void		(*callback) __PR((long, int));
3174 	int		max_retries;
3175 {
3176 	long		beginword = p->cursor * (CD_FRAMEWORDS);
3177 	long		endword = beginword + CD_FRAMEWORDS;
3178 	long		retry_count = 0;
3179 	long		lastend = -2;
3180 	root_block	*root = &p->root;
3181 
3182 	if (beginword > p->root.returnedlimit)
3183 		p->root.returnedlimit = beginword;
3184 	lastend = re(root);
3185 
3186 	/*
3187 	 * Since paranoia reads and verifies chunks of data at a time
3188 	 * (which it needs to counteract dropped samples and inaccurate
3189 	 * seeking), the requested samples may already be in memory,
3190 	 * in the verified "root".
3191 	 *
3192 	 * The root is where paranoia stores samples that have been
3193 	 * verified and whose position has been accurately determined.
3194 	 *
3195 	 * First, is the sector we want already in the root?
3196 	 */
3197 	while (rv(root) == NULL ||
3198 		rb(root) > beginword ||
3199 		(re(root) < endword + p->maxdynoverlap &&
3200 			p->enable & (PARANOIA_MODE_VERIFY | PARANOIA_MODE_OVERLAP)) ||
3201 		re(root) < endword) {
3202 
3203 		/*
3204 		 * Nope; we need to build or extend the root verified range
3205 		 *
3206 		 * We may have already read the necessary samples and placed
3207 		 * them into verified fragments, but not yet merged them into
3208 		 * the verified root.  We'll check that before we actually
3209 		 * try to read data from the drive.
3210 		 */
3211 		if (p->enable & (PARANOIA_MODE_VERIFY | PARANOIA_MODE_OVERLAP)) {
3212 			/*
3213 			 * We need to make sure our memory consumption doesn't grow
3214 			 * to the size of the whole CD.  But at the same time, we
3215 			 * need to hang onto some of the verified data (even perhaps
3216 			 * data that's already been returned by paranoia_read()) in
3217 			 * order to verify and accurately position future samples.
3218 			 *
3219 			 * Therefore, we free some of the verified data that we
3220 			 * no longer need.
3221 			 */
3222 			i_paranoia_trim(p, beginword, endword);
3223 			recover_cache(p);
3224 			if (rb(root) != -1 && p->root.lastsector) {
3225 				i_end_case(p, endword + p->maxdynoverlap,
3226 					callback);
3227 			} else {
3228 				/*
3229 				 * Merge as many verified fragments into the verified root
3230 				 * as we need to satisfy the pending request.  We may
3231 				 * not have all the fragments we need, in which case we'll
3232 				 * read data from the CD further below.
3233 				 */
3234 				i_stage2(p, beginword,
3235 					endword + p->maxdynoverlap,
3236 					callback);
3237 			}
3238 		} else
3239 			i_end_case(p, endword + p->maxdynoverlap,
3240 				callback);	/* only trips if we're already */
3241 						/* done */
3242 
3243 		/*
3244 		 * If we were able to fill the verified root with data already
3245 		 * in memory, we don't need to read any more data from the drive.
3246 		 */
3247 		if (!(rb(root) == -1 || rb(root) > beginword ||
3248 				re(root) < endword + p->maxdynoverlap))
3249 			break;
3250 
3251 		/*
3252 		 * Hmm, need more.  Read another block
3253 		 */
3254 		{
3255 			/*
3256 			 * Read many sectors, encompassing at least the requested words.
3257 			 *
3258 			 * The returned c_block encapsulates these sectors' data and
3259 			 * sector number.  The sectors come come from multiple low-level
3260 			 * read requests, and words which were near the boundaries of
3261 			 * those read requests are marked with FLAGS_EDGE.
3262 			 */
3263 			c_block	*new = i_read_c_block(p, beginword, endword, callback);
3264 
3265 			if (new) {
3266 				if (p->enable & (PARANOIA_MODE_OVERLAP | PARANOIA_MODE_VERIFY)) {
3267 					/*
3268 					 * If we need to verify these samples, send them to
3269 					 * stage 1 verification, which will add verified samples
3270 					 * to the set of verified fragments.  Verified fragments
3271 					 * will be merged into the verified root during stage 2
3272 					 * overlap analysis.
3273 					 */
3274 					if (p->enable & PARANOIA_MODE_VERIFY)
3275 						i_stage1(p, new, callback);
3276 					else {
3277 						/*
3278 						 * If we're only doing overlapping reads (no stage 1
3279 						 * verification), consider each low-level read in the
3280 						 * c_block to be a verified fragment.  We exclude the
3281 						 * edges from these fragments to enforce the requirement
3282 						 * that we overlap the reads by the minimum amount.
3283 						 * These fragments will be merged into the verified
3284 						 * root during stage 2 overlap analysis.
3285 						 */
3286 						/*
3287 						 * just make v_fragments from the
3288 						 * boundary information.
3289 						 */
3290 						long	begin = 0,
3291 							end = 0;
3292 
3293 						while (begin < cs(new)) {
3294 							while (end < cs(new) && (new->flags[begin] & FLAGS_EDGE))
3295 								begin++;
3296 							end = begin + 1;
3297 							while (end < cs(new) && (new->flags[end] & FLAGS_EDGE) == 0)
3298 								end++;
3299 							{
3300 								new_v_fragment(p, new, begin + cb(new),
3301 									end + cb(new),
3302 									(new->lastsector && cb(new) + end == ce(new)));
3303 							}
3304 							begin = end;
3305 						}
3306 					}
3307 
3308 				} else {
3309 					/*
3310 					 * If we're not doing any overlapping reads or verification
3311 					 * of data, skip over the stage 1 and stage 2 verification and
3312 					 * promote this c_block directly to the current "verified" root.
3313 					 */
3314 					if (p->root.vector)
3315 						i_cblock_destructor(p->root.vector);
3316 					free_elem(new->e, 0);
3317 					p->root.vector = new;
3318 
3319 					i_end_case(p, endword + p->maxdynoverlap,
3320 						callback);
3321 
3322 				}
3323 			}
3324 		}
3325 
3326 		/*
3327 		 * Are we doing lots of retries?  **********************
3328 		 *
3329 		 * Check unaddressable sectors first.  There's no backoff
3330 		 * here; jiggle and minimum backseek handle that for us
3331 		 */
3332 		if (rb(root) != -1 && lastend + 588 < re(root)) {
3333 			/*
3334 			 * If we've not grown half a sector
3335 			 */
3336 			lastend = re(root);
3337 			retry_count = 0;
3338 		} else {
3339 			/*
3340 			 * increase overlap or bail
3341 			 */
3342 			retry_count++;
3343 
3344 			/*
3345 			 * The better way to do this is to look at how many
3346 			 * actual matches we're getting and what kind of gap
3347 			 */
3348 			if (retry_count % 5 == 0) {
3349 				if (p->dynoverlap == p->maxdynoverlap ||
3350 						retry_count == max_retries) {
3351 					verify_skip_case(p, callback);
3352 					retry_count = 0;
3353 				} else {
3354 					if (p->stage1.offpoints != -1) {	/* hack */
3355 						p->dynoverlap *= 1.5;
3356 						if (p->dynoverlap > p->maxdynoverlap)
3357 							p->dynoverlap = p->maxdynoverlap;
3358 						if (callback)
3359 							(*callback) (p->dynoverlap, PARANOIA_CB_OVERLAP);
3360 					}
3361 				}
3362 			}
3363 		}
3364 
3365 		/*
3366 		 * Having read data from the drive and placed it into verified
3367 		 * fragments, we now loop back to try to extend the root with
3368 		 * the newly loaded data.  Alternatively, if the root already
3369 		 * contains the needed data, we'll just fall through.
3370 		 */
3371 	}
3372 	p->cursor++;
3373 
3374 	/*
3375 	 * Return a pointer into the verified root.  Thus, the caller
3376 	 * must NOT free the returned pointer!
3377 	 */
3378 	return (rv(root) + (beginword - rb(root)));
3379 }
3380 
3381 /*
3382  * a temporary hack
3383  */
3384 EXPORT void
paranoia_overlapset(p,overlap)3385 paranoia_overlapset(p, overlap)
3386 	cdrom_paranoia	*p;
3387 	long		overlap;
3388 {
3389 	p->dynoverlap = overlap * CD_FRAMEWORDS;
3390 	p->stage1.offpoints = -1;
3391 }
3392