/*
 * bzip2 is written by Julian Seward <jseward@bzip.org>.
 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
 * See README and LICENSE files in this directory for more information.
 */

/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
This file is part of bzip2/libbzip2, a program and library for
lossless, block-sorting data compression.

bzip2/libbzip2 version 1.0.4 of 20 December 2006
Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>

Please read the WARNING, DISCLAIMER and PATENTS sections in the
README file.

This program is released under the terms of the license contained
in the file LICENSE.
------------------------------------------------------------------ */

/* CHANGES
 * 0.9.0    -- original version.
 * 0.9.0a/b -- no changes in this file.
 * 0.9.0c   -- changed setting of nGroups in sendMTFValues()
 *             so as to do a bit better on small files
*/

/* #include "bzlib_private.h" */

/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void BZ2_bsInitWrite(EState* s)
{
	s->bsLive = 0;
	s->bsBuff = 0;
}
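
/*
 * Invariant for the bit stream writer (follows from the code below):
 * bsBuff holds the pending bits left-justified, i.e. in its most
 * significant end, and bsLive counts how many of its top bits are
 * valid.  Completed bytes are peeled off the top as
 * (uint8_t)(bsBuff >> 24).
 */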


/*---------------------------------------------------*/
static NOINLINE
void bsFinishWrite(EState* s)
{
	while (s->bsLive > 0) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
}


/*---------------------------------------------------*/
static
/* Helps only on level 5; on other levels it hurts(?) */
#if CONFIG_BZIP2_FAST >= 5
ALWAYS_INLINE
#endif
void bsW(EState* s, int32_t n, uint32_t v)
{
	while (s->bsLive >= 8) {
		s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
		s->numZ++;
		s->bsBuff <<= 8;
		s->bsLive -= 8;
	}
	s->bsBuff |= (v << (32 - s->bsLive - n));
	s->bsLive += n;
}
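
/*
 * Worked example: starting from an empty buffer (bsLive == 0),
 * bsW(s, 3, 5) ORs in 5 << (32 - 0 - 3), i.e. binary 101 at bits
 * 31..29, and sets bsLive = 3.  A following bsW(s, 2, 1) places
 * binary 01 at bits 28..27 (bsLive = 5).  Only once bsLive reaches
 * 8 does a later call flush whole bytes from the top of bsBuff
 * into s->zbits[].
 */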


/*---------------------------------------------------*/
static
void bsPutU32(EState* s, unsigned u)
{
	bsW(s, 8, (u >> 24) & 0xff);
	bsW(s, 8, (u >> 16) & 0xff);
	bsW(s, 8, (u >>  8) & 0xff);
	bsW(s, 8,  u        & 0xff);
}


/*---------------------------------------------------*/
static
void bsPutU16(EState* s, unsigned u)
{
	bsW(s, 8, (u >>  8) & 0xff);
	bsW(s, 8,  u        & 0xff);
}


/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
static
void makeMaps_e(EState* s)
{
	int i;
	s->nInUse = 0;
	for (i = 0; i < 256; i++) {
		if (s->inUse[i]) {
			s->unseqToSeq[i] = s->nInUse;
			s->nInUse++;
		}
	}
}
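
/*
 * Example: if the block uses only the bytes 'a' (0x61) and 'c' (0x63),
 * then after makeMaps_e() nInUse == 2, unseqToSeq[0x61] == 0 and
 * unseqToSeq[0x63] == 1.  The MTF and Huffman stages below work
 * entirely in this dense 0..nInUse-1 alphabet (plus RUNA/RUNB and EOB).
 */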


/*---------------------------------------------------*/
static NOINLINE
void generateMTFValues(EState* s)
{
	uint8_t yy[256];
	int32_t i, j;
	int32_t zPend;
	int32_t wr;
	int32_t EOB;

	/*
	 * After sorting (e.g. here),
	 * s->arr1[0 .. s->nblock-1] holds sorted order,
	 * and
	 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
	 * holds the original block data.
	 *
	 * The first thing to do is generate the MTF values,
	 * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1].
	 *
	 * Because there are never more MTF values than block
	 * values, ptr values in this area are overwritten
	 * with MTF values only when they are no longer needed.
	 *
	 * The final compressed bitstream is generated into the
	 * area starting at &((uint8_t*)s->arr2)[s->nblock]
	 *
	 * These storage aliases are set up in bzCompressInit(),
	 * except for the last one, which is arranged in
	 * compressBlock().
	 */
	uint32_t* ptr   = s->ptr;
	uint8_t*  block = s->block;
	uint16_t* mtfv  = s->mtfv;

	makeMaps_e(s);
	EOB = s->nInUse+1;

	for (i = 0; i <= EOB; i++)
		s->mtfFreq[i] = 0;

	wr = 0;
	zPend = 0;
	for (i = 0; i < s->nInUse; i++)
		yy[i] = (uint8_t) i;

	for (i = 0; i < s->nblock; i++) {
		uint8_t ll_i;
		AssertD(wr <= i, "generateMTFValues(1)");
		j = ptr[i] - 1;
		if (j < 0)
			j += s->nblock;
		ll_i = s->unseqToSeq[block[j]];
		AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");

		if (yy[0] == ll_i) {
			zPend++;
		} else {
			if (zPend > 0) {
				zPend--;
				while (1) {
					if (zPend & 1) {
						mtfv[wr] = BZ_RUNB; wr++;
						s->mtfFreq[BZ_RUNB]++;
					} else {
						mtfv[wr] = BZ_RUNA; wr++;
						s->mtfFreq[BZ_RUNA]++;
					}
					if (zPend < 2) break;
					zPend = (uint32_t)(zPend - 2) / 2;
					/* bbox: unsigned div is easier */
				}
				zPend = 0;
			}
			{
				register uint8_t  rtmp;
				register uint8_t* ryy_j;
				register uint8_t  rll_i;
				rtmp  = yy[1];
				yy[1] = yy[0];
				ryy_j = &(yy[1]);
				rll_i = ll_i;
				while (rll_i != rtmp) {
					register uint8_t rtmp2;
					ryy_j++;
					rtmp2  = rtmp;
					rtmp   = *ryy_j;
					*ryy_j = rtmp2;
				}
				yy[0] = rtmp;
				j = ryy_j - &(yy[0]);
				mtfv[wr] = j+1;
				wr++;
				s->mtfFreq[j+1]++;
			}
		}
	}

	if (zPend > 0) {
		zPend--;
		while (1) {
			if (zPend & 1) {
				mtfv[wr] = BZ_RUNB;
				wr++;
				s->mtfFreq[BZ_RUNB]++;
			} else {
				mtfv[wr] = BZ_RUNA;
				wr++;
				s->mtfFreq[BZ_RUNA]++;
			}
			if (zPend < 2)
				break;
			zPend = (uint32_t)(zPend - 2) / 2;
			/* bbox: unsigned div is easier */
		}
		zPend = 0;
	}

	mtfv[wr] = EOB;
	wr++;
	s->mtfFreq[EOB]++;

	s->nMTF = wr;
}
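
/*
 * The zPend loops above encode a run of N zeroes (N consecutive hits
 * on the front of the MTF list) as the digits of N in "bijective
 * base 2", least significant digit first, with RUNA standing for
 * digit value 1 and RUNB for digit value 2.  For example:
 *   N=1 -> RUNA          (1)
 *   N=2 -> RUNB          (2)
 *   N=3 -> RUNA RUNA     (1 + 1*2)
 *   N=4 -> RUNB RUNA     (2 + 1*2)
 *   N=5 -> RUNA RUNB     (1 + 2*2)
 * This is why zPend is decremented once before the loop and then
 * reduced by (zPend - 2) / 2 per emitted digit.
 */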


/*---------------------------------------------------*/
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

static NOINLINE
void sendMTFValues(EState* s)
{
	int32_t v, t, i, j, gs, ge, bt, bc, iter;
	int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
	int32_t nGroups;

	/*
	 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals only used in this proc.
	 * Made global to keep stack frame size small.
	 */
#define code     sendMTFValues__code
#define rfreq    sendMTFValues__rfreq
#define len_pack sendMTFValues__len_pack

	uint16_t cost[BZ_N_GROUPS];
	int32_t  fave[BZ_N_GROUPS];

	uint16_t* mtfv = s->mtfv;

	alphaSize = s->nInUse + 2;
	for (t = 0; t < BZ_N_GROUPS; t++)
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;

	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	if (s->nMTF < 200)  nGroups = 2; else
	if (s->nMTF < 600)  nGroups = 3; else
	if (s->nMTF < 1200) nGroups = 4; else
	if (s->nMTF < 2400) nGroups = 5; else
	nGroups = 6;

	/*--- Generate an initial set of coding tables ---*/
	{
		int32_t nPart, remF, tFreq, aFreq;

		nPart = nGroups;
		remF  = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			tFreq = remF / nPart;
			ge = gs - 1;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize-1) {
				ge++;
				aFreq += s->mtfFreq[ge];
			}

			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}

			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;

			nPart--;
			gs = ge + 1;
			remF -= aFreq;
		}
	}
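
	/*
	 * Illustration of the partitioning above: with nMTF == 1000 we get
	 * nGroups == 4, so each pass aims for remF/nPart ~= 250 symbols'
	 * worth of frequency and extends ge until that much of mtfFreq[]
	 * has been swallowed.  Table nPart-1 then gets a cheap cost
	 * (BZ_LESSER_ICOST) for symbols gs..ge and an expensive one
	 * (BZ_GREATER_ICOST) elsewhere, so each initial table specialises
	 * in one contiguous slice of the alphabet.
	 */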

	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++)
			fave[t] = 0;

		for (t = 0; t < nGroups; t++)
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;

#if CONFIG_BZIP2_FAST >= 5
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
		if (nGroups == 6) {
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif
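
		/*
		 * The len_pack trick used by the fast path: each len_pack[v]
		 * word carries two code lengths in its low and high 16-bit
		 * halves, so the six per-table costs of a symbol can be
		 * accumulated with three 32-bit adds instead of six.  This is
		 * safe from carries between halves because a group holds at
		 * most 50 symbols with lengths bounded well below 2^16/50.
		 */
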
		nSelectors = 0;
		gs = 0;
		while (1) {
			/*--- Set group start & end marks. --*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF-1;

			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#if CONFIG_BZIP2_FAST >= 5
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
	icv = mtfv[gs+(nn)]; \
	cost01 += s->len_pack[icv][0]; \
	cost23 += s->len_pack[icv][1]; \
	cost45 += s->len_pack[icv][2];
				BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
				BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					uint16_t icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}
			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			/*bc = 999999999;*/
			/*bt = -1;*/
			bc = cost[0];
			bt = 0;
			for (t = 1 /*0*/; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			fave[bt]++;
			s->selector[nSelectors] = bt;
			nSelectors++;

			/*
			 * Increment the symbol frequencies for the selected table.
			 */
/* 1% faster compress. +800 bytes */
#if CONFIG_BZIP2_FAST >= 4
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
				BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge + 1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge + 1; */
			}
		}

		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3.  See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}

	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);

	/*--- Compute MTF values for the selectors. ---*/
	{
		uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			}
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	}
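
	/*
	 * Example of the selector MTF above: selectors 1,1,0,2 with the
	 * initial list pos = {0,1,2,...} encode as 1,0,1,2 -- the first 1
	 * is found at position 1 (and moves to the front), the second 1
	 * is then at position 0, and so on.
	 */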

	/*--- Assign actual codes for the tables. --*/
	for (t = 0; t < nGroups; t++) {
		minLen = 32;
		maxLen = 0;
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}

	/*--- Transmit the mapping table. ---*/
	{
		/* bbox: optimized a bit more than in bzip2 */
		int inUse16 = 0;
		for (i = 0; i < 16; i++) {
			if (sizeof(long) <= 4) {
				inUse16 = inUse16*2 +
					((*(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 0])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 4])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 8])
					| *(bb__aliased_uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
			} else { /* Our CPU can do better */
				inUse16 = inUse16*2 +
					((*(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 0])
					| *(bb__aliased_uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
			}
		}

		bsW(s, 16, inUse16);

		inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
		for (i = 0; i < 16; i++) {
			if (inUse16 < 0) {
				unsigned v16 = 0;
				for (j = 0; j < 16; j++)
					v16 = v16*2 + s->inUse[i * 16 + j];
				bsW(s, 16, v16);
			}
			inUse16 <<= 1;
		}
	}
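
	/*
	 * Example of the two-level map above: a block consisting only of
	 * lowercase ASCII letters (0x61..0x7a) occupies ranges 6 and 7 of
	 * the 16 sixteen-byte ranges, so inUse16 == 0x0300 and only two
	 * detailed 16-bit maps follow the summary word.
	 */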

	/*--- Now the selectors. ---*/
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW(s, 1, 1);
		bsW(s, 1, 0);
	}
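
	/*
	 * The selectors are sent in unary: an MTF value of k is written as
	 * k '1' bits followed by a '0', e.g. 0 -> "0", 3 -> "1110".
	 */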

	/*--- Now the coding tables. ---*/
	for (t = 0; t < nGroups; t++) {
		int32_t curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ }
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ }
			bsW(s, 1, 0);
		}
	}
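
	/*
	 * The code lengths are delta-encoded: starting from the 5-bit
	 * initial length, "10" means increment, "11" means decrement, and
	 * a single "0" terminates the current symbol.  E.g. going from
	 * curr == 5 to a length of 7 emits "10 10 0"; staying at 5 emits
	 * just "0".
	 */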

	/*--- And finally, the block data proper ---*/
	selCtr = 0;
	gs = 0;
	while (1) {
		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF-1;
		AssertH(s->selector[selCtr] < nGroups, 3006);

/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			uint8_t* s_len_sel_selCtr  = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
	mtfv_i = mtfv[gs+(nn)]; \
	bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
			BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge+1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* code is a bit bigger, but moves the multiply out of the loop */
			uint8_t* s_len_sel_selCtr  = &(s->len [s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);
#undef code
#undef rfreq
#undef len_pack
}


/*---------------------------------------------------*/
static
void BZ2_compressBlock(EState* s, int is_last_block)
{
	if (s->nblock > 0) {
		BZ_FINALISE_CRC(s->blockCRC);
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		if (s->blockNo > 1)
			s->numZ = 0;

		BZ2_blockSort(s);
	}

	s->zbits = &((uint8_t*)s->arr2)[s->nblock];

	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutU8(s, BZ_HDR_B);*/
		/*bsPutU8(s, BZ_HDR_Z);*/
		/*bsPutU8(s, BZ_HDR_h);*/
		/*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}

	if (s->nblock > 0) {
		/*bsPutU8(s, 0x31);*/
		/*bsPutU8(s, 0x41);*/
		/*bsPutU8(s, 0x59);*/
		/*bsPutU8(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		/*bsPutU8(s, 0x53);*/
		/*bsPutU8(s, 0x59);*/
		bsPutU16(s, 0x5359);

		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary.  So always set
		 * the randomised bit to 'no'.  Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW(s, 1, 0);

		bsW(s, 24, s->origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}

	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/*bsPutU8(s, 0x17);*/
		/*bsPutU8(s, 0x72);*/
		/*bsPutU8(s, 0x45);*/
		/*bsPutU8(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		/*bsPutU8(s, 0x50);*/
		/*bsPutU8(s, 0x90);*/
		bsPutU16(s, 0x5090);
		bsPutU32(s, s->combinedCRC);
		bsFinishWrite(s);
	}
}
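
/*
 * Resulting stream layout, as emitted above:
 *   "BZh" + '1'..'9'                          stream header, once
 *   0x314159265359 (48 bits) + blockCRC       per-block header
 *   <randomised bit, origPtr, map, selectors, trees, symbol data>
 *   0x177245385090 (48 bits) + combinedCRC    stream trailer
 * The 48-bit magics are the leading decimal digits of pi and of
 * sqrt(pi), packed one digit per hex nibble.
 */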
673 
674 
675 /*-------------------------------------------------------------*/
676 /*--- end                                        compress.c ---*/
677 /*-------------------------------------------------------------*/
678