/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/* ------------------------------------------------------------------
   This file is part of bzip2/libbzip2, a program and library for
   lossless, block-sorting data compression.

   bzip2/libbzip2 version 1.0.5 of 10 December 2007
   Copyright (C) 1996-2007 Julian Seward <jseward@bzip.org>

   Please read the WARNING, DISCLAIMER and PATENTS sections in the
   README file.

   This program is released under the terms of the license contained
   in the file LICENSE.
   ------------------------------------------------------------------ */

/* CHANGES
   0.9.0    -- original version.
   0.9.0a/b -- no changes in this file.
   0.9.0c   -- changed setting of nGroups in sendMTFValues()
               so as to do a bit better on small files
*/

#include "bzlib_private.h"

/*---------------------------------------------------*/
/*--- Bit stream I/O                              ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
/* Reset the bit-stream writer state: an empty 32-bit accumulator
   with zero live (pending) bits.  Must be called before the first
   bsW() on a stream. */
void BZ2_bsInitWrite ( EState* s )
{
   s->bsBuff = 0;
   s->bsLive = 0;
}

/*---------------------------------------------------*/
42 static
bsFinishWrite(EState * s)43 void bsFinishWrite ( EState* s )
44 {
45 while (s->bsLive > 0) {
46 s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
47 s->numZ++;
48 s->bsBuff <<= 8;
49 s->bsLive -= 8;
50 }
51 }

/*---------------------------------------------------*/
/* Flush whole bytes out of the bit accumulator into s->zbits so
   that fewer than 8 bits remain live, making room for a subsequent
   bsW() write.  NOTE(review): the nz argument is not used by the
   expansion; flushing down to < 8 live bits always leaves enough
   space for the at-most-24-bit writes performed in this file. */
#define bsNEEDW(nz)                           \
{                                             \
   while (s->bsLive >= 8) {                   \
      s->zbits[s->numZ]                       \
         = (UChar)(s->bsBuff >> 24);          \
      s->numZ++;                              \
      s->bsBuff <<= 8;                        \
      s->bsLive -= 8;                         \
   }                                          \
}

/*---------------------------------------------------*/
66 static
67 __inline__
bsW(EState * s,Int32 n,UInt32 v)68 void bsW ( EState* s, Int32 n, UInt32 v )
69 {
70 bsNEEDW ( n );
71 s->bsBuff |= (v << (32 - s->bsLive - n));
72 s->bsLive += n;
73 }

/*---------------------------------------------------*/
76 static
bsPutUInt32(EState * s,UInt32 u)77 void bsPutUInt32 ( EState* s, UInt32 u )
78 {
79 bsW ( s, 8, (u >> 24) & 0xffL );
80 bsW ( s, 8, (u >> 16) & 0xffL );
81 bsW ( s, 8, (u >> 8) & 0xffL );
82 bsW ( s, 8, u & 0xffL );
83 }

/*---------------------------------------------------*/
/* Write a single byte to the bit stream (8-bit bsW). */
static
void bsPutUChar ( EState* s, UChar c )
{
   bsW( s, 8, (UInt32)c );
}

/*---------------------------------------------------*/
/*--- The back end proper                         ---*/
/*---------------------------------------------------*/

/*---------------------------------------------------*/
97 static
makeMaps_e(EState * s)98 void makeMaps_e ( EState* s )
99 {
100 Int32 i;
101 s->nInUse = 0;
102 for (i = 0; i < 256; i++)
103 if (s->inUse[i]) {
104 s->unseqToSeq[i] = s->nInUse;
105 s->nInUse++;
106 }
107 }

/*---------------------------------------------------*/
110 static
generateMTFValues(EState * s)111 void generateMTFValues ( EState* s )
112 {
113 UChar yy[256];
114 Int32 i, j;
115 Int32 zPend;
116 Int32 wr;
117 Int32 EOB;
118
119 /*
120 After sorting (eg, here),
121 s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
122 and
123 ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
124 holds the original block data.
125
126 The first thing to do is generate the MTF values,
127 and put them in
128 ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
129 Because there are strictly fewer or equal MTF values
130 than block values, ptr values in this area are overwritten
131 with MTF values only when they are no longer needed.
132
133 The final compressed bitstream is generated into the
134 area starting at
135 (UChar*) (&((UChar*)s->arr2)[s->nblock])
136
137 These storage aliases are set up in bzCompressInit(),
138 except for the last one, which is arranged in
139 compressBlock().
140 */
141 UInt32* ptr = s->ptr;
142 UChar* block = s->block;
143 UInt16* mtfv = s->mtfv;
144
145 makeMaps_e ( s );
146 EOB = s->nInUse+1;
147
148 for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;
149
150 wr = 0;
151 zPend = 0;
152 for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;
153
154 for (i = 0; i < s->nblock; i++) {
155 UChar ll_i;
156 AssertD ( wr <= i, "generateMTFValues(1)" );
157 j = ptr[i]-1; if (j < 0) j += s->nblock;
158 ll_i = s->unseqToSeq[block[j]];
159 AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );
160
161 if (yy[0] == ll_i) {
162 zPend++;
163 } else {
164
165 if (zPend > 0) {
166 zPend--;
167 while (True) {
168 if (zPend & 1) {
169 mtfv[wr] = BZ_RUNB; wr++;
170 s->mtfFreq[BZ_RUNB]++;
171 } else {
172 mtfv[wr] = BZ_RUNA; wr++;
173 s->mtfFreq[BZ_RUNA]++;
174 }
175 if (zPend < 2) break;
176 zPend = (zPend - 2) / 2;
177 };
178 zPend = 0;
179 }
180 {
181 register UChar rtmp;
182 register UChar* ryy_j;
183 register UChar rll_i;
184 rtmp = yy[1];
185 yy[1] = yy[0];
186 ryy_j = &(yy[1]);
187 rll_i = ll_i;
188 while ( rll_i != rtmp ) {
189 register UChar rtmp2;
190 ryy_j++;
191 rtmp2 = rtmp;
192 rtmp = *ryy_j;
193 *ryy_j = rtmp2;
194 };
195 yy[0] = rtmp;
196 j = ryy_j - &(yy[0]);
197 mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
198 }
199
200 }
201 }
202
203 if (zPend > 0) {
204 zPend--;
205 while (True) {
206 if (zPend & 1) {
207 mtfv[wr] = BZ_RUNB; wr++;
208 s->mtfFreq[BZ_RUNB]++;
209 } else {
210 mtfv[wr] = BZ_RUNA; wr++;
211 s->mtfFreq[BZ_RUNA]++;
212 }
213 if (zPend < 2) break;
214 zPend = (zPend - 2) / 2;
215 };
216 zPend = 0;
217 }
218
219 mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;
220
221 s->nMTF = wr;
222 }
/*---------------------------------------------------*/
/* Nominal bit costs used when seeding the coding tables: symbols a
   table is meant to favour are "cheap", all others "dear". */
#define BZ_LESSER_ICOST  0
#define BZ_GREATER_ICOST 15

/* Huffman-code the MTF values in s->mtfv[0..s->nMTF-1] and emit the
   complete coded block body to the bit stream: the symbol-map,
   selectors, delta-coded table lengths, and the data itself.
   Up to 6 coding tables are trained by iterative refinement
   (BZ_N_ITERS passes), with one table selected per 50-symbol group. */
static
void sendMTFValues ( EState* s )
{
   Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
   Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
   Int32 nGroups, nBytes;

   /*--
   UChar  len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   is a global since the decoder also needs it.

   Int32  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   Int32  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   are also globals only used in this proc.
   Made global to keep stack frame size small.
   --*/

   UInt16 cost[BZ_N_GROUPS];
   Int32  fave[BZ_N_GROUPS];

   UInt16* mtfv = s->mtfv;

   if (s->verbosity >= 3)
      VPrintf3( "      %d in block, %d after MTF & 1-2 coding, "
                "%d+2 syms in use\n",
                s->nblock, s->nMTF, s->nInUse );

   /* Alphabet covers codes 0..EOB where EOB = nInUse+1 (set up in
      generateMTFValues), hence nInUse+2 symbols in all. */
   alphaSize = s->nInUse+2;
   for (t = 0; t < BZ_N_GROUPS; t++)
      for (v = 0; v < alphaSize; v++)
         s->len[t][v] = BZ_GREATER_ICOST;

   /*--- Decide how many coding tables to use ---*/
   AssertH ( s->nMTF > 0, 3001 );
   if (s->nMTF < 200)  nGroups = 2; else
   if (s->nMTF < 600)  nGroups = 3; else
   if (s->nMTF < 1200) nGroups = 4; else
   if (s->nMTF < 2400) nGroups = 5; else
                       nGroups = 6;

   /*--- Generate an initial set of coding tables ---*/
   {
      Int32 nPart, remF, tFreq, aFreq;

      /* Partition the alphabet into nGroups contiguous ranges of
         roughly equal total frequency; table k is seeded cheap on
         its range and dear elsewhere. */
      nPart = nGroups;
      remF  = s->nMTF;
      gs = 0;
      while (nPart > 0) {
         tFreq = remF / nPart;
         ge = gs-1;
         aFreq = 0;
         while (aFreq < tFreq && ge < alphaSize-1) {
            ge++;
            aFreq += s->mtfFreq[ge];
         }

         /* Nudge alternate boundaries back one symbol so adjacent
            partitions don't all err in the same direction. */
         if (ge > gs
             && nPart != nGroups && nPart != 1
             && ((nGroups-nPart) % 2 == 1)) {
            aFreq -= s->mtfFreq[ge];
            ge--;
         }

         if (s->verbosity >= 3)
            VPrintf5( "      initial group %d, [%d .. %d], "
                      "has %d syms (%4.1f%%)\n",
                      nPart, gs, ge, aFreq,
                      (100.0 * (float)aFreq) / (float)(s->nMTF) );

         for (v = 0; v < alphaSize; v++)
            if (v >= gs && v <= ge)
               s->len[nPart-1][v] = BZ_LESSER_ICOST; else
               s->len[nPart-1][v] = BZ_GREATER_ICOST;

         nPart--;
         gs = ge+1;
         remF -= aFreq;
      }
   }

   /*---
      Iterate up to BZ_N_ITERS times to improve the tables.
   ---*/
   for (iter = 0; iter < BZ_N_ITERS; iter++) {

      for (t = 0; t < nGroups; t++) fave[t] = 0;

      for (t = 0; t < nGroups; t++)
         for (v = 0; v < alphaSize; v++)
            s->rfreq[t][v] = 0;

      /*---
        Set up an auxiliary length table which is used to fast-track
        the common case (nGroups == 6).
        Each len_pack entry packs two tables' lengths into one 32-bit
        word (low/high 16 bits), so the 6 per-table costs can be
        accumulated with 3 additions per symbol.
      ---*/
      if (nGroups == 6) {
         for (v = 0; v < alphaSize; v++) {
            s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
            s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
            s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
         }
      }

      nSelectors = 0;
      totc = 0;
      gs = 0;
      while (True) {

         /*--- Set group start & end marks. --*/
         if (gs >= s->nMTF) break;
         ge = gs + BZ_G_SIZE - 1;
         if (ge >= s->nMTF) ge = s->nMTF-1;

         /*--
            Calculate the cost of this group as coded
            by each of the coding tables.
         --*/
         for (t = 0; t < nGroups; t++) cost[t] = 0;

         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            register UInt32 cost01, cost23, cost45;
            register UInt16 icv;
            cost01 = cost23 = cost45 = 0;

#           define BZ_ITER(nn)                \
               icv = mtfv[gs+(nn)];           \
               cost01 += s->len_pack[icv][0]; \
               cost23 += s->len_pack[icv][1]; \
               cost45 += s->len_pack[icv][2]; \

            BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
            BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
            BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
            BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
            BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
            BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
            BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
            BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
            BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
            BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);

#           undef BZ_ITER

            /* Unpack the three accumulators back into six costs. */
            cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
            cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
            cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) {
               UInt16 icv = mtfv[i];
               for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
            }
         }

         /*--
            Find the coding table which is best for this group,
            and record its identity in the selector table.
         --*/
         bc = 999999999; bt = -1;
         for (t = 0; t < nGroups; t++)
            if (cost[t] < bc) { bc = cost[t]; bt = t; };
         totc += bc;
         fave[bt]++;
         s->selector[nSelectors] = bt;
         nSelectors++;

         /*--
            Increment the symbol frequencies for the selected table.
         --*/
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/

#           define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++

            BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
            BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
            BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
            BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
            BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
            BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
            BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
            BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
            BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
            BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);

#           undef BZ_ITUR

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++)
               s->rfreq[bt][ mtfv[i] ]++;
         }

         gs = ge+1;
      }
      if (s->verbosity >= 3) {
         VPrintf2 ( "      pass %d: size is %d, grp uses are ",
                   iter+1, totc/8 );
         for (t = 0; t < nGroups; t++)
            VPrintf1 ( "%d ", fave[t] );
         VPrintf0 ( "\n" );
      }

      /*--
        Recompute the tables based on the accumulated frequencies.
      --*/
      /* maxLen was changed from 20 to 17 in bzip2-1.0.3.  See
         comment in huffman.c for details. */
      for (t = 0; t < nGroups; t++)
         BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]),
                                 alphaSize, 17 /*20*/ );
   }

   /* nGroups must fit in the 3-bit field written below; nSelectors
      must fit in its 15-bit field and cannot exceed one selector per
      BZ_G_SIZE symbols of the largest (900000-byte) block. */
   AssertH( nGroups < 8, 3002 );
   AssertH( nSelectors < 32768 &&
            nSelectors <= (2 + (900000 / BZ_G_SIZE)),
            3003 );

   /*--- Compute MTF values for the selectors. ---*/
   {
      UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
      for (i = 0; i < nGroups; i++) pos[i] = i;
      for (i = 0; i < nSelectors; i++) {
         ll_i = s->selector[i];
         j = 0;
         tmp = pos[j];
         while ( ll_i != tmp ) {
            j++;
            tmp2 = tmp;
            tmp = pos[j];
            pos[j] = tmp2;
         };
         pos[0] = tmp;
         s->selectorMtf[i] = j;
      }
   };

   /*--- Assign actual codes for the tables. --*/
   for (t = 0; t < nGroups; t++) {
      minLen = 32;
      maxLen = 0;
      for (i = 0; i < alphaSize; i++) {
         if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
         if (s->len[t][i] < minLen) minLen = s->len[t][i];
      }
      AssertH ( !(maxLen > 17 /*20*/ ), 3004 );
      AssertH ( !(minLen < 1),          3005 );
      BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]),
                          minLen, maxLen, alphaSize );
   }

   /*--- Transmit the mapping table. ---*/
   {
      /* Two-level bitmap: 16 bits say which 16-symbol chunks are
         non-empty, then one 16-bit map per non-empty chunk. */
      Bool inUse16[16];
      for (i = 0; i < 16; i++) {
          inUse16[i] = False;
          for (j = 0; j < 16; j++)
             if (s->inUse[i * 16 + j]) inUse16[i] = True;
      }

      nBytes = s->numZ;
      for (i = 0; i < 16; i++)
         if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);

      for (i = 0; i < 16; i++)
         if (inUse16[i])
            for (j = 0; j < 16; j++) {
               if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
            }

      if (s->verbosity >= 3)
         VPrintf1( "      bytes: mapping %d, ", s->numZ-nBytes );
   }

   /*--- Now the selectors. ---*/
   /* Each selector is sent MTF-coded in unary: j ones then a zero. */
   nBytes = s->numZ;
   bsW ( s, 3, nGroups );
   bsW ( s, 15, nSelectors );
   for (i = 0; i < nSelectors; i++) {
      for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
      bsW(s,1,0);
   }
   if (s->verbosity >= 3)
      VPrintf1( "selectors %d, ", s->numZ-nBytes );

   /*--- Now the coding tables. ---*/
   nBytes = s->numZ;

   for (t = 0; t < nGroups; t++) {
      /* Lengths are delta-coded: 5-bit start value, then "10" for
         +1, "11" for -1, and a 0 bit to terminate each symbol. */
      Int32 curr = s->len[t][0];
      bsW ( s, 5, curr );
      for (i = 0; i < alphaSize; i++) {
         while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
         while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
         bsW ( s, 1, 0 );
      }
   }

   if (s->verbosity >= 3)
      VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );

   /*--- And finally, the block data proper ---*/
   nBytes = s->numZ;
   selCtr = 0;
   gs = 0;
   while (True) {
      if (gs >= s->nMTF) break;
      ge = gs + BZ_G_SIZE - 1;
      if (ge >= s->nMTF) ge = s->nMTF-1;
      AssertH ( s->selector[selCtr] < nGroups, 3006 );

      if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            UInt16 mtfv_i;
            UChar* s_len_sel_selCtr
               = &(s->len[s->selector[selCtr]][0]);
            Int32* s_code_sel_selCtr
               = &(s->code[s->selector[selCtr]][0]);

#           define BZ_ITAH(nn)                      \
               mtfv_i = mtfv[gs+(nn)];              \
               bsW ( s,                             \
                     s_len_sel_selCtr[mtfv_i],      \
                     s_code_sel_selCtr[mtfv_i] )

            BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
            BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
            BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
            BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
            BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
            BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
            BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
            BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
            BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
            BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);

#           undef BZ_ITAH

      } else {
         /*--- slow version which correctly handles all situations ---*/
         for (i = gs; i <= ge; i++) {
            bsW ( s,
                  s->len  [s->selector[selCtr]] [mtfv[i]],
                  s->code [s->selector[selCtr]] [mtfv[i]] );
         }
      }

      gs = ge+1;
      selCtr++;
   }
   AssertH( selCtr == nSelectors, 3007 );

   if (s->verbosity >= 3)
      VPrintf1( "codes %d\n", s->numZ-nBytes );
}

/*---------------------------------------------------*/
/* Compress the accumulated data in s->block and append the coded
   block to the output bit stream.  Writes the stream header before
   the first block, and the stream trailer (plus a final flush)
   when is_last_block is set.  A zero-length block writes no block
   body at all. */
void BZ2_compressBlock ( EState* s, Bool is_last_block )
{
   if (s->nblock > 0) {

      BZ_FINALISE_CRC ( s->blockCRC );
      /* Fold this block's CRC into the running stream CRC:
         rotate-left-by-1, then XOR. */
      s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
      s->combinedCRC ^= s->blockCRC;
      /* Every block after the first restarts the in-memory
         compressed-output area. */
      if (s->blockNo > 1) s->numZ = 0;

      if (s->verbosity >= 2)
         VPrintf4( "    block %d: crc = 0x%08x, "
                   "combined CRC = 0x%08x, size = %d\n",
                   s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );

      BZ2_blockSort ( s );
   }

   /* Compressed bits are generated just past the block data in arr2
      (see the storage-aliasing comment in generateMTFValues). */
   s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);

   /*-- If this is the first block, create the stream header. --*/
   if (s->blockNo == 1) {
      BZ2_bsInitWrite ( s );
      bsPutUChar ( s, BZ_HDR_B );
      bsPutUChar ( s, BZ_HDR_Z );
      bsPutUChar ( s, BZ_HDR_h );
      bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
   }

   if (s->nblock > 0) {
      /* Block magic: the six bytes of 0x314159265359 (pi). */
      static const UChar blockMagic[6]
         = { 0x31, 0x41, 0x59, 0x26, 0x53, 0x59 };
      Int32 k;
      for (k = 0; k < 6; k++) bsPutUChar ( s, blockMagic[k] );

      /*-- Now the block's CRC, so it is in a known place. --*/
      bsPutUInt32 ( s, s->blockCRC );

      /*--
         Now a single bit indicating (non-)randomisation.
         As of version 0.9.5, we use a better sorting algorithm
         which makes randomisation unnecessary.  So always set
         the randomised bit to 'no'.  Of course, the decoder
         still needs to be able to handle randomised blocks
         so as to maintain backwards compatibility with
         older versions of bzip2.
      --*/
      bsW(s,1,0);

      bsW ( s, 24, s->origPtr );
      generateMTFValues ( s );
      sendMTFValues ( s );
   }

   /*-- If this is the last block, add the stream trailer. --*/
   if (is_last_block) {
      /* End-of-stream magic: the six bytes of 0x177245385090
         (sqrt(pi)), followed by the combined CRC. */
      static const UChar eosMagic[6]
         = { 0x17, 0x72, 0x45, 0x38, 0x50, 0x90 };
      Int32 k;
      for (k = 0; k < 6; k++) bsPutUChar ( s, eosMagic[k] );

      bsPutUInt32 ( s, s->combinedCRC );
      if (s->verbosity >= 2)
         VPrintf1( "    final combined CRC = 0x%08x\n   ", s->combinedCRC );
      bsFinishWrite ( s );
   }
}

/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/