1
2 /*-------------------------------------------------------------*/
3 /*--- Compression machinery (not incl block sorting) ---*/
4 /*--- compress.c ---*/
5 /*-------------------------------------------------------------*/
6
7 /* ------------------------------------------------------------------
8 This file is part of bzip2/libbzip2, a program and library for
9 lossless, block-sorting data compression.
10
11 bzip2/libbzip2 version 1.0.5 of 10 December 2007
12 Copyright (C) 1996-2007 Julian Seward <jseward@bzip.org>
13
14 Please read the WARNING, DISCLAIMER and PATENTS sections in the
15 README file.
16
17 This program is released under the terms of the license contained
18 in the file LICENSE.
19 ------------------------------------------------------------------ */
20
21
22 /* CHANGES
23 0.9.0 -- original version.
24 0.9.0a/b -- no changes in this file.
25 0.9.0c -- changed setting of nGroups in sendMTFValues()
26 so as to do a bit better on small files
27 */
28
29 #include "bzlib_private.h"
30
31 ABC_NAMESPACE_IMPL_START
32
33
34
35 /*---------------------------------------------------*/
36 /*--- Bit stream I/O ---*/
37 /*---------------------------------------------------*/
38
39 /*---------------------------------------------------*/
/* Reset the bit-stream accumulator to the empty state, ready
   for the first bsW() of a new output stream. */
void BZ2_bsInitWrite ( EState* s )
{
   s->bsBuff = 0;
   s->bsLive = 0;
}
45
46
47 /*---------------------------------------------------*/
48 static
bsFinishWrite(EState * s)49 void bsFinishWrite ( EState* s )
50 {
51 while (s->bsLive > 0) {
52 s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
53 s->numZ++;
54 s->bsBuff <<= 8;
55 s->bsLive -= 8;
56 }
57 }
58
59
60 /*---------------------------------------------------*/
/* Flush complete bytes (top 8 bits at a time) from the bit
   accumulator s->bsBuff into s->zbits, leaving s->bsLive < 8 so
   that bsW() can OR in up to 24 more bits without overflow.
   NOTE(review): the 'nz' argument is not used by the expansion;
   it appears to be kept for historical reasons only. */
#define bsNEEDW(nz)                           \
{                                             \
   while (s->bsLive >= 8) {                   \
      s->zbits[s->numZ]                       \
         = (UChar)(s->bsBuff >> 24);          \
      s->numZ++;                              \
      s->bsBuff <<= 8;                        \
      s->bsLive -= 8;                         \
   }                                          \
}
71
72
73 /*---------------------------------------------------*/
static
__inline__
void bsW ( EState* s, Int32 n, UInt32 v )
{
   /* Append the n low bits of v, MSB-first, to the output bit
      stream.  bsNEEDW drains whole bytes first, guaranteeing
      s->bsLive < 8, so the shift amount (32 - bsLive - n) is
      non-negative for the widths used in this file (n <= 24). */
   bsNEEDW ( n );
   s->bsBuff |= (v << (32 - s->bsLive - n));
   s->bsLive += n;
}
82
83
84 /*---------------------------------------------------*/
85 static
bsPutUInt32(EState * s,UInt32 u)86 void bsPutUInt32 ( EState* s, UInt32 u )
87 {
88 bsW ( s, 8, (u >> 24) & 0xffL );
89 bsW ( s, 8, (u >> 16) & 0xffL );
90 bsW ( s, 8, (u >> 8) & 0xffL );
91 bsW ( s, 8, u & 0xffL );
92 }
93
94
95 /*---------------------------------------------------*/
96 static
bsPutUChar(EState * s,UChar c)97 void bsPutUChar ( EState* s, UChar c )
98 {
99 bsW( s, 8, (UInt32)c );
100 }
101
102
103 /*---------------------------------------------------*/
104 /*--- The back end proper ---*/
105 /*---------------------------------------------------*/
106
107 /*---------------------------------------------------*/
108 static
makeMaps_e(EState * s)109 void makeMaps_e ( EState* s )
110 {
111 Int32 i;
112 s->nInUse = 0;
113 for (i = 0; i < 256; i++)
114 if (s->inUse[i]) {
115 s->unseqToSeq[i] = s->nInUse;
116 s->nInUse++;
117 }
118 }
119
120
121 /*---------------------------------------------------*/
122 static
generateMTFValues(EState * s)123 void generateMTFValues ( EState* s )
124 {
125 UChar yy[256];
126 Int32 i, j;
127 Int32 zPend;
128 Int32 wr;
129 Int32 EOB;
130
131 /*
132 After sorting (eg, here),
133 s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
134 and
135 ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
136 holds the original block data.
137
138 The first thing to do is generate the MTF values,
139 and put them in
140 ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
141 Because there are strictly fewer or equal MTF values
142 than block values, ptr values in this area are overwritten
143 with MTF values only when they are no longer needed.
144
145 The final compressed bitstream is generated into the
146 area starting at
147 (UChar*) (&((UChar*)s->arr2)[s->nblock])
148
149 These storage aliases are set up in bzCompressInit(),
150 except for the last one, which is arranged in
151 compressBlock().
152 */
153 UInt32* ptr = s->ptr;
154 UChar* block = s->block;
155 UInt16* mtfv = s->mtfv;
156
157 makeMaps_e ( s );
158 EOB = s->nInUse+1;
159
160 for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;
161
162 wr = 0;
163 zPend = 0;
164 for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;
165
166 for (i = 0; i < s->nblock; i++) {
167 UChar ll_i;
168 AssertD ( wr <= i, "generateMTFValues(1)" );
169 j = ptr[i]-1; if (j < 0) j += s->nblock;
170 ll_i = s->unseqToSeq[block[j]];
171 AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );
172
173 if (yy[0] == ll_i) {
174 zPend++;
175 } else {
176
177 if (zPend > 0) {
178 zPend--;
179 while (True) {
180 if (zPend & 1) {
181 mtfv[wr] = BZ_RUNB; wr++;
182 s->mtfFreq[BZ_RUNB]++;
183 } else {
184 mtfv[wr] = BZ_RUNA; wr++;
185 s->mtfFreq[BZ_RUNA]++;
186 }
187 if (zPend < 2) break;
188 zPend = (zPend - 2) / 2;
189 };
190 zPend = 0;
191 }
192 {
193 register UChar rtmp;
194 register UChar* ryy_j;
195 register UChar rll_i;
196 rtmp = yy[1];
197 yy[1] = yy[0];
198 ryy_j = &(yy[1]);
199 rll_i = ll_i;
200 while ( rll_i != rtmp ) {
201 register UChar rtmp2;
202 ryy_j++;
203 rtmp2 = rtmp;
204 rtmp = *ryy_j;
205 *ryy_j = rtmp2;
206 };
207 yy[0] = rtmp;
208 j = ryy_j - &(yy[0]);
209 mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
210 }
211
212 }
213 }
214
215 if (zPend > 0) {
216 zPend--;
217 while (True) {
218 if (zPend & 1) {
219 mtfv[wr] = BZ_RUNB; wr++;
220 s->mtfFreq[BZ_RUNB]++;
221 } else {
222 mtfv[wr] = BZ_RUNA; wr++;
223 s->mtfFreq[BZ_RUNA]++;
224 }
225 if (zPend < 2) break;
226 zPend = (zPend - 2) / 2;
227 };
228 zPend = 0;
229 }
230
231 mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;
232
233 s->nMTF = wr;
234 }
235
236
237 /*---------------------------------------------------*/
238 #define BZ_LESSER_ICOST 0
239 #define BZ_GREATER_ICOST 15
240
static
void sendMTFValues ( EState* s )
{
   /* Huffman-code the MTF/RLE2 values in s->mtfv[0 .. s->nMTF-1]
      and write the fully coded block to the output bit stream:
        (1) choose 2..6 coding tables and refine them over up to
            BZ_N_ITERS passes, assigning each group of up to
            BZ_G_SIZE symbols the table that codes it cheapest;
        (2) transmit the symbol map, the MTF-coded selectors and
            the delta-coded table lengths;
        (3) transmit the symbols themselves. */
   Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
   Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
   Int32 nGroups, nBytes;

   /*--
   UChar  len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   is a global since the decoder also needs it.

   Int32  code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   Int32  rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
   are also globals only used in this proc.
   Made global to keep stack frame size small.
   --*/


   UInt16 cost[BZ_N_GROUPS];
   Int32  fave[BZ_N_GROUPS];

   UInt16* mtfv = s->mtfv;

   if (s->verbosity >= 3)
      VPrintf3( "      %d in block, %d after MTF & 1-2 coding, "
                "%d+2 syms in use\n",
                s->nblock, s->nMTF, s->nInUse );

   /* Alphabet is RUNA, RUNB, the MTF positions, and EOB. */
   alphaSize = s->nInUse+2;
   /* Start all tables uniformly "expensive". */
   for (t = 0; t < BZ_N_GROUPS; t++)
      for (v = 0; v < alphaSize; v++)
         s->len[t][v] = BZ_GREATER_ICOST;

   /*--- Decide how many coding tables to use ---*/
   AssertH ( s->nMTF > 0, 3001 );
   if (s->nMTF < 200)  nGroups = 2; else
   if (s->nMTF < 600)  nGroups = 3; else
   if (s->nMTF < 1200) nGroups = 4; else
   if (s->nMTF < 2400) nGroups = 5; else
                       nGroups = 6;

   /*--- Generate an initial set of coding tables ---*/
   /* Partition the alphabet into nGroups contiguous ranges of
      roughly equal total frequency; each range seeds one table
      with cheap in-range / expensive out-of-range lengths. */
   {
      Int32 nPart, remF, tFreq, aFreq;

      nPart = nGroups;
      remF  = s->nMTF;
      gs = 0;
      while (nPart > 0) {
         tFreq = remF / nPart;
         ge = gs-1;
         aFreq = 0;
         while (aFreq < tFreq && ge < alphaSize-1) {
            ge++;
            aFreq += s->mtfFreq[ge];
         }

         /* Give back the last symbol on alternate partitions to
            balance range sizes (see 0.9.0c CHANGES note above). */
         if (ge > gs
             && nPart != nGroups && nPart != 1
             && ((nGroups-nPart) % 2 == 1)) {
            aFreq -= s->mtfFreq[ge];
            ge--;
         }

         if (s->verbosity >= 3)
            VPrintf5( "      initial group %d, [%d .. %d], "
                      "has %d syms (%4.1f%%)\n",
                      nPart, gs, ge, aFreq,
                      (100.0 * (float)aFreq) / (float)(s->nMTF) );

         for (v = 0; v < alphaSize; v++)
            if (v >= gs && v <= ge)
               s->len[nPart-1][v] = BZ_LESSER_ICOST; else
               s->len[nPart-1][v] = BZ_GREATER_ICOST;

         nPart--;
         gs = ge+1;
         remF -= aFreq;
      }
   }

   /*---
      Iterate up to BZ_N_ITERS times to improve the tables.
   ---*/
   for (iter = 0; iter < BZ_N_ITERS; iter++) {

      for (t = 0; t < nGroups; t++) fave[t] = 0;

      for (t = 0; t < nGroups; t++)
         for (v = 0; v < alphaSize; v++)
            s->rfreq[t][v] = 0;

      /*---
        Set up an auxiliary length table which is used to fast-track
        the common case (nGroups == 6).
      ---*/
      /* Pack the six tables' lengths two per 32-bit word, so a
         symbol's cost under all six tables is summed with three
         adds in the fast path below. */
      if (nGroups == 6) {
         for (v = 0; v < alphaSize; v++) {
            s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
            s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
            s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
         }
      }

      nSelectors = 0;
      totc = 0;
      gs = 0;
      while (True) {

         /*--- Set group start & end marks. --*/
         if (gs >= s->nMTF) break;
         ge = gs + BZ_G_SIZE - 1;
         if (ge >= s->nMTF) ge = s->nMTF-1;

         /*--
            Calculate the cost of this group as coded
            by each of the coding tables.
         --*/
         for (t = 0; t < nGroups; t++) cost[t] = 0;

         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            register UInt32 cost01, cost23, cost45;
            register UInt16 icv;
            cost01 = cost23 = cost45 = 0;

#           define BZ_ITER(nn)                \
               icv = mtfv[gs+(nn)];           \
               cost01 += s->len_pack[icv][0]; \
               cost23 += s->len_pack[icv][1]; \
               cost45 += s->len_pack[icv][2]; \

            BZ_ITER(0);  BZ_ITER(1);  BZ_ITER(2);  BZ_ITER(3);  BZ_ITER(4);
            BZ_ITER(5);  BZ_ITER(6);  BZ_ITER(7);  BZ_ITER(8);  BZ_ITER(9);
            BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
            BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
            BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
            BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
            BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
            BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
            BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
            BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);

#           undef BZ_ITER

            /* Unpack the per-table totals from the packed sums. */
            cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
            cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
            cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++) {
               UInt16 icv = mtfv[i];
               for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
            }
         }

         /*--
            Find the coding table which is best for this group,
            and record its identity in the selector table.
         --*/
         bc = 999999999; bt = -1;
         for (t = 0; t < nGroups; t++)
            if (cost[t] < bc) { bc = cost[t]; bt = t; };
         totc += bc;
         fave[bt]++;
         s->selector[nSelectors] = bt;
         nSelectors++;

         /*--
            Increment the symbol frequencies for the selected table.
          --*/
         if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/

#           define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++

            BZ_ITUR(0);  BZ_ITUR(1);  BZ_ITUR(2);  BZ_ITUR(3);  BZ_ITUR(4);
            BZ_ITUR(5);  BZ_ITUR(6);  BZ_ITUR(7);  BZ_ITUR(8);  BZ_ITUR(9);
            BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
            BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
            BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
            BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
            BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
            BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
            BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
            BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);

#           undef BZ_ITUR

         } else {
            /*--- slow version which correctly handles all situations ---*/
            for (i = gs; i <= ge; i++)
               s->rfreq[bt][ mtfv[i] ]++;
         }

         gs = ge+1;
      }
      if (s->verbosity >= 3) {
         VPrintf2 ( "      pass %d: size is %d, grp uses are ",
                   iter+1, totc/8 );
         for (t = 0; t < nGroups; t++)
            VPrintf1 ( "%d ", fave[t] );
         VPrintf0 ( "\n" );
      }

      /*--
        Recompute the tables based on the accumulated frequencies.
      --*/
      /* maxLen was changed from 20 to 17 in bzip2-1.0.3.  See
         comment in huffman.c for details. */
      for (t = 0; t < nGroups; t++)
         BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]),
                                 alphaSize, 17 /*20*/ );
   }


   AssertH( nGroups < 8, 3002 );
   AssertH( nSelectors < 32768 &&
            nSelectors <= (2 + (900000 / BZ_G_SIZE)),
            3003 );


   /*--- Compute MTF values for the selectors. ---*/
   {
      UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
      for (i = 0; i < nGroups; i++) pos[i] = i;
      for (i = 0; i < nSelectors; i++) {
         ll_i = s->selector[i];
         j = 0;
         tmp = pos[j];
         while ( ll_i != tmp ) {
            j++;
            tmp2 = tmp;
            tmp = pos[j];
            pos[j] = tmp2;
         };
         pos[0] = tmp;
         s->selectorMtf[i] = j;
      }
   };

   /*--- Assign actual codes for the tables. --*/
   for (t = 0; t < nGroups; t++) {
      minLen = 32;
      maxLen = 0;
      for (i = 0; i < alphaSize; i++) {
         if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
         if (s->len[t][i] < minLen) minLen = s->len[t][i];
      }
      AssertH ( !(maxLen > 17 /*20*/ ), 3004 );
      AssertH ( !(minLen < 1),  3005 );
      BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]),
                          minLen, maxLen, alphaSize );
   }

   /*--- Transmit the mapping table. ---*/
   /* Two-level bitmap: 16 bits say which 16-value ranges contain
      used byte values, then 16 bits for each non-empty range. */
   {
      Bool inUse16[16];
      for (i = 0; i < 16; i++) {
          inUse16[i] = False;
          for (j = 0; j < 16; j++)
             if (s->inUse[i * 16 + j]) inUse16[i] = True;
      }

      nBytes = s->numZ;
      for (i = 0; i < 16; i++)
         if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);

      for (i = 0; i < 16; i++)
         if (inUse16[i])
            for (j = 0; j < 16; j++) {
               if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
            }

      if (s->verbosity >= 3)
         VPrintf1( "      bytes: mapping %d, ", s->numZ-nBytes );
   }

   /*--- Now the selectors. ---*/
   nBytes = s->numZ;
   bsW ( s, 3, nGroups );
   bsW ( s, 15, nSelectors );
   /* Each MTF-coded selector is sent in unary: j ones, then a zero. */
   for (i = 0; i < nSelectors; i++) {
      for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
      bsW(s,1,0);
   }
   if (s->verbosity >= 3)
      VPrintf1( "selectors %d, ", s->numZ-nBytes );

   /*--- Now the coding tables. ---*/
   nBytes = s->numZ;

   for (t = 0; t < nGroups; t++) {
      Int32 curr = s->len[t][0];
      bsW ( s, 5, curr );
      /* Delta-code each symbol's length against the previous one:
         "10" = increment, "11" = decrement, "0" = length done. */
      for (i = 0; i < alphaSize; i++) {
         while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
         while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
         bsW ( s, 1, 0 );
      }
   }

   if (s->verbosity >= 3)
      VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );

   /*--- And finally, the block data proper ---*/
   nBytes = s->numZ;
   selCtr = 0;
   gs = 0;
   while (True) {
      if (gs >= s->nMTF) break;
      ge = gs + BZ_G_SIZE - 1;
      if (ge >= s->nMTF) ge = s->nMTF-1;
      AssertH ( s->selector[selCtr] < nGroups, 3006 );

      if (nGroups == 6 && 50 == ge-gs+1) {
            /*--- fast track the common case ---*/
            UInt16 mtfv_i;
            UChar* s_len_sel_selCtr
               = &(s->len[s->selector[selCtr]][0]);
            Int32* s_code_sel_selCtr
               = &(s->code[s->selector[selCtr]][0]);

#           define BZ_ITAH(nn)                      \
               mtfv_i = mtfv[gs+(nn)];              \
               bsW ( s,                             \
                     s_len_sel_selCtr[mtfv_i],      \
                     s_code_sel_selCtr[mtfv_i] )

            BZ_ITAH(0);  BZ_ITAH(1);  BZ_ITAH(2);  BZ_ITAH(3);  BZ_ITAH(4);
            BZ_ITAH(5);  BZ_ITAH(6);  BZ_ITAH(7);  BZ_ITAH(8);  BZ_ITAH(9);
            BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
            BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
            BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
            BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
            BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
            BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
            BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
            BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);

#           undef BZ_ITAH

      } else {
         /*--- slow version which correctly handles all situations ---*/
         for (i = gs; i <= ge; i++) {
            bsW ( s,
                  s->len  [s->selector[selCtr]] [mtfv[i]],
                  s->code [s->selector[selCtr]] [mtfv[i]] );
         }
      }


      gs = ge+1;
      selCtr++;
   }
   AssertH( selCtr == nSelectors, 3007 );

   if (s->verbosity >= 3)
      VPrintf1( "codes %d\n", s->numZ-nBytes );
}
602
603
604 /*---------------------------------------------------*/
/* Compress one block: finalise and fold its CRC, block-sort it,
   then MTF/RLE2 and Huffman-code it into the output area s->zbits.
   Also writes the stream header before block 1 and, when
   is_last_block is set, the stream trailer plus combined CRC. */
void BZ2_compressBlock ( EState* s, Bool is_last_block )
{
   if (s->nblock > 0) {

      BZ_FINALISE_CRC ( s->blockCRC );
      /* Fold the block CRC into the running stream CRC:
         rotate-left by one bit, then XOR. */
      s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
      s->combinedCRC ^= s->blockCRC;
      /* Restart the output area for every block after the first;
         block 1's area already needs to hold the stream header
         written below.  NOTE(review): assumes numZ is zero on
         entry for block 1 -- presumably set by the stream-init
         path; confirm there. */
      if (s->blockNo > 1) s->numZ = 0;

      if (s->verbosity >= 2)
         VPrintf4( "    block %d: crc = 0x%08x, "
                   "combined CRC = 0x%08x, size = %d\n",
                   s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );

      BZ2_blockSort ( s );
   }

   /* Compressed bits are generated just past the end of the block
      data inside arr2 (storage aliasing; see the comment in
      generateMTFValues). */
   s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);

   /*-- If this is the first block, create the stream header. --*/
   if (s->blockNo == 1) {
      BZ2_bsInitWrite ( s );
      bsPutUChar ( s, BZ_HDR_B );
      bsPutUChar ( s, BZ_HDR_Z );
      bsPutUChar ( s, BZ_HDR_h );
      bsPutUChar ( s, (UChar)(BZ_HDR_0 + s->blockSize100k) );
   }

   if (s->nblock > 0) {

      /* Block-header magic: the six bytes 0x314159265359. */
      bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
      bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
      bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );

      /*-- Now the block's CRC, so it is in a known place. --*/
      bsPutUInt32 ( s, s->blockCRC );

      /*--
         Now a single bit indicating (non-)randomisation.
         As of version 0.9.5, we use a better sorting algorithm
         which makes randomisation unnecessary.  So always set
         the randomised bit to 'no'.  Of course, the decoder
         still needs to be able to handle randomised blocks
         so as to maintain backwards compatibility with
         older versions of bzip2.
      --*/
      bsW(s,1,0);

      /* Position of the original string in the sorted rotation
         order -- the decoder needs it to invert the BWT. */
      bsW ( s, 24, s->origPtr );
      generateMTFValues ( s );
      sendMTFValues ( s );
   }


   /*-- If this is the last block, add the stream trailer. --*/
   if (is_last_block) {

      /* Stream-end magic: the six bytes 0x177245385090. */
      bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
      bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
      bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
      bsPutUInt32 ( s, s->combinedCRC );
      if (s->verbosity >= 2)
         VPrintf1( "    final combined CRC = 0x%08x\n   ", s->combinedCRC );
      bsFinishWrite ( s );
   }
}
671
672
673 /*-------------------------------------------------------------*/
674 /*--- end compress.c ---*/
675 /*-------------------------------------------------------------*/
676 ABC_NAMESPACE_IMPL_END
677
678