/* ******************************************************************
 * hist : Histogram functions
 * part of Finite State Entropy project
 * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
 *
 * You can contact the author at :
 * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */

/* --- dependencies --- */
#include "mem.h"             /* U32, BYTE, etc. */
#include "debug.h"           /* assert, DEBUGLOG */
#include "error_private.h"   /* ERROR */
#include "hist.h"


/* --- Error management --- */
unsigned HIST_isError(size_t code) { return ERR_isError(code); }

/*-**************************************************************
 *  Histogram functions
 ****************************************************************/
unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* const end = ip + srcSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned largestCount=0;

    memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

    while (ip<end) {
        assert(*ip <= maxSymbolValue);   /* input is trusted : caller guarantees no symbol exceeds maxSymbolValue */
        count[*ip++]++;
    }

    while (!count[maxSymbolValue]) maxSymbolValue--;   /* trim down to the largest symbol actually present */
    *maxSymbolValuePtr = maxSymbolValue;

    {   U32 s;
        for (s=0; s<=maxSymbolValue; s++)
            if (count[s] > largestCount) largestCount = count[s];
    }

    return largestCount;
}
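
/* Usage sketch (illustrative only, not part of the library) :
 * HIST_count_simple() trusts its input : `count` must provide maxSymbolValue+1 cells,
 * and every byte of `src` must be <= maxSymbolValue (only asserted in debug builds).
 * A minimal caller, assuming a byte buffer `src` of length `srcSize` :
 *
 *     unsigned count[256];
 *     unsigned maxSymbolValue = 255;   // full byte alphabet, so any input is acceptable
 *     unsigned const largest = HIST_count_simple(count, &maxSymbolValue, src, srcSize);
 *     // on return, maxSymbolValue is lowered to the largest symbol actually present,
 *     // and `largest` is the highest single-symbol frequency (0 for empty input).
 */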

typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;

/* HIST_count_parallel_wksp() :
 * store histogram into 4 intermediate tables, recombined at the end.
 * this design makes better use of OoO cpus,
 * and is noticeably faster when some values are heavily repeated.
 * But it needs some additional workspace for intermediate tables.
 * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
 * @return : largest histogram frequency,
 *           or an error code (notably when the source contains symbol values larger than *maxSymbolValuePtr). */
static size_t HIST_count_parallel_wksp(
                                unsigned* count, unsigned* maxSymbolValuePtr,
                                const void* source, size_t sourceSize,
                                HIST_checkInput_e check,
                                U32* const workSpace)
{
    const BYTE* ip = (const BYTE*)source;
    const BYTE* const iend = ip+sourceSize;
    unsigned maxSymbolValue = *maxSymbolValuePtr;
    unsigned max=0;
    U32* const Counting1 = workSpace;
    U32* const Counting2 = Counting1 + 256;
    U32* const Counting3 = Counting2 + 256;
    U32* const Counting4 = Counting3 + 256;

    memset(workSpace, 0, 4*256*sizeof(unsigned));

    /* safety checks */
    if (!sourceSize) {
        memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));   /* zero the full table, as in HIST_count_simple() */
        *maxSymbolValuePtr = 0;
        return 0;
    }
    if (!maxSymbolValue) maxSymbolValue = 255;   /* 0 == default */

    /* by stripes of 16 bytes */
    {   U32 cached = MEM_read32(ip); ip += 4;   /* pre-load the next 4 bytes, so each load overlaps with counting the previous word */
        while (ip < iend-15) {
            U32 c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
            c = cached; cached = MEM_read32(ip); ip += 4;
            Counting1[(BYTE) c     ]++;
            Counting2[(BYTE)(c>>8) ]++;
            Counting3[(BYTE)(c>>16)]++;
            Counting4[       c>>24 ]++;
        }
        ip-=4;   /* the last pre-loaded 4 bytes were never counted : step back so the tail loop handles them */
    }

    /* finish last symbols */
    while (ip<iend) Counting1[*ip++]++;

    if (check) {   /* verify stats will fit into destination table */
        U32 s; for (s=255; s>maxSymbolValue; s--) {
            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
    }   }

    {   U32 s;
        if (maxSymbolValue > 255) maxSymbolValue = 255;
        for (s=0; s<=maxSymbolValue; s++) {
            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
            if (count[s] > max) max = count[s];
    }   }

    while (!count[maxSymbolValue]) maxSymbolValue--;
    *maxSymbolValuePtr = maxSymbolValue;
    return (size_t)max;
}
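
/* Design note (illustrative) :
 * a single-table loop such as
 *
 *     while (ip < iend) count[*ip++]++;
 *
 * serializes on count[x] whenever the same byte value repeats, because each
 * increment must wait for the previous store to the same cell. Spreading the
 * increments across Counting1..Counting4 lets an out-of-order core keep four
 * independent chains in flight; the partial tables are recombined into `count`
 * at the end, which is why the extra workspace is needed.
 */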

/* HIST_countFast_wksp() :
 * Same as HIST_countFast(), but using an externally provided scratch buffer.
 * `workSpace` is a writable buffer which must be 4-bytes aligned,
 * `workSpaceSize` must be >= HIST_WKSP_SIZE
 */
size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                           const void* source, size_t sourceSize,
                           void* workSpace, size_t workSpaceSize)
{
    if (sourceSize < 1500) /* heuristic threshold */
        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
}
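
/* Usage sketch (illustrative only, not part of the library) :
 * a caller that re-uses one scratch buffer across many blocks might do,
 * assuming `src` and `srcSize` describe the block to analyze :
 *
 *     U32 wksp[HIST_WKSP_SIZE_U32];    // 4-byte aligned by construction
 *     unsigned count[256];
 *     unsigned maxSymbolValue = 255;
 *     size_t const largest = HIST_countFast_wksp(count, &maxSymbolValue,
 *                                                src, srcSize,
 *                                                wksp, sizeof(wksp));
 *     if (HIST_isError(largest)) { ... }   // on this path, only workspace misuse can fail
 */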

/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
                      const void* source, size_t sourceSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
}

/* HIST_count_wksp() :
 * Same as HIST_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least HIST_WKSP_SIZE_U32 unsigned values */
size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
                       const void* source, size_t sourceSize,
                       void* workSpace, size_t workSpaceSize)
{
    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
    if (*maxSymbolValuePtr < 255)   /* restricted alphabet : verify no symbol exceeds the limit */
        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
    *maxSymbolValuePtr = 255;       /* full byte alphabet : nothing to check, defer to the fast variant */
    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
}

size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
                  const void* src, size_t srcSize)
{
    unsigned tmpCounters[HIST_WKSP_SIZE_U32];
    return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
}
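
/* Usage sketch (illustrative only, not part of the library) :
 * HIST_count() is the checked variant : unlike HIST_countFast(), it verifies that no
 * symbol in `src` exceeds *maxSymbolValuePtr, and reports maxSymbolValue_tooSmall otherwise.
 * A minimal caller restricted to a small alphabet, assuming `src` and `srcSize` :
 *
 *     unsigned count[64];
 *     unsigned maxSymbolValue = 63;    // any byte value above 63 triggers an error
 *     size_t const largest = HIST_count(count, &maxSymbolValue, src, srcSize);
 *     if (HIST_isError(largest)) { ... }
 */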