/****************************************************************************
 *  This file is part of PPMd project                                       *
 *  Written and distributed to public domain by Dmitry Shkarin 1997,        *
 *  1999-2000                                                               *
 *  Contents: memory allocation routines                                    *
 ****************************************************************************/

static const uint UNIT_SIZE=Max(sizeof(RARPPM_CONTEXT),sizeof(RARPPM_MEM_BLK));
static const uint FIXED_UNIT_SIZE=12;

SubAllocator::SubAllocator()
{
  Clean();
}


void SubAllocator::Clean()
{
  SubAllocatorSize=0;
}


inline void SubAllocator::InsertNode(void* p,int indx)
{
  ((RAR_NODE*) p)->next=FreeList[indx].next;
  FreeList[indx].next=(RAR_NODE*) p;
}


inline void* SubAllocator::RemoveNode(int indx)
{
  RAR_NODE* RetVal=FreeList[indx].next;
  FreeList[indx].next=RetVal->next;
  return RetVal;
}


inline uint SubAllocator::U2B(int NU)
{
  // We calculate the size of NU units in bytes based on the real UNIT_SIZE.
  // In the original implementation it was 8*NU+4*NU.
  return UNIT_SIZE*NU;
}
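
// Illustrative example (the sizes are hypothetical, not from this build):
// with the original 12 byte units, U2B(4) would be 4*12=48 bytes; if
// UNIT_SIZE is, say, 24 on a 64-bit build, the same 4 units occupy
// 4*24=96 bytes.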



// Calculate the RARPPM_MEM_BLK+Items address. Memory blocks are laid out with
// a stride of UNIT_SIZE bytes, which can differ from sizeof(RARPPM_MEM_BLK),
// so we cannot simply add Items to the RARPPM_MEM_BLK pointer.
inline RARPPM_MEM_BLK* SubAllocator::MBPtr(RARPPM_MEM_BLK *BasePtr,int Items)
{
  return((RARPPM_MEM_BLK*)( ((byte *)(BasePtr))+U2B(Items) ));
}
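
// Illustrative example (sizes are hypothetical): if sizeof(RARPPM_MEM_BLK)
// were 16 while UNIT_SIZE is 24, "BasePtr+Items" would advance by 16*Items
// bytes and land inside a unit; advancing by U2B(Items) bytes keeps the
// UNIT_SIZE stride that the block layout actually uses.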


inline void SubAllocator::SplitBlock(void* pv,int OldIndx,int NewIndx)
{
  int i, UDiff=Indx2Units[OldIndx]-Indx2Units[NewIndx];
  byte* p=((byte*) pv)+U2B(Indx2Units[NewIndx]);
  if (Indx2Units[i=Units2Indx[UDiff-1]] != UDiff)
  {
    InsertNode(p,--i);
    p += U2B(i=Indx2Units[i]);
    UDiff -= i;
  }
  InsertNode(p,Units2Indx[UDiff-1]);
}


void SubAllocator::StopSubAllocator()
{
  if ( SubAllocatorSize )
  {
    SubAllocatorSize=0;
    //free(HeapStart);
  }
}


bool SubAllocator::StartSubAllocator(int SASize)
{
  uint t=SASize << 20;
  if (SubAllocatorSize == t)
    return true;
  StopSubAllocator();

  // The original algorithm expects FIXED_UNIT_SIZE, but the actual structure
  // size can be larger. So we recalculate the allocated size and add two more
  // units: one as a reserve for HeapEnd overflow checks and another to
  // provide the space needed to correctly align UnitsStart.
  uint AllocSize=t/FIXED_UNIT_SIZE*UNIT_SIZE+2*UNIT_SIZE;
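
  // Worked example (the UNIT_SIZE value here is hypothetical): for SASize=1,
  // t = 1<<20 = 1048576 bytes; with UNIT_SIZE=24 this gives
  // AllocSize = 1048576/12*24 + 2*24 = 87381*24 + 48 = 2097192 bytes.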
  //if ((HeapStart=(byte *)malloc(AllocSize)) == NULL)
  if ((HeapStart=(byte *)HeapStartFixed) == NULL)
  {
    ErrHandler.MemoryError();
    return false;
  }

  // HeapEnd was not present in the original algorithm. We added it to catch
  // invalid memory access attempts when processing corrupt archive data.
  HeapEnd=HeapStart+AllocSize-UNIT_SIZE;

  SubAllocatorSize=t;
  return true;
}
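
// A minimal usage sketch, assuming HeapStartFixed (defined elsewhere) already
// points to a buffer large enough for the requested size:
//
//   SubAllocator Alloc;
//   if (Alloc.StartSubAllocator(16))  // request a 16 MB sub-allocator
//     Alloc.InitSubAllocator();       // build free lists and memory layout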


void SubAllocator::InitSubAllocator()
{
  int i, k;
  memset(FreeList,0,sizeof(FreeList));
  pText=HeapStart;

  // The original algorithm operates with a 12 byte FIXED_UNIT_SIZE, but the
  // actual size of the RARPPM_MEM_BLK and RARPPM_CONTEXT structures can
  // exceed this value because of alignment and larger pointer field sizes.
  // So we define UNIT_SIZE for this larger size and adjust memory
  // pointers accordingly.

  // Size2 is the size of the (HiUnit-LoUnit) memory area to allocate as
  // originally intended by the compression algorithm. It is 7/8 of the
  // total allocated size.
  uint Size2=FIXED_UNIT_SIZE*(SubAllocatorSize/8/FIXED_UNIT_SIZE*7);

  // RealSize2 is the real adjusted size of the (HiUnit-LoUnit) memory, taking
  // into account that our UNIT_SIZE can be larger than FIXED_UNIT_SIZE.
  uint RealSize2=Size2/FIXED_UNIT_SIZE*UNIT_SIZE;

  // Size1 is the size of the memory area from HeapStart to FakeUnitsStart
  // as originally intended by the compression algorithm. This area can
  // contain different data types, both single symbols and structures.
  uint Size1=SubAllocatorSize-Size2;

  // RealSize1 is the real size of this area. We correct it according to the
  // UNIT_SIZE vs FIXED_UNIT_SIZE difference. We also add one more UNIT_SIZE
  // to compensate for a possible remainder of Size1/FIXED_UNIT_SIZE, which
  // would be lost otherwise. We add a full UNIT_SIZE instead of this
  // Size1%FIXED_UNIT_SIZE remainder, because it makes it easy to align
  // UnitsStart and adding more than the remainder is fine for the algorithm.
  uint RealSize1=Size1/FIXED_UNIT_SIZE*UNIT_SIZE+UNIT_SIZE;

  // RealSize1 must be divisible by UNIT_SIZE without a remainder, so that
  // UnitsStart is aligned to UNIT_SIZE. This is important for architectures
  // where proper memory alignment is mandatory. Since we produce RealSize1
  // by multiplying by UNIT_SIZE, this condition is always true. So LoUnit,
  // UnitsStart and HeapStart are properly aligned.
  LoUnit=UnitsStart=HeapStart+RealSize1;

  // When we reach FakeUnitsStart, we restart the model. It is where
  // the original algorithm expected to see UnitsStart. The real UnitsStart
  // can have a larger value.
  FakeUnitsStart=HeapStart+Size1;

  HiUnit=LoUnit+RealSize2;
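
  // Resulting layout (addresses grow to the right):
  //   HeapStart ... FakeUnitsStart ... UnitsStart==LoUnit ... HiUnit ... HeapEnd
  // pText grows up from HeapStart, unit arrays are carved upwards from LoUnit
  // and contexts downwards from HiUnit. Illustrative numbers, assuming
  // SubAllocatorSize=1048576 and a hypothetical UNIT_SIZE=24: Size2=917448,
  // RealSize2=1834896, Size1=131128, RealSize1=262272, so
  // FakeUnitsStart=HeapStart+131128 and UnitsStart=HeapStart+262272.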
  for (i=0,k=1;i < N1     ;i++,k += 1)
    Indx2Units[i]=k;
  for (k++;i < N1+N2      ;i++,k += 2)
    Indx2Units[i]=k;
  for (k++;i < N1+N2+N3   ;i++,k += 3)
    Indx2Units[i]=k;
  for (k++;i < N1+N2+N3+N4;i++,k += 4)
    Indx2Units[i]=k;
  for (GlueCount=k=i=0;k < 128;k++)
  {
    i += (Indx2Units[i] < k+1);
    Units2Indx[k]=i;
  }
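
  // With the usual PPMd constants (N1=N2=N3=4, N4=26, defined elsewhere),
  // Indx2Units becomes 1,2,3,4, 6,8,10,12, 15,18,21,24, 28,32,...,128 and
  // Units2Indx maps every unit count 1..128 to the smallest index whose
  // Indx2Units value is greater than or equal to that count.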
}


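// Glue adjacent free blocks into larger ones to reduce fragmentation: all
// free blocks are stamped with 0xFFFF and linked into one list, physically
// adjacent blocks are merged, and the merged blocks are redistributed back
// to the appropriate free lists.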
inline void SubAllocator::GlueFreeBlocks()
{
  RARPPM_MEM_BLK s0, * p, * p1;
  int i, k, sz;
  if (LoUnit != HiUnit)
    *LoUnit=0;
  for (i=0, s0.next=s0.prev=&s0;i < N_INDEXES;i++)
    while ( FreeList[i].next )
    {
      p=(RARPPM_MEM_BLK*)RemoveNode(i);
      p->insertAt(&s0);
      p->Stamp=0xFFFF;
      p->NU=Indx2Units[i];
    }
  for (p=s0.next;p != &s0;p=p->next)
    while ((p1=MBPtr(p,p->NU))->Stamp == 0xFFFF && int(p->NU)+p1->NU < 0x10000)
    {
      p1->remove();
      p->NU += p1->NU;
    }
  while ((p=s0.next) != &s0)
  {
    for (p->remove(), sz=p->NU;sz > 128;sz -= 128, p=MBPtr(p,128))
      InsertNode(p,N_INDEXES-1);
    if (Indx2Units[i=Units2Indx[sz-1]] != sz)
    {
      k=sz-Indx2Units[--i];
      InsertNode(MBPtr(p,sz-k),k-1);
    }
    InsertNode(p,i);
  }
}

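// Slow path of unit allocation: occasionally glue free blocks, then search
// the larger free lists and split a bigger block, and as a last resort carve
// new units from the gap between pText and UnitsStart, shrinking that gap.
// Returns NULL when no memory is left.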
void* SubAllocator::AllocUnitsRare(int indx)
{
  if ( !GlueCount )
  {
    GlueCount = 255;
    GlueFreeBlocks();
    if ( FreeList[indx].next )
      return RemoveNode(indx);
  }
  int i=indx;
  do
  {
    if (++i == N_INDEXES)
    {
      GlueCount--;
      i=U2B(Indx2Units[indx]);
      int j=FIXED_UNIT_SIZE*Indx2Units[indx];
      if (FakeUnitsStart - pText > j)
      {
        FakeUnitsStart -= j;
        UnitsStart -= i;
        return UnitsStart;
      }
      return NULL;
    }
  } while ( !FreeList[i].next );
  void* RetVal=RemoveNode(i);
  SplitBlock(RetVal,i,indx);
  return RetVal;
}


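// Allocate NU units: take a block from the matching free list if possible,
// otherwise carve new units from LoUnit growing upwards, and fall back to
// AllocUnitsRare() when LoUnit would cross HiUnit.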
inline void* SubAllocator::AllocUnits(int NU)
{
  int indx=Units2Indx[NU-1];
  if ( FreeList[indx].next )
    return RemoveNode(indx);
  void* RetVal=LoUnit;
  LoUnit += U2B(Indx2Units[indx]);
  if (LoUnit <= HiUnit)
    return RetVal;
  LoUnit -= U2B(Indx2Units[indx]);
  return AllocUnitsRare(indx);
}


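// Allocate a single context: contexts are carved from HiUnit growing
// downwards, so unit arrays (from LoUnit up) and contexts (from HiUnit down)
// share the same region and meet in the middle; free list 0 and
// AllocUnitsRare(0) serve as fallbacks.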
void* SubAllocator::AllocContext()
{
  if (HiUnit != LoUnit)
    return (HiUnit -= UNIT_SIZE);
  if ( FreeList->next )
    return RemoveNode(0);
  return AllocUnitsRare(0);
}


void* SubAllocator::ExpandUnits(void* OldPtr,int OldNU)
{
  int i0=Units2Indx[OldNU-1], i1=Units2Indx[OldNU-1+1];
  if (i0 == i1)
    return OldPtr;
  void* ptr=AllocUnits(OldNU+1);
  if ( ptr )
  {
    memcpy(ptr,OldPtr,U2B(OldNU));
    InsertNode(OldPtr,i0);
  }
  return ptr;
}


void* SubAllocator::ShrinkUnits(void* OldPtr,int OldNU,int NewNU)
{
  int i0=Units2Indx[OldNU-1], i1=Units2Indx[NewNU-1];
  if (i0 == i1)
    return OldPtr;
  if ( FreeList[i1].next )
  {
    void* ptr=RemoveNode(i1);
    memcpy(ptr,OldPtr,U2B(NewNU));
    InsertNode(OldPtr,i0);
    return ptr;
  }
  else
  {
    SplitBlock(OldPtr,i0,i1);
    return OldPtr;
  }
}


void SubAllocator::FreeUnits(void* ptr,int OldNU)
{
  InsertNode(ptr,Units2Indx[OldNU-1]);
}