1 /*
2 * Copyright (c) 2009 Dmitry Kazakov <dimula73@gmail.com>
3 * Copyright (c) 2018 Andrey Kamakin <a.kamakin@icloud.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 */
19
20 // to disable assert when the leak tracker is active
21 #include "config-memory-leak-tracker.h"
22
23 #include <QGlobalStatic>
24
25 #include "kis_tile_data_store.h"
26 #include "kis_tile_data.h"
27 #include "kis_debug.h"
28
29 #include "kis_tile_data_store_iterators.h"
30
31 Q_GLOBAL_STATIC(KisTileDataStore, s_instance)
32
//#define DEBUG_PRECLONE

#ifdef DEBUG_PRECLONE
#include <stdio.h>
/**
 * Debug tracing for the pre-clone mechanism. Pointers are printed with
 * "%p" and cast to void* — the previous "%X"/(quintptr) pairing was a
 * printf format/argument mismatch (quintptr is 64 bits on most
 * platforms, "%X" expects unsigned int), which is undefined behavior
 * and truncated the printed addresses.
 */
#define DEBUG_PRECLONE_ACTION(action, oldTD, newTD)     \
    printf("!!! %s:\t\t\t %p -> %p \t\t!!!\n",          \
           action, (void *)(oldTD), (void *)(newTD))
#define DEBUG_FREE_ACTION(td)                           \
    printf("Tile data free'd \t(%p)\n", (void *)(td))
#else
#define DEBUG_PRECLONE_ACTION(action, oldTD, newTD)
#define DEBUG_FREE_ACTION(td)
#endif

#ifdef DEBUG_HIT_MISS
// Counters for measuring how often duplicateTileData() finds a
// ready-made clone (hit) vs. has to copy the tile itself (miss).
qint64 __preclone_miss = 0;
qint64 __preclone_hit = 0;

qint64 __preclone_miss_user_count = 0;
qint64 __preclone_miss_age = 0;

#define DEBUG_COUNT_PRECLONE_HIT(td) __preclone_hit++
#define DEBUG_COUNT_PRECLONE_MISS(td) __preclone_miss++; __preclone_miss_user_count+=td->numUsers(); __preclone_miss_age+=td->age()
#define DEBUG_REPORT_PRECLONE_EFFICIENCY()                              \
    dbgKrita << "Hits:" << __preclone_hit                               \
             << "of" << __preclone_hit + __preclone_miss                \
             << "("                                                     \
             << qreal(__preclone_hit) / (__preclone_hit + __preclone_miss) \
             << ")"                                                     \
             << "miss users" << qreal(__preclone_miss_user_count) / __preclone_miss \
             << "miss age" << qreal(__preclone_miss_age) / __preclone_miss
#else
#define DEBUG_COUNT_PRECLONE_HIT(td)
#define DEBUG_COUNT_PRECLONE_MISS(td)
#define DEBUG_REPORT_PRECLONE_EFFICIENCY()
#endif
69
/**
 * Creates the tile data store. Tile and clock counters start at 1
 * (index 0 is never handed out), and the pooler and swapper workers
 * are started immediately.
 */
KisTileDataStore::KisTileDataStore()
    : m_pooler(this),
      m_swapper(this),
      m_numTiles(0),
      m_memoryMetric(0),
      m_counter(1),
      m_clockIndex(1)
{
    m_pooler.start();
    m_swapper.start();
}
81
/**
 * Shuts down the pooler and swapper workers, then reports any tile
 * data that is still registered — a non-zero count at this point
 * means some tiles were leaked by their owners.
 */
KisTileDataStore::~KisTileDataStore()
{
    m_pooler.terminatePooler();
    m_swapper.terminateSwapper();

    if (numTiles() > 0) {
        errKrita << "Warning: some tiles have leaked:";
        errKrita << "\tTiles in memory:" << numTilesInMemory() << "\n"
                 << "\tTotal tiles:" << numTiles();
    }
}
93
/**
 * Returns the process-wide store instance created lazily by
 * Q_GLOBAL_STATIC (see s_instance above).
 */
KisTileDataStore* KisTileDataStore::instance()
{
    return s_instance;
}
98
memoryStatistics()99 KisTileDataStore::MemoryStatistics KisTileDataStore::memoryStatistics()
100 {
101 QReadLocker lock(&m_iteratorLock);
102
103 MemoryStatistics stats;
104
105 const qint64 metricCoeff = qint64(KisTileData::WIDTH) * KisTileData::HEIGHT;
106
107 stats.realMemorySize = m_pooler.lastRealMemoryMetric() * metricCoeff;
108 stats.historicalMemorySize = m_pooler.lastHistoricalMemoryMetric() * metricCoeff;
109 stats.poolSize = m_pooler.lastPoolMemoryMetric() * metricCoeff;
110
111 stats.totalMemorySize = memoryMetric() * metricCoeff + stats.poolSize;
112
113 stats.swapSize = m_swappedStore.totalMemoryMetric() * metricCoeff;
114
115 return stats;
116 }
117
tryForceUpdateMemoryStatisticsWhileIdle()118 void KisTileDataStore::tryForceUpdateMemoryStatisticsWhileIdle()
119 {
120 // in case the pooler is disabled, we should force it
121 // to update the stats
122 if (!m_pooler.isRunning()) {
123 m_pooler.forceUpdateMemoryStats();
124 }
125 }
126
/**
 * Adds \p td to the store under a freshly allocated index and updates
 * the tile count and memory metric. The caller must already hold
 * m_iteratorLock (see registerTileData()).
 */
inline void KisTileDataStore::registerTileDataImp(KisTileData *td)
{
    // Atomically hand out the next unique tile number.
    int index = m_counter.fetchAndAddOrdered(1);
    td->m_tileNumber = index;

    // make sure that access to the hash table is guarded by GC block
    // (it avoids removal of the referenced cells caused by concurrent
    // migrations)
    m_tileDataMap.getGC().lockRawPointerAccess();
    m_tileDataMap.assign(index, td);
    m_tileDataMap.getGC().unlockRawPointerAccess();

    // Bookkeeping: total tile count and store-wide memory metric.
    m_numTiles.ref();
    m_memoryMetric += td->pixelSize();
}
142
/**
 * Public wrapper around registerTileDataImp(). Takes m_iteratorLock
 * for reading: registrations may run concurrently with each other but
 * are excluded from iterations, which take the lock for writing.
 */
void KisTileDataStore::registerTileData(KisTileData *td)
{
    QReadLocker lock(&m_iteratorLock);
    registerTileDataImp(td);
}
148
/**
 * Removes \p td from the store and updates the tile count and memory
 * metric. The caller must already hold m_iteratorLock (see
 * unregisterTileData()).
 */
inline void KisTileDataStore::unregisterTileDataImp(KisTileData *td)
{
    // make sure that access to the hash table is guarded by GC block
    // (it avoids removal of the referenced cells caused by concurrent
    // migrations)
    m_tileDataMap.getGC().lockRawPointerAccess();

    // If the clock hand currently points at the tile being removed,
    // advance it to the next index that still exists in the map (or
    // stop once it catches up with the allocation counter).
    if (m_clockIndex == td->m_tileNumber) {
        do {
            m_clockIndex.ref();
        } while (!m_tileDataMap.get(m_clockIndex.loadAcquire()) && m_clockIndex < m_counter);
    }

    // Invalidate the tile's number before erasing its map cell.
    int index = td->m_tileNumber;
    td->m_tileNumber = -1;
    m_tileDataMap.erase(index);
    m_numTiles.deref();
    m_memoryMetric -= td->pixelSize();

    m_tileDataMap.getGC().unlockRawPointerAccess();
}
170
/**
 * Public wrapper around unregisterTileDataImp(). Takes m_iteratorLock
 * for reading, mirroring registerTileData().
 */
void KisTileDataStore::unregisterTileData(KisTileData *td)
{
    QReadLocker lock(&m_iteratorLock);
    unregisterTileDataImp(td);
}
176
/**
 * Creates a new tile data of \p pixelSize bytes per pixel, constructed
 * from \p defPixel (see the KisTileData constructor), registers it in
 * this store and returns it. Ownership stays with the store machinery;
 * the tile is released through freeTileData().
 */
KisTileData *KisTileDataStore::allocTileData(qint32 pixelSize, const quint8 *defPixel)
{
    KisTileData *td = new KisTileData(pixelSize, defPixel, this);
    registerTileData(td);
    return td;
}
183
/**
 * Returns a copy of \p rhs, registered in this store. First tries to
 * pop a ready-made clone from rhs->m_clonesStack (presumably filled
 * in the background by the pooler — confirm in KisTileDataPooler);
 * only on a miss is the pixel data copied here.
 */
KisTileData *KisTileDataStore::duplicateTileData(KisTileData *rhs)
{
    KisTileData *td = 0;

    if (rhs->m_clonesStack.pop(td)) {
        DEBUG_PRECLONE_ACTION("+ Pre-clone HIT", rhs, td);
        DEBUG_COUNT_PRECLONE_HIT(rhs);
    } else {
        // Keep rhs's data in memory while the copy constructor reads it.
        rhs->blockSwapping();
        td = new KisTileData(*rhs);
        rhs->unblockSwapping();
        DEBUG_PRECLONE_ACTION("- Pre-clone #MISS#", rhs, td);
        DEBUG_COUNT_PRECLONE_MISS(rhs);
    }

    registerTileData(td);
    return td;
}
202
/**
 * Destroys \p td, removing it either from the swapped store (when its
 * data is currently swapped out, i.e. td->data() is null) or from the
 * in-memory registry. Lock order — m_iteratorLock before the tile's
 * own m_swapLock — matches the order used elsewhere in this file.
 */
void KisTileDataStore::freeTileData(KisTileData *td)
{
    Q_ASSERT(td->m_store == this);

    DEBUG_FREE_ACTION(td);

    m_iteratorLock.lockForRead();
    td->m_swapLock.lockForWrite();

    if (!td->data()) {
        // No in-memory payload: the tile lives in the swap store.
        m_swappedStore.forgetTileData(td);
    } else {
        unregisterTileDataImp(td);
    }

    td->m_swapLock.unlock();
    m_iteratorLock.unlock();

    delete td;
}
223
/**
 * Guarantees that on return \p td's pixel data is present in memory
 * and the tile holds its swap lock for reading. Called on a swap
 * miss; may loop if another thread swaps the tile out again between
 * our attempts.
 */
void KisTileDataStore::ensureTileDataLoaded(KisTileData *td)
{
    // dbgKrita << "#### SWAP MISS! ####" << td << ppVar(td->mementoed()) << ppVar(td->age()) << ppVar(td->numUsers());

    // Swapping something in may require evicting something else first.
    checkFreeMemory();

    td->m_swapLock.lockForRead();

    while (!td->data()) {
        td->m_swapLock.unlock();

        /**
         * The order of this heavy locking is very important.
         * Change it only in case, you really know what you are doing.
         */
        m_iteratorLock.lockForWrite();

        /**
         * If someone has managed to load the td from swap, then, most
         * probably, they have already taken the swap lock. This may
         * lead to a deadlock, because COW mechanism breaks lock
         * ordering rules in duplicateTileData() (it takes m_listLock
         * while the swap lock is held). In our case it is enough just
         * to check whether the other thread has already fetched the
         * data. Please notice that we do not take both of the locks
         * while checking this, because holding m_listLock is
         * enough. Nothing can happen to the tile while we hold
         * m_listLock.
         */

        if (!td->data()) {
            td->m_swapLock.lockForWrite();

            // Pull the data back from swap and re-register the tile
            // in the in-memory map (it was unregistered on swap-out).
            m_swappedStore.swapInTileData(td);
            registerTileDataImp(td);

            td->m_swapLock.unlock();
        }

        m_iteratorLock.unlock();

        /**
         * <-- In theory, livelock is possible here...
         */

        // Re-acquire for reading and re-check: the tile may have been
        // swapped out again before we got the read lock.
        td->m_swapLock.lockForRead();
    }
}
271
/**
 * Tries to move \p td's data out to the swapped store. Returns true on
 * success, false if the tile's swap lock could not be taken without
 * blocking, the tile has no in-memory data, or the swap-out itself
 * failed.
 */
bool KisTileDataStore::trySwapTileData(KisTileData *td)
{
    /**
     * This function is called with m_listLock acquired
     */

    bool result = false;

    // Never block here: if the tile is busy, just skip it.
    if (!td->m_swapLock.tryLockForWrite()) return result;

    if (td->data()) {
        if (m_swappedStore.trySwapOutTileData(td)) {
            // The tile now lives in the swap store only.
            unregisterTileDataImp(td);
            result = true;
        }
    }
    td->m_swapLock.unlock();

    return result;
}
291
/**
 * Starts a forward iteration over all registered tiles. Takes
 * m_iteratorLock for writing; the matching endIteration() releases it,
 * so no registration/unregistration can interleave with the iteration.
 */
KisTileDataStoreIterator* KisTileDataStore::beginIteration()
{
    m_iteratorLock.lockForWrite();
    return new KisTileDataStoreIterator(m_tileDataMap, this);
}
/**
 * Finishes a forward iteration: destroys the iterator and releases the
 * write lock taken by beginIteration().
 */
void KisTileDataStore::endIteration(KisTileDataStoreIterator* iterator)
{
    delete iterator;
    m_iteratorLock.unlock();
}
302
/**
 * Starts a reverse iteration over all registered tiles under the
 * write-held m_iteratorLock; pair with the matching endIteration().
 */
KisTileDataStoreReverseIterator* KisTileDataStore::beginReverseIteration()
{
    m_iteratorLock.lockForWrite();
    return new KisTileDataStoreReverseIterator(m_tileDataMap, this);
}
/**
 * Finishes a reverse iteration and, when DEBUG_HIT_MISS is enabled,
 * dumps the pre-clone hit/miss statistics.
 */
void KisTileDataStore::endIteration(KisTileDataStoreReverseIterator* iterator)
{
    delete iterator;
    m_iteratorLock.unlock();
    DEBUG_REPORT_PRECLONE_EFFICIENCY();
}
314
/**
 * Starts a clock-style iteration that resumes from the saved
 * m_clockIndex position; pair with the matching endIteration(), which
 * stores the final position back.
 */
KisTileDataStoreClockIterator* KisTileDataStore::beginClockIteration()
{
    m_iteratorLock.lockForWrite();
    return new KisTileDataStoreClockIterator(m_tileDataMap, m_clockIndex.loadAcquire(), this);
}
320
/**
 * Finishes a clock iteration: remembers where the clock hand stopped
 * so the next beginClockIteration() continues from there, then
 * releases the lock.
 */
void KisTileDataStore::endIteration(KisTileDataStoreClockIterator* iterator)
{
    m_clockIndex = iterator->getFinalPosition();
    delete iterator;
    m_iteratorLock.unlock();
}
327
debugPrintList()328 void KisTileDataStore::debugPrintList()
329 {
330 KisTileDataStoreIterator* iter = beginIteration();
331 KisTileData *item = 0;
332
333 while (iter->hasNext()) {
334 item = iter->next();
335 dbgTiles << "-------------------------\n"
336 << "TileData:\t\t\t" << item
337 << "\n refCount:\t" << item->m_refCount;
338 }
339
340 endIteration(iter);
341 }
342
debugSwapAll()343 void KisTileDataStore::debugSwapAll()
344 {
345 KisTileDataStoreIterator* iter = beginIteration();
346 KisTileData *item = 0;
347
348 while (iter->hasNext()) {
349 item = iter->next();
350 iter->trySwapOut(item);
351 }
352
353 endIteration(iter);
354
355 // dbgKrita << "Number of tiles:" << numTiles();
356 // dbgKrita << "Tiles in memory:" << numTilesInMemory();
357 // m_swappedStore.debugStatistics();
358 }
359
/**
 * Debugging helper: deletes every tile data in the store and resets
 * all counters to their initial values, under the write-held iterator
 * lock.
 *
 * NOTE(review): the map cells themselves are not erased here, only the
 * pointed-to tile data is deleted — presumably the ConcurrentMap is
 * reset/ignored afterwards by the caller; verify before reusing this
 * outside of tests.
 */
void KisTileDataStore::debugClear()
{
    QWriteLocker l(&m_iteratorLock);
    ConcurrentMap<int, KisTileData*>::Iterator iter(m_tileDataMap);

    while (iter.isValid()) {
        delete iter.getValue();
        iter.next();
    }

    // Restore the constructor's initial state.
    m_counter = 1;
    m_clockIndex = 1;
    m_numTiles = 0;
    m_memoryMetric = 0;
}
375
/**
 * Test hook: makes the pooler and swapper re-read their configuration
 * and kicks the pooler so the new settings take effect.
 */
void KisTileDataStore::testingRereadConfig()
{
    m_pooler.testingRereadConfig();
    m_swapper.testingRereadConfig();
    kickPooler();
}
382
/**
 * Test hook: stops the pooler (see testingResumePooler() for the
 * counterpart).
 */
void KisTileDataStore::testingSuspendPooler()
{
    m_pooler.terminatePooler();
}
387
/**
 * Test hook: restarts the pooler after testingSuspendPooler().
 */
void KisTileDataStore::testingResumePooler()
{
    m_pooler.start();
}
392