/*
 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 47    Store Directory Routines */

#include "squid.h"
#include "Debug.h"
#include "globals.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "Store.h"
#include "store/Disk.h"
#include "store/Disks.h"
#include "swap_log_op.h"
#include "util.h" // for tvSubDsec() which should be in SquidTime.h

static STDIRSELECT storeDirSelectSwapDirRoundRobin;
static STDIRSELECT storeDirSelectSwapDirLeastLoad;
/**
 * This function pointer is set according to 'store_dir_select_algorithm'
 * in squid.conf.
 */
STDIRSELECT *storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;

/// The entry size to use for Disk::canStore() size limit checks.
/// This is an optimization to avoid similar calculations in every cache_dir.
static int64_t
objectSizeForDirSelection(const StoreEntry &entry)
{
    // entry.objectLen() is negative here when we are still STORE_PENDING
    int64_t minSize = entry.mem_obj->expectedReplySize();

    // If entry size is unknown, use already accumulated bytes as an estimate.
    // Controller::accumulateMore() guarantees that there are enough of them.
    if (minSize < 0)
        minSize = entry.mem_obj->endOffset();

    assert(minSize >= 0);
    minSize += entry.mem_obj->swap_hdr_sz;
    return minSize;
}

/**
 * This new selection scheme simply does round-robin on all SwapDirs.
 * A SwapDir is skipped if it is over the max_size (100%) limit, or
 * overloaded.
 */
static int
storeDirSelectSwapDirRoundRobin(const StoreEntry * e)
{
    const int64_t objsize = objectSizeForDirSelection(*e);

    // Increment the first candidate once per selection (not once per
    // iteration) to reduce bias when some disk(s) attract more entries.
    static int firstCandidate = 0;
    if (++firstCandidate >= Config.cacheSwap.n_configured)
        firstCandidate = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        const int dirn = (firstCandidate + i) % Config.cacheSwap.n_configured;
        const SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(dirn));

        int load = 0;
        if (!sd->canStore(*e, objsize, load))
            continue;

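        // skip cache_dirs reporting an out-of-range load; usable load
        // values are expected to fall within [0, 1000]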
        if (load < 0 || load > 1000) {
            continue;
        }

        return dirn;
    }

    return -1;
}

/**
 * Spread load across all of the store directories
 *
 * Note: We should modify this later on to prefer sticking objects
 * in the *tightest fit* swapdir to conserve space, along with the
 * actual swapdir usage. But for now, this hack will do while
 * testing, so you should order your swapdirs in the config file
 * from smallest max-size= to largest max-size=.
 *
 * We also have to choose nleast == nconf since we need to consider
 * ALL swapdirs, regardless of state. Again, this is a hack while
 * we sort out the real usefulness of this algorithm.
 */
static int
storeDirSelectSwapDirLeastLoad(const StoreEntry * e)
{
    int64_t most_free = 0;
    int64_t best_objsize = -1;
    int least_load = INT_MAX;
    int load;
    int dirn = -1;
    int i;
    RefCount<SwapDir> SD;

    const int64_t objsize = objectSizeForDirSelection(*e);

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        SD = dynamic_cast<SwapDir *>(INDEXSD(i));
        SD->flags.selected = false;

        if (!SD->canStore(*e, objsize, load))
            continue;

        if (load < 0 || load > 1000)
            continue;

        if (load > least_load)
            continue;

        const int64_t cur_free = SD->maxSize() - SD->currentSize();

        /* If the load is equal, then look in more detail */
        if (load == least_load) {
            /* best max-size fit */
            if (best_objsize != -1) {
                // cache_dir with the smallest max-size gets the known-size object
                // cache_dir with the largest max-size gets the unknown-size object
                if ((objsize != -1 && SD->maxObjectSize() > best_objsize) ||
                        (objsize == -1 && SD->maxObjectSize() < best_objsize))
                    continue;
            }

            /* most free */
            if (cur_free < most_free)
                continue;
        }

        least_load = load;
        best_objsize = SD->maxObjectSize();
        most_free = cur_free;
        dirn = i;
    }

    if (dirn >= 0)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->flags.selected = true;

    return dirn;
}

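/// Start with all aggregated size limits unknown (-1); updateLimits()
/// computes them from the configured cache_dirs.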
Store::Disks::Disks():
    largestMinimumObjectSize(-1),
    largestMaximumObjectSize(-1),
    secondLargestMaximumObjectSize(-1)
{
}

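/// convenience accessor for the raw SwapDir pointer at the given cache_dir
/// index; dir() below returns an assertion-checked reference instead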
SwapDir *
Store::Disks::store(int const x) const
{
    return INDEXSD(x);
}

SwapDir &
Store::Disks::dir(const int i) const
{
    SwapDir *sd = INDEXSD(i);
    assert(sd);
    return *sd;
}

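/// Gives each configured cache_dir a chance to run its pending I/O callbacks,
/// looping while any of them reports progress. Returns the total amount of
/// work reported; aborts with fatal() if a single pass reports too much I/O.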
int
Store::Disks::callback()
{
    int result = 0;
    int j;
    static int ndir = 0;

    do {
        j = 0;

        for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
            if (ndir >= Config.cacheSwap.n_configured)
                ndir = ndir % Config.cacheSwap.n_configured;

            int temp_result = store(ndir)->callback();

            ++ndir;

            j += temp_result;

            result += temp_result;

            if (j > 100)
                fatal ("too much io\n");
        }
    } while (j > 0);

    ++ndir;

    return result;
}

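/// Creates the on-disk structures for every active cache_dir; warns when no
/// cache_dir stores are configured at all.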
void
Store::Disks::create()
{
    if (Config.cacheSwap.n_configured == 0) {
        debugs(0, DBG_PARSE_NOTE(DBG_CRITICAL), "No cache_dir stores are configured.");
    }

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            store(i)->create();
    }
}

StoreEntry *
Store::Disks::get(const cache_key *key)
{
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir *sd = dynamic_cast<SwapDir*>(INDEXSD(idx));
            if (!sd->active())
                continue;

            if (StoreEntry *e = sd->get(key)) {
                debugs(20, 7, "cache_dir " << idx << " has: " << *e);
                return e;
            }
        }
    }

    debugs(20, 6, "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << storeKeyText(key));
    return nullptr;
}

void
Store::Disks::init()
{
    if (Config.Store.objectsPerBucket <= 0)
        fatal("'store_objects_per_bucket' should be larger than 0.");

    if (Config.Store.avgObjectSize <= 0)
        fatal("'store_avg_object_size' should be larger than 0.");

    /* Calculate size of hash table (maximum currently 64k buckets).  */
    /* this is very bogus; it is specific to any Store maintaining an
     * in-core index, not global */
    size_t buckets = (Store::Root().maxSize() + Config.memMaxSize) / Config.Store.avgObjectSize;
    debugs(20, DBG_IMPORTANT, "Swap maxSize " << (Store::Root().maxSize() >> 10) <<
           " + " << ( Config.memMaxSize >> 10) << " KB, estimated " << buckets << " objects");
    buckets /= Config.Store.objectsPerBucket;
    debugs(20, DBG_IMPORTANT, "Target number of buckets: " << buckets);
    /* ideally the full scan period should be configurable, for the
     * moment it remains at approximately 24 hours.  */
    store_hash_buckets = storeKeyHashBuckets(buckets);
    debugs(20, DBG_IMPORTANT, "Using " << store_hash_buckets << " Store buckets");
    debugs(20, DBG_IMPORTANT, "Max Mem  size: " << ( Config.memMaxSize >> 10) << " KB" <<
           (Config.memShared ? " [shared]" : ""));
    debugs(20, DBG_IMPORTANT, "Max Swap size: " << (Store::Root().maxSize() >> 10) << " KB");

    store_table = hash_create(storeKeyHashCmp,
                              store_hash_buckets, storeKeyHashHash);

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* this starts a search of the store dirs, loading their
         * index. under the new Store api this should be
         * driven by the StoreHashIndex, not by each store.
         *
         * That is, the HashIndex should perform a search of each dir it is
         * indexing to do the hash insertions. The search is then able to
         * decide 'from-memory', or 'from-clean-log' or 'from-dirty-log' or
         * 'from-no-log'.
         *
         * Step 1: make the store rebuilds use a search internally
         * Step 2: change the search logic to use the four modes described
         *         above
         * Step 3: have the hash index walk the searches itself.
         */
        if (dir(i).active())
            store(i)->init();
    }

    if (strcasecmp(Config.store_dir_select_algorithm, "round-robin") == 0) {
        storeDirSelectSwapDir = storeDirSelectSwapDirRoundRobin;
        debugs(47, DBG_IMPORTANT, "Using Round Robin store dir selection");
    } else {
        storeDirSelectSwapDir = storeDirSelectSwapDirLeastLoad;
        debugs(47, DBG_IMPORTANT, "Using Least Load store dir selection");
    }
}

uint64_t
Store::Disks::maxSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->maxSize();
    }

    return result;
}

uint64_t
Store::Disks::minSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->minSize();
    }

    return result;
}

uint64_t
Store::Disks::currentSize() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentSize();
    }

    return result;
}

uint64_t
Store::Disks::currentCount() const
{
    uint64_t result = 0;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).doReportStat())
            result += store(i)->currentCount();
    }

    return result;
}

int64_t
Store::Disks::maxObjectSize() const
{
    return largestMaximumObjectSize;
}

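/// Recomputes the aggregated object size limits (largest min-size plus the
/// largest and second-largest max-size) across all active cache_dirs. These
/// aggregates drive the accumulateMore() decisions below.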
void
Store::Disks::updateLimits()
{
    largestMinimumObjectSize = -1;
    largestMaximumObjectSize = -1;
    secondLargestMaximumObjectSize = -1;

    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        const auto &disk = dir(i);
        if (!disk.active())
            continue;

        if (disk.minObjectSize() > largestMinimumObjectSize)
            largestMinimumObjectSize = disk.minObjectSize();

        const auto diskMaxObjectSize = disk.maxObjectSize();
        if (diskMaxObjectSize > largestMaximumObjectSize) {
            if (largestMaximumObjectSize >= 0) // was set
                secondLargestMaximumObjectSize = largestMaximumObjectSize;
            largestMaximumObjectSize = diskMaxObjectSize;
        }
    }
}

int64_t
Store::Disks::accumulateMore(const StoreEntry &entry) const
{
    const auto accumulated = entry.mem_obj->availableForSwapOut();

    /*
     * Keep accumulating more bytes until the set of disks eligible to accept
     * the entry becomes stable, and, hence, accumulating more is not going to
     * affect the cache_dir selection. A stable set is usually reached
     * immediately (or soon) because most configurations either do not use
     * cache_dirs with explicit min-size/max-size limits or use the same
     * max-size limit for all cache_dirs (and low min-size limits).
     */

    // Can the set of min-size cache_dirs accepting this entry change?
    if (accumulated < largestMinimumObjectSize)
        return largestMinimumObjectSize - accumulated;

    // Can the set of max-size cache_dirs accepting this entry change
    // (other than when the entry exceeds the largest maximum; see below)?
    if (accumulated <= secondLargestMaximumObjectSize)
        return secondLargestMaximumObjectSize - accumulated + 1;

    /*
     * Checking largestMaximumObjectSize instead eliminates the risk of starting
     * to swap out an entry that later grows too big, but also implies huge
     * accumulation in most environments. Accumulating huge entries not only
     * consumes lots of RAM but also creates a burst of doPages() write requests
     * that overwhelm the disk. To avoid these problems, we take the risk and
     * allow swap out now. The disk will quit swapping out if the entry
     * eventually grows too big for its selected cache_dir.
     */
    debugs(20, 3, "no: " << accumulated << '>' <<
           secondLargestMaximumObjectSize << ',' << largestMinimumObjectSize);
    return 0;
}

void
Store::Disks::getStats(StoreInfoStats &stats) const
{
    // accumulate per-disk cache stats
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        StoreInfoStats dirStats;
        store(i)->getStats(dirStats);
        stats += dirStats;
    }

    // common to all disks
    stats.swap.open_disk_fd = store_open_disk_fd;

    // memory cache stats are collected in StoreController::getStats(), for now
}

void
Store::Disks::stat(StoreEntry & output) const
{
    int i;

    /* Now go through each store, calling its stat routine */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        storeAppendPrintf(&output, "\n");
        store(i)->stat(output);
    }
}

void
Store::Disks::reference(StoreEntry &e)
{
    e.disk().reference(e);
}

bool
Store::Disks::dereference(StoreEntry &e)
{
    return e.disk().dereference(e);
}

void
Store::Disks::updateHeaders(StoreEntry *e)
{
    Must(e);
    return e->disk().updateHeaders(e);
}

void
Store::Disks::maintain()
{
    int i;
    /* walk each fs */

    for (i = 0; i < Config.cacheSwap.n_configured; ++i) {
        /* XXX FixMe: This should be done "in parallel" on the different
         * cache_dirs, not one at a time.
         */
        /* call the maintain function .. */
        store(i)->maintain();
    }
}

void
Store::Disks::sync()
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        store(i)->sync();
}

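/// Removes the entry from the disk cache: if its cache_dir is known, logs the
/// removal (for public keys) and lets that cache_dir evict it; otherwise asks
/// every cache_dir to evict whatever it may have under the entry's public key.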
void
Store::Disks::evictCached(StoreEntry &e)
{
    if (e.hasDisk()) {
        // TODO: move into Fs::Ufs::UFSSwapDir::evictCached()
        if (!EBIT_TEST(e.flags, KEY_PRIVATE)) {
            // log before evictCached() below may clear hasDisk()
            storeDirSwapLog(&e, SWAP_LOG_DEL);
        }

        e.disk().evictCached(e);
        return;
    }

    if (const auto key = e.publicKey())
        evictIfFound(key);
}

void
Store::Disks::evictIfFound(const cache_key *key)
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (dir(i).active())
            dir(i).evictIfFound(key);
    }
}

bool
Store::Disks::anchorToCache(StoreEntry &entry, bool &inSync)
{
    if (const int cacheDirs = Config.cacheSwap.n_configured) {
        // ask each cache_dir until the entry is found; use static starting
        // point to avoid asking the same subset of disks more often
        // TODO: coordinate with put() to be able to guess the right disk often
        static int idx = 0;
        for (int n = 0; n < cacheDirs; ++n) {
            idx = (idx + 1) % cacheDirs;
            SwapDir &sd = dir(idx);
            if (!sd.active())
                continue;

            if (sd.anchorToCache(entry, inSync)) {
                debugs(20, 3, "cache_dir " << idx << " anchors " << entry);
                return true;
            }
        }
    }

    debugs(20, 4, "none of " << Config.cacheSwap.n_configured <<
           " cache_dirs have " << entry);
    return false;
}

bool
Store::Disks::updateAnchored(StoreEntry &entry)
{
    return entry.hasDisk() &&
           dir(entry.swap_dirn).updateAnchored(entry);
}

bool
Store::Disks::smpAware() const
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i) {
        // A mix is not supported, but we conservatively check every
        // dir because features like collapsed revalidation should
        // currently be disabled if any dir is SMP-aware
        if (dir(i).smpAware())
            return true;
    }
    return false;
}

bool
Store::Disks::hasReadableEntry(const StoreEntry &e) const
{
    for (int i = 0; i < Config.cacheSwap.n_configured; ++i)
        if (dir(i).active() && dir(i).hasReadableEntry(e))
            return true;
    return false;
}

void
storeDirOpenSwapLogs()
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        INDEXSD(dirn)->openLog();
}

void
storeDirCloseSwapLogs()
{
    for (int dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        INDEXSD(dirn)->closeLog();
}

/**
 *  storeDirWriteCleanLogs
 *
 *  Writes a "clean" swap log file from in-memory metadata.
 *  This is a rewrite of the original function to troll each
 *  StoreDir and write the logs, and flush at the end of
 *  the run. Thanks goes to Eric Stern, since this solution
 *  came out of his COSS code.
 */
int
storeDirWriteCleanLogs(int reopen)
{
    const StoreEntry *e = NULL;
    int n = 0;

    struct timeval start;
    double dt;
    RefCount<SwapDir> sd;
    int dirn;
    int notdone = 1;

    // Check for store_dirs_rebuilding because fatal() often calls us in early
    // initialization phases, before store log is initialized and ready. Also,
    // some stores do not support log cleanup during Store rebuilding.
    if (StoreController::store_dirs_rebuilding) {
        debugs(20, DBG_IMPORTANT, "Not currently OK to rewrite swap log.");
        debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Operation aborted.");
        return 0;
    }

    debugs(20, DBG_IMPORTANT, "storeDirWriteCleanLogs: Starting...");
    getCurrentTime();
    start = current_time;

    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
        sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

        if (sd->writeCleanStart() < 0) {
            debugs(20, DBG_IMPORTANT, "log.clean.start() failed for dir #" << sd->index);
            continue;
        }
    }

    /*
     * This may look inefficient, as CPU-wise it would be more efficient to do
     * this sequentially, but I/O-wise the parallelism helps as it allows more
     * HDD spindles to be active.
     */
    while (notdone) {
        notdone = 0;

        for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn) {
            sd = dynamic_cast<SwapDir *>(INDEXSD(dirn));

            if (NULL == sd->cleanLog)
                continue;

            e = sd->cleanLog->nextEntry();

            if (!e)
                continue;

            notdone = 1;

            if (!sd->canLog(*e))
                continue;

            sd->cleanLog->write(*e);

            if ((++n & 0xFFFF) == 0) {
                getCurrentTime();
                debugs(20, DBG_IMPORTANT, "  " << std::setw(7) << n  <<
                       " entries written so far.");
            }
        }
    }

    /* Flush */
    for (dirn = 0; dirn < Config.cacheSwap.n_configured; ++dirn)
        dynamic_cast<SwapDir *>(INDEXSD(dirn))->writeCleanDone();

    if (reopen)
        storeDirOpenSwapLogs();

    getCurrentTime();

    dt = tvSubDsec(start, current_time);

    debugs(20, DBG_IMPORTANT, "  Finished.  Wrote " << n << " entries.");
    debugs(20, DBG_IMPORTANT, "  Took "<< std::setw(3)<< std::setprecision(2) << dt <<
           " seconds ("<< std::setw(6) << ((double) n / (dt > 0.0 ? dt : 1.0)) << " entries/sec).");

    return n;
}

/* Globals that should be converted to static Store::Disks methods */

void
allocate_new_swapdir(Store::DiskConfig *swap)
{
    if (!swap->swapDirs) {
        swap->n_allocated = 4;
        swap->swapDirs = new SwapDir::Pointer[swap->n_allocated];
    }

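    // grow geometrically: double the array when it is full and copy the
    // existing (ref-counted) SwapDir pointers into the new storage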
    if (swap->n_allocated == swap->n_configured) {
        swap->n_allocated <<= 1;
        const auto tmp = new SwapDir::Pointer[swap->n_allocated];
        for (int i = 0; i < swap->n_configured; ++i) {
            tmp[i] = swap->swapDirs[i];
        }
        delete[] swap->swapDirs;
        swap->swapDirs = tmp;
    }
}

void
free_cachedir(Store::DiskConfig *swap)
{
    /* DON'T FREE THESE FOR RECONFIGURE */

    if (reconfiguring)
        return;

    /* TODO XXX this lets the swapdir free resources asynchronously
     * swap->swapDirs[i]->deactivate();
     * but there may be such a means already.
     * RBC 20041225
     */

    // only frees the array memory itself;
    // the SwapDir objects may remain (ref-counted)
    delete[] swap->swapDirs;
    swap->swapDirs = nullptr;
    swap->n_allocated = 0;
    swap->n_configured = 0;
}

/* Globals that should be moved to some Store::UFS-specific logging module */

/**
 * An entry written to the swap log MUST have the following
 * properties.
 *   1.  It MUST be a public key.  It does no good to log
 *       a public ADD, change the key, then log a private
 *       DEL.  So we need to log a DEL before we change a
 *       key from public to private.
 *   2.  It MUST have a valid (> -1) swap_filen.
 */
void
storeDirSwapLog(const StoreEntry * e, int op)
{
    assert (e);
    assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
    assert(e->hasDisk());
    /*
     * icons and such; don't write them to the swap log
     */

    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return;

    assert(op > SWAP_LOG_NOP && op < SWAP_LOG_MAX);

    debugs(20, 3, "storeDirSwapLog: " <<
           swap_log_op_str[op] << " " <<
           e->getMD5Text() << " " <<
           e->swap_dirn << " " <<
           std::hex << std::uppercase << std::setfill('0') << std::setw(8) << e->swap_filen);

    dynamic_cast<SwapDir *>(INDEXSD(e->swap_dirn))->logEntry(*e, op);
}