1 /*
2  * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
3  *
4  * Squid software is distributed under GPLv2+ license and includes
5  * contributions from numerous individuals and organizations.
6  * Please see the COPYING and CONTRIBUTORS files for details.
7  */
8 
9 /* DEBUG: section 20    Storage Manager */
10 
11 #include "squid.h"
12 #include "base/TextException.h"
13 #include "CacheDigest.h"
14 #include "CacheManager.h"
15 #include "comm/Connection.h"
16 #include "comm/Read.h"
17 #include "ETag.h"
18 #include "event.h"
19 #include "fde.h"
20 #include "globals.h"
21 #include "http.h"
22 #include "HttpReply.h"
23 #include "HttpRequest.h"
24 #include "mem_node.h"
25 #include "MemObject.h"
26 #include "mgr/Registration.h"
27 #include "mgr/StoreIoAction.h"
28 #include "profiler/Profiler.h"
29 #include "repl_modules.h"
30 #include "RequestFlags.h"
31 #include "SquidConfig.h"
32 #include "SquidTime.h"
33 #include "StatCounters.h"
34 #include "stmem.h"
35 #include "Store.h"
36 #include "store/Controller.h"
37 #include "store/Disk.h"
38 #include "store/Disks.h"
39 #include "store_digest.h"
40 #include "store_key_md5.h"
41 #include "store_log.h"
42 #include "store_rebuild.h"
43 #include "StoreClient.h"
44 #include "StoreIOState.h"
45 #include "StoreMeta.h"
46 #include "StrList.h"
47 #include "swap_log_op.h"
48 #include "tools.h"
49 #if USE_DELAY_POOLS
50 #include "DelayPools.h"
51 #endif
52 
53 /** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
54  * XXX: convert to MEMPROXY_CLASS() API
55  */
56 #include "mem/Pool.h"
57 
58 #include <climits>
59 #include <stack>
60 
61 #define REBUILD_TIMESTAMP_DELTA_MAX 2
62 
63 #define STORE_IN_MEM_BUCKETS            (229)
64 
65 /** \todo Convert these string constants to enum string-arrays generated */
66 
/// human-readable names for mem_status_t values, indexed by enum value
const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

/// human-readable names for ping_status_t values, indexed by enum value
const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

/// human-readable names for store_status_t values, indexed by enum value
const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

/// human-readable names for swap_status_t values, indexed by enum value
const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE",
    "SWAPOUT_FAILED"
};

/*
 * This defines a removal-policy (replacement) type registration entry
 */

typedef struct _storerepl_entry storerepl_entry_t;

/// maps a removal policy name to its factory function
struct _storerepl_entry {
    const char *typestr;
    REMOVALPOLICYCREATE *create;
};

/// registry of available removal policies; terminated by a NULL typestr
static storerepl_entry_t *storerepl_list = NULL;

/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
// entries scheduled for release after startup; drained by storeLateRelease()
static std::stack<StoreEntry*> LateReleaseStack;
// memory pool backing StoreEntry::operator new/delete
MemAllocator *StoreEntry::pool = NULL;
115 
/// cache manager callback: dumps aggregated store statistics into `output`
void
Store::Stats(StoreEntry * output)
{
    assert(output);
    Root().stat(*output);
}
122 
// XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
// definitions but doing so exposes bug 4370, and maybe 4354 and 4355
/// allocates a StoreEntry from a dedicated memory pool
void *
StoreEntry::operator new (size_t bytecount)
{
    // this allocator serves whole StoreEntry objects only (no derived sizes)
    assert(bytecount == sizeof (StoreEntry));

    // lazily create the shared pool on first allocation
    if (!pool) {
        pool = memPoolCreate ("StoreEntry", bytecount);
    }

    return pool->alloc();
}
136 
/// returns a StoreEntry allocation to the pool created by operator new
void
StoreEntry::operator delete (void *address)
{
    // pool must exist: operator new created it before any object could be deleted
    pool->freeOne(address);
}
142 
143 bool
makePublic(const KeyScope scope)144 StoreEntry::makePublic(const KeyScope scope)
145 {
146     /* This object can be cached for a long time */
147     return !EBIT_TEST(flags, RELEASE_REQUEST) && setPublicKey(scope);
148 }
149 
/// Switches the entry to a private key and marks it for deletion when idle;
/// `shareable` lets other workers keep using it until then.
void
StoreEntry::makePrivate(const bool shareable)
{
    releaseRequest(shareable); /* delete object when not used */
}
155 
/// Clears private-key state; the caller is expected to install a public key
/// afterwards (see forcePublicKey()).
void
StoreEntry::clearPrivate()
{
    // entries marked for release must never become public again
    assert(!EBIT_TEST(flags, RELEASE_REQUEST));
    EBIT_CLR(flags, KEY_PRIVATE);
    shareableWhenPrivate = false;
}
163 
164 bool
cacheNegatively()165 StoreEntry::cacheNegatively()
166 {
167     /* This object may be negatively cached */
168     if (makePublic()) {
169         negativeCache();
170         return true;
171     }
172     return false;
173 }
174 
175 size_t
inUseCount()176 StoreEntry::inUseCount()
177 {
178     if (!pool)
179         return 0;
180     return pool->getInUseCount();
181 }
182 
/// \returns a static, human-readable rendering of this entry's MD5 cache key
const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}
188 
189 #include "comm.h"
190 
191 void
DeferReader(void * theContext,CommRead const & aRead)192 StoreEntry::DeferReader(void *theContext, CommRead const &aRead)
193 {
194     StoreEntry *anEntry = (StoreEntry *)theContext;
195     anEntry->delayAwareRead(aRead.conn,
196                             aRead.buf,
197                             aRead.len,
198                             aRead.callback);
199 }
200 
/// Schedules a read of up to `len` bytes from `conn` into `buf`, honoring
/// read-ahead and delay-pool limits. When no bytes may be read now, the read
/// is parked in the MemObject and retried later via DeferReader().
void
StoreEntry::delayAwareRead(const Comm::ConnectionPointer &conn, char *buf, int len, AsyncCall::Pointer callback)
{
    size_t amountToRead = bytesWanted(Range<size_t>(0, len));
    /* sketch: readdeferer* = getdeferer.
     * ->deferRead (fd, buf, len, callback, DelayAwareRead, this)
     */

    // amountToRead is unsigned, so "<= 0" is effectively "== 0": nothing wanted yet
    if (amountToRead <= 0) {
        assert (mem_obj);
        mem_obj->delayRead(DeferredRead(DeferReader, this, CommRead(conn, buf, len, callback)));
        return;
    }

    if (fd_table[conn->fd].closing()) {
        // Readers must have closing callbacks if they want to be notified. No
        // readers appeared to care around 2009/12/14 as they skipped reading
        // for other reasons. Closing may already be true at the delayAwareRead
        // call time or may happen while we wait after delayRead() above.
        debugs(20, 3, "will not read from closing " << conn << " for " << callback);
        return; // the read callback will never be called
    }

    comm_read(conn, buf, amountToRead, callback);
}
226 
/// \returns how many bytes of the given range this entry is willing to accept
/// now, considering the read-ahead policy and (unless `ignoreDelayPools`)
/// delay-pool limits
size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    // without a MemObject there is nothing to limit; accept the whole range
    if (mem_obj == NULL)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    // read-ahead gap is full: do not accept more data for now
    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}
244 
245 bool
checkDeferRead(int) const246 StoreEntry::checkDeferRead(int) const
247 {
248     return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
249 }
250 
251 void
setNoDelay(bool const newValue)252 StoreEntry::setNoDelay(bool const newValue)
253 {
254     if (mem_obj)
255         mem_obj->setNoDelay(newValue);
256 }
257 
// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
//      open swapin file, aggressively trim memory, and ignore read-ahead gap.
//      It does not mean we will read from disk exclusively (or at all!).
//      STORE_MEM_CLIENT covers all other cases, including in-memory entries,
//      newly created entries, and entries not backed by disk or memory cache.
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
/// Decides whether a new store client should be served from memory or disk.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    // lower in-memory bytes were already trimmed: must swap in from disk
    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    // a failed swapout means disk has no usable copy
    if (swapoutFailed())
        return STORE_MEM_CLIENT;

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swappedOut()) {
                debugs(20,7, HERE << mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 1)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client.  If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    return STORE_DISK_CLIENT;
}
328 
/// Constructs an idle, unlocked entry: no key, no MemObject, no cache_dir
/// placement (swap_filen/swap_dirn = -1), and all timestamps unknown (-1).
StoreEntry::StoreEntry() :
    mem_obj(NULL),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastModified_(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0),
    shareableWhenPrivate(false)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}
349 
/// Only logs destruction; teardown of key/MemObject happens before deletion
/// (see destroyStoreEntry()).
StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}
354 
355 #if USE_ADAPTATION
356 void
deferProducer(const AsyncCall::Pointer & producer)357 StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
358 {
359     if (!deferredProducer)
360         deferredProducer = producer;
361     else
362         debugs(20, 5, "Deferred producer call is already set to: " <<
363                *deferredProducer << ", requested call: " << *producer);
364 }
365 
366 void
kickProducer()367 StoreEntry::kickProducer()
368 {
369     if (deferredProducer != NULL) {
370         ScheduleCallHere(deferredProducer);
371         deferredProducer = NULL;
372     }
373 }
374 #endif
375 
/// Disconnects the entry from the shared transients and memory caches and
/// destroys its MemObject. Safe to call when there is no MemObject.
void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, mem_obj << " in " << *this);

    // Store::Root() is FATALly missing during shutdown
    if (hasTransients() && !shutting_down)
        Store::Root().transientsDisconnect(*this);
    if (hasMemStore() && !shutting_down)
        Store::Root().memoryDisconnect(*this);

    if (MemObject *mem = mem_obj) {
        // reset status while mem_obj is still valid, then detach before deleting
        setMemStatus(NOT_IN_MEMORY);
        mem_obj = NULL;
        delete mem;
    }
}
393 
/// hash_table free callback: fully tears down and deletes a StoreEntry
/// (disk disconnect, MemObject destruction, hash/key removal).
void
destroyStoreEntry(void *data)
{
    debugs(20, 3, HERE << "destroyStoreEntry: destroying " <<  data);
    // data arrives as the hash_link base; restore the StoreEntry type
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != NULL);

    // the NullStoreEntry singleton is never destroyed
    if (e == NullStoreEntry::getInstance())
        return;

    // Store::Root() is FATALly missing during shutdown
    if (e->hasDisk() && !shutting_down)
        e->disk().disconnect(*e);

    e->destroyMemObject();

    e->hashDelete();

    // hashDelete() must have freed the key before we can delete the entry
    assert(e->key == NULL);

    delete e;
}
416 
417 /* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */
418 
/// Gives the entry a copy of `someKey` and registers it in the global
/// store_table; the entry must not already have a key.
void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    assert(!key);
    key = storeKeyDup(someKey); // entry owns its own key copy
    hash_join(store_table, this);
}
427 
428 void
hashDelete()429 StoreEntry::hashDelete()
430 {
431     if (key) { // some test cases do not create keys and do not hashInsert()
432         hash_remove_link(store_table, this);
433         storeKeyFree((const cache_key *)key);
434         key = NULL;
435     }
436 }
437 
438 /* -------------------------------------------------------------------------- */
439 
/// Adds a reference that prevents the entry from being abandoned;
/// must be balanced by an unlock() with a matching context string.
void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}
446 
/// Refreshes the last-reference timestamp used by replacement policies.
void
StoreEntry::touch()
{
    lastref = squid_curtime;
}
452 
/// Marks the entry for deletion once idle and switches it to a private key so
/// no new clients can find it; `shareable` keeps it usable by other workers.
void
StoreEntry::releaseRequest(const bool shareable)
{
    debugs(20, 3, shareable << ' ' << *this);
    if (!shareable)
        shareableWhenPrivate = false; // may already be false
    // already marked: nothing more to do (and do not re-key again)
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;

    setPrivateKey(shareable, true);
}
464 
465 int
unlock(const char * context)466 StoreEntry::unlock(const char *context)
467 {
468     debugs(20, 3, (context ? context : "somebody") <<
469            " unlocking key " << getMD5Text() << ' ' << *this);
470     assert(lock_count > 0);
471     --lock_count;
472 
473     if (lock_count)
474         return (int) lock_count;
475 
476     abandon(context);
477     return 0;
478 }
479 
/// keep the unlocked StoreEntry object in the local store_table (if needed) or
/// delete it (otherwise)
void
StoreEntry::doAbandon(const char *context)
{
    debugs(20, 5, *this << " via " << (context ? context : "somebody"));
    assert(!locked());
    assert(storePendingNClients(this) == 0);

    // Both aborted local writers and aborted local readers (of remote writers)
    // are STORE_PENDING, but aborted readers should never release().
    if (EBIT_TEST(flags, RELEASE_REQUEST) ||
            (store_status == STORE_PENDING && !Store::Root().transientsReader(*this))) {
        this->release();
        return;
    }

    // a KEY_PRIVATE entry without RELEASE_REQUEST should not reach this point
    if (EBIT_TEST(flags, KEY_PRIVATE))
        debugs(20, DBG_IMPORTANT, "WARNING: " << __FILE__ << ":" << __LINE__ << ": found KEY_PRIVATE");

    Store::Root().handleIdleEntry(*this); // may delete us
}
502 
503 void
getPublicByRequestMethod(StoreClient * aClient,HttpRequest * request,const HttpRequestMethod & method)504 StoreEntry::getPublicByRequestMethod  (StoreClient *aClient, HttpRequest * request, const HttpRequestMethod& method)
505 {
506     assert (aClient);
507     StoreEntry *result = storeGetPublicByRequestMethod( request, method);
508 
509     if (!result)
510         aClient->created (NullStoreEntry::getInstance());
511     else
512         aClient->created (result);
513 }
514 
515 void
getPublicByRequest(StoreClient * aClient,HttpRequest * request)516 StoreEntry::getPublicByRequest (StoreClient *aClient, HttpRequest * request)
517 {
518     assert (aClient);
519     StoreEntry *result = storeGetPublicByRequest (request);
520 
521     if (!result)
522         result = NullStoreEntry::getInstance();
523 
524     aClient->created (result);
525 }
526 
527 void
getPublic(StoreClient * aClient,const char * uri,const HttpRequestMethod & method)528 StoreEntry::getPublic (StoreClient *aClient, const char *uri, const HttpRequestMethod& method)
529 {
530     assert (aClient);
531     StoreEntry *result = storeGetPublic (uri, method);
532 
533     if (!result)
534         result = NullStoreEntry::getInstance();
535 
536     aClient->created (result);
537 }
538 
/// \returns the cached entry for the public key derived from `uri` and
/// `method`, or nil on a miss
StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().find(storeKeyPublic(uri, method));
}
544 
/// \returns the cached entry for the public key derived from `req`, `method`,
/// and `keyScope`, or nil on a miss
StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
{
    return Store::Root().find(storeKeyPublicByRequestMethod(req, method, keyScope));
}
550 
/// \returns the cached entry matching `req`, falling back from the request's
/// own method to GET for HEAD requests; nil on a miss
StoreEntry *
storeGetPublicByRequest(HttpRequest * req, const KeyScope keyScope)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method, keyScope);

    if (e == NULL && req->method == Http::METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, Http::METHOD_GET, keyScope);

    return e;
}
562 
/// \returns a monotonically increasing positive counter used to make private
/// cache keys unique; restarts from 1 if the int ever wraps negative
static int
getKeyCounter(void)
{
    static int key_counter = 0;

    ++key_counter;
    if (key_counter < 0)
        key_counter = 1;

    return key_counter;
}
573 
/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over ia,t
 * Otherwise, we need a 'remove from the index but not the store
 * concept'.
 */
/// Re-keys the entry with a unique private key so new lookups cannot find it;
/// `permanent` also sets RELEASE_REQUEST, scheduling eventual deletion.
void
StoreEntry::setPrivateKey(const bool shareable, const bool permanent)
{
    debugs(20, 3, shareable << permanent << ' ' << *this);
    if (permanent)
        EBIT_SET(flags, RELEASE_REQUEST); // may already be set
    if (!shareable)
        shareableWhenPrivate = false; // may already be false

    // already private: flags above were the only possible changes
    if (EBIT_TEST(flags, KEY_PRIVATE))
        return;

    if (key) {
        Store::Root().evictCached(*this); // all caches/workers will know
        hashDelete();
    }

    // give the entry a fresh id so its old (public) identity cannot collide
    if (mem_obj && mem_obj->hasUris())
        mem_obj->id = getKeyCounter();
    const cache_key *newkey = storeKeyPrivate();

    assert(hash_lookup(store_table, newkey) == NULL);
    EBIT_SET(flags, KEY_PRIVATE);
    shareableWhenPrivate = shareable;
    hashInsert(newkey);
}
609 
/// Makes the entry retrievable under its public (URL/request-derived) key,
/// creating or updating the Vary marker entry when the response varies.
/// \returns false (leaving the key unchanged) if any step throws
bool
StoreEntry::setPublicKey(const KeyScope scope)
{
    debugs(20, 3, *this);
    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return true; // already public

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public.  Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object.  If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */
#if MORE_DEBUG_OUTPUT

    if (EBIT_TEST(flags, RELEASE_REQUEST))
        debugs(20, DBG_IMPORTANT, "assertion failed: RELEASE key " << key << ", url " << mem_obj->url);

#endif

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    try {
        // NOTE(review): EntryGuard presumably unlocks/releases the new Vary
        // marker if the steps below throw -- confirm against EntryGuard
        EntryGuard newVaryMarker(adjustVary(), "setPublicKey+failure");
        const cache_key *pubKey = calcPublicKey(scope);
        Store::Root().addWriting(this, pubKey);
        forcePublicKey(pubKey);
        newVaryMarker.unlockAndReset("setPublicKey+success");
        return true;
    } catch (const std::exception &ex) {
        debugs(20, 2, "for " << *this << " failed: " << ex.what());
    }
    return false;
}
649 
/// Rekeys a public entry back to the default key scope (used when collapsed
/// revalidation finishes); no-op for private or keyless entries.
void
StoreEntry::clearPublicKeyScope()
{
    if (!key || EBIT_TEST(flags, KEY_PRIVATE))
        return; // probably the old public key was deleted or made private

    // TODO: adjustVary() when collapsed revalidation supports that

    const cache_key *newKey = calcPublicKey(ksDefault);
    if (!storeKeyHashCmp(key, newKey))
        return; // probably another collapsed revalidation beat us to this change

    forcePublicKey(newKey);
}
664 
/// Unconditionally sets public key for this store entry.
/// Releases the old entry with the same public key (if any).
void
StoreEntry::forcePublicKey(const cache_key *newkey)
{
    debugs(20, 3, storeKeyText(newkey) << " for " << *this);
    assert(mem_obj);

    // evict any other entry already indexed under our new key
    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        assert(e2 != this);
        debugs(20, 3, "releasing clashing " << *e2);
        e2->release(true);
    }

    if (key)
        hashDelete();

    clearPrivate();

    // public keys are derived from URIs; the entry must know its own
    assert(mem_obj->hasUris());
    hashInsert(newkey);

    // a disk-resident entry must be re-logged under its new key
    if (hasDisk())
        storeDirSwapLog(this, SWAP_LOG_ADD);
}
690 
691 /// Calculates correct public key for feeding forcePublicKey().
692 /// Assumes adjustVary() has been called for this entry already.
693 const cache_key *
calcPublicKey(const KeyScope keyScope)694 StoreEntry::calcPublicKey(const KeyScope keyScope)
695 {
696     assert(mem_obj);
697     return mem_obj->request ?  storeKeyPublicByRequest(mem_obj->request, keyScope) :
698            storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
699 }
700 
/// Updates mem_obj->request->vary_headers to reflect the current Vary.
/// The vary_headers field is used to calculate the Vary marker key.
/// Releases the old Vary marker with an outdated key (if any).
/// \returns new (locked) Vary marker StoreEntry or, if none was needed, nil
/// \throws std::exception on failures
StoreEntry *
StoreEntry::adjustVary()
{
    assert(mem_obj);

    // without a request we cannot compute a Vary mark; nothing to adjust
    if (!mem_obj->request)
        return nullptr;

    HttpRequest *request = mem_obj->request;

    if (mem_obj->vary_headers.isEmpty()) {
        /* First handle the case where the object no longer varies */
        request->vary_headers.clear();
    } else {
        if (!request->vary_headers.isEmpty() && request->vary_headers.cmp(mem_obj->vary_headers) != 0) {
            /* Oops.. the variance has changed. Kill the base object
             * to record the new variance key
             */
            request->vary_headers.clear();       /* free old "bad" variance key */
            if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                pe->release(true);
        }

        /* Make sure the request knows the variance status */
        if (request->vary_headers.isEmpty())
            request->vary_headers = httpMakeVaryMark(request, mem_obj->getReply());
    }

    // TODO: storeGetPublic() calls below may create unlocked entries.
    // We should add/use storeHas() API or lock/unlock those entries.
    if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
        /* Create "vary" base object */
        StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
        // XXX: storeCreateEntry() already tries to make `pe` public under
        // certain conditions. If those conditions do not apply to Vary markers,
        // then refactor to call storeCreatePureEntry() above.  Otherwise,
        // refactor to simply check whether `pe` is already public below.
        if (!pe->makePublic()) {
            pe->unlock("StoreEntry::adjustVary+failed_makePublic");
            throw TexcHere("failed to make Vary marker public");
        }
        /* We are allowed to do this typecast */
        HttpReply *rep = new HttpReply;
        rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
        // copy the Vary header(s) from the real reply onto the marker
        String vary = mem_obj->getReply()->header.getList(Http::HdrType::VARY);

        if (vary.size()) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
            vary.clean();
        }

#if X_ACCELERATOR_VARY
        vary = mem_obj->getReply()->header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

        if (vary.size() > 0) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
            vary.clean();
        }

#endif
        pe->replaceHttpReply(rep, false); // no write until timestampsSet()

        pe->timestampsSet();

        pe->startWriting(); // after timestampsSet()

        pe->complete();

        return pe;
    }
    return nullptr;
}
780 
/// Creates a fresh, unkeyed, STORE_PENDING entry with a MemObject for the
/// given URL and method; callers must key and lock it themselves.
StoreEntry *
storeCreatePureEntry(const char *url, const char *log_url, const HttpRequestMethod& method)
{
    StoreEntry *e = NULL;
    // NOTE: debug label says storeCreateEntry for historical reasons
    debugs(20, 3, "storeCreateEntry: '" << url << "'");

    e = new StoreEntry();
    e->createMemObject(url, log_url, method);

    e->store_status = STORE_PENDING;
    e->refcount = 0;
    e->lastref = squid_curtime;
    e->timestamp = -1;          /* set in StoreEntry::timestampsSet() */
    e->ping_status = PING_NONE;
    EBIT_SET(e->flags, ENTRY_VALIDATED);
    return e;
}
798 
/// Creates a locked entry for the given URL/method, keyed publicly when the
/// request is hierarchical+cachable (and private keys are not forced),
/// privately otherwise.
StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, method);
    e->lock("storeCreateEntry");

    if (!neighbors_do_private_keys && flags.hierarchical && flags.cachable && e->setPublicKey())
        return e;

    // fall back to a private key; non-cachable entries are marked permanent
    e->setPrivateKey(false, !flags.cachable);
    return e;
}
811 
/* Mark object as expired */
/// Forces the entry to expire immediately by backdating its expiry time.
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}
819 
/// Stores the given buffer in the MemObject and wakes up waiting clients.
/// Only valid while the entry is still being filled (STORE_PENDING).
void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != NULL);
    /* This assert will change when we teach the store to update */
    PROF_start(StoreEntry_write);
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    if (const HttpReply *reply = mem_obj->getReply())
        writeBuffer.offset += reply->hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    PROF_stop(StoreEntry_write);
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    // once we have buffered beyond the read-ahead gap, stop withholding
    // the (already received) headers from Store clients
    if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT) && !mem_obj->readAheadPolicyCanRead()) {
        debugs(20, 3, "allow Store clients to get entry content after buffering too much for " << *this);
        EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
    }

    invokeHandlers();
}
844 
/* Append incoming data from a primary server to an entry. */
/// Appends `len` bytes at the current end of the stored object body.
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != NULL);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later.  This offset crap is such a mess.
     */
    // convert the absolute end offset into a body (content) offset,
    // since write() adds hdr_sz back
    tempBuffer.offset = mem_obj->endOffset() - (getReply() ? getReply()->hdr_sz : 0);
    write(tempBuffer);
}
863 
864 void
vappendf(const char * fmt,va_list vargs)865 StoreEntry::vappendf(const char *fmt, va_list vargs)
866 {
867     LOCAL_ARRAY(char, buf, 4096);
868     *buf = 0;
869     int x;
870 
871     va_list ap;
872     /* Fix of bug 753r. The value of vargs is undefined
873      * after vsnprintf() returns. Make a copy of vargs
874      * incase we loop around and call vsnprintf() again.
875      */
876     va_copy(ap,vargs);
877     errno = 0;
878     if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
879         fatal(xstrerr(errno));
880         return;
881     }
882     va_end(ap);
883 
884     if (x < static_cast<int>(sizeof(buf))) {
885         append(buf, x);
886         return;
887     }
888 
889     // okay, do it the slow way.
890     char *buf2 = new char[x+1];
891     int y = vsnprintf(buf2, x+1, fmt, vargs);
892     assert(y >= 0 && y == x);
893     append(buf2, y);
894     delete[] buf2;
895 }
896 
// deprecated. use StoreEntry::appendf() instead.
/// printf-style convenience wrapper around StoreEntry::vappendf()
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}
906 
// deprecated. use StoreEntry::appendf() instead.
/// vprintf-style convenience wrapper around StoreEntry::vappendf()
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}
913 
/// Counters recording why checkCachable() rejected (or accepted) entries;
/// reported by storeCheckCachableStats().
struct _store_check_cachable_hist {

    struct {
        int non_get;              ///< rejected: request method was not GET
        int not_entry_cachable;   ///< rejected: RELEASE_REQUEST already set
        int wrong_content_length; ///< rejected: completed with ENTRY_BAD_LENGTH
        int too_big;              ///< rejected: exceeds the maximum object size
        int too_small;            ///< rejected: below the minimum object size
        int private_key;          ///< rejected: entry still has a private key
        int too_many_open_files;  ///< rejected: presumably disk-FD limit -- incremented outside this chunk
        int too_many_open_fds;    ///< rejected: presumably global-FD limit -- incremented outside this chunk
        int missing_parts;        ///< rejected: no MemObject or no reply
    } no;

    struct {
        int Default;              ///< accepted as cachable
    } yes;
} store_check_cachable_hist;
932 
933 int
storeTooManyDiskFilesOpen(void)934 storeTooManyDiskFilesOpen(void)
935 {
936     if (Config.max_open_disk_fds == 0)
937         return 0;
938 
939     if (store_open_disk_fd > Config.max_open_disk_fds)
940         return 1;
941 
942     return 0;
943 }
944 
/// \returns 1 if the entry is below the configured minimum object size
/// (never for internal ENTRY_SPECIAL objects), 0 otherwise
int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    // for completed objects, judge by the actual stored size
    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;
    // otherwise fall back to the advertised Content-Length (when known)
    if (getReply()->content_length > -1)
        if (getReply()->content_length < Config.Store.minObjectSize)
            return 1;
    return 0;
}
960 
961 bool
checkTooBig() const962 StoreEntry::checkTooBig() const
963 {
964     if (mem_obj->endOffset() > store_maxobjsize)
965         return true;
966 
967     if (getReply()->content_length < 0)
968         return false;
969 
970     return (getReply()->content_length > store_maxobjsize);
971 }
972 
973 // TODO: move "too many open..." checks outside -- we are called too early/late
/// Decides whether this entry may be cached at all, updating the
/// store_check_cachable_hist histogram with the (first matching) reason.
/// A failed check also marks the entry for release.
/// \returns 1 when cachable, 0 otherwise
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

#if CACHE_ALL_METHODS

    if (mem_obj->method != Http::METHOD_GET) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: non-GET method");
        ++store_check_cachable_hist.no.non_get;
    } else
#endif
        // NB: the else-if chain below both classifies the rejection reason
        // (for the histogram) and selects the return value
        if (store_status == STORE_OK && EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
            ++store_check_cachable_hist.no.wrong_content_length;
        } else if (!mem_obj || !getReply()) {
            // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
            // this segfault protection, but how can we get such a HIT?
            debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
            ++store_check_cachable_hist.no.missing_parts;
        } else if (checkTooBig()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
            ++store_check_cachable_hist.no.too_big;
        } else if (checkTooSmall()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
            ++store_check_cachable_hist.no.too_small;
        } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
            debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
            ++store_check_cachable_hist.no.private_key;
        } else if (hasDisk()) {
            /*
             * the remaining cases are only relevant if we haven't
             * started swapping out the object yet.
             */
            return 1;
        } else if (storeTooManyDiskFilesOpen()) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
            ++store_check_cachable_hist.no.too_many_open_files;
        } else if (fdNFree() < RESERVED_FD) {
            debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
            ++store_check_cachable_hist.no.too_many_open_fds;
        } else {
            ++store_check_cachable_hist.yes.Default;
            return 1;
        }

    // a check failed above: make sure the entry is not cached
    releaseRequest();
    return 0;
}
1034 
1035 void
storeCheckCachableStats(StoreEntry * sentry)1036 storeCheckCachableStats(StoreEntry *sentry)
1037 {
1038     storeAppendPrintf(sentry, "Category\t Count\n");
1039 
1040 #if CACHE_ALL_METHODS
1041 
1042     storeAppendPrintf(sentry, "no.non_get\t%d\n",
1043                       store_check_cachable_hist.no.non_get);
1044 #endif
1045 
1046     storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
1047                       store_check_cachable_hist.no.not_entry_cachable);
1048     storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
1049                       store_check_cachable_hist.no.wrong_content_length);
1050     storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
1051                       0); // TODO: Remove this backward compatibility hack.
1052     storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
1053                       store_check_cachable_hist.no.missing_parts);
1054     storeAppendPrintf(sentry, "no.too_big\t%d\n",
1055                       store_check_cachable_hist.no.too_big);
1056     storeAppendPrintf(sentry, "no.too_small\t%d\n",
1057                       store_check_cachable_hist.no.too_small);
1058     storeAppendPrintf(sentry, "no.private_key\t%d\n",
1059                       store_check_cachable_hist.no.private_key);
1060     storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
1061                       store_check_cachable_hist.no.too_many_open_files);
1062     storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
1063                       store_check_cachable_hist.no.too_many_open_fds);
1064     storeAppendPrintf(sentry, "yes.default\t%d\n",
1065                       store_check_cachable_hist.yes.Default);
1066 }
1067 
/// Records that the entry's stored length disagrees with its headers:
/// sets ENTRY_BAD_LENGTH and marks the entry for release so it is not served.
void
StoreEntry::lengthWentBad(const char *reason)
{
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}
1075 
/// Marks the entry as fully received: records the final object size, moves
/// store_status to STORE_OK, validates the received length against the reply
/// headers, and wakes up waiting store clients.
void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    // To preserve forwarding retries, call FwdState::complete() instead.
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    /* This is suspect: mem obj offsets include the headers. do we adjust for that
     * in use of object_sz?
     */
    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    // only flag a length mismatch once; lengthWentBad() also releases the entry
    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut.  However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}
1119 
1120 /*
1121  * Someone wants to abort this transfer.  Set the reason in the
1122  * request structure, call the callback and mark the
1123  * entry for releasing
1124  */
/// Aborts an in-progress (STORE_PENDING) entry: marks it aborted and for
/// release, notifies the server-side abort callback (if registered) and the
/// store clients, and closes any in-progress swapout.
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != NULL);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort");         /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    // allow the Store clients to be told about the problem
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    setMemStatus(NOT_IN_MEMORY);

    // entry is "done" even though it is broken; clients must not keep waiting
    store_status = STORE_OK;

    /* Notify the server side */

    /*
     * DPW 2007-05-07
     * Should we check abort.data for validity?
     */
    if (mem_obj->abort.callback) {
        if (!cbdataReferenceValid(mem_obj->abort.data))
            debugs(20, DBG_IMPORTANT,HERE << "queueing event when abort.data is not valid");
        // deliver asynchronously via the event queue rather than calling back here
        eventAdd("mem_obj->abort.callback",
                 mem_obj->abort.callback,
                 mem_obj->abort.data,
                 0.0,
                 true);
        unregisterAbort();
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort");       /* unlock */
}
1175 
1176 /**
1177  * Clear Memory storage to accommodate the given object len
1178  */
/// Asks the store root to free up `size` bytes worth of memory cache space.
void
storeGetMemSpace(int size)
{
    PROF_start(storeGetMemSpace);
    if (!shutting_down) // Store::Root() is FATALly missing during shutdown
        Store::Root().freeMemorySpace(size);
    PROF_stop(storeGetMemSpace);
}
1187 
1188 /* thunk through to Store::Root().maintain(). Note that this would be better still
1189  * if registered against the root store itself, but that requires more complex
1190  * update logic - bigger fish to fry first. Long term each store when
1191  * it becomes active will self register
1192  */
/// periodic event handler: runs one store maintenance pass and re-schedules
/// itself one second later
void
Store::Maintain(void *)
{
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    eventAdd("MaintainSwapSpace", Maintain, NULL, 1.0, 1);

}
1202 
1203 /* The maximum objects to scan for maintain storage space */
1204 #define MAINTAIN_MAX_SCAN       1024
1205 #define MAINTAIN_MAX_REMOVE     64
1206 
/// Removes the entry from the store, or defers the removal when that is not
/// currently possible (entry locked, or disk stores still rebuilding).
/// `shareable` is forwarded to releaseRequest() on the deferred paths.
void
StoreEntry::release(const bool shareable)
{
    PROF_start(storeRelease);
    debugs(20, 3, shareable << ' ' << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        releaseRequest(shareable);
        PROF_stop(storeRelease);
        return;
    }

    if (Store::Controller::store_dirs_rebuilding && hasDisk()) {
        /* TODO: Teach disk stores to handle releases during rebuild instead. */

        // lock the entry until rebuilding is done
        lock("storeLateRelease");
        releaseRequest(shareable);
        // storeLateRelease() will unlock (and thereby destroy) it later
        LateReleaseStack.push(this);
        PROF_stop(storeRelease);
        return;
    }

    // safe to remove now: log, evict from all caches, and destroy the entry
    storeLog(STORE_LOG_RELEASE, this);
    Store::Root().evictCached(*this);
    destroyStoreEntry(static_cast<hash_link *>(this));
    PROF_stop(storeRelease);
}
1237 
1238 static void
storeLateRelease(void *)1239 storeLateRelease(void *)
1240 {
1241     StoreEntry *e;
1242     static int n = 0;
1243 
1244     if (Store::Controller::store_dirs_rebuilding) {
1245         eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
1246         return;
1247     }
1248 
1249     // TODO: this works but looks unelegant.
1250     for (int i = 0; i < 10; ++i) {
1251         if (LateReleaseStack.empty()) {
1252             debugs(20, DBG_IMPORTANT, "storeLateRelease: released " << n << " objects");
1253             return;
1254         } else {
1255             e = LateReleaseStack.top();
1256             LateReleaseStack.pop();
1257         }
1258 
1259         e->unlock("storeLateRelease");
1260         ++n;
1261     }
1262 
1263     eventAdd("storeLateRelease", storeLateRelease, NULL, 0.0, 1);
1264 }
1265 
1266 /* return 1 if a store entry is locked */
1267 int
locked() const1268 StoreEntry::locked() const
1269 {
1270     if (lock_count)
1271         return 1;
1272 
1273     /*
1274      * SPECIAL, PUBLIC entries should be "locked";
1275      * XXX: Their owner should lock them then instead of relying on this hack.
1276      */
1277     if (EBIT_TEST(flags, ENTRY_SPECIAL))
1278         if (!EBIT_TEST(flags, KEY_PRIVATE))
1279             return 1;
1280 
1281     return 0;
1282 }
1283 
1284 bool
validLength() const1285 StoreEntry::validLength() const
1286 {
1287     int64_t diff;
1288     const HttpReply *reply;
1289     assert(mem_obj != NULL);
1290     reply = getReply();
1291     debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
1292     debugs(20, 5, "storeEntryValidLength:     object_len = " <<
1293            objectLen());
1294     debugs(20, 5, "storeEntryValidLength:         hdr_sz = " << reply->hdr_sz);
1295     debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);
1296 
1297     if (reply->content_length < 0) {
1298         debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
1299         return 1;
1300     }
1301 
1302     if (reply->hdr_sz == 0) {
1303         debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
1304         return 1;
1305     }
1306 
1307     if (mem_obj->method == Http::METHOD_HEAD) {
1308         debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
1309         return 1;
1310     }
1311 
1312     if (reply->sline.status() == Http::scNotModified)
1313         return 1;
1314 
1315     if (reply->sline.status() == Http::scNoContent)
1316         return 1;
1317 
1318     diff = reply->hdr_sz + reply->content_length - objectLen();
1319 
1320     if (diff == 0)
1321         return 1;
1322 
1323     debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff)  << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );
1324 
1325     return 0;
1326 }
1327 
/// registers the storage manager's cache manager actions (storedir,
/// store_io, store_check_cachable_stats)
static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
}
1336 
/// Master initialization of the storage subsystem: cache keys, the memory
/// replacement policy, cache digests, the store log, the store roots and the
/// on-disk index rebuild. The sequence is order-sensitive (e.g. rebuild runs
/// only after Store::Root().init()).
void
storeInit(void)
{
    storeKeyInit();
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    // schedule draining of entries queued for late release during rebuild
    eventAdd("storeLateRelease", storeLateRelease, NULL, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}
1350 
/// (re)applies configured cache capacity limits to the store roots
void
storeConfigure(void)
{
    Store::Root().updateLimits();
}
1356 
1357 bool
memoryCachable()1358 StoreEntry::memoryCachable()
1359 {
1360     if (!checkCachable())
1361         return 0;
1362 
1363     if (mem_obj == NULL)
1364         return 0;
1365 
1366     if (mem_obj->data_hdr.size() == 0)
1367         return 0;
1368 
1369     if (mem_obj->inmem_lo != 0)
1370         return 0;
1371 
1372     if (!Config.onoff.memory_cache_first && swappedOut() && refcount == 1)
1373         return 0;
1374 
1375     return 1;
1376 }
1377 
1378 int
checkNegativeHit() const1379 StoreEntry::checkNegativeHit() const
1380 {
1381     if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
1382         return 0;
1383 
1384     if (expires <= squid_curtime)
1385         return 0;
1386 
1387     if (store_status != STORE_OK)
1388         return 0;
1389 
1390     return 1;
1391 }
1392 
1393 /**
1394  * Set object for negative caching.
1395  * Preserves any expiry information given by the server.
1396  * In absence of proper expiry info it will set to expire immediately,
1397  * or with HTTP-violations enabled the configured negative-TTL is observed
1398  */
1399 void
negativeCache()1400 StoreEntry::negativeCache()
1401 {
1402     // XXX: should make the default for expires 0 instead of -1
1403     //      so we can distinguish "Expires: -1" from nothing.
1404     if (expires <= 0)
1405 #if USE_HTTP_VIOLATIONS
1406         expires = squid_curtime + Config.negativeTtl;
1407 #else
1408         expires = squid_curtime;
1409 #endif
1410     if (expires > squid_curtime) {
1411         EBIT_SET(flags, ENTRY_NEGCACHED);
1412         debugs(20, 6, "expires = " << expires << " +" << (expires-squid_curtime) << ' ' << *this);
1413     }
1414 }
1415 
/// shutdown/cleanup helper: frees the store roots and the cache digest
void
storeFreeMemory(void)
{
    Store::FreeMemory();
#if USE_CACHE_DIGESTS
    delete store_digest;
#endif
    store_digest = NULL;
}
1425 
1426 int
expiresMoreThan(time_t expires,time_t when)1427 expiresMoreThan(time_t expires, time_t when)
1428 {
1429     if (expires < 0)            /* No Expires given */
1430         return 1;
1431 
1432     return (expires > (squid_curtime + when));
1433 }
1434 
/// \returns 1 when this entry may be used to satisfy a client request:
/// it must not be released/aborted/stale-negative, and must be backed by a
/// disk cache, an in-progress swapout, or an intact memory copy
int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    // negatively cached entries are only valid until they expire
    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (hasDisk()) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //    return 1;

    return 1;
}
1474 
/// Recomputes the entry's timestamp, expires, and lastModified_ values from
/// the current reply, mimicking RFC 2616 section 13.2.3 age calculations and
/// compensating for clock skew and network delays.
/// \returns false when nothing effectively changed
bool
StoreEntry::timestampsSet()
{
    const HttpReply *reply = getReply();
    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock.  We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server.  But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        struct timeval responseTime;
        if (mem_obj->request->hier.peerResponseTime(responseTime))
            served_date -= responseTime.tv_sec;
    }

    // derive the absolute expiry time from the reply's relative one, when
    // both Expires and Date are present; otherwise take Expires verbatim
    time_t exp = 0;
    if (reply->expires > 0 && reply->date > -1)
        exp = served_date + (reply->expires - reply->date);
    else
        exp = reply->expires;

    if (timestamp == served_date && expires == exp) {
        // if the reply lacks LMT, then we now know that our effective
        // LMT (i.e., timestamp) will stay the same, otherwise, old and
        // new modification times must match
        if (reply->last_modified < 0 || reply->last_modified == lastModified())
            return false; // nothing has changed
    }

    expires = exp;

    lastModified_ = reply->last_modified;

    timestamp = served_date;

    return true;
}
1534 
/// Registers a callback to be notified when this entry gets aborted.
/// At most one abort callback may be registered at a time (asserted);
/// the cbdata `data` pointer is reference-counted until unregisterAbort().
void
StoreEntry::registerAbort(STABH * cb, void *data)
{
    assert(mem_obj);
    assert(mem_obj->abort.callback == NULL);
    mem_obj->abort.callback = cb;
    mem_obj->abort.data = cbdataReference(data);
}
1543 
1544 void
unregisterAbort()1545 StoreEntry::unregisterAbort()
1546 {
1547     assert(mem_obj);
1548     if (mem_obj->abort.callback) {
1549         mem_obj->abort.callback = NULL;
1550         cbdataReferenceDone(mem_obj->abort.data);
1551     }
1552 }
1553 
/// writes a field-by-field dump of this StoreEntry to the debug log at the
/// given debug level `l`
void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}
1575 
1576 /*
1577  * NOTE, this function assumes only two mem states
1578  */
/// Transitions the entry between NOT_IN_MEMORY and IN_MEMORY, keeping the
/// (non-shared) memory replacement policy and hot_obj_count in sync.
/// NOTE, this function assumes only two mem states.
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (Config.memShared && IamWorkerProcess()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != NULL);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        // SPECIAL entries stay out of the replacement policy in both branches
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}
1620 
1621 const char *
url() const1622 StoreEntry::url() const
1623 {
1624     if (mem_obj == NULL)
1625         return "[null_mem_obj]";
1626     else
1627         return mem_obj->storeId();
1628 }
1629 
/// creates this entry's MemObject; the entry must not already have one
void
StoreEntry::createMemObject()
{
    assert(!mem_obj);
    mem_obj = new MemObject();
}
1636 
/// creates this entry's MemObject and sets its URIs and request method;
/// the entry must not already have a MemObject (asserted)
void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    assert(!mem_obj);
    ensureMemObject(aUrl, aLogUrl, aMethod);
}
1643 
/// lazily creates the MemObject when missing and (re)sets its URIs and
/// request method in either case
void
StoreEntry::ensureMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    if (!mem_obj)
        mem_obj = new MemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}
1651 
1652 /** disable sending content to the clients.
1653  *
1654  * This just sets DELAY_SENDING.
1655  */
/** disable sending content to the clients.
 *
 * This just sets DELAY_SENDING; flush() clears it and resumes delivery.
 */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}
1661 
1662 /** flush any buffered content.
1663  *
1664  * This just clears DELAY_SENDING and Invokes the handlers
1665  * to begin sending anything that may be buffered.
1666  */
1667 void
flush()1668 StoreEntry::flush()
1669 {
1670     if (EBIT_TEST(flags, DELAY_SENDING)) {
1671         EBIT_CLR(flags, DELAY_SENDING);
1672         invokeHandlers();
1673     }
1674 }
1675 
/// \returns the stored object size in bytes, including reply headers
/// (mem_obj->object_sz); requires a MemObject
int64_t
StoreEntry::objectLen() const
{
    assert(mem_obj != NULL);
    return mem_obj->object_sz;
}
1682 
/// \returns the stored body size in bytes: objectLen() minus the reply
/// header size; requires a MemObject with a reply
int64_t
StoreEntry::contentLen() const
{
    assert(mem_obj != NULL);
    assert(getReply() != NULL);
    return objectLen() - getReply()->hdr_sz;
}
1690 
1691 HttpReply const *
getReply() const1692 StoreEntry::getReply () const
1693 {
1694     if (NULL == mem_obj)
1695         return NULL;
1696 
1697     return mem_obj->getReply();
1698 }
1699 
1700 void
reset()1701 StoreEntry::reset()
1702 {
1703     assert (mem_obj);
1704     debugs(20, 3, "StoreEntry::reset: " << url());
1705     mem_obj->reset();
1706     HttpReply *rep = (HttpReply *) getReply();       // bypass const
1707     rep->reset();
1708     expires = lastModified_ = timestamp = -1;
1709 }
1710 
1711 /*
1712  * storeFsInit
1713  *
1714  * This routine calls the SETUP routine for each fs type.
1715  * I don't know where the best place for this is, and I'm not going to shuffle
1716  * around large chunks of code right now (that can be done once its working.)
1717  */
void
storeFsInit(void)
{
    // presumably lets the replacement-policy modules register themselves
    // (see repl_modules.h) -- TODO: confirm against storeReplSetup()
    storeReplSetup();
}
1723 
1724 /*
1725  * called to add another store removal policy module
1726  */
1727 void
storeReplAdd(const char * type,REMOVALPOLICYCREATE * create)1728 storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
1729 {
1730     int i;
1731 
1732     /* find the number of currently known repl types */
1733     for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
1734         if (strcmp(storerepl_list[i].typestr, type) == 0) {
1735             debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
1736             return;
1737         }
1738     }
1739 
1740     /* add the new type */
1741     storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));
1742 
1743     memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));
1744 
1745     storerepl_list[i].typestr = type;
1746 
1747     storerepl_list[i].create = create;
1748 }
1749 
1750 /*
1751  * Create a removal policy instance
1752  */
1753 RemovalPolicy *
createRemovalPolicy(RemovalPolicySettings * settings)1754 createRemovalPolicy(RemovalPolicySettings * settings)
1755 {
1756     storerepl_entry_t *r;
1757 
1758     for (r = storerepl_list; r && r->typestr; ++r) {
1759         if (strcmp(r->typestr, settings->type) == 0)
1760             return r->create(settings->args);
1761     }
1762 
1763     debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
1764     debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
1765     debugs(20, DBG_IMPORTANT, "ERROR:   and memory_replacement_policy in squid.conf!");
1766     fatalf("ERROR: Unknown policy %s\n", settings->type);
1767     return NULL;                /* NOTREACHED */
1768 }
1769 
#if 0
// NOTE(review): long-disabled code; it references fields and helpers
// (swap_file_number, storeDirMapBit*, storeDirLRU*) that presumably predate
// the current Store API -- TODO: confirm it is obsolete and remove.
void
storeSwapFileNumberSet(StoreEntry * e, sfileno filn)
{
    if (e->swap_file_number == filn)
        return;

    if (filn < 0) {
        assert(-1 == filn);
        storeDirMapBitReset(e->swap_file_number);
        storeDirLRUDelete(e);
        e->swap_file_number = -1;
    } else {
        assert(-1 == e->swap_file_number);
        storeDirMapBitSet(e->swap_file_number = filn);
        storeDirLRUAdd(e);
    }
}

#endif
1790 
/// Replaces the entry's content with the given error reply, completes the
/// entry, and negatively caches it while also marking it for release.
/// The entry is kept locked for the duration of the update.
void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(reply);
    flush();
    complete();
    negativeCache();
    releaseRequest(false); // if it is safe to negatively cache, sharing is OK
    unlock("StoreEntry::storeErrorResponse");
}
1803 
1804 /*
1805  * Replace a store entry with
1806  * a new reply. This eats the reply.
1807  */
/*
 * Replace a store entry with
 * a new reply. This eats the reply.
 */
/// \param rep the new reply; ownership passes to the MemObject
/// \param andStartWriting when true, also packs the new headers into the
///        entry via startWriting()
void
StoreEntry::replaceHttpReply(HttpReply *rep, bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    // cannot store a reply without a MemObject to hold it
    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceHttpReply(rep);

    if (andStartWriting)
        startWriting();
}
1823 
/// Packs the current reply headers (and any reply body prefix) into this
/// still-empty entry, recording where the headers end in the MemObject.
void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers ? */
    /* We ONLY want the headers */

    assert (isEmpty());
    assert(mem_obj);

    const HttpReply *rep = getReply();
    assert(rep);

    // buffer()/flush() bracket the writes so clients see them as one update
    buffer();
    rep->packHeadersUsingSlowPacker(*this);
    mem_obj->markEndOfReplyHeaders();

    rep->body.packInto(this);
    flush();
}
1844 
/// Packs this entry's swap metadata (TLV list) into a newly allocated buffer
/// and records its size in mem_obj->swap_hdr_sz.
/// \returns the packed buffer; presumably the caller takes ownership and must
/// free it -- TODO: confirm against storeSwapMetaPack() and its callers
char const *
StoreEntry::getSerialisedMetaData()
{
    StoreMeta *tlv_list = storeSwapMetaBuild(this);
    int swap_hdr_sz;
    char *result = storeSwapMetaPack(tlv_list, &swap_hdr_sz);
    storeSwapTLVFree(tlv_list);
    assert (swap_hdr_sz >= 0);
    mem_obj->swap_hdr_sz = (size_t) swap_hdr_sz;
    return result;
}
1856 
1857 /**
1858  * Abandon the transient entry our worker has created if neither the shared
1859  * memory cache nor the disk cache wants to store it. Collapsed requests, if
1860  * any, should notice and use Plan B instead of getting stuck waiting for us
1861  * to start swapping the entry out.
1862  */
void
StoreEntry::transientsAbandonmentCheck()
{
    // stop sharing only when all of these hold:
    if (mem_obj && !Store::Root().transientsReader(*this) && // this worker is responsible
            hasTransients() && // other workers may be interested
            !hasMemStore() && // rejected by the shared memory cache
            mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) {
        debugs(20, 7, "cannot be shared: " << *this);
        if (!shutting_down) // Store::Root() is FATALly missing during shutdown
            Store::Root().stopSharing(*this);
    }
}
1875 
/// Called when the memory cache has made its accept/reject decision for this
/// entry; may abandon the transients entry when nobody is going to store it.
void
StoreEntry::memOutDecision(const bool)
{
    transientsAbandonmentCheck();
}
1881 
/// Records the disk cache's swap-out decision for this entry and, if the
/// decision makes storing impossible, abandons the transients entry.
void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    // Abandon our transient entry if neither shared memory nor disk wants it.
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    transientsAbandonmentCheck();
}
1890 
1891 void
trimMemory(const bool preserveSwappable)1892 StoreEntry::trimMemory(const bool preserveSwappable)
1893 {
1894     /*
1895      * DPW 2007-05-09
1896      * Bug #1943.  We must not let go any data for IN_MEMORY
1897      * objects.  We have to wait until the mem_status changes.
1898      */
1899     if (mem_status == IN_MEMORY)
1900         return;
1901 
1902     if (EBIT_TEST(flags, ENTRY_SPECIAL))
1903         return; // cannot trim because we do not load them again
1904 
1905     if (preserveSwappable)
1906         mem_obj->trimSwappable();
1907     else
1908         mem_obj->trimUnSwappable();
1909 
1910     debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
1911 }
1912 
1913 bool
modifiedSince(const time_t ims,const int imslen) const1914 StoreEntry::modifiedSince(const time_t ims, const int imslen) const
1915 {
1916     int object_length;
1917     const time_t mod_time = lastModified();
1918 
1919     debugs(88, 3, "modifiedSince: '" << url() << "'");
1920 
1921     debugs(88, 3, "modifiedSince: mod_time = " << mod_time);
1922 
1923     if (mod_time < 0)
1924         return true;
1925 
1926     /* Find size of the object */
1927     object_length = getReply()->content_length;
1928 
1929     if (object_length < 0)
1930         object_length = contentLen();
1931 
1932     if (mod_time > ims) {
1933         debugs(88, 3, "--> YES: entry newer than client");
1934         return true;
1935     } else if (mod_time < ims) {
1936         debugs(88, 3, "-->  NO: entry older than client");
1937         return false;
1938     } else if (imslen < 0) {
1939         debugs(88, 3, "-->  NO: same LMT, no client length");
1940         return false;
1941     } else if (imslen == object_length) {
1942         debugs(88, 3, "-->  NO: same LMT, same length");
1943         return false;
1944     } else {
1945         debugs(88, 3, "--> YES: same LMT, different length");
1946         return true;
1947     }
1948 }
1949 
1950 bool
hasEtag(ETag & etag) const1951 StoreEntry::hasEtag(ETag &etag) const
1952 {
1953     if (const HttpReply *reply = getReply()) {
1954         etag = reply->header.getETag(Http::HdrType::ETAG);
1955         if (etag.str)
1956             return true;
1957     }
1958     return false;
1959 }
1960 
1961 bool
hasIfMatchEtag(const HttpRequest & request) const1962 StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
1963 {
1964     const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
1965     return hasOneOfEtags(reqETags, false);
1966 }
1967 
1968 bool
hasIfNoneMatchEtag(const HttpRequest & request) const1969 StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
1970 {
1971     const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
1972     // weak comparison is allowed only for HEAD or full-body GET requests
1973     const bool allowWeakMatch = !request.flags.isRanged &&
1974                                 (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
1975     return hasOneOfEtags(reqETags, allowWeakMatch);
1976 }
1977 
1978 /// whether at least one of the request ETags matches entity ETag
1979 bool
hasOneOfEtags(const String & reqETags,const bool allowWeakMatch) const1980 StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
1981 {
1982     const ETag repETag = getReply()->header.getETag(Http::HdrType::ETAG);
1983     if (!repETag.str)
1984         return strListIsMember(&reqETags, "*", ',');
1985 
1986     bool matched = false;
1987     const char *pos = NULL;
1988     const char *item;
1989     int ilen;
1990     while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
1991         if (!strncmp(item, "*", ilen))
1992             matched = true;
1993         else {
1994             String str;
1995             str.append(item, ilen);
1996             ETag reqETag;
1997             if (etagParseInit(&reqETag, str.termedBuf())) {
1998                 matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
1999                           etagIsStrongEqual(repETag, reqETag);
2000             }
2001         }
2002     }
2003     return matched;
2004 }
2005 
2006 Store::Disk &
disk() const2007 StoreEntry::disk() const
2008 {
2009     assert(hasDisk());
2010     const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
2011     assert(sd);
2012     return *sd;
2013 }
2014 
2015 bool
hasDisk(const sdirno dirn,const sfileno filen) const2016 StoreEntry::hasDisk(const sdirno dirn, const sfileno filen) const
2017 {
2018     checkDisk();
2019     if (dirn < 0 && filen < 0)
2020         return swap_dirn >= 0;
2021     Must(dirn >= 0);
2022     const bool matchingDisk = (swap_dirn == dirn);
2023     return filen < 0 ? matchingDisk : (matchingDisk && swap_filen == filen);
2024 }
2025 
/// Records the entry's on-disk location (cache_dir index and file number)
/// and swap status, validating disk-state consistency before and after.
void
StoreEntry::attachToDisk(const sdirno dirn, const sfileno fno, const swap_status_t status)
{
    debugs(88, 3, "attaching entry with key " << getMD5Text() << " : " <<
           swapStatusStr[status] << " " << dirn << " " <<
           std::hex << std::setw(8) << std::setfill('0') <<
           std::uppercase << fno);
    // confirm the pre-attachment state is consistent before changing it
    checkDisk();
    swap_dirn = dirn;
    swap_filen = fno;
    swap_status = status;
    // and that the new (attached) state is consistent as well
    checkDisk();
}
2039 
/// Resets the entry's disk identifiers to their "not on disk" values.
void
StoreEntry::detachFromDisk()
{
    swap_dirn = -1;
    swap_filen = -1;
    swap_status = SWAPOUT_NONE;
}
2047 
2048 void
checkDisk() const2049 StoreEntry::checkDisk() const
2050 {
2051     try {
2052         if (swap_dirn < 0) {
2053             Must(swap_filen < 0);
2054             Must(swap_status == SWAPOUT_NONE);
2055         } else {
2056             Must(swap_filen >= 0);
2057             Must(swap_dirn < Config.cacheSwap.n_configured);
2058             if (swapoutFailed()) {
2059                 Must(EBIT_TEST(flags, RELEASE_REQUEST));
2060             } else {
2061                 Must(swappingOut() || swappedOut());
2062             }
2063         }
2064     } catch (...) {
2065         debugs(88, DBG_IMPORTANT, "ERROR: inconsistent disk entry state " <<
2066                *this << "; problem: " << CurrentException);
2067         throw;
2068     }
2069 }
2070 
2071 /*
2072  * return true if the entry is in a state where
2073  * it can accept more data (ie with write() method)
2074  */
2075 bool
isAccepting() const2076 StoreEntry::isAccepting() const
2077 {
2078     if (STORE_PENDING != store_status)
2079         return false;
2080 
2081     if (EBIT_TEST(flags, ENTRY_ABORTED))
2082         return false;
2083 
2084     return true;
2085 }
2086 
/// Formats the entry's timestamps (last validation, last reference, last
/// modification, expiry) for debugging. LOCAL_ARRAY provides a reusable
/// buffer, so callers must not retain the returned pointer — presumably it
/// is overwritten by the next call; confirm against the LOCAL_ARRAY macro.
const char *
StoreEntry::describeTimestamps() const
{
    LOCAL_ARRAY(char, buf, 256);
    // NOTE(review): time_t values are truncated to int for printing
    snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
             static_cast<int>(timestamp),
             static_cast<int>(lastref),
             static_cast<int>(lastModified_),
             static_cast<int>(expires));
    return buf;
}
2098 
2099 static std::ostream &
operator <<(std::ostream & os,const Store::IoStatus & io)2100 operator <<(std::ostream &os, const Store::IoStatus &io)
2101 {
2102     switch (io) {
2103     case Store::ioUndecided:
2104         os << 'u';
2105         break;
2106     case Store::ioReading:
2107         os << 'r';
2108         break;
2109     case Store::ioWriting:
2110         os << 'w';
2111         break;
2112     case Store::ioDone:
2113         os << 'o';
2114         break;
2115     }
2116     return os;
2117 }
2118 
operator <<(std::ostream & os,const StoreEntry & e)2119 std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
2120 {
2121     os << "e:";
2122 
2123     if (e.hasTransients()) {
2124         const auto &xitTable = e.mem_obj->xitTable;
2125         os << 't' << xitTable.io << xitTable.index;
2126     }
2127 
2128     if (e.hasMemStore()) {
2129         const auto &memCache = e.mem_obj->memCache;
2130         os << 'm' << memCache.io << memCache.index << '@' << memCache.offset;
2131     }
2132 
2133     // Do not use e.hasDisk() here because its checkDisk() call may calls us.
2134     if (e.swap_filen > -1 || e.swap_dirn > -1)
2135         os << 'd' << e.swap_filen << '@' << e.swap_dirn;
2136 
2137     os << '=';
2138 
2139     // print only non-default status values, using unique letters
2140     if (e.mem_status != NOT_IN_MEMORY ||
2141             e.store_status != STORE_PENDING ||
2142             e.swap_status != SWAPOUT_NONE ||
2143             e.ping_status != PING_NONE) {
2144         if (e.mem_status != NOT_IN_MEMORY) os << 'm';
2145         if (e.store_status != STORE_PENDING) os << 's';
2146         if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
2147         if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
2148     }
2149 
2150     // print only set flags, using unique letters
2151     if (e.flags) {
2152         if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
2153         if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
2154         if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
2155         if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
2156         if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
2157         if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
2158         if (EBIT_TEST(e.flags, KEY_PRIVATE)) {
2159             os << 'I';
2160             if (e.shareableWhenPrivate)
2161                 os << 'H';
2162         }
2163         if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
2164         if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
2165         if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
2166         if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
2167         if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
2168     }
2169 
2170     return os << '/' << &e << '*' << e.locks();
2171 }
2172 
/* NullStoreEntry */

// the single process-wide NullStoreEntry object
NullStoreEntry NullStoreEntry::_instance;

/// returns the shared NullStoreEntry singleton
NullStoreEntry *
NullStoreEntry::getInstance()
{
    return &_instance;
}
2182 
/// a fixed placeholder because the null entry has no real MD5 key
char const *
NullStoreEntry::getMD5Text() const
{
    return "N/A";
}
2188 
/// The singleton must never be deleted; any attempt is a fatal logic error.
void
NullStoreEntry::operator delete(void*)
{
    fatal ("Attempt to delete NullStoreEntry\n");
}
2194 
2195 char const *
getSerialisedMetaData()2196 NullStoreEntry::getSerialisedMetaData()
2197 {
2198     return NULL;
2199 }
2200 
/// Cleanup path taken when the guarded scope exits via an exception:
/// mark the entry for release and drop our lock, swallowing any
/// secondary exceptions so this noexcept handler cannot terminate().
void
Store::EntryGuard::onException() noexcept
{
    SWALLOW_EXCEPTIONS({
        entry_->releaseRequest(false);
        entry_->unlock(context_);
    });
}
2209 
2210