/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Implementation details of the atoms table.
 */

#ifndef vm_AtomsTable_h
#define vm_AtomsTable_h

#include <type_traits>  // std::{enable_if_t,is_const_v}

#include "js/GCHashTable.h"
#include "js/TypeDecls.h"
#include "vm/JSAtom.h"

/*
 * The atoms table is a mapping from strings to JSAtoms that supports
 * concurrent access and incremental sweeping.
 *
 * The table is partitioned based on the key into multiple sub-tables. Each
 * sub-table is protected by a lock to ensure safety when accessed by helper
 * threads. Concurrent access improves performance of off-thread parsing,
 * which frequently creates large numbers of atoms. Locking is only required
 * when off-thread parsing is running.
 */

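// Illustrative sketch only (not the real code path): an atomization request
// hashes its lookup key, uses the hash to select one partition of the table,
// and takes just that partition's lock before searching or inserting, so
// parse threads touching different partitions do not contend. The names
// |table|, |partition|, |rt| and the AutoLock usage below are assumptions
// made for illustration.
//
//   size_t index = table.getPartitionIndex(lookup);     // hash -> partition
//   Partition& partition = *table.partitions[index];
//   AtomsTable::AutoLock guard(rt, partition.lock);     // per-partition lock
//   AtomSet::AddPtr p = partition.atoms.lookupForAdd(lookup);
//   if (!p) {
//     // ... allocate the atom and add an AtomStateEntry for it ...
//   }
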
namespace js {

// Take all atoms table locks to allow iterating over cells in the atoms zone.
class MOZ_RAII AutoLockAllAtoms {
  JSRuntime* runtime;

 public:
  explicit AutoLockAllAtoms(JSRuntime* rt);
  ~AutoLockAllAtoms();
};

// This is a tagged pointer to an atom that duplicates the atom's pinned flag
// so that we don't have to check the atom itself when marking pinned atoms
// (there can be a great many atoms). See bug 1445196.
class AtomStateEntry {
  uintptr_t bits;

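  // Mask that clears the low pinned-flag bit, leaving just the atom pointer.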
  static const uintptr_t NO_TAG_MASK = uintptr_t(-1) - 1;

 public:
  AtomStateEntry() : bits(0) {}
  AtomStateEntry(const AtomStateEntry& other) = default;
  AtomStateEntry(JSAtom* ptr, bool tagged)
      : bits(uintptr_t(ptr) | uintptr_t(tagged)) {
    MOZ_ASSERT((uintptr_t(ptr) & 0x1) == 0);
  }

  bool isPinned() const { return bits & 0x1; }

  /*
   * Non-branching code sequence. Note that the const_cast is safe because
   * the hash function doesn't consider the tag to be a portion of the key.
   */
  void setPinned(bool pinned) const {
    const_cast<AtomStateEntry*>(this)->bits |= uintptr_t(pinned);
  }

  JSAtom* asPtrUnbarriered() const {
    MOZ_ASSERT(bits);
    return reinterpret_cast<JSAtom*>(bits & NO_TAG_MASK);
  }

  JSAtom* asPtr(JSContext* cx) const;

  bool needsSweep() {
    JSAtom* atom = asPtrUnbarriered();
    return gc::IsAboutToBeFinalizedUnbarriered(&atom);
  }
};
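
// Illustrative sketch only: the pinned flag rides in the low bit of the
// (suitably aligned) atom pointer, so an entry can be queried without
// touching the atom itself. |someAtom| is a hypothetical name.
//
//   AtomStateEntry entry(someAtom, /* tagged = */ true);
//   MOZ_ASSERT(entry.isPinned());
//   MOZ_ASSERT(entry.asPtrUnbarriered() == someAtom);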

struct AtomHasher {
  struct Lookup;
  static inline HashNumber hash(const Lookup& l);
  static MOZ_ALWAYS_INLINE bool match(const AtomStateEntry& entry,
                                      const Lookup& lookup);
  static void rekey(AtomStateEntry& k, const AtomStateEntry& newKey) {
    k = newKey;
  }
};

using AtomSet = JS::GCHashSet<AtomStateEntry, AtomHasher, SystemAllocPolicy>;

// This class is a wrapper for AtomSet that is used to ensure the AtomSet is
// not modified. It should only expose read-only methods from AtomSet.
// Note, however, that the atoms within the table can be marked during GC.
class FrozenAtomSet {
  AtomSet* mSet;

 public:
  // This constructor takes ownership of the passed-in AtomSet.
  explicit FrozenAtomSet(AtomSet* set) { mSet = set; }

  ~FrozenAtomSet() { js_delete(mSet); }

  MOZ_ALWAYS_INLINE AtomSet::Ptr readonlyThreadsafeLookup(
      const AtomSet::Lookup& l) const;

  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mSet->shallowSizeOfIncludingThis(mallocSizeOf);
  }

  using Range = AtomSet::Range;

  AtomSet::Range all() const { return mSet->all(); }
};
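
// Illustrative sketch only: a frozen set is walked via its read-only Range;
// |set| is a hypothetical FrozenAtomSet.
//
//   for (FrozenAtomSet::Range r(set.all()); !r.empty(); r.popFront()) {
//     JSAtom* atom = r.front().asPtrUnbarriered();
//     // ... inspect |atom| without modifying the set ...
//   }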

class AtomsTable {
  static const size_t PartitionShift = 5;
  static const size_t PartitionCount = 1 << PartitionShift;
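  // With PartitionShift == 5 this gives 32 partitions, each with its own lock.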

  // Use a low initial capacity for atom hash tables to avoid penalizing
  // runtimes which create a small number of atoms.
  static const size_t InitialTableSize = 16;

  // A single partition, representing a subset of the atoms in the table.
  struct Partition {
    explicit Partition(uint32_t index);
    ~Partition();

    // Lock that must be held to access this set.
    Mutex lock;

    // The atoms in this set.
    AtomSet atoms;

    // Set of atoms added while the |atoms| set is being swept.
    AtomSet* atomsAddedWhileSweeping;
  };

  Partition* partitions[PartitionCount];

#ifdef DEBUG
  bool allPartitionsLocked = false;
#endif

 public:
  class AutoLock;

  // An iterator used for sweeping atoms incrementally.
  class SweepIterator {
    AtomsTable& atoms;
    size_t partitionIndex;
    mozilla::Maybe<AtomSet::Enum> atomsIter;

    void settle();
    void startSweepingPartition();
    void finishSweepingPartition();

   public:
    explicit SweepIterator(AtomsTable& atoms);
    bool empty() const;
    AtomStateEntry front() const;
    void removeFront();
    void popFront();
  };
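
  // Illustrative sketch only (not the actual implementation) of how a sweep
  // slice might drive this iterator: dead entries are removed, live entries
  // are skipped, and the slice yields when its budget runs out. |iter| and
  // |budget| are hypothetical names.
  //
  //   while (!iter.empty()) {
  //     if (budget.isOverBudget()) {
  //       return false;  // more sweeping remains
  //     }
  //     if (iter.front().needsSweep()) {
  //       iter.removeFront();
  //     } else {
  //       iter.popFront();
  //     }
  //   }
  //   return true;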

  ~AtomsTable();
  bool init();

  template <typename Chars>
  MOZ_ALWAYS_INLINE JSAtom* atomizeAndCopyChars(
      JSContext* cx, Chars chars, size_t length, PinningBehavior pin,
      const mozilla::Maybe<uint32_t>& indexValue,
      const AtomHasher::Lookup& lookup);

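  // Overload for non-const character pointers; forwards to the const-pointer
  // version above.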
  template <typename CharT,
            typename = std::enable_if_t<!std::is_const_v<CharT>>>
  MOZ_ALWAYS_INLINE JSAtom* atomizeAndCopyChars(
      JSContext* cx, CharT* chars, size_t length, PinningBehavior pin,
      const mozilla::Maybe<uint32_t>& indexValue,
      const AtomHasher::Lookup& lookup) {
    return atomizeAndCopyChars(cx, const_cast<const CharT*>(chars), length, pin,
                               indexValue, lookup);
  }

  bool atomIsPinned(JSRuntime* rt, JSAtom* atom);

  void maybePinExistingAtom(JSContext* cx, JSAtom* atom);

  void tracePinnedAtoms(JSTracer* trc, const AutoAccessAtomsZone& access);

  // Sweep all atoms non-incrementally.
  void traceWeak(JSTracer* trc);

  bool startIncrementalSweep();

  // Sweep some atoms incrementally and return whether we finished.
  bool sweepIncrementally(SweepIterator& atomsToSweep, SliceBudget& budget);

#ifdef DEBUG
  bool mainThreadHasAllLocks() const { return allPartitionsLocked; }
#endif

  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;

 private:
  // Map a key to a partition based on its hash.
  MOZ_ALWAYS_INLINE size_t getPartitionIndex(const AtomHasher::Lookup& lookup);

  void tracePinnedAtomsInSet(JSTracer* trc, AtomSet& atoms);
  void mergeAtomsAddedWhileSweeping(Partition& partition);

  friend class AutoLockAllAtoms;
  void lockAll();
  void unlockAll();
};

bool AtomIsPinned(JSContext* cx, JSAtom* atom);

}  // namespace js

#endif /* vm_AtomsTable_h */