1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4 
5 /*============================================================
6 **
7 ** Class:  Hashtable
8 **
9 ** Purpose: Represents a collection of key/value pairs
10 **          that are organized based on the hash code
11 **          of the key.
12 **
13 ===========================================================*/
14 
15 using System.Diagnostics;
16 using System.Runtime.CompilerServices;
17 using System.Runtime.Serialization;
18 using System.Threading;
19 
20 namespace System.Collections
21 {
22     // The Hashtable class represents a dictionary of associated keys and values
23     // with constant lookup time.
24     //
25     // Objects used as keys in a hashtable must implement the GetHashCode
26     // and Equals methods (or they can rely on the default implementations
27     // inherited from Object if key equality is simply reference
28     // equality). Furthermore, the GetHashCode and Equals methods of
29     // a key object must produce the same results given the same parameters for the
30     // entire time the key is present in the hashtable. In practical terms, this
31     // means that key objects should be immutable, at least for the time they are
32     // used as keys in a hashtable.
33     //
34     // When entries are added to a hashtable, they are placed into
35     // buckets based on the hashcode of their keys. Subsequent lookups of
36     // keys will use the hashcode of the keys to only search a particular bucket,
37     // thus substantially reducing the number of key comparisons required to find
38     // an entry. A hashtable's maximum load factor, which can be specified
39     // when the hashtable is instantiated, determines the maximum ratio of
40     // hashtable entries to hashtable buckets. Smaller load factors cause faster
41     // average lookup times at the cost of increased memory consumption. The
42     // default maximum load factor of 1.0 generally provides the best balance
43     // between speed and size. As entries are added to a hashtable, the hashtable's
44     // actual load factor increases, and when the actual load factor reaches the
45     // maximum load factor value, the number of buckets in the hashtable is
46     // automatically increased by approximately a factor of two (to be precise, the
47     // number of hashtable buckets is increased to the smallest prime number that
48     // is larger than twice the current number of hashtable buckets).
49     //
50     // Each object provides their own hash function, accessed by calling
51     // GetHashCode().  However, one can write their own object
52     // implementing IEqualityComparer and pass it to a constructor on
53     // the Hashtable.  That hash function (and the equals method on the
54     // IEqualityComparer) would be used for all objects in the table.
55     //
56     [DebuggerTypeProxy(typeof(System.Collections.Hashtable.HashtableDebugView))]
57     [DebuggerDisplay("Count = {Count}")]
58     [Serializable]
59 #if !MONO
60     [System.Runtime.CompilerServices.TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
61 #endif
62     public class Hashtable : IDictionary, ISerializable, IDeserializationCallback, ICloneable
63     {
64         /*
65           This Hashtable uses double hashing.  There are hashsize buckets in the
66           table, and each bucket can contain 0 or 1 element.  We assign a bit to mark
67           whether there's been a collision when we inserted multiple elements
68           (ie, an inserted item was hashed at least a second time and we probed
69           this bucket, but it was already in use).  Using the collision bit, we
70           can terminate lookups & removes for elements that aren't in the hash
71           table more quickly.  We steal the most significant bit from the hash code
72           to store the collision bit.
73 
74           Our hash function is of the following form:
75 
76           h(key, n) = h1(key) + n*h2(key)
77 
78           where n is the number of times we've hit a collided bucket and rehashed
79           (on this particular lookup).  Here are our hash functions:
80 
81           h1(key) = GetHash(key);  // default implementation calls key.GetHashCode();
82           h2(key) = 1 + (((h1(key) >> 5) + 1) % (hashsize - 1));
83 
84           The h1 can return any number.  h2 must return a number between 1 and
85           hashsize - 1 that is relatively prime to hashsize (not a problem if
86           hashsize is prime).  (Knuth's Art of Computer Programming, Vol. 3, p. 528-9)
87           If this is true, then we are guaranteed to visit every bucket in exactly
88           hashsize probes, since the least common multiple of hashsize and h2(key)
89           will be hashsize * h2(key).  (This is the first number where adding h2 to
90           h1 mod hashsize will be 0 and we will search the same bucket twice).
91 
92           We previously used a different h2(key, n) that was not constant.  That is a
93           horrifically bad idea, unless you can prove that series will never produce
94           any identical numbers that overlap when you mod them by hashsize, for all
95           subranges from i to i+hashsize, for all i.  It's not worth investigating,
96           since there was no clear benefit from using that hash function, and it was
97           broken.
98 
99           For efficiency reasons, we've implemented this by storing h1 and h2 in a
100           temporary, and setting a variable called seed equal to h1.  We do a probe,
101           and if we collided, we simply add h2 to seed each time through the loop.
102 
103           A good test for h2() is to subclass Hashtable, provide your own implementation
104           of GetHash() that returns a constant, then add many items to the hash table.
105           Make sure Count equals the number of items you inserted.
106 
107           Note that when we remove an item from the hash table, we set the key
108           equal to buckets, if there was a collision in this bucket.  Otherwise
109           we'd either wipe out the collision bit, or we'd still have an item in
110           the hash table.
111 
112            --
113         */
114 
        // Multiplier used by InitHash to spread small hash seeds; prime, and
        // GetPrime guarantees table sizes where (hashsize - 1) is not a multiple of it.
        internal const Int32 HashPrime = 101;
        // Smallest bucket count a table is ever created with.
        private const Int32 InitialSize = 3;

        // Names of the values persisted by ISerializable.
        private const String LoadFactorName = "LoadFactor"; // Do not rename (binary serialization)
        private const String VersionName = "Version"; // Do not rename (binary serialization)
        private const String ComparerName = "Comparer"; // Do not rename (binary serialization)
        private const String HashCodeProviderName = "HashCodeProvider"; // Do not rename (binary serialization)
        private const String HashSizeName = "HashSize";  // Must save buckets.Length. Do not rename (binary serialization)
        private const String KeysName = "Keys"; // Do not rename (binary serialization)
        private const String ValuesName = "Values"; // Do not rename (binary serialization)
        private const String KeyComparerName = "KeyComparer"; // Do not rename (binary serialization)

        // Deleted entries have their key set to buckets

        // The hash table data.
        // This cannot be serialized
        private struct bucket
        {
            public Object key;      // null = never used; the buckets array itself = deleted sentinel
            public Object val;      // value stored for this key
            public int hash_coll;   // Store hash code; sign bit means there was a collision.
        }

        private bucket[] _buckets;

        // The total number of entries in the hash table.
        private int _count;

        // The total number of collision bits set in the hashtable
        private int _occupancy;

        // _loadsize is the entry count at which the table grows; computed as
        // _loadFactor * bucket count in the (int, float) constructor.
        private int _loadsize;
        private float _loadFactor;

        // Volatile so lock-free readers can detect concurrent writers:
        // _version changes after each completed mutation, _isWriterInProgress
        // is true while a mutation is underway.
        private volatile int _version;
        private volatile bool _isWriterInProgress;

        // Cached collection views — presumably returned by the Keys/Values
        // properties (not visible in this chunk); confirm against the full file.
        private ICollection _keys;
        private ICollection _values;

        // Optional pluggable hashing/equality; null means use the key's own
        // GetHashCode/Equals.
        private IEqualityComparer _keycomparer;
        // Presumably lazily allocated to back a SyncRoot property — TODO confirm.
        private Object _syncRoot;
157 
158         [Obsolete("Please use EqualityComparer property.")]
159         protected IHashCodeProvider hcp
160         {
161             get
162             {
163                 if (_keycomparer is CompatibleComparer)
164                 {
165                     return ((CompatibleComparer)_keycomparer).HashCodeProvider;
166                 }
167                 else if (_keycomparer == null)
168                 {
169                     return null;
170                 }
171                 else
172                 {
173                     throw new ArgumentException(SR.Arg_CannotMixComparisonInfrastructure);
174                 }
175             }
176             set
177             {
178                 if (_keycomparer is CompatibleComparer)
179                 {
180                     CompatibleComparer keyComparer = (CompatibleComparer)_keycomparer;
181                     _keycomparer = new CompatibleComparer(value, keyComparer.Comparer);
182                 }
183                 else if (_keycomparer == null)
184                 {
185                     _keycomparer = new CompatibleComparer(value, (IComparer)null);
186                 }
187                 else
188                 {
189                     throw new ArgumentException(SR.Arg_CannotMixComparisonInfrastructure);
190                 }
191             }
192         }
193 
194         [Obsolete("Please use KeyComparer properties.")]
195         protected IComparer comparer
196         {
197             get
198             {
199                 if (_keycomparer is CompatibleComparer)
200                 {
201                     return ((CompatibleComparer)_keycomparer).Comparer;
202                 }
203                 else if (_keycomparer == null)
204                 {
205                     return null;
206                 }
207                 else
208                 {
209                     throw new ArgumentException(SR.Arg_CannotMixComparisonInfrastructure);
210                 }
211             }
212             set
213             {
214                 if (_keycomparer is CompatibleComparer)
215                 {
216                     CompatibleComparer keyComparer = (CompatibleComparer)_keycomparer;
217                     _keycomparer = new CompatibleComparer(keyComparer.HashCodeProvider, value);
218                 }
219                 else if (_keycomparer == null)
220                 {
221                     _keycomparer = new CompatibleComparer((IHashCodeProvider)null, value);
222                 }
223                 else
224                 {
225                     throw new ArgumentException(SR.Arg_CannotMixComparisonInfrastructure);
226                 }
227             }
228         }
229 
230 
231         protected IEqualityComparer EqualityComparer
232         {
233             get
234             {
235                 return _keycomparer;
236             }
237         }
238 
239         // Note: this constructor is a bogus constructor that does nothing
240         // and is for use only with SyncHashtable.
        // Intentionally leaves every field null/zero; the 'trash' parameter
        // exists only to give this do-nothing overload a distinct signature.
        internal Hashtable(bool trash)
        {
        }
244 
245         // Constructs a new hashtable. The hashtable is created with an initial
246         // capacity of zero and a load factor of 1.0.
        // Delegates to Hashtable(int, float) with zero capacity and the default
        // load factor of 1.0 (scaled internally to 0.72).
        public Hashtable() : this(0, 1.0f)
        {
        }
250 
251         // Constructs a new hashtable with the given initial capacity and a load
252         // factor of 1.0. The capacity argument serves as an indication of
253         // the number of entries the hashtable will contain. When this number (or
254         // an approximation) is known, specifying it in the constructor can
255         // eliminate a number of resizing operations that would otherwise be
256         // performed when elements are added to the hashtable.
257         //
        // Delegates to Hashtable(int, float) with the default load factor 1.0.
        public Hashtable(int capacity) : this(capacity, 1.0f)
        {
        }
261 
262         // Constructs a new hashtable with the given initial capacity and load
263         // factor. The capacity argument serves as an indication of the
264         // number of entries the hashtable will contain. When this number (or an
265         // approximation) is known, specifying it in the constructor can eliminate
266         // a number of resizing operations that would otherwise be performed when
267         // elements are added to the hashtable. The loadFactor argument
268         // indicates the maximum ratio of hashtable entries to hashtable buckets.
269         // Smaller load factors cause faster average lookup times at the cost of
270         // increased memory consumption. A load factor of 1.0 generally provides
271         // the best balance between speed and size.
272         //
        public Hashtable(int capacity, float loadFactor)
        {
            if (capacity < 0)
                throw new ArgumentOutOfRangeException(nameof(capacity), SR.ArgumentOutOfRange_NeedNonNegNum);
            // Deliberately written as a negated range check so that NaN (which
            // fails every comparison) is rejected along with out-of-range values.
            if (!(loadFactor >= 0.1f && loadFactor <= 1.0f))
                throw new ArgumentOutOfRangeException(nameof(loadFactor), SR.Format(SR.ArgumentOutOfRange_HashtableLoadFactor, .1, 1.0));

            // Based on perf work, .72 is the optimal load factor for this table.
            _loadFactor = 0.72f * loadFactor;

            // Number of buckets needed to hold 'capacity' entries at that load factor.
            double rawsize = capacity / _loadFactor;
            if (rawsize > Int32.MaxValue)
                throw new ArgumentException(SR.Arg_HTCapacityOverflow, nameof(capacity));

            // Avoid awfully small sizes
            int hashsize = (rawsize > InitialSize) ? HashHelpers.GetPrime((int)rawsize) : InitialSize;
            _buckets = new bucket[hashsize];

            _loadsize = (int)(_loadFactor * hashsize);
            _isWriterInProgress = false;
            // Based on the current algorithm, loadsize must be less than hashsize.
            Debug.Assert(_loadsize < hashsize, "Invalid hashtable loadsize!");
        }
296 
        // Empty table with the given capacity/load factor and a custom key
        // comparer (null means use each key's own GetHashCode/Equals).
        public Hashtable(int capacity, float loadFactor, IEqualityComparer equalityComparer) : this(capacity, loadFactor)
        {
            _keycomparer = equalityComparer;
        }
301 
        // Obsolete overload: empty table using the legacy provider/comparer pair.
        [Obsolete("Please use Hashtable(IEqualityComparer) instead.")]
        public Hashtable(IHashCodeProvider hcp, IComparer comparer)
            : this(0, 1.0f, hcp, comparer)
        {
        }
307 
        // Empty table with default capacity/load factor and a custom comparer.
        public Hashtable(IEqualityComparer equalityComparer) : this(0, 1.0f, equalityComparer)
        {
        }
311 
        // Obsolete overload: given capacity plus the legacy provider/comparer pair.
        [Obsolete("Please use Hashtable(int, IEqualityComparer) instead.")]
        public Hashtable(int capacity, IHashCodeProvider hcp, IComparer comparer)
            : this(capacity, 1.0f, hcp, comparer)
        {
        }
317 
        // Given capacity, default load factor 1.0, custom comparer.
        public Hashtable(int capacity, IEqualityComparer equalityComparer)
            : this(capacity, 1.0f, equalityComparer)
        {
        }
322 
323         // Constructs a new hashtable containing a copy of the entries in the given
324         // dictionary. The hashtable is created with a load factor of 1.0.
325         //
        // Copy-construct from a dictionary with the default load factor 1.0.
        public Hashtable(IDictionary d) : this(d, 1.0f)
        {
        }
329 
330         // Constructs a new hashtable containing a copy of the entries in the given
331         // dictionary. The hashtable is created with the given load factor.
332         //
        // Copy-construct from a dictionary with the given load factor and
        // default hashing/equality.
        public Hashtable(IDictionary d, float loadFactor)
            : this(d, loadFactor, (IEqualityComparer)null)
        {
        }
337 
        // Obsolete overload: copy-construct using the legacy provider/comparer pair.
        [Obsolete("Please use Hashtable(IDictionary, IEqualityComparer) instead.")]
        public Hashtable(IDictionary d, IHashCodeProvider hcp, IComparer comparer)
            : this(d, 1.0f, hcp, comparer)
        {
        }
343 
        // Copy-construct from a dictionary with a custom comparer and the
        // default load factor 1.0.
        public Hashtable(IDictionary d, IEqualityComparer equalityComparer)
            : this(d, 1.0f, equalityComparer)
        {
        }
348 
349         [Obsolete("Please use Hashtable(int, float, IEqualityComparer) instead.")]
Hashtable(int capacity, float loadFactor, IHashCodeProvider hcp, IComparer comparer)350         public Hashtable(int capacity, float loadFactor, IHashCodeProvider hcp, IComparer comparer)
351             : this(capacity, loadFactor)
352         {
353             if (hcp != null || comparer != null)
354             {
355                 _keycomparer = new CompatibleComparer(hcp, comparer);
356             }
357         }
358 
359         [Obsolete("Please use Hashtable(IDictionary, float, IEqualityComparer) instead.")]
Hashtable(IDictionary d, float loadFactor, IHashCodeProvider hcp, IComparer comparer)360         public Hashtable(IDictionary d, float loadFactor, IHashCodeProvider hcp, IComparer comparer)
361             : this((d != null ? d.Count : 0), loadFactor, hcp, comparer)
362         {
363             if (d == null)
364                 throw new ArgumentNullException(nameof(d), SR.ArgumentNull_Dictionary);
365 
366             IDictionaryEnumerator e = d.GetEnumerator();
367             while (e.MoveNext()) Add(e.Key, e.Value);
368         }
369 
Hashtable(IDictionary d, float loadFactor, IEqualityComparer equalityComparer)370         public Hashtable(IDictionary d, float loadFactor, IEqualityComparer equalityComparer)
371             : this((d != null ? d.Count : 0), loadFactor, equalityComparer)
372         {
373             if (d == null)
374                 throw new ArgumentNullException(nameof(d), SR.ArgumentNull_Dictionary);
375 
376             IDictionaryEnumerator e = d.GetEnumerator();
377             while (e.MoveNext()) Add(e.Key, e.Value);
378         }
379 
        // Deserialization constructor: only stashes the SerializationInfo.
        // The table is rebuilt later, in OnDeserialization.
        protected Hashtable(SerializationInfo info, StreamingContext context)
        {
            //We can't do anything with the keys and values until the entire graph has been deserialized
            //and we have a reasonable estimate that GetHashCode is not going to fail.  For the time being,
            //we'll just cache this.  The graph is not valid until OnDeserialization has been called.
            HashHelpers.SerializationInfoTable.Add(this, info);
        }
387 
        // 'InitHash' is basically an implementation of classic DoubleHashing (see http://en.wikipedia.org/wiki/Double_hashing)
        //
        // 1) The only 'correctness' requirement is that the 'increment' used to probe
        //    a. Be non-zero
        //    b. Be relatively prime to the table size 'hashSize'. (This is needed to ensure you probe all entries in the table before you 'wrap' and visit entries already probed)
        // 2) Because we choose table sizes to be primes, we just need to ensure that the increment is 0 < incr < hashSize
        //
        // Thus this function would work: Incr = 1 + (seed % (hashSize-1))
        //
        // While this works well for 'uniformly distributed' keys, in practice, non-uniformity is common.
        // In particular in practice we can see 'mostly sequential' where you get long clusters of keys that 'pack'.
        // To avoid bad behavior you want it to be the case that the increment is 'large' even for 'small' values (because small
        // values tend to happen more in practice). Thus we multiply 'seed' by a number that will make these small values
        // bigger (and not hurt large values). We picked HashPrime (101) because it was prime, and if 'hashSize-1' is not a multiple of HashPrime
        // (enforced in GetPrime), then incr has the potential of being every value from 1 to hashSize-1. The choice was largely arbitrary.
403         //
404         // Computes the hash function:  H(key, i) = h1(key) + i*h2(key, hashSize).
405         // The out parameter seed is h1(key), while the out parameter
406         // incr is h2(key, hashSize).  Callers of this function should
407         // add incr each time through a loop.
InitHash(Object key, int hashsize, out uint seed, out uint incr)408         private uint InitHash(Object key, int hashsize, out uint seed, out uint incr)
409         {
410             unchecked
411             {
412                 // Hashcode must be positive.  Also, we must not use the sign bit, since
413                 // that is used for the collision bit.
414                 uint hashcode = (uint)GetHash(key) & 0x7FFFFFFF;
415                 seed = (uint)hashcode;
416                 // Restriction: incr MUST be between 1 and hashsize - 1, inclusive for
417                 // the modular arithmetic to work correctly.  This guarantees you'll
418                 // visit every bucket in the table exactly once within hashsize
419                 // iterations.  Violate this and it'll cause obscure bugs forever.
420                 // If you change this calculation for h2(key), update putEntry too!
421                 incr = (uint)(1 + ((seed * HashPrime) % ((uint)hashsize - 1)));
422                 return hashcode;
423             }
424         }
425 
426         // Adds an entry with the given key and value to this hashtable. An
427         // ArgumentException is thrown if the key is null or if the key is already
428         // present in the hashtable.
429         //
        public virtual void Add(Object key, Object value)
        {
            // 'true' requests add-only semantics, matching the contract above:
            // an existing key makes Insert throw rather than overwrite.
            Insert(key, value, true);
        }
434 
435         // Removes all entries from this hashtable.
Clear()436         public virtual void Clear()
437         {
438             Debug.Assert(!_isWriterInProgress, "Race condition detected in usages of Hashtable - multiple threads appear to be writing to a Hashtable instance simultaneously!  Don't do that - use Hashtable.Synchronized.");
439 
440             if (_count == 0 && _occupancy == 0)
441                 return;
442 
443             _isWriterInProgress = true;
444             for (int i = 0; i < _buckets.Length; i++)
445             {
446                 _buckets[i].hash_coll = 0;
447                 _buckets[i].key = null;
448                 _buckets[i].val = null;
449             }
450 
451             _count = 0;
452             _occupancy = 0;
453             UpdateVersion();
454             _isWriterInProgress = false;
455         }
456 
457         // Clone returns a virtually identical copy of this hash table.  This does
458         // a shallow copy - the Objects in the table aren't cloned, only the references
459         // to those Objects.
Clone()460         public virtual Object Clone()
461         {
462             bucket[] lbuckets = _buckets;
463             Hashtable ht = new Hashtable(_count, _keycomparer);
464             ht._version = _version;
465             ht._loadFactor = _loadFactor;
466             ht._count = 0;
467 
468             int bucket = lbuckets.Length;
469             while (bucket > 0)
470             {
471                 bucket--;
472                 Object keyv = lbuckets[bucket].key;
473                 if ((keyv != null) && (keyv != lbuckets))
474                 {
475                     ht[keyv] = lbuckets[bucket].val;
476                 }
477             }
478 
479             return ht;
480         }
481 
482         // Checks if this hashtable contains the given key.
Contains(Object key)483         public virtual bool Contains(Object key)
484         {
485             return ContainsKey(key);
486         }
487 
488         // Checks if this hashtable contains an entry with the given key.  This is
489         // an O(1) operation.
490         //
        public virtual bool ContainsKey(Object key)
        {
            if (key == null)
            {
                throw new ArgumentNullException(nameof(key), SR.ArgumentNull_Key);
            }

            uint seed;
            uint incr;
            // Take a snapshot of buckets, in case another thread resizes table
            bucket[] lbuckets = _buckets;
            uint hashcode = InitHash(key, lbuckets.Length, out seed, out incr);
            int ntry = 0;

            bucket b;
            // Double-hash probe: start at seed mod table size, step by incr.
            int bucketNumber = (int)(seed % (uint)lbuckets.Length);
            do
            {
                b = lbuckets[bucketNumber];
                // An empty slot ends the probe chain — the key cannot be present.
                if (b.key == null)
                {
                    return false;
                }
                // Compare the stored hash (sign/collision bit masked off) before
                // the potentially expensive key comparison.
                if (((b.hash_coll & 0x7FFFFFFF) == hashcode) &&
                    KeyEquals(b.key, key))
                    return true;
                // (long) cast avoids int overflow before taking the modulus.
                bucketNumber = (int)(((long)bucketNumber + incr) % (uint)lbuckets.Length);
                // Continue only while this bucket's collision bit (sign bit) is
                // set; cap the probes at the table length to guarantee termination.
            } while (b.hash_coll < 0 && ++ntry < lbuckets.Length);
            return false;
        }
521 
522 
523 
524         // Checks if this hashtable contains an entry with the given value. The
525         // values of the entries of the hashtable are compared to the given value
526         // using the Object.Equals method. This method performs a linear
        // search and is thus substantially slower than the ContainsKey
528         // method.
529         //
ContainsValue(Object value)530         public virtual bool ContainsValue(Object value)
531         {
532             if (value == null)
533             {
534                 for (int i = _buckets.Length; --i >= 0;)
535                 {
536                     if (_buckets[i].key != null && _buckets[i].key != _buckets && _buckets[i].val == null)
537                         return true;
538                 }
539             }
540             else
541             {
542                 for (int i = _buckets.Length; --i >= 0;)
543                 {
544                     Object val = _buckets[i].val;
545                     if (val != null && val.Equals(value)) return true;
546                 }
547             }
548             return false;
549         }
550 
551         // Copies the keys of this hashtable to a given array starting at a given
552         // index. This method is used by the implementation of the CopyTo method in
553         // the KeyCollection class.
CopyKeys(Array array, int arrayIndex)554         private void CopyKeys(Array array, int arrayIndex)
555         {
556             Debug.Assert(array != null);
557             Debug.Assert(array.Rank == 1);
558 
559             bucket[] lbuckets = _buckets;
560             for (int i = lbuckets.Length; --i >= 0;)
561             {
562                 Object keyv = lbuckets[i].key;
563                 if ((keyv != null) && (keyv != _buckets))
564                 {
565                     array.SetValue(keyv, arrayIndex++);
566                 }
567             }
568         }
569 
570         // Copies the keys of this hashtable to a given array starting at a given
571         // index. This method is used by the implementation of the CopyTo method in
572         // the KeyCollection class.
CopyEntries(Array array, int arrayIndex)573         private void CopyEntries(Array array, int arrayIndex)
574         {
575             Debug.Assert(array != null);
576             Debug.Assert(array.Rank == 1);
577 
578             bucket[] lbuckets = _buckets;
579             for (int i = lbuckets.Length; --i >= 0;)
580             {
581                 Object keyv = lbuckets[i].key;
582                 if ((keyv != null) && (keyv != _buckets))
583                 {
584                     DictionaryEntry entry = new DictionaryEntry(keyv, lbuckets[i].val);
585                     array.SetValue(entry, arrayIndex++);
586                 }
587             }
588         }
589 
590         // Copies the values in this hash table to an array at
591         // a given index.  Note that this only copies values, and not keys.
CopyTo(Array array, int arrayIndex)592         public virtual void CopyTo(Array array, int arrayIndex)
593         {
594             if (array == null)
595                 throw new ArgumentNullException(nameof(array), SR.ArgumentNull_Array);
596             if (array.Rank != 1)
597                 throw new ArgumentException(SR.Arg_RankMultiDimNotSupported, nameof(array));
598             if (arrayIndex < 0)
599                 throw new ArgumentOutOfRangeException(nameof(arrayIndex), SR.ArgumentOutOfRange_NeedNonNegNum);
600             if (array.Length - arrayIndex < Count)
601                 throw new ArgumentException(SR.Arg_ArrayPlusOffTooSmall);
602 
603             CopyEntries(array, arrayIndex);
604         }
605 
606         // Copies the values in this Hashtable to an KeyValuePairs array.
607         // KeyValuePairs is different from Dictionary Entry in that it has special
608         // debugger attributes on its fields.
609 
ToKeyValuePairsArray()610         internal virtual KeyValuePairs[] ToKeyValuePairsArray()
611         {
612             KeyValuePairs[] array = new KeyValuePairs[_count];
613             int index = 0;
614             bucket[] lbuckets = _buckets;
615             for (int i = lbuckets.Length; --i >= 0;)
616             {
617                 Object keyv = lbuckets[i].key;
618                 if ((keyv != null) && (keyv != _buckets))
619                 {
620                     array[index++] = new KeyValuePairs(keyv, lbuckets[i].val);
621                 }
622             }
623 
624             return array;
625         }
626 
627 
628         // Copies the values of this hashtable to a given array starting at a given
629         // index. This method is used by the implementation of the CopyTo method in
630         // the ValueCollection class.
CopyValues(Array array, int arrayIndex)631         private void CopyValues(Array array, int arrayIndex)
632         {
633             Debug.Assert(array != null);
634             Debug.Assert(array.Rank == 1);
635 
636             bucket[] lbuckets = _buckets;
637             for (int i = lbuckets.Length; --i >= 0;)
638             {
639                 Object keyv = lbuckets[i].key;
640                 if ((keyv != null) && (keyv != _buckets))
641                 {
642                     array.SetValue(lbuckets[i].val, arrayIndex++);
643                 }
644             }
645         }
646 
647         // Returns the value associated with the given key. If an entry with the
648         // given key is not found, the returned value is null.
649         //
        public virtual Object this[Object key]
        {
            get
            {
                if (key == null)
                {
                    throw new ArgumentNullException(nameof(key), SR.ArgumentNull_Key);
                }

                uint seed;
                uint incr;


                // Take a snapshot of buckets, in case another thread does a resize
                bucket[] lbuckets = _buckets;
                uint hashcode = InitHash(key, lbuckets.Length, out seed, out incr);
                int ntry = 0;

                bucket b;
                int bucketNumber = (int)(seed % (uint)lbuckets.Length);
                do
                {
                    int currentversion;

                    //     A read operation on hashtable has four steps:
                    //        (1) calculate the hash and find the slot number.
                    //        (2) compare the hashcode, if equal, go to step 3. Otherwise end.
                    //        (3) compare the key, if equal, go to step 4. Otherwise end.
                    //        (4) return the value contained in the bucket.
                    //     After step 3 and before step 4, a writer can kick in and remove the old item and add a new one
                    //     in the same bucket. So in the reader we need to check if the hash table is modified during above steps.
                    //
                    // Writers (Insert, Remove, Clear) will set 'isWriterInProgress' flag before it starts modifying
                    // the hashtable and will clear the flag when it is done.  When the flag is cleared, the 'version'
                    // will be increased.  We will repeat the reading if a writer is in progress or done with the modification
                    // during the read.
                    //
                    // Our memory model guarantee if we pick up the change in bucket from another processor,
                    // we will see the 'isWriterInProgress' flag to be true or 'version' is changed in the reader.
                    //
                    SpinWait spin = new SpinWait();
                    while (true)
                    {
                        // this is volatile read, following memory accesses can not be moved ahead of it.
                        currentversion = _version;
                        b = lbuckets[bucketNumber];

                        // Retry until the bucket was read without a writer racing us.
                        if (!_isWriterInProgress && (currentversion == _version))
                            break;

                        spin.SpinOnce();
                    }

                    // A never-used slot terminates the probe chain: key not present.
                    if (b.key == null)
                    {
                        return null;
                    }
                    if (((b.hash_coll & 0x7FFFFFFF) == hashcode) &&
                        KeyEquals(b.key, key))
                        return b.val;
                    bucketNumber = (int)(((long)bucketNumber + incr) % (uint)lbuckets.Length);
                    // Negative hash_coll means this slot's collision bit is set, so
                    // the probe chain may continue past it.
                } while (b.hash_coll < 0 && ++ntry < lbuckets.Length);
                return null;
            }

            set
            {
                // Set semantics: overwrite when present (add == false).
                // Null-key validation happens inside Insert.
                Insert(key, value, false);
            }
        }
720 
721         // Increases the bucket count of this hashtable. This method is called from
722         // the Insert method when the actual load factor of the hashtable reaches
723         // the upper limit specified when the hashtable was constructed. The number
724         // of buckets in the hashtable is increased to the smallest prime number
725         // that is larger than twice the current number of buckets, and the entries
726         // in the hashtable are redistributed into the new buckets using the cached
727         // hashcodes.
expand()728         private void expand()
729         {
730             int rawsize = HashHelpers.ExpandPrime(_buckets.Length);
731             rehash(rawsize);
732         }
733 
734         // We occasionally need to rehash the table to clean up the collision bits.
rehash()735         private void rehash()
736         {
737             rehash(_buckets.Length);
738         }
739 
UpdateVersion()740         private void UpdateVersion()
741         {
742             // Version might become negative when version is Int32.MaxValue, but the oddity will be still be correct.
743             // So we don't need to special case this.
744             _version++;
745         }
746 
rehash(int newsize)747         private void rehash(int newsize)
748         {
749             // reset occupancy
750             _occupancy = 0;
751 
752             // Don't replace any internal state until we've finished adding to the
753             // new bucket[].  This serves two purposes:
754             //   1) Allow concurrent readers to see valid hashtable contents
755             //      at all times
756             //   2) Protect against an OutOfMemoryException while allocating this
757             //      new bucket[].
758             bucket[] newBuckets = new bucket[newsize];
759 
760             // rehash table into new buckets
761             int nb;
762             for (nb = 0; nb < _buckets.Length; nb++)
763             {
764                 bucket oldb = _buckets[nb];
765                 if ((oldb.key != null) && (oldb.key != _buckets))
766                 {
767                     int hashcode = oldb.hash_coll & 0x7FFFFFFF;
768                     putEntry(newBuckets, oldb.key, oldb.val, hashcode);
769                 }
770             }
771 
772             // New bucket[] is good to go - replace buckets and other internal state.
773             _isWriterInProgress = true;
774             _buckets = newBuckets;
775             _loadsize = (int)(_loadFactor * newsize);
776             UpdateVersion();
777             _isWriterInProgress = false;
778             // minimum size of hashtable is 3 now and maximum loadFactor is 0.72 now.
779             Debug.Assert(_loadsize < newsize, "Our current implementation means this is not possible.");
780         }
781 
782         // Returns an enumerator for this hashtable.
783         // If modifications made to the hashtable while an enumeration is
784         // in progress, the MoveNext and Current methods of the
785         // enumerator will throw an exception.
786         //
IEnumerable.GetEnumerator()787         IEnumerator IEnumerable.GetEnumerator()
788         {
789             return new HashtableEnumerator(this, HashtableEnumerator.DictEntry);
790         }
791 
792         // Returns a dictionary enumerator for this hashtable.
793         // If modifications made to the hashtable while an enumeration is
794         // in progress, the MoveNext and Current methods of the
795         // enumerator will throw an exception.
796         //
GetEnumerator()797         public virtual IDictionaryEnumerator GetEnumerator()
798         {
799             return new HashtableEnumerator(this, HashtableEnumerator.DictEntry);
800         }
801 
802         // Internal method to get the hash code for an Object.  This will call
803         // GetHashCode() on each object if you haven't provided an IHashCodeProvider
804         // instance.  Otherwise, it calls hcp.GetHashCode(obj).
GetHash(Object key)805         protected virtual int GetHash(Object key)
806         {
807             if (_keycomparer != null)
808                 return _keycomparer.GetHashCode(key);
809             return key.GetHashCode();
810         }
811 
812         // Is this Hashtable read-only?
813         public virtual bool IsReadOnly
814         {
815             get { return false; }
816         }
817 
818         public virtual bool IsFixedSize
819         {
820             get { return false; }
821         }
822 
823         // Is this Hashtable synchronized?  See SyncRoot property
824         public virtual bool IsSynchronized
825         {
826             get { return false; }
827         }
828 
829         // Internal method to compare two keys.  If you have provided an IComparer
830         // instance in the constructor, this method will call comparer.Compare(item, key).
831         // Otherwise, it will call item.Equals(key).
832         //
KeyEquals(Object item, Object key)833         protected virtual bool KeyEquals(Object item, Object key)
834         {
835             Debug.Assert(key != null, "key can't be null here!");
836             if (Object.ReferenceEquals(_buckets, item))
837             {
838                 return false;
839             }
840 
841             if (Object.ReferenceEquals(item, key))
842                 return true;
843 
844             if (_keycomparer != null)
845                 return _keycomparer.Equals(item, key);
846             return item == null ? false : item.Equals(key);
847         }
848 
849         // Returns a collection representing the keys of this hashtable. The order
850         // in which the returned collection represents the keys is unspecified, but
        // it is guaranteed to be the same order in which a collection returned by
852         // GetValues represents the values of the hashtable.
853         //
854         // The returned collection is live in the sense that any changes
855         // to the hash table are reflected in this collection.  It is not
856         // a static copy of all the keys in the hash table.
857         //
858         public virtual ICollection Keys
859         {
860             get
861             {
862                 if (_keys == null) _keys = new KeyCollection(this);
863                 return _keys;
864             }
865         }
866 
867         // Returns a collection representing the values of this hashtable. The
868         // order in which the returned collection represents the values is
869         // unspecified, but it is guaranteed to be the same order in which a
870         // collection returned by GetKeys represents the keys of the
871         // hashtable.
872         //
873         // The returned collection is live in the sense that any changes
874         // to the hash table are reflected in this collection.  It is not
875         // a static copy of all the keys in the hash table.
876         //
877         public virtual ICollection Values
878         {
879             get
880             {
881                 if (_values == null) _values = new ValueCollection(this);
882                 return _values;
883             }
884         }
885 
886         // Inserts an entry into this hashtable. This method is called from the Set
887         // and Add methods. If the add parameter is true and the given key already
888         // exists in the hashtable, an exception is thrown.
Insert(Object key, Object nvalue, bool add)889         private void Insert(Object key, Object nvalue, bool add)
890         {
891             if (key == null)
892             {
893                 throw new ArgumentNullException(nameof(key), SR.ArgumentNull_Key);
894             }
895 
896             if (_count >= _loadsize)
897             {
898                 expand();
899             }
900             else if (_occupancy > _loadsize && _count > 100)
901             {
902                 rehash();
903             }
904 
905             uint seed;
906             uint incr;
907             // Assume we only have one thread writing concurrently.  Modify
908             // buckets to contain new data, as long as we insert in the right order.
909             uint hashcode = InitHash(key, _buckets.Length, out seed, out incr);
910             int ntry = 0;
911             int emptySlotNumber = -1; // We use the empty slot number to cache the first empty slot. We chose to reuse slots
912             // create by remove that have the collision bit set over using up new slots.
913             int bucketNumber = (int)(seed % (uint)_buckets.Length);
914             do
915             {
916                 // Set emptySlot number to current bucket if it is the first available bucket that we have seen
917                 // that once contained an entry and also has had a collision.
918                 // We need to search this entire collision chain because we have to ensure that there are no
919                 // duplicate entries in the table.
920                 if (emptySlotNumber == -1 && (_buckets[bucketNumber].key == _buckets) && (_buckets[bucketNumber].hash_coll < 0))//(((buckets[bucketNumber].hash_coll & unchecked(0x80000000))!=0)))
921                     emptySlotNumber = bucketNumber;
922 
923                 // Insert the key/value pair into this bucket if this bucket is empty and has never contained an entry
924                 // OR
925                 // This bucket once contained an entry but there has never been a collision
926                 if ((_buckets[bucketNumber].key == null) ||
927                     (_buckets[bucketNumber].key == _buckets && ((_buckets[bucketNumber].hash_coll & unchecked(0x80000000)) == 0)))
928                 {
929                     // If we have found an available bucket that has never had a collision, but we've seen an available
930                     // bucket in the past that has the collision bit set, use the previous bucket instead
931                     if (emptySlotNumber != -1) // Reuse slot
932                         bucketNumber = emptySlotNumber;
933 
934                     // We pretty much have to insert in this order.  Don't set hash
935                     // code until the value & key are set appropriately.
936                     _isWriterInProgress = true;
937                     _buckets[bucketNumber].val = nvalue;
938                     _buckets[bucketNumber].key = key;
939                     _buckets[bucketNumber].hash_coll |= (int)hashcode;
940                     _count++;
941                     UpdateVersion();
942                     _isWriterInProgress = false;
943 
944                     return;
945                 }
946 
947                 // The current bucket is in use
948                 // OR
949                 // it is available, but has had the collision bit set and we have already found an available bucket
950                 if (((_buckets[bucketNumber].hash_coll & 0x7FFFFFFF) == hashcode) &&
951                     KeyEquals(_buckets[bucketNumber].key, key))
952                 {
953                     if (add)
954                     {
955                         throw new ArgumentException(SR.Format(SR.Argument_AddingDuplicate__, _buckets[bucketNumber].key, key));
956                     }
957                     _isWriterInProgress = true;
958                     _buckets[bucketNumber].val = nvalue;
959                     UpdateVersion();
960                     _isWriterInProgress = false;
961 
962                     return;
963                 }
964 
965                 // The current bucket is full, and we have therefore collided.  We need to set the collision bit
966                 // unless we have remembered an available slot previously.
967                 if (emptySlotNumber == -1)
968                 {// We don't need to set the collision bit here since we already have an empty slot
969                     if (_buckets[bucketNumber].hash_coll >= 0)
970                     {
971                         _buckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
972                         _occupancy++;
973                     }
974                 }
975 
976                 bucketNumber = (int)(((long)bucketNumber + incr) % (uint)_buckets.Length);
977             } while (++ntry < _buckets.Length);
978 
979             // This code is here if and only if there were no buckets without a collision bit set in the entire table
980             if (emptySlotNumber != -1)
981             {
982                 // We pretty much have to insert in this order.  Don't set hash
983                 // code until the value & key are set appropriately.
984                 _isWriterInProgress = true;
985                 _buckets[emptySlotNumber].val = nvalue;
986                 _buckets[emptySlotNumber].key = key;
987                 _buckets[emptySlotNumber].hash_coll |= (int)hashcode;
988                 _count++;
989                 UpdateVersion();
990                 _isWriterInProgress = false;
991 
992                 return;
993             }
994 
995             // If you see this assert, make sure load factor & count are reasonable.
996             // Then verify that our double hash function (h2, described at top of file)
997             // meets the requirements described above. You should never see this assert.
998             Debug.Fail("hash table insert failed!  Load factor too high, or our double hashing function is incorrect.");
999             throw new InvalidOperationException(SR.InvalidOperation_HashInsertFailed);
1000         }
1001 
putEntry(bucket[] newBuckets, Object key, Object nvalue, int hashcode)1002         private void putEntry(bucket[] newBuckets, Object key, Object nvalue, int hashcode)
1003         {
1004             Debug.Assert(hashcode >= 0, "hashcode >= 0");  // make sure collision bit (sign bit) wasn't set.
1005 
1006             uint seed = (uint)hashcode;
1007             uint incr = unchecked((uint)(1 + ((seed * HashPrime) % ((uint)newBuckets.Length - 1))));
1008             int bucketNumber = (int)(seed % (uint)newBuckets.Length);
1009             do
1010             {
1011                 if ((newBuckets[bucketNumber].key == null) || (newBuckets[bucketNumber].key == _buckets))
1012                 {
1013                     newBuckets[bucketNumber].val = nvalue;
1014                     newBuckets[bucketNumber].key = key;
1015                     newBuckets[bucketNumber].hash_coll |= hashcode;
1016                     return;
1017                 }
1018 
1019                 if (newBuckets[bucketNumber].hash_coll >= 0)
1020                 {
1021                     newBuckets[bucketNumber].hash_coll |= unchecked((int)0x80000000);
1022                     _occupancy++;
1023                 }
1024                 bucketNumber = (int)(((long)bucketNumber + incr) % (uint)newBuckets.Length);
1025             } while (true);
1026         }
1027 
1028         // Removes an entry from this hashtable. If an entry with the specified
1029         // key exists in the hashtable, it is removed. An ArgumentException is
1030         // thrown if the key is null.
1031         //
Remove(Object key)1032         public virtual void Remove(Object key)
1033         {
1034             if (key == null)
1035             {
1036                 throw new ArgumentNullException(nameof(key), SR.ArgumentNull_Key);
1037             }
1038 
1039             Debug.Assert(!_isWriterInProgress, "Race condition detected in usages of Hashtable - multiple threads appear to be writing to a Hashtable instance simultaneously!  Don't do that - use Hashtable.Synchronized.");
1040 
1041             uint seed;
1042             uint incr;
1043             // Assuming only one concurrent writer, write directly into buckets.
1044             uint hashcode = InitHash(key, _buckets.Length, out seed, out incr);
1045             int ntry = 0;
1046 
1047             bucket b;
1048             int bn = (int)(seed % (uint)_buckets.Length);  // bucketNumber
1049             do
1050             {
1051                 b = _buckets[bn];
1052                 if (((b.hash_coll & 0x7FFFFFFF) == hashcode) &&
1053                     KeyEquals(b.key, key))
1054                 {
1055                     _isWriterInProgress = true;
1056                     // Clear hash_coll field, then key, then value
1057                     _buckets[bn].hash_coll &= unchecked((int)0x80000000);
1058                     if (_buckets[bn].hash_coll != 0)
1059                     {
1060                         _buckets[bn].key = _buckets;
1061                     }
1062                     else
1063                     {
1064                         _buckets[bn].key = null;
1065                     }
1066                     _buckets[bn].val = null;  // Free object references sooner & simplify ContainsValue.
1067                     _count--;
1068                     UpdateVersion();
1069                     _isWriterInProgress = false;
1070                     return;
1071                 }
1072                 bn = (int)(((long)bn + incr) % (uint)_buckets.Length);
1073             } while (b.hash_coll < 0 && ++ntry < _buckets.Length);
1074         }
1075 
1076         // Returns the object to synchronize on for this hash table.
1077         public virtual Object SyncRoot
1078         {
1079             get
1080             {
1081                 if (_syncRoot == null)
1082                 {
1083                     System.Threading.Interlocked.CompareExchange<Object>(ref _syncRoot, new Object(), null);
1084                 }
1085                 return _syncRoot;
1086             }
1087         }
1088 
1089         // Returns the number of associations in this hashtable.
1090         //
1091         public virtual int Count
1092         {
1093             get { return _count; }
1094         }
1095 
1096         // Returns a thread-safe wrapper for a Hashtable.
1097         //
Synchronized(Hashtable table)1098         public static Hashtable Synchronized(Hashtable table)
1099         {
1100             if (table == null)
1101                 throw new ArgumentNullException(nameof(table));
1102             return new SyncHashtable(table);
1103         }
1104 
GetObjectData(SerializationInfo info, StreamingContext context)1105         public virtual void GetObjectData(SerializationInfo info, StreamingContext context)
1106         {
1107             if (info == null)
1108             {
1109                 throw new ArgumentNullException(nameof(info));
1110             }
1111 
1112             // This is imperfect - it only works well if all other writes are
1113             // also using our synchronized wrapper.  But it's still a good idea.
1114             lock (SyncRoot)
1115             {
1116                 // This method hasn't been fully tweaked to be safe for a concurrent writer.
1117                 int oldVersion = _version;
1118                 info.AddValue(LoadFactorName, _loadFactor);
1119                 info.AddValue(VersionName, _version);
1120 
1121                 //
1122                 // We need to maintain serialization compatibility with Everett and RTM.
1123                 // If the comparer is null or a compatible comparer, serialize Hashtable
1124                 // in a format that can be deserialized on Everett and RTM.
1125                 //
1126                 // Also, if the Hashtable is using randomized hashing, serialize the old
1127                 // view of the _keycomparer so perevious frameworks don't see the new types
1128 #pragma warning disable 618
1129                 IEqualityComparer keyComparerForSerilization = _keycomparer;
1130 
1131                 if (keyComparerForSerilization == null)
1132                 {
1133                     info.AddValue(ComparerName, null, typeof(IComparer));
1134                     info.AddValue(HashCodeProviderName, null, typeof(IHashCodeProvider));
1135                 }
1136                 else if (keyComparerForSerilization is CompatibleComparer)
1137                 {
1138                     CompatibleComparer c = keyComparerForSerilization as CompatibleComparer;
1139                     info.AddValue(ComparerName, c.Comparer, typeof(IComparer));
1140                     info.AddValue(HashCodeProviderName, c.HashCodeProvider, typeof(IHashCodeProvider));
1141                 }
1142                 else
1143                 {
1144                     info.AddValue(KeyComparerName, keyComparerForSerilization, typeof(IEqualityComparer));
1145                 }
1146 #pragma warning restore 618
1147 
1148                 info.AddValue(HashSizeName, _buckets.Length); //This is the length of the bucket array.
1149                 Object[] serKeys = new Object[_count];
1150                 Object[] serValues = new Object[_count];
1151                 CopyKeys(serKeys, 0);
1152                 CopyValues(serValues, 0);
1153                 info.AddValue(KeysName, serKeys, typeof(Object[]));
1154                 info.AddValue(ValuesName, serValues, typeof(Object[]));
1155 
1156                 // Explicitly check to see if anyone changed the Hashtable while we
1157                 // were serializing it.  That's a race in their code.
1158                 if (_version != oldVersion)
1159                 {
1160                     throw new InvalidOperationException(SR.InvalidOperation_EnumFailedVersion);
1161                 }
1162             }
1163         }
1164 
1165         //
1166         // DeserializationEvent Listener
1167         //
OnDeserialization(Object sender)1168         public virtual void OnDeserialization(Object sender)
1169         {
1170             if (_buckets != null)
1171             {
1172                 // Somebody had a dependency on this hashtable and fixed us up before the ObjectManager got to it.
1173                 return;
1174             }
1175 
1176             SerializationInfo siInfo;
1177             HashHelpers.SerializationInfoTable.TryGetValue(this, out siInfo);
1178 
1179             if (siInfo == null)
1180             {
1181                 throw new SerializationException(SR.Serialization_InvalidOnDeser);
1182             }
1183 
1184             int hashsize = 0;
1185             IComparer c = null;
1186 
1187 #pragma warning disable 618
1188             IHashCodeProvider hcp = null;
1189 #pragma warning restore 618
1190 
1191             Object[] serKeys = null;
1192             Object[] serValues = null;
1193 
1194             SerializationInfoEnumerator enumerator = siInfo.GetEnumerator();
1195 
1196             while (enumerator.MoveNext())
1197             {
1198                 switch (enumerator.Name)
1199                 {
1200                     case LoadFactorName:
1201                         _loadFactor = siInfo.GetSingle(LoadFactorName);
1202                         break;
1203                     case HashSizeName:
1204                         hashsize = siInfo.GetInt32(HashSizeName);
1205                         break;
1206                     case KeyComparerName:
1207                         _keycomparer = (IEqualityComparer)siInfo.GetValue(KeyComparerName, typeof(IEqualityComparer));
1208                         break;
1209                     case ComparerName:
1210                         c = (IComparer)siInfo.GetValue(ComparerName, typeof(IComparer));
1211                         break;
1212                     case HashCodeProviderName:
1213 #pragma warning disable 618
1214                         hcp = (IHashCodeProvider)siInfo.GetValue(HashCodeProviderName, typeof(IHashCodeProvider));
1215 #pragma warning restore 618
1216                         break;
1217                     case KeysName:
1218                         serKeys = (Object[])siInfo.GetValue(KeysName, typeof(Object[]));
1219                         break;
1220                     case ValuesName:
1221                         serValues = (Object[])siInfo.GetValue(ValuesName, typeof(Object[]));
1222                         break;
1223                 }
1224             }
1225 
1226             _loadsize = (int)(_loadFactor * hashsize);
1227 
1228             // V1 object doesn't has _keycomparer field.
1229             if ((_keycomparer == null) && ((c != null) || (hcp != null)))
1230             {
1231                 _keycomparer = new CompatibleComparer(hcp, c);
1232             }
1233 
1234             _buckets = new bucket[hashsize];
1235 
1236             if (serKeys == null)
1237             {
1238                 throw new SerializationException(SR.Serialization_MissingKeys);
1239             }
1240             if (serValues == null)
1241             {
1242                 throw new SerializationException(SR.Serialization_MissingValues);
1243             }
1244             if (serKeys.Length != serValues.Length)
1245             {
1246                 throw new SerializationException(SR.Serialization_KeyValueDifferentSizes);
1247             }
1248             for (int i = 0; i < serKeys.Length; i++)
1249             {
1250                 if (serKeys[i] == null)
1251                 {
1252                     throw new SerializationException(SR.Serialization_NullKey);
1253                 }
1254                 Insert(serKeys[i], serValues[i], true);
1255             }
1256 
1257             _version = siInfo.GetInt32(VersionName);
1258 
1259             HashHelpers.SerializationInfoTable.Remove(this);
1260         }
1261 
1262         // Implements a Collection for the keys of a hashtable. An instance of this
1263         // class is created by the GetKeys method of a hashtable.
1264         [Serializable]
1265         private class KeyCollection : ICollection
1266         {
1267             private Hashtable _hashtable;
1268 
KeyCollection(Hashtable hashtable)1269             internal KeyCollection(Hashtable hashtable)
1270             {
1271                 _hashtable = hashtable;
1272             }
1273 
CopyTo(Array array, int arrayIndex)1274             public virtual void CopyTo(Array array, int arrayIndex)
1275             {
1276                 if (array == null)
1277                     throw new ArgumentNullException(nameof(array));
1278                 if (array.Rank != 1)
1279                     throw new ArgumentException(SR.Arg_RankMultiDimNotSupported, nameof(array));
1280                 if (arrayIndex < 0)
1281                     throw new ArgumentOutOfRangeException(nameof(arrayIndex), SR.ArgumentOutOfRange_NeedNonNegNum);
1282                 if (array.Length - arrayIndex < _hashtable._count)
1283                     throw new ArgumentException(SR.Arg_ArrayPlusOffTooSmall);
1284                 _hashtable.CopyKeys(array, arrayIndex);
1285             }
1286 
GetEnumerator()1287             public virtual IEnumerator GetEnumerator()
1288             {
1289                 return new HashtableEnumerator(_hashtable, HashtableEnumerator.Keys);
1290             }
1291 
1292             public virtual bool IsSynchronized
1293             {
1294                 get { return _hashtable.IsSynchronized; }
1295             }
1296 
1297             public virtual Object SyncRoot
1298             {
1299                 get { return _hashtable.SyncRoot; }
1300             }
1301 
1302             public virtual int Count
1303             {
1304                 get { return _hashtable._count; }
1305             }
1306         }
1307 
1308         // Implements a Collection for the values of a hashtable. An instance of
1309         // this class is created by the GetValues method of a hashtable.
1310         [Serializable]
1311         private class ValueCollection : ICollection
1312         {
1313             private Hashtable _hashtable;
1314 
ValueCollection(Hashtable hashtable)1315             internal ValueCollection(Hashtable hashtable)
1316             {
1317                 _hashtable = hashtable;
1318             }
1319 
CopyTo(Array array, int arrayIndex)1320             public virtual void CopyTo(Array array, int arrayIndex)
1321             {
1322                 if (array == null)
1323                     throw new ArgumentNullException(nameof(array));
1324                 if (array.Rank != 1)
1325                     throw new ArgumentException(SR.Arg_RankMultiDimNotSupported, nameof(array));
1326                 if (arrayIndex < 0)
1327                     throw new ArgumentOutOfRangeException(nameof(arrayIndex), SR.ArgumentOutOfRange_NeedNonNegNum);
1328                 if (array.Length - arrayIndex < _hashtable._count)
1329                     throw new ArgumentException(SR.Arg_ArrayPlusOffTooSmall);
1330                 _hashtable.CopyValues(array, arrayIndex);
1331             }
1332 
GetEnumerator()1333             public virtual IEnumerator GetEnumerator()
1334             {
1335                 return new HashtableEnumerator(_hashtable, HashtableEnumerator.Values);
1336             }
1337 
1338             public virtual bool IsSynchronized
1339             {
1340                 get { return _hashtable.IsSynchronized; }
1341             }
1342 
1343             public virtual Object SyncRoot
1344             {
1345                 get { return _hashtable.SyncRoot; }
1346             }
1347 
1348             public virtual int Count
1349             {
1350                 get { return _hashtable._count; }
1351             }
1352         }
1353 
1354         // Synchronized wrapper for hashtable
1355         [Serializable]
1356         private class SyncHashtable : Hashtable, IEnumerable
1357         {
1358             protected Hashtable _table;
1359 
SyncHashtable(Hashtable table)1360             internal SyncHashtable(Hashtable table) : base(false)
1361             {
1362                 _table = table;
1363             }
1364 
SyncHashtable(SerializationInfo info, StreamingContext context)1365             internal SyncHashtable(SerializationInfo info, StreamingContext context) : base(info, context)
1366             {
1367                 throw new PlatformNotSupportedException();
1368             }
1369 
GetObjectData(SerializationInfo info, StreamingContext context)1370             public override void GetObjectData(SerializationInfo info, StreamingContext context)
1371             {
1372                 throw new PlatformNotSupportedException();
1373             }
1374 
1375             public override int Count
1376             {
1377                 get { return _table.Count; }
1378             }
1379 
1380             public override bool IsReadOnly
1381             {
1382                 get { return _table.IsReadOnly; }
1383             }
1384 
1385             public override bool IsFixedSize
1386             {
1387                 get { return _table.IsFixedSize; }
1388             }
1389 
1390             public override bool IsSynchronized
1391             {
1392                 get { return true; }
1393             }
1394 
1395             public override Object this[Object key]
1396             {
1397                 get
1398                 {
1399                     return _table[key];
1400                 }
1401                 set
1402                 {
1403                     lock (_table.SyncRoot)
1404                     {
1405                         _table[key] = value;
1406                     }
1407                 }
1408             }
1409 
1410             public override Object SyncRoot
1411             {
1412                 get { return _table.SyncRoot; }
1413             }
1414 
Add(Object key, Object value)1415             public override void Add(Object key, Object value)
1416             {
1417                 lock (_table.SyncRoot)
1418                 {
1419                     _table.Add(key, value);
1420                 }
1421             }
1422 
Clear()1423             public override void Clear()
1424             {
1425                 lock (_table.SyncRoot)
1426                 {
1427                     _table.Clear();
1428                 }
1429             }
1430 
Contains(Object key)1431             public override bool Contains(Object key)
1432             {
1433                 return _table.Contains(key);
1434             }
1435 
ContainsKey(Object key)1436             public override bool ContainsKey(Object key)
1437             {
1438                 if (key == null)
1439                 {
1440                     throw new ArgumentNullException(nameof(key), SR.ArgumentNull_Key);
1441                 }
1442                 return _table.ContainsKey(key);
1443             }
1444 
ContainsValue(Object key)1445             public override bool ContainsValue(Object key)
1446             {
1447                 lock (_table.SyncRoot)
1448                 {
1449                     return _table.ContainsValue(key);
1450                 }
1451             }
1452 
CopyTo(Array array, int arrayIndex)1453             public override void CopyTo(Array array, int arrayIndex)
1454             {
1455                 lock (_table.SyncRoot)
1456                 {
1457                     _table.CopyTo(array, arrayIndex);
1458                 }
1459             }
1460 
Clone()1461             public override Object Clone()
1462             {
1463                 lock (_table.SyncRoot)
1464                 {
1465                     return Hashtable.Synchronized((Hashtable)_table.Clone());
1466                 }
1467             }
1468 
IEnumerable.GetEnumerator()1469             IEnumerator IEnumerable.GetEnumerator()
1470             {
1471                 return _table.GetEnumerator();
1472             }
1473 
GetEnumerator()1474             public override IDictionaryEnumerator GetEnumerator()
1475             {
1476                 return _table.GetEnumerator();
1477             }
1478 
1479             public override ICollection Keys
1480             {
1481                 get
1482                 {
1483                     lock (_table.SyncRoot)
1484                     {
1485                         return _table.Keys;
1486                     }
1487                 }
1488             }
1489 
1490             public override ICollection Values
1491             {
1492                 get
1493                 {
1494                     lock (_table.SyncRoot)
1495                     {
1496                         return _table.Values;
1497                     }
1498                 }
1499             }
1500 
Remove(Object key)1501             public override void Remove(Object key)
1502             {
1503                 lock (_table.SyncRoot)
1504                 {
1505                     _table.Remove(key);
1506                 }
1507             }
1508 
OnDeserialization(Object sender)1509             public override void OnDeserialization(Object sender)
1510             {
1511                 // Does nothing.  We have to implement this because our parent HT implements it,
1512                 // but it doesn't do anything meaningful.  The real work will be done when we
1513                 // call OnDeserialization on our parent table.
1514             }
1515 
ToKeyValuePairsArray()1516             internal override KeyValuePairs[] ToKeyValuePairsArray()
1517             {
1518                 return _table.ToKeyValuePairsArray();
1519             }
1520         }
1521 
1522 
        // Implements an enumerator for a hashtable. The enumerator uses the
        // internal version number of the hashtable to ensure that no modifications
        // are made to the hashtable while an enumeration is in progress.
        [Serializable]
        private class HashtableEnumerator : IDictionaryEnumerator, ICloneable
        {
            private Hashtable _hashtable;
            private int _bucket;             // index of the next bucket to examine; walks downward from buckets.Length
            private int _version;            // table version captured at construction; any mismatch fails the enumeration
            private bool _current;           // true iff _currentKey/_currentValue hold a valid entry
            private int _getObjectRetType;   // What should GetObject return?
            private Object _currentKey;
            private Object _currentValue;

            // Allowed values for _getObjectRetType, selecting what Current yields.
            internal const int Keys = 1;
            internal const int Values = 2;
            internal const int DictEntry = 3;

            internal HashtableEnumerator(Hashtable hashtable, int getObjRetType)
            {
                _hashtable = hashtable;
                _bucket = hashtable._buckets.Length;
                _version = hashtable._version;
                _current = false;
                _getObjectRetType = getObjRetType;
            }

            // Shallow copy is sufficient: it duplicates the cursor state so the
            // clone continues independently from the same position.
            public object Clone() => MemberwiseClone();

            public virtual Object Key
            {
                get
                {
                    // NOTE(review): Key throws EnumNotStarted while Entry/Current/Value
                    // throw EnumOpCantHappen for the same invalid state -- a historical
                    // inconsistency preserved for compatibility.
                    if (_current == false) throw new InvalidOperationException(SR.InvalidOperation_EnumNotStarted);
                    return _currentKey;
                }
            }

            public virtual bool MoveNext()
            {
                if (_version != _hashtable._version) throw new InvalidOperationException(SR.InvalidOperation_EnumFailedVersion);
                while (_bucket > 0)
                {
                    _bucket--;
                    Object keyv = _hashtable._buckets[_bucket].key;
                    // A bucket is live when its key is non-null and is not the buckets
                    // array itself (presumably the sentinel Hashtable stores for removed
                    // entries -- confirm against Hashtable.Remove, not visible here).
                    if ((keyv != null) && (keyv != _hashtable._buckets))
                    {
                        _currentKey = keyv;
                        _currentValue = _hashtable._buckets[_bucket].val;
                        _current = true;
                        return true;
                    }
                }
                _current = false;
                return false;
            }

            public virtual DictionaryEntry Entry
            {
                get
                {
                    if (_current == false) throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
                    return new DictionaryEntry(_currentKey, _currentValue);
                }
            }


            // Returns the key, the value, or a DictionaryEntry depending on the
            // mode chosen at construction (_getObjectRetType).
            public virtual Object Current
            {
                get
                {
                    if (_current == false) throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);

                    if (_getObjectRetType == Keys)
                        return _currentKey;
                    else if (_getObjectRetType == Values)
                        return _currentValue;
                    else
                        return new DictionaryEntry(_currentKey, _currentValue);
                }
            }

            public virtual Object Value
            {
                get
                {
                    if (_current == false) throw new InvalidOperationException(SR.InvalidOperation_EnumOpCantHappen);
                    return _currentValue;
                }
            }

            // Rewinds to the pre-start state; only valid if the table is unmodified.
            public virtual void Reset()
            {
                if (_version != _hashtable._version) throw new InvalidOperationException(SR.InvalidOperation_EnumFailedVersion);
                _current = false;
                _bucket = _hashtable._buckets.Length;
                _currentKey = null;
                _currentValue = null;
            }
        }
1623 
1624         // internal debug view class for hashtable
1625         internal class HashtableDebugView
1626         {
1627             private Hashtable _hashtable;
1628 
HashtableDebugView(Hashtable hashtable)1629             public HashtableDebugView(Hashtable hashtable)
1630             {
1631                 if (hashtable == null)
1632                 {
1633                     throw new ArgumentNullException(nameof(hashtable));
1634                 }
1635 
1636                 _hashtable = hashtable;
1637             }
1638 
1639             [DebuggerBrowsable(DebuggerBrowsableState.RootHidden)]
1640             public KeyValuePairs[] Items
1641             {
1642                 get
1643                 {
1644                     return _hashtable.ToKeyValuePairsArray();
1645                 }
1646             }
1647         }
1648     }
1649 
1650     internal static class HashHelpers
1651     {
1652         // Table of prime numbers to use as hash table sizes.
1653         // A typical resize algorithm would pick the smallest prime number in this array
1654         // that is larger than twice the previous capacity.
1655         // Suppose our Hashtable currently has capacity x and enough elements are added
1656         // such that a resize needs to occur. Resizing first computes 2x then finds the
1657         // first prime in the table greater than 2x, i.e. if primes are ordered
1658         // p_1, p_2, ..., p_i, ..., it finds p_n such that p_n-1 < 2x < p_n.
1659         // Doubling is important for preserving the asymptotic complexity of the
1660         // hashtable operations such as add.  Having a prime guarantees that double
1661         // hashing does not lead to infinite loops.  IE, your hash function will be
1662         // h1(key) + i*h2(key), 0 <= i < size.  h2 and the size must be relatively prime.
1663         public static readonly int[] primes = {
1664             3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919,
1665             1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591,
1666             17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437,
1667             187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263,
1668             1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369};
1669 
IsPrime(int candidate)1670         public static bool IsPrime(int candidate)
1671         {
1672             if ((candidate & 1) != 0)
1673             {
1674                 int limit = (int)Math.Sqrt(candidate);
1675                 for (int divisor = 3; divisor <= limit; divisor += 2)
1676                 {
1677                     if ((candidate % divisor) == 0)
1678                         return false;
1679                 }
1680                 return true;
1681             }
1682             return (candidate == 2);
1683         }
1684 
GetPrime(int min)1685         public static int GetPrime(int min)
1686         {
1687             if (min < 0)
1688                 throw new ArgumentException(SR.Arg_HTCapacityOverflow);
1689 
1690             for (int i = 0; i < primes.Length; i++)
1691             {
1692                 int prime = primes[i];
1693                 if (prime >= min) return prime;
1694             }
1695 
1696             //outside of our predefined table.
1697             //compute the hard way.
1698             for (int i = (min | 1); i < Int32.MaxValue; i += 2)
1699             {
1700                 if (IsPrime(i) && ((i - 1) % Hashtable.HashPrime != 0))
1701                     return i;
1702             }
1703             return min;
1704         }
1705 
1706         // Returns size of hashtable to grow to.
ExpandPrime(int oldSize)1707         public static int ExpandPrime(int oldSize)
1708         {
1709             int newSize = 2 * oldSize;
1710 
1711             // Allow the hashtables to grow to maximum possible size (~2G elements) before encountering capacity overflow.
1712             // Note that this check works even when _items.Length overflowed thanks to the (uint) cast
1713             if ((uint)newSize > MaxPrimeArrayLength && MaxPrimeArrayLength > oldSize)
1714             {
1715                 Debug.Assert(MaxPrimeArrayLength == GetPrime(MaxPrimeArrayLength), "Invalid MaxPrimeArrayLength");
1716                 return MaxPrimeArrayLength;
1717             }
1718 
1719             return GetPrime(newSize);
1720         }
1721 
1722 
1723         // This is the maximum prime smaller than Array.MaxArrayLength
1724         public const int MaxPrimeArrayLength = 0x7FEFFFFD;
1725 
1726         private static ConditionalWeakTable<object, SerializationInfo> s_serializationInfoTable;
1727         public static ConditionalWeakTable<object, SerializationInfo> SerializationInfoTable => LazyInitializer.EnsureInitialized(ref s_serializationInfoTable);
1728     }
1729 }
1730