1 // Protocol Buffers - Google's data interchange format
2 // Copyright 2008 Google Inc.  All rights reserved.
3 // https://developers.google.com/protocol-buffers/
4 //
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
7 // met:
8 //
9 //     * Redistributions of source code must retain the above copyright
10 // notice, this list of conditions and the following disclaimer.
11 //     * Redistributions in binary form must reproduce the above
12 // copyright notice, this list of conditions and the following disclaimer
13 // in the documentation and/or other materials provided with the
14 // distribution.
15 //     * Neither the name of Google Inc. nor the names of its
16 // contributors may be used to endorse or promote products derived from
17 // this software without specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // This file defines the map container and its helpers to support protobuf maps.
32 //
33 // The Map and MapIterator types are provided by this header file.
34 // Please avoid using other types defined here, unless they are public
35 // types within Map or MapIterator, such as Map::value_type.
36 
37 #ifndef GOOGLE_PROTOBUF_MAP_H__
38 #define GOOGLE_PROTOBUF_MAP_H__
39 
40 #include <iterator>
41 #include <limits>  // To support Visual Studio 2008
42 #include <set>
43 #include <utility>
44 
45 #include <google/protobuf/stubs/common.h>
46 #include <google/protobuf/arena.h>
47 #include <google/protobuf/generated_enum_util.h>
48 #include <google/protobuf/map_type_handler.h>
49 #include <google/protobuf/stubs/hash.h>
50 
51 namespace google {
52 namespace protobuf {
53 
54 template <typename Key, typename T>
55 class Map;
56 
57 class MapIterator;
58 
59 template <typename Enum> struct is_proto_enum;
60 
61 namespace internal {
62 template <typename Derived, typename Key, typename T,
63           WireFormatLite::FieldType key_wire_type,
64           WireFormatLite::FieldType value_wire_type, int default_enum_value>
65 class MapFieldLite;
66 
67 template <typename Derived, typename Key, typename T,
68           WireFormatLite::FieldType key_wire_type,
69           WireFormatLite::FieldType value_wire_type, int default_enum_value>
70 class MapField;
71 
72 template <typename Key, typename T>
73 class TypeDefinedMapFieldBase;
74 
75 class DynamicMapField;
76 
77 class GeneratedMessageReflection;
78 }  // namespace internal
79 
80 // This is the class for google::protobuf::Map's internal value_type. Instead of using
81 // std::pair as value_type, we use this class which provides us more control of
82 // its process of construction and destruction.
template <typename Key, typename T>
class MapPair {
 public:
  typedef const Key first_type;
  typedef T second_type;

  // Construct with both members supplied.
  MapPair(const Key& other_first, const T& other_second)
      : first(other_first), second(other_second) {}
  // Construct from a key alone; the value is default-constructed.
  explicit MapPair(const Key& other_first) : first(other_first), second() {}
  MapPair(const MapPair& other)
      : first(other.first), second(other.second) {}

  ~MapPair() {}

  // Implicitly convertible to std::pair of compatible types.
  template <typename T1, typename T2>
  operator std::pair<T1, T2>() const {
    return std::pair<T1, T2>(first, second);
  }

  // The key is const to mirror std::pair<const Key, T>: once an element is
  // stored in a map, its key must never change.
  const Key first;
  T second;

 private:
  // Arena and Map need private access for in-place construction/destruction.
  friend class ::google::protobuf::Arena;
  friend class Map<Key, T>;
};
110 
111 // google::protobuf::Map is an associative container type used to store protobuf map
112 // fields.  Each Map instance may or may not use a different hash function, a
113 // different iteration order, and so on.  E.g., please don't examine
114 // implementation details to decide if the following would work:
115 //  Map<int, int> m0, m1;
116 //  m0[0] = m1[0] = m0[1] = m1[1] = 0;
117 //  assert(m0.begin()->first == m1.begin()->first);  // Bug!
118 //
119 // Map's interface is similar to std::unordered_map, except that Map is not
120 // designed to play well with exceptions.
template <typename Key, typename T>
class Map {
 public:
  typedef Key key_type;
  typedef T mapped_type;
  typedef MapPair<Key, T> value_type;

  typedef value_type* pointer;
  typedef const value_type* const_pointer;
  typedef value_type& reference;
  typedef const value_type& const_reference;

  typedef size_t size_type;
  typedef hash<Key> hasher;

  // Heap-backed map; storage is released by the destructor.
  Map() : arena_(NULL), default_enum_value_(0) { Init(); }
  // Arena-backed map; all memory is owned by *arena and freed with it.
  explicit Map(Arena* arena) : arena_(arena), default_enum_value_(0) { Init(); }

  // Copies always produce a heap-backed map, regardless of other's arena.
  Map(const Map& other)
      : arena_(NULL), default_enum_value_(other.default_enum_value_) {
    Init();
    insert(other.begin(), other.end());
  }

  // Range constructor: copy [first, last) into a fresh heap-backed map.
  template <class InputIt>
  Map(const InputIt& first, const InputIt& last)
      : arena_(NULL), default_enum_value_(0) {
    Init();
    insert(first, last);
  }

  ~Map() {
    clear();
    // Arena-owned storage is reclaimed by the arena, not here.
    if (arena_ == NULL) {
      delete elements_;
    }
  }

 private:
  // Allocate the backing hash table, on the arena when one was given.
  void Init() {
    elements_ = Arena::Create<InnerMap>(arena_, 0u, hasher(), Allocator(arena_));
  }
163 
  // re-implement std::allocator to use arena allocator for memory allocation.
  // Used for google::protobuf::Map implementation. Users should not use this class
  // directly.
  template <typename U>
  class MapAllocator {
   public:
    typedef U value_type;
    typedef value_type* pointer;
    typedef const value_type* const_pointer;
    typedef value_type& reference;
    typedef const value_type& const_reference;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;

    MapAllocator() : arena_(NULL) {}
    explicit MapAllocator(Arena* arena) : arena_(arena) {}
    // Converting constructor required by the Allocator concept (for rebind).
    template <typename X>
    MapAllocator(const MapAllocator<X>& allocator)
        : arena_(allocator.arena()) {}

    pointer allocate(size_type n, const void* /* hint */ = 0) {
      // If arena is not given, malloc needs to be called which doesn't
      // construct element object.
      if (arena_ == NULL) {
        return static_cast<pointer>(::operator new(n * sizeof(value_type)));
      } else {
        return reinterpret_cast<pointer>(
            Arena::CreateArray<uint8>(arena_, n * sizeof(value_type)));
      }
    }

    void deallocate(pointer p, size_type n) {
      // Arena-backed memory is released en masse when the arena dies, so only
      // heap-backed memory is freed here.
      if (arena_ == NULL) {
#if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation)
        ::operator delete(p, n * sizeof(value_type));
#else
        (void)n;
        ::operator delete(p);
#endif
      }
    }

#if __cplusplus >= 201103L && !defined(GOOGLE_PROTOBUF_OS_APPLE) && \
    !defined(GOOGLE_PROTOBUF_OS_NACL) &&                            \
    !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN)
    template<class NodeType, class... Args>
    void construct(NodeType* p, Args&&... args) {
      // Clang 3.6 doesn't compile static casting to void* directly. (Issue
      // #1266) According C++ standard 5.2.9/1: "The static_cast operator shall
      // not cast away constness". So first the maybe const pointer is casted to
      // const void* and after the const void* is const casted.
      new (const_cast<void*>(static_cast<const void*>(p)))
          NodeType(std::forward<Args>(args)...);
    }

    template<class NodeType>
    void destroy(NodeType* p) {
      p->~NodeType();
    }
#else
    void construct(pointer p, const_reference t) { new (p) value_type(t); }

    void destroy(pointer p) { p->~value_type(); }
#endif

    template <typename X>
    struct rebind {
      typedef MapAllocator<X> other;
    };

    // Allocators compare equal iff they draw from the same arena (or both
    // from the heap).
    template <typename X>
    bool operator==(const MapAllocator<X>& other) const {
      return arena_ == other.arena_;
    }

    template <typename X>
    bool operator!=(const MapAllocator<X>& other) const {
      return arena_ != other.arena_;
    }

    // To support Visual Studio 2008
    size_type max_size() const {
      // parentheses around (std::...:max) prevents macro warning of max()
      return (std::numeric_limits<size_type>::max)();
    }

    // To support gcc-4.4, which does not properly
    // support templated friend classes
    Arena* arena() const {
      return arena_;
    }

   private:
    // Marker telling Arena it may skip registering this type's destructor.
    typedef void DestructorSkippable_;
    Arena* const arena_;
  };
260 
  // InnerMap's key type is Key and its value type is value_type*.  We use a
  // custom class here and for Node, below, to ensure that k_ is at offset 0,
  // allowing safe conversion from pointer to Node to pointer to Key, and vice
  // versa when appropriate.
  class KeyValuePair {
   public:
    KeyValuePair(const Key& k, value_type* v) : k_(k), v_(v) {}

    const Key& key() const { return k_; }
    Key& key() { return k_; }
    value_type* value() const { return v_; }
    value_type*& value() { return v_; }

   private:
    Key k_;          // Must remain the first member: see offset-0 note above.
    value_type* v_;
  };

  typedef MapAllocator<KeyValuePair> Allocator;
280 
  // InnerMap is a generic hash-based map.  It doesn't contain any
  // protocol-buffer-specific logic.  It is a chaining hash map with the
  // additional feature that some buckets can be converted to use an ordered
  // container.  This ensures O(lg n) bounds on find, insert, and erase, while
  // avoiding the overheads of ordered containers most of the time.
  //
  // The implementation doesn't need the full generality of unordered_map,
  // and it doesn't have it.  More bells and whistles can be added as needed.
  // Some implementation details:
  // 1. The hash function has type hasher and the equality function
  //    equal_to<Key>.  We inherit from hasher to save space
  //    (empty-base-class optimization).
  // 2. The number of buckets is a power of two.
  // 3. Buckets are converted to trees in pairs: if we convert bucket b then
  //    buckets b and b^1 will share a tree.  Invariant: buckets b and b^1 have
  //    the same non-NULL value iff they are sharing a tree.  (An alternative
  //    implementation strategy would be to have a tag bit per bucket.)
  // 4. As is typical for hash_map and such, the Keys and Values are always
  //    stored in linked list nodes.  Pointers to elements are never invalidated
  //    until the element is deleted.
  // 5. The trees' payload type is pointer to linked-list node.  Tree-converting
  //    a bucket doesn't copy Key-Value pairs.
  // 6. Once we've tree-converted a bucket, it is never converted back. However,
  //    the items a tree contains may wind up assigned to trees or lists upon a
  //    rehash.
  // 7. The code requires no C++ features from C++11 or later.
  // 8. Mutations to a map do not invalidate the map's iterators, pointers to
  //    elements, or references to elements.
  // 9. Except for erase(iterator), any non-const method can reorder iterators.
  class InnerMap : private hasher {
   public:
    typedef value_type* Value;

    InnerMap(size_type n, hasher h, Allocator alloc)
        : hasher(h),
          num_elements_(0),
          seed_(Seed()),
          table_(NULL),
          alloc_(alloc) {
      // Round n up to a valid table size and start with an all-empty table;
      // index_of_first_non_null_ == num_buckets_ encodes "no elements".
      n = TableSize(n);
      table_ = CreateEmptyTable(n);
      num_buckets_ = index_of_first_non_null_ = n;
    }

    ~InnerMap() {
      if (table_ != NULL) {
        clear();
        Dealloc<void*>(table_, num_buckets_);
      }
    }
331 
   private:
    enum { kMinTableSize = 8 };

    // Linked-list nodes, as one would expect for a chaining hash table.
    struct Node {
      KeyValuePair kv;  // Must be first so Node* and Key* interconvert.
      Node* next;
    };

    // This is safe only if the given pointer is known to point to a Key that is
    // part of a Node.
    static Node* NodePtrFromKeyPtr(Key* k) {
      return reinterpret_cast<Node*>(k);
    }

    static Key* KeyPtrFromNodePtr(Node* node) { return &node->kv.key(); }

    // Trees.  The payload type is pointer to Key, so that we can query the tree
    // with Keys that are not in any particular data structure.  When we insert,
    // though, the pointer is always pointing to a Key that is inside a Node.
    struct KeyCompare {
      // Order Key pointers by the Keys they point at, not by address.
      bool operator()(const Key* n0, const Key* n1) const { return *n0 < *n1; }
    };
    typedef typename Allocator::template rebind<Key*>::other KeyPtrAllocator;
    typedef std::set<Key*, KeyCompare, KeyPtrAllocator> Tree;
    typedef typename Tree::iterator TreeIterator;
358 
    // iterator and const_iterator are instantiations of iterator_base.
    template <typename KeyValueType>
    struct iterator_base {
      typedef KeyValueType& reference;
      typedef KeyValueType* pointer;

      // Invariants:
      // node_ is always correct. This is handy because the most common
      // operations are operator* and operator-> and they only use node_.
      // When node_ is set to a non-NULL value, all the other non-const fields
      // are updated to be correct also, but those fields can become stale
      // if the underlying map is modified.  When those fields are needed they
      // are rechecked, and updated if necessary.
      iterator_base() : node_(NULL), m_(NULL), bucket_index_(0) {}

      // Begin-iterator: position on the map's first element, if any.
      explicit iterator_base(const InnerMap* m) : m_(m) {
        SearchFrom(m->index_of_first_non_null_);
      }

      // Any iterator_base can convert to any other.  This is overkill, and we
      // rely on the enclosing class to use it wisely.  The standard "iterator
      // can convert to const_iterator" is OK but the reverse direction is not.
      template <typename U>
      explicit iterator_base(const iterator_base<U>& it)
          : node_(it.node_), m_(it.m_), bucket_index_(it.bucket_index_) {}

      iterator_base(Node* n, const InnerMap* m, size_type index)
          : node_(n), m_(m), bucket_index_(index) {}

      iterator_base(TreeIterator tree_it, const InnerMap* m, size_type index)
          : node_(NodePtrFromKeyPtr(*tree_it)), m_(m), bucket_index_(index) {
        // Invariant: iterators that use buckets with trees have an even
        // bucket_index_.
        GOOGLE_DCHECK_EQ(bucket_index_ % 2, 0);
      }

      // Advance through buckets, looking for the first that isn't empty.
      // If nothing non-empty is found then leave node_ == NULL.
      void SearchFrom(size_type start_bucket) {
        GOOGLE_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ ||
               m_->table_[m_->index_of_first_non_null_] != NULL);
        node_ = NULL;
        for (bucket_index_ = start_bucket; bucket_index_ < m_->num_buckets_;
             bucket_index_++) {
          if (m_->TableEntryIsNonEmptyList(bucket_index_)) {
            node_ = static_cast<Node*>(m_->table_[bucket_index_]);
            break;
          } else if (m_->TableEntryIsTree(bucket_index_)) {
            Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);
            GOOGLE_DCHECK(!tree->empty());
            node_ = NodePtrFromKeyPtr(*tree->begin());
            break;
          }
        }
      }

      reference operator*() const { return node_->kv; }
      pointer operator->() const { return &(operator*()); }

      // Iterators compare equal iff they point at the same node; the cached
      // bucket_index_ may legitimately differ (it can be stale).
      friend bool operator==(const iterator_base& a, const iterator_base& b) {
        return a.node_ == b.node_;
      }
      friend bool operator!=(const iterator_base& a, const iterator_base& b) {
        return a.node_ != b.node_;
      }

      iterator_base& operator++() {
        if (node_->next == NULL) {
          // End of a list chain, or a tree node (which always has next==NULL).
          // Re-establish where we really are, then advance.
          TreeIterator tree_it;
          const bool is_list = revalidate_if_necessary(&tree_it);
          if (is_list) {
            SearchFrom(bucket_index_ + 1);
          } else {
            GOOGLE_DCHECK_EQ(bucket_index_ & 1, 0);
            Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]);
            if (++tree_it == tree->end()) {
              // Trees occupy bucket pairs (b, b+1), so skip past both.
              SearchFrom(bucket_index_ + 2);
            } else {
              node_ = NodePtrFromKeyPtr(*tree_it);
            }
          }
        } else {
          node_ = node_->next;
        }
        return *this;
      }

      iterator_base operator++(int /* unused */) {
        iterator_base tmp = *this;
        ++*this;
        return tmp;
      }

      // Assumes node_ and m_ are correct and non-NULL, but other fields may be
      // stale.  Fix them as needed.  Then return true iff node_ points to a
      // Node in a list.  If false is returned then *it is modified to be
      // a valid iterator for node_.
      bool revalidate_if_necessary(TreeIterator* it) {
        GOOGLE_DCHECK(node_ != NULL && m_ != NULL);
        // Force bucket_index_ to be in range (num_buckets_ is a power of two).
        bucket_index_ &= (m_->num_buckets_ - 1);
        // Common case: the bucket we think is relevant points to node_.
        if (m_->table_[bucket_index_] == static_cast<void*>(node_))
          return true;
        // Less common: the bucket is a linked list with node_ somewhere in it,
        // but not at the head.
        if (m_->TableEntryIsNonEmptyList(bucket_index_)) {
          Node* l = static_cast<Node*>(m_->table_[bucket_index_]);
          while ((l = l->next) != NULL) {
            if (l == node_) {
              return true;
            }
          }
        }
        // Well, bucket_index_ still might be correct, but probably
        // not.  Revalidate just to be sure.  This case is rare enough that we
        // don't worry about potential optimizations, such as having a custom
        // find-like method that compares Node* instead of const Key&.
        iterator_base i(m_->find(*KeyPtrFromNodePtr(node_), it));
        bucket_index_ = i.bucket_index_;
        return m_->TableEntryIsList(bucket_index_);
      }

      Node* node_;
      const InnerMap* m_;
      size_type bucket_index_;
    };
486 
   public:
    typedef iterator_base<KeyValuePair> iterator;
    typedef iterator_base<const KeyValuePair> const_iterator;

    iterator begin() { return iterator(this); }
    iterator end() { return iterator(); }
    const_iterator begin() const { return const_iterator(this); }
    const_iterator end() const { return const_iterator(); }

    // Destroy every element and mark all buckets empty.  The bucket array
    // itself is retained for reuse.
    void clear() {
      for (size_type b = 0; b < num_buckets_; b++) {
        if (TableEntryIsNonEmptyList(b)) {
          // Walk the chain, destroying each node.
          Node* node = static_cast<Node*>(table_[b]);
          table_[b] = NULL;
          do {
            Node* next = node->next;
            DestroyNode(node);
            node = next;
          } while (node != NULL);
        } else if (TableEntryIsTree(b)) {
          // A tree is shared by buckets b and b+1; clear both slots, destroy
          // every node, then the tree itself.
          Tree* tree = static_cast<Tree*>(table_[b]);
          GOOGLE_DCHECK(table_[b] == table_[b + 1] && (b & 1) == 0);
          table_[b] = table_[b + 1] = NULL;
          typename Tree::iterator tree_it = tree->begin();
          do {
            Node* node = NodePtrFromKeyPtr(*tree_it);
            typename Tree::iterator next = tree_it;
            ++next;
            tree->erase(tree_it);
            DestroyNode(node);
            tree_it = next;
          } while (tree_it != tree->end());
          DestroyTree(tree);
          b++;  // Skip b+1; it shared the tree just destroyed.
        }
      }
      num_elements_ = 0;
      index_of_first_non_null_ = num_buckets_;
    }

    const hasher& hash_function() const { return *this; }

    static size_type max_size() {
      return static_cast<size_type>(1) << (sizeof(void**) >= 8 ? 60 : 28);
    }
    size_type size() const { return num_elements_; }
    bool empty() const { return size() == 0; }

    iterator find(const Key& k) { return iterator(FindHelper(k).first); }
    const_iterator find(const Key& k) const { return find(k, NULL); }
537 
    // In traditional C++ style, this performs "insert if not present."
    // Returns (iterator to the element, true-if-inserted).
    std::pair<iterator, bool> insert(const KeyValuePair& kv) {
      std::pair<const_iterator, size_type> p = FindHelper(kv.key());
      // Case 1: key was already present.
      if (p.first.node_ != NULL)
        return std::make_pair(iterator(p.first), false);
      // Case 2: insert.
      if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {
        // A resize invalidated the bucket number; recompute it.
        p = FindHelper(kv.key());
      }
      const size_type b = p.second;  // bucket number
      Node* node = Alloc<Node>(1);
      alloc_.construct(&node->kv, kv);
      iterator result = InsertUnique(b, node);
      ++num_elements_;
      return std::make_pair(result, true);
    }

    // The same, but if an insertion is necessary then the value portion of the
    // inserted key-value pair is left uninitialized.
    std::pair<iterator, bool> insert(const Key& k) {
      std::pair<const_iterator, size_type> p = FindHelper(k);
      // Case 1: key was already present.
      if (p.first.node_ != NULL)
        return std::make_pair(iterator(p.first), false);
      // Case 2: insert.
      if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {
        p = FindHelper(k);
      }
      const size_type b = p.second;  // bucket number
      Node* node = Alloc<Node>(1);
      // Construct only the key; the value pointer stays uninitialized until
      // the caller fills it in.
      typedef typename Allocator::template rebind<Key>::other KeyAllocator;
      KeyAllocator(alloc_).construct(&node->kv.key(), k);
      iterator result = InsertUnique(b, node);
      ++num_elements_;
      return std::make_pair(result, true);
    }

    // Find-or-insert: returns a reference to the value slot for k.
    Value& operator[](const Key& k) {
      KeyValuePair kv(k, Value());
      return insert(kv).first->value();
    }
580 
erase(iterator it)581     void erase(iterator it) {
582       GOOGLE_DCHECK_EQ(it.m_, this);
583       typename Tree::iterator tree_it;
584       const bool is_list = it.revalidate_if_necessary(&tree_it);
585       size_type b = it.bucket_index_;
586       Node* const item = it.node_;
587       if (is_list) {
588         GOOGLE_DCHECK(TableEntryIsNonEmptyList(b));
589         Node* head = static_cast<Node*>(table_[b]);
590         head = EraseFromLinkedList(item, head);
591         table_[b] = static_cast<void*>(head);
592       } else {
593         GOOGLE_DCHECK(TableEntryIsTree(b));
594         Tree* tree = static_cast<Tree*>(table_[b]);
595         tree->erase(*tree_it);
596         if (tree->empty()) {
597           // Force b to be the minimum of b and b ^ 1.  This is important
598           // only because we want index_of_first_non_null_ to be correct.
599           b &= ~static_cast<size_type>(1);
600           DestroyTree(tree);
601           table_[b] = table_[b + 1] = NULL;
602         }
603       }
604       DestroyNode(item);
605       --num_elements_;
606       if (GOOGLE_PREDICT_FALSE(b == index_of_first_non_null_)) {
607         while (index_of_first_non_null_ < num_buckets_ &&
608                table_[index_of_first_non_null_] == NULL) {
609           ++index_of_first_non_null_;
610         }
611       }
612     }
613 
   private:
    const_iterator find(const Key& k, TreeIterator* it) const {
      return FindHelper(k, it).first;
    }
    std::pair<const_iterator, size_type> FindHelper(const Key& k) const {
      return FindHelper(k, NULL);
    }
    // Locate k.  Returns (iterator-to-element-or-end, bucket number).  The
    // bucket number is meaningful even when k is absent, so callers can insert
    // there.  If `it` is non-NULL and k lives in a tree, *it receives the tree
    // position.
    std::pair<const_iterator, size_type> FindHelper(const Key& k,
                                                    TreeIterator* it) const {
      size_type b = BucketNumber(k);
      if (TableEntryIsNonEmptyList(b)) {
        // Linear scan of the bucket's chain.
        Node* node = static_cast<Node*>(table_[b]);
        do {
          if (IsMatch(*KeyPtrFromNodePtr(node), k)) {
            return std::make_pair(const_iterator(node, this, b), b);
          } else {
            node = node->next;
          }
        } while (node != NULL);
      } else if (TableEntryIsTree(b)) {
        GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);
        // Trees are shared by bucket pairs; normalize to the even index.
        b &= ~static_cast<size_t>(1);
        Tree* tree = static_cast<Tree*>(table_[b]);
        Key* key = const_cast<Key*>(&k);
        typename Tree::iterator tree_it = tree->find(key);
        if (tree_it != tree->end()) {
          if (it != NULL) *it = tree_it;
          return std::make_pair(const_iterator(tree_it, this, b), b);
        }
      }
      return std::make_pair(end(), b);
    }
646 
    // Insert the given Node in bucket b.  If that would make bucket b too big,
    // and bucket b is not a tree, create a tree for buckets b and b^1 to share.
    // Requires count(*KeyPtrFromNodePtr(node)) == 0 and that b is the correct
    // bucket.  num_elements_ is not modified.
    iterator InsertUnique(size_type b, Node* node) {
      GOOGLE_DCHECK(index_of_first_non_null_ == num_buckets_ ||
             table_[index_of_first_non_null_] != NULL);
      // In practice, the code that led to this point may have already
      // determined whether we are inserting into an empty list, a short list,
      // or whatever.  But it's probably cheap enough to recompute that here;
      // it's likely that we're inserting into an empty or short list.
      iterator result;
      GOOGLE_DCHECK(find(*KeyPtrFromNodePtr(node)) == end());
      if (TableEntryIsEmpty(b)) {
        result = InsertUniqueInList(b, node);
      } else if (TableEntryIsNonEmptyList(b)) {
        if (GOOGLE_PREDICT_FALSE(TableEntryIsTooLong(b))) {
          // The chain got too long: convert buckets b and b^1 to a shared
          // tree, then insert there.
          TreeConvert(b);
          result = InsertUniqueInTree(b, node);
          GOOGLE_DCHECK_EQ(result.bucket_index_, b & ~static_cast<size_type>(1));
        } else {
          // Insert into a pre-existing list.  This case cannot modify
          // index_of_first_non_null_, so we skip the code to update it.
          return InsertUniqueInList(b, node);
        }
      } else {
        // Insert into a pre-existing tree.  This case cannot modify
        // index_of_first_non_null_, so we skip the code to update it.
        return InsertUniqueInTree(b, node);
      }
      // parentheses around (std::min) prevents macro expansion of min(...)
      index_of_first_non_null_ =
          (std::min)(index_of_first_non_null_, result.bucket_index_);
      return result;
    }
682 
683     // Helper for InsertUnique.  Handles the case where bucket b is a
684     // not-too-long linked list.
InsertUniqueInList(size_type b,Node * node)685     iterator InsertUniqueInList(size_type b, Node* node) {
686       node->next = static_cast<Node*>(table_[b]);
687       table_[b] = static_cast<void*>(node);
688       return iterator(node, this, b);
689     }
690 
    // Helper for InsertUnique.  Handles the case where bucket b points to a
    // Tree.
    iterator InsertUniqueInTree(size_type b, Node* node) {
      GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]);
      // Maintain the invariant that node->next is NULL for all Nodes in Trees.
      node->next = NULL;
      // The returned iterator is anchored at the even bucket of the pair.
      return iterator(static_cast<Tree*>(table_[b])
                      ->insert(KeyPtrFromNodePtr(node))
                      .first,
                      this, b & ~static_cast<size_t>(1));
    }
702 
    // Returns whether it did resize.  Currently this is only used when
    // num_elements_ increases, though it could be used in other situations.
    // It checks for load too low as well as load too high: because any number
    // of erases can occur between inserts, the load could be as low as 0 here.
    // Resizing to a lower size is not always helpful, but failing to do so can
    // destroy the expected big-O bounds for some operations. By having the
    // policy that sometimes we resize down as well as up, clients can easily
    // keep O(size()) = O(number of buckets) if they want that.
    bool ResizeIfLoadIsOutOfRange(size_type new_size) {
      const size_type kMaxMapLoadTimes16 = 12;  // controls RAM vs CPU tradeoff
      const size_type hi_cutoff = num_buckets_ * kMaxMapLoadTimes16 / 16;
      const size_type lo_cutoff = hi_cutoff / 4;
      // We don't care how many elements are in trees.  If a lot are,
      // we may resize even though there are many empty buckets.  In
      // practice, this seems fine.
      if (GOOGLE_PREDICT_FALSE(new_size >= hi_cutoff)) {
        // Too full: double, unless that would exceed max_size().
        if (num_buckets_ <= max_size() / 2) {
          Resize(num_buckets_ * 2);
          return true;
        }
      } else if (GOOGLE_PREDICT_FALSE(new_size <= lo_cutoff &&
                               num_buckets_ > kMinTableSize)) {
        size_type lg2_of_size_reduction_factor = 1;
        // It's possible we want to shrink a lot here... size() could even be 0.
        // So, estimate how much to shrink by making sure we don't shrink so
        // much that we would need to grow the table after a few inserts.
        const size_type hypothetical_size = new_size * 5 / 4 + 1;
        while ((hypothetical_size << lg2_of_size_reduction_factor) <
               hi_cutoff) {
          ++lg2_of_size_reduction_factor;
        }
        size_type new_num_buckets = std::max<size_type>(
            kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);
        if (new_num_buckets != num_buckets_) {
          Resize(new_num_buckets);
          return true;
        }
      }
      return false;
    }
743 
    // Resize to the given number of buckets: allocate a fresh bucket array
    // and rehash every existing node/tree into it.  Nodes themselves are
    // reused; only the bucket array is replaced.
    void Resize(size_t new_num_buckets) {
      GOOGLE_DCHECK_GE(new_num_buckets, kMinTableSize);
      void** const old_table = table_;
      const size_type old_table_size = num_buckets_;
      num_buckets_ = new_num_buckets;
      table_ = CreateEmptyTable(num_buckets_);
      // Buckets before index_of_first_non_null_ need not be scanned.
      // NOTE(review): the new index_of_first_non_null_ is reset to
      // num_buckets_ here — presumably InsertUnique (not visible in this
      // chunk) lowers it as entries are transferred; confirm.
      const size_type start = index_of_first_non_null_;
      index_of_first_non_null_ = num_buckets_;
      for (size_type i = start; i < old_table_size; i++) {
        if (TableEntryIsNonEmptyList(old_table, i)) {
          TransferList(old_table, i);
        } else if (TableEntryIsTree(old_table, i)) {
          // A tree is shared by the adjacent bucket pair (i, i ^ 1) — see
          // TreeConvert.  The extra i++ skips the partner bucket so the
          // tree is transferred only once.
          TransferTree(old_table, i++);
        }
      }
      Dealloc<void*>(old_table, old_table_size);
    }
762 
TransferList(void * const * table,size_type index)763     void TransferList(void* const* table, size_type index) {
764       Node* node = static_cast<Node*>(table[index]);
765       do {
766         Node* next = node->next;
767         InsertUnique(BucketNumber(*KeyPtrFromNodePtr(node)), node);
768         node = next;
769       } while (node != NULL);
770     }
771 
TransferTree(void * const * table,size_type index)772     void TransferTree(void* const* table, size_type index) {
773       Tree* tree = static_cast<Tree*>(table[index]);
774       typename Tree::iterator tree_it = tree->begin();
775       do {
776         Node* node = NodePtrFromKeyPtr(*tree_it);
777         InsertUnique(BucketNumber(**tree_it), node);
778       } while (++tree_it != tree->end());
779       DestroyTree(tree);
780     }
781 
EraseFromLinkedList(Node * item,Node * head)782     Node* EraseFromLinkedList(Node* item, Node* head) {
783       if (head == item) {
784         return head->next;
785       } else {
786         head->next = EraseFromLinkedList(item, head->next);
787         return head;
788       }
789     }
790 
    // Convenience overloads of the static bucket-classification helpers
    // below, applied to this map's own table_.
    bool TableEntryIsEmpty(size_type b) const {
      return TableEntryIsEmpty(table_, b);
    }
    bool TableEntryIsNonEmptyList(size_type b) const {
      return TableEntryIsNonEmptyList(table_, b);
    }
    bool TableEntryIsTree(size_type b) const {
      return TableEntryIsTree(table_, b);
    }
    bool TableEntryIsList(size_type b) const {
      return TableEntryIsList(table_, b);
    }
    // Bucket-state encoding (see TreeConvert, which stores the same Tree*
    // in both buckets of a pair):
    //   table[b] == NULL                                  -> empty
    //   table[b] != NULL && table[b] != table[b ^ 1]      -> linked list
    //   table[b] != NULL && table[b] == table[b ^ 1]      -> shared Tree
    static bool TableEntryIsEmpty(void* const* table, size_type b) {
      return table[b] == NULL;
    }
    static bool TableEntryIsNonEmptyList(void* const* table, size_type b) {
      return table[b] != NULL && table[b] != table[b ^ 1];
    }
    static bool TableEntryIsTree(void* const* table, size_type b) {
      return !TableEntryIsEmpty(table, b) &&
          !TableEntryIsNonEmptyList(table, b);
    }
    // An empty bucket counts as an (empty) list.
    static bool TableEntryIsList(void* const* table, size_type b) {
      return !TableEntryIsTree(table, b);
    }
816 
    // Convert the bucket pair (b, b ^ 1) from two linked lists into one
    // shared Tree.  Precondition: neither bucket already holds a tree.
    void TreeConvert(size_type b) {
      GOOGLE_DCHECK(!TableEntryIsTree(b) && !TableEntryIsTree(b ^ 1));
      typename Allocator::template rebind<Tree>::other tree_allocator(alloc_);
      Tree* tree = tree_allocator.allocate(1);
      // We want to use the three-arg form of construct, if it exists, but we
      // create a temporary and use the two-arg construct that's known to exist.
      // It's clunky, but the compiler should be able to generate more-or-less
      // the same code.
      tree_allocator.construct(tree,
                               Tree(KeyCompare(), KeyPtrAllocator(alloc_)));
      // Now the tree is ready to use.
      size_type count = CopyListToTree(b, tree) + CopyListToTree(b ^ 1, tree);
      GOOGLE_DCHECK_EQ(count, tree->size());
      // Point both buckets of the pair at the same tree; TableEntryIsTree
      // relies on this equality to recognize trees.
      table_[b] = table_[b ^ 1] = static_cast<void*>(tree);
    }
832 
833     // Copy a linked list in the given bucket to a tree.
834     // Returns the number of things it copied.
CopyListToTree(size_type b,Tree * tree)835     size_type CopyListToTree(size_type b, Tree* tree) {
836       size_type count = 0;
837       Node* node = static_cast<Node*>(table_[b]);
838       while (node != NULL) {
839         tree->insert(KeyPtrFromNodePtr(node));
840         ++count;
841         Node* next = node->next;
842         node->next = NULL;
843         node = next;
844       }
845       return count;
846     }
847 
848     // Return whether table_[b] is a linked list that seems awfully long.
849     // Requires table_[b] to point to a non-empty linked list.
TableEntryIsTooLong(size_type b)850     bool TableEntryIsTooLong(size_type b) {
851       const size_type kMaxLength = 8;
852       size_type count = 0;
853       Node* node = static_cast<Node*>(table_[b]);
854       do {
855         ++count;
856         node = node->next;
857       } while (node != NULL);
858       // Invariant: no linked list ever is more than kMaxLength in length.
859       GOOGLE_DCHECK_LE(count, kMaxLength);
860       return count >= kMaxLength;
861     }
862 
    // Map a key to a bucket index in [0, num_buckets_).
    size_type BucketNumber(const Key& k) const {
      // We inherit from hasher, so one-arg operator() provides a hash function.
      // NOTE: the const_cast permits calling a hasher whose operator() is
      // not const-qualified.
      size_type h = (*const_cast<InnerMap*>(this))(k);
      // num_buckets_ is a power of two (see CreateEmptyTable), so masking is
      // a cheap modulo; seed_ perturbs bucket choice per map instance.
      return (h + seed_) & (num_buckets_ - 1);
    }

    // Key equality, delegated to std::equal_to<Key>.
    bool IsMatch(const Key& k0, const Key& k1) const {
      return std::equal_to<Key>()(k0, k1);
    }
872 
873     // Return a power of two no less than max(kMinTableSize, n).
874     // Assumes either n < kMinTableSize or n is a power of two.
TableSize(size_type n)875     size_type TableSize(size_type n) {
876       return n < static_cast<size_type>(kMinTableSize)
877                  ? static_cast<size_type>(kMinTableSize)
878                  : n;
879     }
880 
    // Use alloc_ to allocate an array of n objects of type U.  The map's
    // allocator is rebound from value_type to U.
    template <typename U>
    U* Alloc(size_type n) {
      typedef typename Allocator::template rebind<U>::other alloc_type;
      return alloc_type(alloc_).allocate(n);
    }

    // Use alloc_ to deallocate an array of n objects of type U.
    // n must match the count passed to the corresponding Alloc<U>() call.
    template <typename U>
    void Dealloc(U* t, size_type n) {
      typedef typename Allocator::template rebind<U>::other alloc_type;
      alloc_type(alloc_).deallocate(t, n);
    }
894 
    // Destroy the key/value pair held in node, then free the node itself.
    void DestroyNode(Node* node) {
      alloc_.destroy(&node->kv);
      Dealloc<Node>(node, 1);
    }
899 
    // Destroy and deallocate a Tree created by TreeConvert.
    void DestroyTree(Tree* tree) {
      typename Allocator::template rebind<Tree>::other tree_allocator(alloc_);
      tree_allocator.destroy(tree);
      tree_allocator.deallocate(tree, 1);
    }
905 
    // Allocate a bucket array of n NULL entries.  n must be a power of two
    // no smaller than kMinTableSize.
    void** CreateEmptyTable(size_type n) {
      GOOGLE_DCHECK(n >= kMinTableSize);
      GOOGLE_DCHECK_EQ(n & (n - 1), 0);  // power-of-two check
      void** result = Alloc<void*>(n);
      memset(result, 0, n * sizeof(result[0]));
      return result;
    }
913 
    // Return a randomish value, used to perturb bucket selection (seed_).
    size_type Seed() const {
      // Start from this object's address as a cheap source of entropy.
      size_type s = static_cast<size_type>(reinterpret_cast<uintptr_t>(this));
#if defined(__x86_64__) && defined(__GNUC__)
      // Mix in the CPU timestamp counter where rdtsc is available.
      uint32 hi, lo;
      asm("rdtsc" : "=a" (lo), "=d" (hi));
      s += ((static_cast<uint64>(hi) << 32) | lo);
#endif
      return s;
    }
924 
    size_type num_elements_;  // number of key/value pairs stored
    size_type num_buckets_;   // size of table_; always a power of two
    size_type seed_;          // per-instance perturbation used by BucketNumber
    // Buckets below this index can be skipped when scanning — presumably all
    // empty; maintained by Resize (TODO confirm against InsertUnique).
    size_type index_of_first_non_null_;
    void** table_;  // an array with num_buckets_ entries
    Allocator alloc_;
    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(InnerMap);
932   };  // end of class InnerMap
933 
934  public:
935   // Iterators
  // Forward iterator yielding const references to the map's value_type
  // entries; a thin wrapper around InnerMap::const_iterator.
  class const_iterator {
    typedef typename InnerMap::const_iterator InnerIt;

   public:
    typedef std::forward_iterator_tag iterator_category;
    typedef typename Map::value_type value_type;
    typedef ptrdiff_t difference_type;
    typedef const value_type* pointer;
    typedef const value_type& reference;

    const_iterator() {}
    explicit const_iterator(const InnerIt& it) : it_(it) {}

    // The inner iterator stores a value_type*; dereference through it.
    const_reference operator*() const {
      return *it_->value();
    }
    const_pointer operator->() const { return &(operator*()); }

    const_iterator& operator++() {
      ++it_;
      return *this;
    }
    const_iterator operator++(int) { return const_iterator(it_++); }

    friend bool operator==(const const_iterator& a, const const_iterator& b) {
      return a.it_ == b.it_;
    }
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
      return !(a == b);
    }

   private:
    InnerIt it_;
  };
970 
  // Mutable forward iterator over the map's value_type entries; wraps
  // InnerMap::iterator.
  class iterator {
    typedef typename InnerMap::iterator InnerIt;

   public:
    typedef std::forward_iterator_tag iterator_category;
    typedef typename Map::value_type value_type;
    typedef ptrdiff_t difference_type;
    typedef value_type* pointer;
    typedef value_type& reference;

    iterator() {}
    explicit iterator(const InnerIt& it) : it_(it) {}

    reference operator*() const { return *it_->value(); }
    pointer operator->() const { return &(operator*()); }

    iterator& operator++() {
      ++it_;
      return *this;
    }
    iterator operator++(int) { return iterator(it_++); }

    // Allow implicit conversion to const_iterator.
    operator const_iterator() const {
      return const_iterator(typename InnerMap::const_iterator(it_));
    }

    friend bool operator==(const iterator& a, const iterator& b) {
      return a.it_ == b.it_;
    }
    friend bool operator!=(const iterator& a, const iterator& b) {
      return !(a == b);
    }

   private:
    // Map::erase(iterator) reaches into it_ directly.
    friend class Map;

    InnerIt it_;
  };
1010 
  iterator begin() { return iterator(elements_->begin()); }
  iterator end() { return iterator(elements_->end()); }
  // Const overloads route through the mutable inner iterator, then convert.
  const_iterator begin() const {
    return const_iterator(iterator(elements_->begin()));
  }
  const_iterator end() const {
    return const_iterator(iterator(elements_->end()));
  }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  // Capacity
  size_type size() const { return elements_->size(); }
  bool empty() const { return size() == 0; }
1025 
1026   // Element access
  // Return a reference to the value for key, creating the entry if absent
  // (like std::map::operator[]).
  T& operator[](const key_type& key) {
    // InnerMap::operator[] yields a reference to the stored value_type*
    // slot; a NULL slot means the key had no value yet.
    value_type** value =  &(*elements_)[key];
    if (*value == NULL) {
      *value = CreateValueTypeInternal(key);
      // For proto2 enum-valued maps, newly created values are set to the
      // field's default enum value rather than left at zero.
      internal::MapValueInitializer<google::protobuf::is_proto_enum<T>::value,
                                    T>::Initialize((*value)->second,
                                                   default_enum_value_);
    }
    return (*value)->second;
  }
  // Return the value for key; check-fails (GOOGLE_CHECK) if the key is
  // absent — unlike std::map::at, which throws.
  const T& at(const key_type& key) const {
    const_iterator it = find(key);
    GOOGLE_CHECK(it != end());
    return it->second;
  }
  // Mutable overload of at(); same check-fail behavior on a missing key.
  T& at(const key_type& key) {
    iterator it = find(key);
    GOOGLE_CHECK(it != end());
    return it->second;
  }
1047 
1048   // Lookup
count(const key_type & key)1049   size_type count(const key_type& key) const {
1050     const_iterator it = find(key);
1051     GOOGLE_DCHECK(it == end() || key == it->first);
1052     return it == end() ? 0 : 1;
1053   }
  // Return an iterator to the entry for key, or end() if absent.
  const_iterator find(const key_type& key) const {
    return const_iterator(iterator(elements_->find(key)));
  }
  iterator find(const key_type& key) { return iterator(elements_->find(key)); }
equal_range(const key_type & key)1058   std::pair<const_iterator, const_iterator> equal_range(
1059       const key_type& key) const {
1060     const_iterator it = find(key);
1061     if (it == end()) {
1062       return std::pair<const_iterator, const_iterator>(it, it);
1063     } else {
1064       const_iterator begin = it++;
1065       return std::pair<const_iterator, const_iterator>(begin, it);
1066     }
1067   }
equal_range(const key_type & key)1068   std::pair<iterator, iterator> equal_range(const key_type& key) {
1069     iterator it = find(key);
1070     if (it == end()) {
1071       return std::pair<iterator, iterator>(it, it);
1072     } else {
1073       iterator begin = it++;
1074       return std::pair<iterator, iterator>(begin, it);
1075     }
1076   }
1077 
  // insert
  // Insert value if its key is absent.  Returns (iterator, true) on
  // insertion, or (iterator-to-existing-entry, false) otherwise; an
  // existing mapped value is never overwritten, matching std::map::insert.
  std::pair<iterator, bool> insert(const value_type& value) {
    // The inner map stores only the key; the value_type object is
    // materialized here, and only when the key was newly inserted.
    std::pair<typename InnerMap::iterator, bool> p =
        elements_->insert(value.first);
    if (p.second) {
      p.first->value() = CreateValueTypeInternal(value);
    }
    return std::pair<iterator, bool>(iterator(p.first), p.second);
  }
1087   template <class InputIt>
insert(InputIt first,InputIt last)1088   void insert(InputIt first, InputIt last) {
1089     for (InputIt it = first; it != last; ++it) {
1090       iterator exist_it = find(it->first);
1091       if (exist_it == end()) {
1092         operator[](it->first) = it->second;
1093       }
1094     }
1095   }
1096 
1097   // Erase and clear
erase(const key_type & key)1098   size_type erase(const key_type& key) {
1099     iterator it = find(key);
1100     if (it == end()) {
1101       return 0;
1102     } else {
1103       erase(it);
1104       return 1;
1105     }
1106   }
  // Erase the entry at pos and return an iterator to the next entry.
  iterator erase(iterator pos) {
    // Heap-allocated values are owned by the map; arena-allocated values
    // are reclaimed with the arena and must not be deleted here.
    if (arena_ == NULL) delete pos.operator->();
    // Advance past the doomed node *before* removing it from the inner map,
    // so the returned iterator stays valid.
    iterator i = pos++;
    elements_->erase(i.it_);
    return pos;
  }
  // Erase every entry in [first, last).  erase(iterator) returns the
  // iterator following the erased entry, which keeps the walk valid as
  // nodes are removed.
  void erase(iterator first, iterator last) {
    while (first != last) {
      first = erase(first);
    }
  }
  // Remove all entries.
  void clear() { erase(begin(), end()); }
1119 
1120   // Assign
1121   Map& operator=(const Map& other) {
1122     if (this != &other) {
1123       clear();
1124       insert(other.begin(), other.end());
1125     }
1126     return *this;
1127   }
1128 
  // Exchange contents with another map.
  void swap(Map& other) {
    if (arena_ == other.arena_) {
      // Same arena (or both heap-backed): cheap pointer/field swap.
      std::swap(default_enum_value_, other.default_enum_value_);
      std::swap(elements_, other.elements_);
    } else {
      // Different arenas: fall back to a full copy-based exchange.
      // TODO(zuguang): optimize this. The temporary copy can be allocated
      // in the same arena as the other message, and the "other = copy" can
      // be replaced with the fast-path swap above.
      Map copy = *this;
      *this = other;
      other = copy;
    }
  }
1142 
  // Access to hasher.  Currently this returns a copy, but it may
  // be modified to return a const reference in the future.
  // Delegates to the inner hash table's hasher.
  hasher hash_function() const {
    return elements_->hash_function();
  }
1148 
1149  private:
  // Set default enum value only for proto2 map field whose value is enum type.
  // The stored value is applied by operator[] when it creates a new entry.
  void SetDefaultEnumValue(int default_enum_value) {
    default_enum_value_ = default_enum_value;
  }
1154 
  // Construct a value_type for |key| with a default-initialized mapped
  // value, on the heap or in the arena depending on arena_.
  value_type* CreateValueTypeInternal(const Key& key) {
    if (arena_ == NULL) {
      return new value_type(key);
    } else {
      // Arena path: grab raw bytes, then construct the pair's two members
      // in place in arena storage.
      value_type* value = reinterpret_cast<value_type*>(
          Arena::CreateArray<uint8>(arena_, sizeof(value_type)));
      Arena::CreateInArenaStorage(const_cast<Key*>(&value->first), arena_);
      Arena::CreateInArenaStorage(&value->second, arena_);
      // value_type declares first const, hence the const_cast to assign it.
      const_cast<Key&>(value->first) = key;
      return value;
    }
  }
1167 
  // Construct a value_type copying both key and mapped value from |value|,
  // on the heap or in the arena depending on arena_.
  value_type* CreateValueTypeInternal(const value_type& value) {
    if (arena_ == NULL) {
      return new value_type(value);
    } else {
      // Arena path mirrors the key-only overload above: raw bytes, in-place
      // construction of both members, then copy-assign the contents.
      value_type* p = reinterpret_cast<value_type*>(
          Arena::CreateArray<uint8>(arena_, sizeof(value_type)));
      Arena::CreateInArenaStorage(const_cast<Key*>(&p->first), arena_);
      Arena::CreateInArenaStorage(&p->second, arena_);
      // first is declared const in value_type, hence the const_cast.
      const_cast<Key&>(p->first) = value.first;
      p->second = value.second;
      return p;
    }
  }
1181 
  Arena* arena_;            // owning arena, or NULL when heap-allocated
  int default_enum_value_;  // applied by operator[] to new enum-typed values
  InnerMap* elements_;      // the underlying hash table

  friend class ::google::protobuf::Arena;
  // NOTE(review): these marker typedefs appear to signal to Arena that this
  // type is arena-constructable and its destructor skippable — confirm
  // against arena.h.
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  template <typename Derived, typename K, typename V,
            internal::WireFormatLite::FieldType key_wire_type,
            internal::WireFormatLite::FieldType value_wire_type,
            int default_enum_value>
  friend class internal::MapFieldLite;
1194 };
1195 
1196 }  // namespace protobuf
1197 
1198 }  // namespace google
1199 #endif  // GOOGLE_PROTOBUF_MAP_H__
1200