1 // Protocol Buffers - Google's data interchange format 2 // Copyright 2008 Google Inc. All rights reserved. 3 // https://developers.google.com/protocol-buffers/ 4 // 5 // Redistribution and use in source and binary forms, with or without 6 // modification, are permitted provided that the following conditions are 7 // met: 8 // 9 // * Redistributions of source code must retain the above copyright 10 // notice, this list of conditions and the following disclaimer. 11 // * Redistributions in binary form must reproduce the above 12 // copyright notice, this list of conditions and the following disclaimer 13 // in the documentation and/or other materials provided with the 14 // distribution. 15 // * Neither the name of Google Inc. nor the names of its 16 // contributors may be used to endorse or promote products derived from 17 // this software without specific prior written permission. 18 // 19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31 // This file defines the map container and its helpers to support protobuf maps. 32 // 33 // The Map and MapIterator types are provided by this header file. 
34 // Please avoid using other types defined here, unless they are public 35 // types within Map or MapIterator, such as Map::value_type. 36 37 #ifndef GOOGLE_PROTOBUF_MAP_H__ 38 #define GOOGLE_PROTOBUF_MAP_H__ 39 40 #include <initializer_list> 41 #include <iterator> 42 #include <limits> // To support Visual Studio 2008 43 #include <set> 44 #include <utility> 45 46 #include <google/protobuf/stubs/common.h> 47 #include <google/protobuf/arena.h> 48 #include <google/protobuf/generated_enum_util.h> 49 #include <google/protobuf/map_type_handler.h> 50 #include <google/protobuf/stubs/hash.h> 51 52 #ifdef SWIG 53 #error "You cannot SWIG proto headers" 54 #endif 55 56 #include <google/protobuf/port_def.inc> 57 58 namespace google { 59 namespace protobuf { 60 61 template <typename Key, typename T> 62 class Map; 63 64 class MapIterator; 65 66 template <typename Enum> 67 struct is_proto_enum; 68 69 namespace internal { 70 template <typename Derived, typename Key, typename T, 71 WireFormatLite::FieldType key_wire_type, 72 WireFormatLite::FieldType value_wire_type, int default_enum_value> 73 class MapFieldLite; 74 75 template <typename Derived, typename Key, typename T, 76 WireFormatLite::FieldType key_wire_type, 77 WireFormatLite::FieldType value_wire_type, int default_enum_value> 78 class MapField; 79 80 template <typename Key, typename T> 81 class TypeDefinedMapFieldBase; 82 83 class DynamicMapField; 84 85 class GeneratedMessageReflection; 86 } // namespace internal 87 88 // This is the class for Map's internal value_type. Instead of using 89 // std::pair as value_type, we use this class which provides us more control of 90 // its process of construction and destruction. 
91 template <typename Key, typename T> 92 class MapPair { 93 public: 94 typedef const Key first_type; 95 typedef T second_type; 96 MapPair(const Key & other_first,const T & other_second)97 MapPair(const Key& other_first, const T& other_second) 98 : first(other_first), second(other_second) {} MapPair(const Key & other_first)99 explicit MapPair(const Key& other_first) : first(other_first), second() {} MapPair(const MapPair & other)100 MapPair(const MapPair& other) : first(other.first), second(other.second) {} 101 ~MapPair()102 ~MapPair() {} 103 104 // Implicitly convertible to std::pair of compatible types. 105 template <typename T1, typename T2> 106 operator std::pair<T1, T2>() const { 107 return std::pair<T1, T2>(first, second); 108 } 109 110 const Key first; 111 T second; 112 113 private: 114 friend class Arena; 115 friend class Map<Key, T>; 116 }; 117 118 // Map is an associative container type used to store protobuf map 119 // fields. Each Map instance may or may not use a different hash function, a 120 // different iteration order, and so on. E.g., please don't examine 121 // implementation details to decide if the following would work: 122 // Map<int, int> m0, m1; 123 // m0[0] = m1[0] = m0[1] = m1[1] = 0; 124 // assert(m0.begin()->first == m1.begin()->first); // Bug! 125 // 126 // Map's interface is similar to std::unordered_map, except that Map is not 127 // designed to play well with exceptions. 
128 template <typename Key, typename T> 129 class Map { 130 public: 131 typedef Key key_type; 132 typedef T mapped_type; 133 typedef MapPair<Key, T> value_type; 134 135 typedef value_type* pointer; 136 typedef const value_type* const_pointer; 137 typedef value_type& reference; 138 typedef const value_type& const_reference; 139 140 typedef size_t size_type; 141 typedef hash<Key> hasher; 142 Map()143 Map() : arena_(NULL), default_enum_value_(0) { Init(); } Map(Arena * arena)144 explicit Map(Arena* arena) : arena_(arena), default_enum_value_(0) { Init(); } 145 Map(const Map & other)146 Map(const Map& other) 147 : arena_(NULL), default_enum_value_(other.default_enum_value_) { 148 Init(); 149 insert(other.begin(), other.end()); 150 } 151 Map(Map && other)152 Map(Map&& other) noexcept : Map() { 153 if (other.arena_) { 154 *this = other; 155 } else { 156 swap(other); 157 } 158 } 159 Map& operator=(Map&& other) noexcept { 160 if (this != &other) { 161 if (arena_ != other.arena_) { 162 *this = other; 163 } else { 164 swap(other); 165 } 166 } 167 return *this; 168 } 169 170 template <class InputIt> Map(const InputIt & first,const InputIt & last)171 Map(const InputIt& first, const InputIt& last) 172 : arena_(NULL), default_enum_value_(0) { 173 Init(); 174 insert(first, last); 175 } 176 ~Map()177 ~Map() { 178 clear(); 179 if (arena_ == NULL) { 180 delete elements_; 181 } 182 } 183 184 private: Init()185 void Init() { 186 elements_ = 187 Arena::Create<InnerMap>(arena_, 0u, hasher(), Allocator(arena_)); 188 } 189 190 // re-implement std::allocator to use arena allocator for memory allocation. 191 // Used for Map implementation. Users should not use this class 192 // directly. 
193 template <typename U> 194 class MapAllocator { 195 public: 196 typedef U value_type; 197 typedef value_type* pointer; 198 typedef const value_type* const_pointer; 199 typedef value_type& reference; 200 typedef const value_type& const_reference; 201 typedef size_t size_type; 202 typedef ptrdiff_t difference_type; 203 MapAllocator()204 MapAllocator() : arena_(NULL) {} MapAllocator(Arena * arena)205 explicit MapAllocator(Arena* arena) : arena_(arena) {} 206 template <typename X> MapAllocator(const MapAllocator<X> & allocator)207 MapAllocator(const MapAllocator<X>& allocator) 208 : arena_(allocator.arena()) {} 209 210 pointer allocate(size_type n, const void* /* hint */ = 0) { 211 // If arena is not given, malloc needs to be called which doesn't 212 // construct element object. 213 if (arena_ == NULL) { 214 return static_cast<pointer>(::operator new(n * sizeof(value_type))); 215 } else { 216 return reinterpret_cast<pointer>( 217 Arena::CreateArray<uint8>(arena_, n * sizeof(value_type))); 218 } 219 } 220 deallocate(pointer p,size_type n)221 void deallocate(pointer p, size_type n) { 222 if (arena_ == NULL) { 223 #if defined(__GXX_DELETE_WITH_SIZE__) || defined(__cpp_sized_deallocation) 224 ::operator delete(p, n * sizeof(value_type)); 225 #else 226 (void)n; 227 ::operator delete(p); 228 #endif 229 } 230 } 231 232 #if __cplusplus >= 201103L && !defined(GOOGLE_PROTOBUF_OS_APPLE) && \ 233 !defined(GOOGLE_PROTOBUF_OS_NACL) && \ 234 !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN) 235 template <class NodeType, class... Args> construct(NodeType * p,Args &&...args)236 void construct(NodeType* p, Args&&... args) { 237 // Clang 3.6 doesn't compile static casting to void* directly. (Issue 238 // #1266) According C++ standard 5.2.9/1: "The static_cast operator shall 239 // not cast away constness". So first the maybe const pointer is casted to 240 // const void* and after the const void* is const casted. 
241 new (const_cast<void*>(static_cast<const void*>(p))) 242 NodeType(std::forward<Args>(args)...); 243 } 244 245 template <class NodeType> destroy(NodeType * p)246 void destroy(NodeType* p) { 247 p->~NodeType(); 248 } 249 #else construct(pointer p,const_reference t)250 void construct(pointer p, const_reference t) { new (p) value_type(t); } 251 destroy(pointer p)252 void destroy(pointer p) { p->~value_type(); } 253 #endif 254 255 template <typename X> 256 struct rebind { 257 typedef MapAllocator<X> other; 258 }; 259 260 template <typename X> 261 bool operator==(const MapAllocator<X>& other) const { 262 return arena_ == other.arena_; 263 } 264 265 template <typename X> 266 bool operator!=(const MapAllocator<X>& other) const { 267 return arena_ != other.arena_; 268 } 269 270 // To support Visual Studio 2008 max_size()271 size_type max_size() const { 272 // parentheses around (std::...:max) prevents macro warning of max() 273 return (std::numeric_limits<size_type>::max)(); 274 } 275 276 // To support gcc-4.4, which does not properly 277 // support templated friend classes arena()278 Arena* arena() const { return arena_; } 279 280 private: 281 typedef void DestructorSkippable_; 282 Arena* const arena_; 283 }; 284 285 // InnerMap's key type is Key and its value type is value_type*. We use a 286 // custom class here and for Node, below, to ensure that k_ is at offset 0, 287 // allowing safe conversion from pointer to Node to pointer to Key, and vice 288 // versa when appropriate. 289 class KeyValuePair { 290 public: KeyValuePair(const Key & k,value_type * v)291 KeyValuePair(const Key& k, value_type* v) : k_(k), v_(v) {} 292 key()293 const Key& key() const { return k_; } key()294 Key& key() { return k_; } value()295 value_type* value() const { return v_; } value()296 value_type*& value() { return v_; } 297 298 private: 299 Key k_; 300 value_type* v_; 301 }; 302 303 typedef MapAllocator<KeyValuePair> Allocator; 304 305 // InnerMap is a generic hash-based map. 
// It doesn't contain any
// protocol-buffer-specific logic.  It is a chaining hash map with the
// additional feature that some buckets can be converted to use an ordered
// container.  This ensures O(lg n) bounds on find, insert, and erase, while
// avoiding the overheads of ordered containers most of the time.
//
// The implementation doesn't need the full generality of unordered_map,
// and it doesn't have it.  More bells and whistles can be added as needed.
// Some implementation details:
// 1. The hash function has type hasher and the equality function
//    equal_to<Key>.  We inherit from hasher to save space
//    (empty-base-class optimization).
// 2. The number of buckets is a power of two.
// 3. Buckets are converted to trees in pairs: if we convert bucket b then
//    buckets b and b^1 will share a tree.  Invariant: buckets b and b^1 have
//    the same non-NULL value iff they are sharing a tree.  (An alternative
//    implementation strategy would be to have a tag bit per bucket.)
// 4. As is typical for hash_map and such, the Keys and Values are always
//    stored in linked list nodes.  Pointers to elements are never invalidated
//    until the element is deleted.
// 5. The trees' payload type is pointer to linked-list node.  Tree-converting
//    a bucket doesn't copy Key-Value pairs.
// 6. Once we've tree-converted a bucket, it is never converted back.  However,
//    the items a tree contains may wind up assigned to trees or lists upon a
//    rehash.
// 7. The code requires no C++ features from C++11 or later.
// 8. Mutations to a map do not invalidate the map's iterators, pointers to
//    elements, or references to elements.
// 9. Except for erase(iterator), any non-const method can reorder iterators.
334 class InnerMap : private hasher { 335 public: 336 typedef value_type* Value; 337 InnerMap(size_type n,hasher h,Allocator alloc)338 InnerMap(size_type n, hasher h, Allocator alloc) 339 : hasher(h), 340 num_elements_(0), 341 seed_(Seed()), 342 table_(NULL), 343 alloc_(alloc) { 344 n = TableSize(n); 345 table_ = CreateEmptyTable(n); 346 num_buckets_ = index_of_first_non_null_ = n; 347 } 348 ~InnerMap()349 ~InnerMap() { 350 if (table_ != NULL) { 351 clear(); 352 Dealloc<void*>(table_, num_buckets_); 353 } 354 } 355 356 private: 357 enum { kMinTableSize = 8 }; 358 359 // Linked-list nodes, as one would expect for a chaining hash table. 360 struct Node { 361 KeyValuePair kv; 362 Node* next; 363 }; 364 365 // This is safe only if the given pointer is known to point to a Key that is 366 // part of a Node. NodePtrFromKeyPtr(Key * k)367 static Node* NodePtrFromKeyPtr(Key* k) { 368 return reinterpret_cast<Node*>(k); 369 } 370 KeyPtrFromNodePtr(Node * node)371 static Key* KeyPtrFromNodePtr(Node* node) { return &node->kv.key(); } 372 373 // Trees. The payload type is pointer to Key, so that we can query the tree 374 // with Keys that are not in any particular data structure. When we insert, 375 // though, the pointer is always pointing to a Key that is inside a Node. 376 struct KeyCompare { operatorKeyCompare377 bool operator()(const Key* n0, const Key* n1) const { return *n0 < *n1; } 378 }; 379 typedef typename Allocator::template rebind<Key*>::other KeyPtrAllocator; 380 typedef std::set<Key*, KeyCompare, KeyPtrAllocator> Tree; 381 typedef typename Tree::iterator TreeIterator; 382 383 // iterator and const_iterator are instantiations of iterator_base. 384 template <typename KeyValueType> 385 struct iterator_base { 386 typedef KeyValueType& reference; 387 typedef KeyValueType* pointer; 388 389 // Invariants: 390 // node_ is always correct. This is handy because the most common 391 // operations are operator* and operator-> and they only use node_. 
392 // When node_ is set to a non-NULL value, all the other non-const fields 393 // are updated to be correct also, but those fields can become stale 394 // if the underlying map is modified. When those fields are needed they 395 // are rechecked, and updated if necessary. iterator_baseiterator_base396 iterator_base() : node_(NULL), m_(NULL), bucket_index_(0) {} 397 iterator_baseiterator_base398 explicit iterator_base(const InnerMap* m) : m_(m) { 399 SearchFrom(m->index_of_first_non_null_); 400 } 401 402 // Any iterator_base can convert to any other. This is overkill, and we 403 // rely on the enclosing class to use it wisely. The standard "iterator 404 // can convert to const_iterator" is OK but the reverse direction is not. 405 template <typename U> iterator_baseiterator_base406 explicit iterator_base(const iterator_base<U>& it) 407 : node_(it.node_), m_(it.m_), bucket_index_(it.bucket_index_) {} 408 iterator_baseiterator_base409 iterator_base(Node* n, const InnerMap* m, size_type index) 410 : node_(n), m_(m), bucket_index_(index) {} 411 iterator_baseiterator_base412 iterator_base(TreeIterator tree_it, const InnerMap* m, size_type index) 413 : node_(NodePtrFromKeyPtr(*tree_it)), m_(m), bucket_index_(index) { 414 // Invariant: iterators that use buckets with trees have an even 415 // bucket_index_. 416 GOOGLE_DCHECK_EQ(bucket_index_ % 2, 0u); 417 } 418 419 // Advance through buckets, looking for the first that isn't empty. 420 // If nothing non-empty is found then leave node_ == NULL. 
SearchFromiterator_base421 void SearchFrom(size_type start_bucket) { 422 GOOGLE_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ || 423 m_->table_[m_->index_of_first_non_null_] != NULL); 424 node_ = NULL; 425 for (bucket_index_ = start_bucket; bucket_index_ < m_->num_buckets_; 426 bucket_index_++) { 427 if (m_->TableEntryIsNonEmptyList(bucket_index_)) { 428 node_ = static_cast<Node*>(m_->table_[bucket_index_]); 429 break; 430 } else if (m_->TableEntryIsTree(bucket_index_)) { 431 Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]); 432 GOOGLE_DCHECK(!tree->empty()); 433 node_ = NodePtrFromKeyPtr(*tree->begin()); 434 break; 435 } 436 } 437 } 438 439 reference operator*() const { return node_->kv; } 440 pointer operator->() const { return &(operator*()); } 441 442 friend bool operator==(const iterator_base& a, const iterator_base& b) { 443 return a.node_ == b.node_; 444 } 445 friend bool operator!=(const iterator_base& a, const iterator_base& b) { 446 return a.node_ != b.node_; 447 } 448 449 iterator_base& operator++() { 450 if (node_->next == NULL) { 451 TreeIterator tree_it; 452 const bool is_list = revalidate_if_necessary(&tree_it); 453 if (is_list) { 454 SearchFrom(bucket_index_ + 1); 455 } else { 456 GOOGLE_DCHECK_EQ(bucket_index_ & 1, 0u); 457 Tree* tree = static_cast<Tree*>(m_->table_[bucket_index_]); 458 if (++tree_it == tree->end()) { 459 SearchFrom(bucket_index_ + 2); 460 } else { 461 node_ = NodePtrFromKeyPtr(*tree_it); 462 } 463 } 464 } else { 465 node_ = node_->next; 466 } 467 return *this; 468 } 469 470 iterator_base operator++(int /* unused */) { 471 iterator_base tmp = *this; 472 ++*this; 473 return tmp; 474 } 475 476 // Assumes node_ and m_ are correct and non-NULL, but other fields may be 477 // stale. Fix them as needed. Then return true iff node_ points to a 478 // Node in a list. If false is returned then *it is modified to be 479 // a valid iterator for node_. 
revalidate_if_necessaryiterator_base480 bool revalidate_if_necessary(TreeIterator* it) { 481 GOOGLE_DCHECK(node_ != NULL && m_ != NULL); 482 // Force bucket_index_ to be in range. 483 bucket_index_ &= (m_->num_buckets_ - 1); 484 // Common case: the bucket we think is relevant points to node_. 485 if (m_->table_[bucket_index_] == static_cast<void*>(node_)) return true; 486 // Less common: the bucket is a linked list with node_ somewhere in it, 487 // but not at the head. 488 if (m_->TableEntryIsNonEmptyList(bucket_index_)) { 489 Node* l = static_cast<Node*>(m_->table_[bucket_index_]); 490 while ((l = l->next) != NULL) { 491 if (l == node_) { 492 return true; 493 } 494 } 495 } 496 // Well, bucket_index_ still might be correct, but probably 497 // not. Revalidate just to be sure. This case is rare enough that we 498 // don't worry about potential optimizations, such as having a custom 499 // find-like method that compares Node* instead of const Key&. 500 iterator_base i(m_->find(*KeyPtrFromNodePtr(node_), it)); 501 bucket_index_ = i.bucket_index_; 502 return m_->TableEntryIsList(bucket_index_); 503 } 504 505 Node* node_; 506 const InnerMap* m_; 507 size_type bucket_index_; 508 }; 509 510 public: 511 typedef iterator_base<KeyValuePair> iterator; 512 typedef iterator_base<const KeyValuePair> const_iterator; 513 begin()514 iterator begin() { return iterator(this); } end()515 iterator end() { return iterator(); } begin()516 const_iterator begin() const { return const_iterator(this); } end()517 const_iterator end() const { return const_iterator(); } 518 clear()519 void clear() { 520 for (size_type b = 0; b < num_buckets_; b++) { 521 if (TableEntryIsNonEmptyList(b)) { 522 Node* node = static_cast<Node*>(table_[b]); 523 table_[b] = NULL; 524 do { 525 Node* next = node->next; 526 DestroyNode(node); 527 node = next; 528 } while (node != NULL); 529 } else if (TableEntryIsTree(b)) { 530 Tree* tree = static_cast<Tree*>(table_[b]); 531 GOOGLE_DCHECK(table_[b] == table_[b + 1] && 
(b & 1) == 0); 532 table_[b] = table_[b + 1] = NULL; 533 typename Tree::iterator tree_it = tree->begin(); 534 do { 535 Node* node = NodePtrFromKeyPtr(*tree_it); 536 typename Tree::iterator next = tree_it; 537 ++next; 538 tree->erase(tree_it); 539 DestroyNode(node); 540 tree_it = next; 541 } while (tree_it != tree->end()); 542 DestroyTree(tree); 543 b++; 544 } 545 } 546 num_elements_ = 0; 547 index_of_first_non_null_ = num_buckets_; 548 } 549 hash_function()550 const hasher& hash_function() const { return *this; } 551 max_size()552 static size_type max_size() { 553 return static_cast<size_type>(1) << (sizeof(void**) >= 8 ? 60 : 28); 554 } size()555 size_type size() const { return num_elements_; } empty()556 bool empty() const { return size() == 0; } 557 find(const Key & k)558 iterator find(const Key& k) { return iterator(FindHelper(k).first); } find(const Key & k)559 const_iterator find(const Key& k) const { return find(k, NULL); } contains(const Key & k)560 bool contains(const Key& k) const { return find(k) != end(); } 561 562 // In traditional C++ style, this performs "insert if not present." insert(const KeyValuePair & kv)563 std::pair<iterator, bool> insert(const KeyValuePair& kv) { 564 std::pair<const_iterator, size_type> p = FindHelper(kv.key()); 565 // Case 1: key was already present. 566 if (p.first.node_ != NULL) 567 return std::make_pair(iterator(p.first), false); 568 // Case 2: insert. 569 if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) { 570 p = FindHelper(kv.key()); 571 } 572 const size_type b = p.second; // bucket number 573 Node* node = Alloc<Node>(1); 574 alloc_.construct(&node->kv, kv); 575 iterator result = InsertUnique(b, node); 576 ++num_elements_; 577 return std::make_pair(result, true); 578 } 579 580 // The same, but if an insertion is necessary then the value portion of the 581 // inserted key-value pair is left uninitialized. 
insert(const Key & k)582 std::pair<iterator, bool> insert(const Key& k) { 583 std::pair<const_iterator, size_type> p = FindHelper(k); 584 // Case 1: key was already present. 585 if (p.first.node_ != NULL) 586 return std::make_pair(iterator(p.first), false); 587 // Case 2: insert. 588 if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) { 589 p = FindHelper(k); 590 } 591 const size_type b = p.second; // bucket number 592 Node* node = Alloc<Node>(1); 593 typedef typename Allocator::template rebind<Key>::other KeyAllocator; 594 KeyAllocator(alloc_).construct(&node->kv.key(), k); 595 iterator result = InsertUnique(b, node); 596 ++num_elements_; 597 return std::make_pair(result, true); 598 } 599 600 Value& operator[](const Key& k) { 601 KeyValuePair kv(k, Value()); 602 return insert(kv).first->value(); 603 } 604 erase(iterator it)605 void erase(iterator it) { 606 GOOGLE_DCHECK_EQ(it.m_, this); 607 typename Tree::iterator tree_it; 608 const bool is_list = it.revalidate_if_necessary(&tree_it); 609 size_type b = it.bucket_index_; 610 Node* const item = it.node_; 611 if (is_list) { 612 GOOGLE_DCHECK(TableEntryIsNonEmptyList(b)); 613 Node* head = static_cast<Node*>(table_[b]); 614 head = EraseFromLinkedList(item, head); 615 table_[b] = static_cast<void*>(head); 616 } else { 617 GOOGLE_DCHECK(TableEntryIsTree(b)); 618 Tree* tree = static_cast<Tree*>(table_[b]); 619 tree->erase(*tree_it); 620 if (tree->empty()) { 621 // Force b to be the minimum of b and b ^ 1. This is important 622 // only because we want index_of_first_non_null_ to be correct. 
623 b &= ~static_cast<size_type>(1); 624 DestroyTree(tree); 625 table_[b] = table_[b + 1] = NULL; 626 } 627 } 628 DestroyNode(item); 629 --num_elements_; 630 if (PROTOBUF_PREDICT_FALSE(b == index_of_first_non_null_)) { 631 while (index_of_first_non_null_ < num_buckets_ && 632 table_[index_of_first_non_null_] == NULL) { 633 ++index_of_first_non_null_; 634 } 635 } 636 } 637 638 private: find(const Key & k,TreeIterator * it)639 const_iterator find(const Key& k, TreeIterator* it) const { 640 return FindHelper(k, it).first; 641 } FindHelper(const Key & k)642 std::pair<const_iterator, size_type> FindHelper(const Key& k) const { 643 return FindHelper(k, NULL); 644 } FindHelper(const Key & k,TreeIterator * it)645 std::pair<const_iterator, size_type> FindHelper(const Key& k, 646 TreeIterator* it) const { 647 size_type b = BucketNumber(k); 648 if (TableEntryIsNonEmptyList(b)) { 649 Node* node = static_cast<Node*>(table_[b]); 650 do { 651 if (IsMatch(*KeyPtrFromNodePtr(node), k)) { 652 return std::make_pair(const_iterator(node, this, b), b); 653 } else { 654 node = node->next; 655 } 656 } while (node != NULL); 657 } else if (TableEntryIsTree(b)) { 658 GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]); 659 b &= ~static_cast<size_t>(1); 660 Tree* tree = static_cast<Tree*>(table_[b]); 661 Key* key = const_cast<Key*>(&k); 662 typename Tree::iterator tree_it = tree->find(key); 663 if (tree_it != tree->end()) { 664 if (it != NULL) *it = tree_it; 665 return std::make_pair(const_iterator(tree_it, this, b), b); 666 } 667 } 668 return std::make_pair(end(), b); 669 } 670 671 // Insert the given Node in bucket b. If that would make bucket b too big, 672 // and bucket b is not a tree, create a tree for buckets b and b^1 to share. 673 // Requires count(*KeyPtrFromNodePtr(node)) == 0 and that b is the correct 674 // bucket. num_elements_ is not modified. 
InsertUnique(size_type b,Node * node)675 iterator InsertUnique(size_type b, Node* node) { 676 GOOGLE_DCHECK(index_of_first_non_null_ == num_buckets_ || 677 table_[index_of_first_non_null_] != NULL); 678 // In practice, the code that led to this point may have already 679 // determined whether we are inserting into an empty list, a short list, 680 // or whatever. But it's probably cheap enough to recompute that here; 681 // it's likely that we're inserting into an empty or short list. 682 iterator result; 683 GOOGLE_DCHECK(find(*KeyPtrFromNodePtr(node)) == end()); 684 if (TableEntryIsEmpty(b)) { 685 result = InsertUniqueInList(b, node); 686 } else if (TableEntryIsNonEmptyList(b)) { 687 if (PROTOBUF_PREDICT_FALSE(TableEntryIsTooLong(b))) { 688 TreeConvert(b); 689 result = InsertUniqueInTree(b, node); 690 GOOGLE_DCHECK_EQ(result.bucket_index_, b & ~static_cast<size_type>(1)); 691 } else { 692 // Insert into a pre-existing list. This case cannot modify 693 // index_of_first_non_null_, so we skip the code to update it. 694 return InsertUniqueInList(b, node); 695 } 696 } else { 697 // Insert into a pre-existing tree. This case cannot modify 698 // index_of_first_non_null_, so we skip the code to update it. 699 return InsertUniqueInTree(b, node); 700 } 701 // parentheses around (std::min) prevents macro expansion of min(...) 702 index_of_first_non_null_ = 703 (std::min)(index_of_first_non_null_, result.bucket_index_); 704 return result; 705 } 706 707 // Helper for InsertUnique. Handles the case where bucket b is a 708 // not-too-long linked list. InsertUniqueInList(size_type b,Node * node)709 iterator InsertUniqueInList(size_type b, Node* node) { 710 node->next = static_cast<Node*>(table_[b]); 711 table_[b] = static_cast<void*>(node); 712 return iterator(node, this, b); 713 } 714 715 // Helper for InsertUnique. Handles the case where bucket b points to a 716 // Tree. 
InsertUniqueInTree(size_type b,Node * node)717 iterator InsertUniqueInTree(size_type b, Node* node) { 718 GOOGLE_DCHECK_EQ(table_[b], table_[b ^ 1]); 719 // Maintain the invariant that node->next is NULL for all Nodes in Trees. 720 node->next = NULL; 721 return iterator( 722 static_cast<Tree*>(table_[b])->insert(KeyPtrFromNodePtr(node)).first, 723 this, b & ~static_cast<size_t>(1)); 724 } 725 726 // Returns whether it did resize. Currently this is only used when 727 // num_elements_ increases, though it could be used in other situations. 728 // It checks for load too low as well as load too high: because any number 729 // of erases can occur between inserts, the load could be as low as 0 here. 730 // Resizing to a lower size is not always helpful, but failing to do so can 731 // destroy the expected big-O bounds for some operations. By having the 732 // policy that sometimes we resize down as well as up, clients can easily 733 // keep O(size()) = O(number of buckets) if they want that. ResizeIfLoadIsOutOfRange(size_type new_size)734 bool ResizeIfLoadIsOutOfRange(size_type new_size) { 735 const size_type kMaxMapLoadTimes16 = 12; // controls RAM vs CPU tradeoff 736 const size_type hi_cutoff = num_buckets_ * kMaxMapLoadTimes16 / 16; 737 const size_type lo_cutoff = hi_cutoff / 4; 738 // We don't care how many elements are in trees. If a lot are, 739 // we may resize even though there are many empty buckets. In 740 // practice, this seems fine. 741 if (PROTOBUF_PREDICT_FALSE(new_size >= hi_cutoff)) { 742 if (num_buckets_ <= max_size() / 2) { 743 Resize(num_buckets_ * 2); 744 return true; 745 } 746 } else if (PROTOBUF_PREDICT_FALSE(new_size <= lo_cutoff && 747 num_buckets_ > kMinTableSize)) { 748 size_type lg2_of_size_reduction_factor = 1; 749 // It's possible we want to shrink a lot here... size() could even be 0. 750 // So, estimate how much to shrink by making sure we don't shrink so 751 // much that we would need to grow the table after a few inserts. 
        // (Tail of the load-factor check, shrink branch.) Find the largest
        // power-of-two reduction factor such that the shrunken table would
        // still hold new_size elements comfortably below the high cutoff.
        const size_type hypothetical_size = new_size * 5 / 4 + 1;
        while ((hypothetical_size << lg2_of_size_reduction_factor) <
               hi_cutoff) {
          ++lg2_of_size_reduction_factor;
        }
        size_type new_num_buckets = std::max<size_type>(
            kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);
        if (new_num_buckets != num_buckets_) {
          Resize(new_num_buckets);
          return true;
        }
      }
      return false;
    }

    // Resize to the given number of buckets.  Allocates a fresh zeroed table,
    // rehashes every node (lists and trees alike) into it, then frees the old
    // bucket array.  The nodes themselves are reused, not reallocated.
    void Resize(size_t new_num_buckets) {
      GOOGLE_DCHECK_GE(new_num_buckets, kMinTableSize);
      void** const old_table = table_;
      const size_type old_table_size = num_buckets_;
      num_buckets_ = new_num_buckets;
      table_ = CreateEmptyTable(num_buckets_);
      const size_type start = index_of_first_non_null_;
      index_of_first_non_null_ = num_buckets_;
      for (size_type i = start; i < old_table_size; i++) {
        if (TableEntryIsNonEmptyList(old_table, i)) {
          TransferList(old_table, i);
        } else if (TableEntryIsTree(old_table, i)) {
          // A tree occupies two adjacent buckets (see TreeConvert), so skip
          // the second bucket of the pair.
          TransferTree(old_table, i++);
        }
      }
      Dealloc<void*>(old_table, old_table_size);
    }

    // Re-insert every node of the non-empty linked list in old bucket |index|
    // into the (already swapped-in) new table_.
    void TransferList(void* const* table, size_type index) {
      Node* node = static_cast<Node*>(table[index]);
      do {
        // Save the successor first: InsertUnique rewrites node->next.
        Node* next = node->next;
        InsertUnique(BucketNumber(*KeyPtrFromNodePtr(node)), node);
        node = next;
      } while (node != NULL);
    }

    // Re-insert every node of the tree in old bucket |index| into the new
    // table_, then destroy the emptied tree object itself.
    void TransferTree(void* const* table, size_type index) {
      Tree* tree = static_cast<Tree*>(table[index]);
      typename Tree::iterator tree_it = tree->begin();
      do {
        Node* node = NodePtrFromKeyPtr(*tree_it);
        InsertUnique(BucketNumber(**tree_it), node);
      } while (++tree_it != tree->end());
      DestroyTree(tree);
    }

    // Unlink |item| from the singly linked list headed by |head| and return
    // the new head.  Recursive, but lists are kept at most kMaxLength long
    // (see TableEntryIsTooLong), so the depth is bounded.
    Node* EraseFromLinkedList(Node* item, Node* head) {
      if (head == item) {
        return head->next;
      } else {
        head->next = EraseFromLinkedList(item, head->next);
        return head;
      }
    }

    // Convenience overloads that query this InnerMap's own table_.
    bool TableEntryIsEmpty(size_type b) const {
      return TableEntryIsEmpty(table_, b);
    }
    bool TableEntryIsNonEmptyList(size_type b) const {
      return TableEntryIsNonEmptyList(table_, b);
    }
    bool TableEntryIsTree(size_type b) const {
      return TableEntryIsTree(table_, b);
    }
    bool TableEntryIsList(size_type b) const {
      return TableEntryIsList(table_, b);
    }
    static bool TableEntryIsEmpty(void* const* table, size_type b) {
      return table[b] == NULL;
    }
    // A tree is stored in two adjacent buckets (b and b ^ 1) that both point
    // at the same Tree (see TreeConvert), so a non-empty *list* is recognized
    // by the two buckets differing.
    static bool TableEntryIsNonEmptyList(void* const* table, size_type b) {
      return table[b] != NULL && table[b] != table[b ^ 1];
    }
    static bool TableEntryIsTree(void* const* table, size_type b) {
      return !TableEntryIsEmpty(table, b) &&
             !TableEntryIsNonEmptyList(table, b);
    }
    static bool TableEntryIsList(void* const* table, size_type b) {
      return !TableEntryIsTree(table, b);
    }

    // Convert the (possibly empty) linked lists in buckets b and b ^ 1 into a
    // single tree holding all of their nodes; both buckets are then pointed
    // at that tree.
    void TreeConvert(size_type b) {
      GOOGLE_DCHECK(!TableEntryIsTree(b) && !TableEntryIsTree(b ^ 1));
      typename Allocator::template rebind<Tree>::other tree_allocator(alloc_);
      Tree* tree = tree_allocator.allocate(1);
      // We want to use the three-arg form of construct, if it exists, but we
      // create a temporary and use the two-arg construct that's known to exist.
      // It's clunky, but the compiler should be able to generate more-or-less
      // the same code.
      tree_allocator.construct(tree,
                               Tree(KeyCompare(), KeyPtrAllocator(alloc_)));
      // Now the tree is ready to use.
      size_type count = CopyListToTree(b, tree) + CopyListToTree(b ^ 1, tree);
      GOOGLE_DCHECK_EQ(count, tree->size());
      table_[b] = table_[b ^ 1] = static_cast<void*>(tree);
    }

    // Copy a linked list in the given bucket to a tree.
    // Returns the number of things it copied.
    size_type CopyListToTree(size_type b, Tree* tree) {
      size_type count = 0;
      Node* node = static_cast<Node*>(table_[b]);
      while (node != NULL) {
        tree->insert(KeyPtrFromNodePtr(node));
        ++count;
        Node* next = node->next;
        node->next = NULL;  // nodes held by a tree are not chained
        node = next;
      }
      return count;
    }

    // Return whether table_[b] is a linked list that seems awfully long.
    // Requires table_[b] to point to a non-empty linked list.
    bool TableEntryIsTooLong(size_type b) {
      const size_type kMaxLength = 8;
      size_type count = 0;
      Node* node = static_cast<Node*>(table_[b]);
      do {
        ++count;
        node = node->next;
      } while (node != NULL);
      // Invariant: no linked list ever is more than kMaxLength in length.
      GOOGLE_DCHECK_LE(count, kMaxLength);
      return count >= kMaxLength;
    }

    // Map hash(k) to a bucket index.  seed_ perturbs the hash so bucket order
    // is not identical across Map instances; num_buckets_ is a power of two,
    // so the mask selects the low bits.
    size_type BucketNumber(const Key& k) const {
      // We inherit from hasher, so one-arg operator() provides a hash function.
      size_type h = (*const_cast<InnerMap*>(this))(k);
      return (h + seed_) & (num_buckets_ - 1);
    }

    bool IsMatch(const Key& k0, const Key& k1) const {
      return std::equal_to<Key>()(k0, k1);
    }

    // Return a power of two no less than max(kMinTableSize, n).
    // Assumes either n < kMinTableSize or n is a power of two.
    size_type TableSize(size_type n) {
      return n < static_cast<size_type>(kMinTableSize)
                 ? static_cast<size_type>(kMinTableSize)
                 : n;
    }

    // Use alloc_ to allocate an array of n objects of type U.
905 template <typename U> Alloc(size_type n)906 U* Alloc(size_type n) { 907 typedef typename Allocator::template rebind<U>::other alloc_type; 908 return alloc_type(alloc_).allocate(n); 909 } 910 911 // Use alloc_ to deallocate an array of n objects of type U. 912 template <typename U> Dealloc(U * t,size_type n)913 void Dealloc(U* t, size_type n) { 914 typedef typename Allocator::template rebind<U>::other alloc_type; 915 alloc_type(alloc_).deallocate(t, n); 916 } 917 DestroyNode(Node * node)918 void DestroyNode(Node* node) { 919 alloc_.destroy(&node->kv); 920 Dealloc<Node>(node, 1); 921 } 922 DestroyTree(Tree * tree)923 void DestroyTree(Tree* tree) { 924 typename Allocator::template rebind<Tree>::other tree_allocator(alloc_); 925 tree_allocator.destroy(tree); 926 tree_allocator.deallocate(tree, 1); 927 } 928 CreateEmptyTable(size_type n)929 void** CreateEmptyTable(size_type n) { 930 GOOGLE_DCHECK(n >= kMinTableSize); 931 GOOGLE_DCHECK_EQ(n & (n - 1), 0); 932 void** result = Alloc<void*>(n); 933 memset(result, 0, n * sizeof(result[0])); 934 return result; 935 } 936 937 // Return a randomish value. 
    size_type Seed() const {
      // Mix in this object's address; on x86-64 GCC builds, also mix in the
      // timestamp counter so two maps at the same address get different seeds.
      size_type s = static_cast<size_type>(reinterpret_cast<uintptr_t>(this));
#if defined(__x86_64__) && defined(__GNUC__) && \
    !defined(GOOGLE_PROTOBUF_NO_RDTSC)
      uint32 hi, lo;
      asm("rdtsc" : "=a"(lo), "=d"(hi));
      s += ((static_cast<uint64>(hi) << 32) | lo);
#endif
      return s;
    }

    size_type num_elements_;
    size_type num_buckets_;
    size_type seed_;                      // perturbs BucketNumber()
    size_type index_of_first_non_null_;   // lowest possibly-occupied bucket
    void** table_;  // an array with num_buckets_ entries
    Allocator alloc_;
    GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(InnerMap);
  };  // end of class InnerMap

 public:
  // Iterators
  //
  // Thin forward-iterator wrappers over InnerMap's iterators; dereferencing
  // yields the Map::value_type stored behind the inner iterator's value().
  class const_iterator {
    typedef typename InnerMap::const_iterator InnerIt;

   public:
    typedef std::forward_iterator_tag iterator_category;
    typedef typename Map::value_type value_type;
    typedef ptrdiff_t difference_type;
    typedef const value_type* pointer;
    typedef const value_type& reference;

    const_iterator() {}
    explicit const_iterator(const InnerIt& it) : it_(it) {}

    const_reference operator*() const { return *it_->value(); }
    const_pointer operator->() const { return &(operator*()); }

    const_iterator& operator++() {
      ++it_;
      return *this;
    }
    const_iterator operator++(int) { return const_iterator(it_++); }

    friend bool operator==(const const_iterator& a, const const_iterator& b) {
      return a.it_ == b.it_;
    }
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
      return !(a == b);
    }

   private:
    InnerIt it_;
  };

  class iterator {
    typedef typename InnerMap::iterator InnerIt;

   public:
    typedef std::forward_iterator_tag iterator_category;
    typedef typename Map::value_type value_type;
    typedef ptrdiff_t difference_type;
    typedef value_type* pointer;
    typedef value_type& reference;

    iterator() {}
    explicit iterator(const InnerIt& it) : it_(it) {}

    reference operator*() const { return *it_->value(); }
    pointer operator->() const { return &(operator*()); }

    iterator& operator++() {
      ++it_;
      return *this;
    }
    iterator operator++(int) { return iterator(it_++); }

    // Allow implicit conversion to const_iterator.
    operator const_iterator() const {
      return const_iterator(typename InnerMap::const_iterator(it_));
    }

    friend bool operator==(const iterator& a, const iterator& b) {
      return a.it_ == b.it_;
    }
    friend bool operator!=(const iterator& a, const iterator& b) {
      return !(a == b);
    }

   private:
    friend class Map;  // erase(iterator) needs access to it_

    InnerIt it_;
  };

  iterator begin() { return iterator(elements_->begin()); }
  iterator end() { return iterator(elements_->end()); }
  const_iterator begin() const {
    return const_iterator(iterator(elements_->begin()));
  }
  const_iterator end() const {
    return const_iterator(iterator(elements_->end()));
  }
  const_iterator cbegin() const { return begin(); }
  const_iterator cend() const { return end(); }

  // Capacity
  size_type size() const { return elements_->size(); }
  bool empty() const { return size() == 0; }

  // Element access

  // Returns the mapped value for |key|, creating it on first access.  A
  // newly created value is default-initialized, except that proto2 enum
  // values are set to the field's default via MapValueInitializer.
  T& operator[](const key_type& key) {
    value_type** value = &(*elements_)[key];
    if (*value == NULL) {
      *value = CreateValueTypeInternal(key);
      internal::MapValueInitializer<is_proto_enum<T>::value, T>::Initialize(
          (*value)->second, default_enum_value_);
    }
    return (*value)->second;
  }
  // Like operator[], but check-fails instead of inserting when |key| is
  // absent.
  const T& at(const key_type& key) const {
    const_iterator it = find(key);
    GOOGLE_CHECK(it != end()) << "key not found: " << key;
    return it->second;
  }
  T& at(const key_type& key) {
    iterator it = find(key);
    GOOGLE_CHECK(it != end()) << "key not found: " << key;
    return it->second;
  }

  // Lookup

  // Keys are unique, so count() is 0 or 1.
  size_type count(const key_type& key) const {
    const_iterator it = find(key);
    GOOGLE_DCHECK(it == end() || key == it->first);
    return it == end() ? 0 : 1;
  }
  const_iterator find(const key_type& key) const {
    return const_iterator(iterator(elements_->find(key)));
  }
  iterator find(const key_type& key) { return iterator(elements_->find(key)); }
  bool contains(const Key& key) const { return elements_->contains(key); }
  // equal_range returns an empty or single-element range, since keys are
  // unique.
  std::pair<const_iterator, const_iterator> equal_range(
      const key_type& key) const {
    const_iterator it = find(key);
    if (it == end()) {
      return std::pair<const_iterator, const_iterator>(it, it);
    } else {
      const_iterator begin = it++;
      return std::pair<const_iterator, const_iterator>(begin, it);
    }
  }
  std::pair<iterator, iterator> equal_range(const key_type& key) {
    iterator it = find(key);
    if (it == end()) {
      return std::pair<iterator, iterator>(it, it);
    } else {
      iterator begin = it++;
      return std::pair<iterator, iterator>(begin, it);
    }
  }

  // insert

  // Inserts |value| if its key is absent; returns the (possibly preexisting)
  // entry and whether an insertion happened, like std::map::insert.
  std::pair<iterator, bool> insert(const value_type& value) {
    std::pair<typename InnerMap::iterator, bool> p =
        elements_->insert(value.first);
    if (p.second) {
      p.first->value() = CreateValueTypeInternal(value);
    }
    return std::pair<iterator, bool>(iterator(p.first), p.second);
  }
  // Inserts each element of [first, last) whose key is not already present;
  // existing entries are left untouched.
  template <class InputIt>
  void insert(InputIt first, InputIt last) {
    for (InputIt it = first; it != last; ++it) {
      iterator exist_it = find(it->first);
      if (exist_it == end()) {
        operator[](it->first) = it->second;
      }
    }
  }
  void insert(std::initializer_list<value_type> values) {
    insert(values.begin(), values.end());
  }

  // Erase and clear

  // Removes the entry for |key| if present; returns the number removed (0
  // or 1).
  size_type erase(const key_type& key) {
    iterator it = find(key);
    if (it == end()) {
      return 0;
    } else {
      erase(it);
      return 1;
    }
  }
  // Removes the entry at |pos| and returns the following iterator.  Heap
  // backed pairs are deleted here; arena-backed storage is reclaimed with
  // the arena.  Advance before erasing so the returned iterator stays valid.
  iterator erase(iterator pos) {
    if (arena_ == NULL) delete pos.operator->();
    iterator i = pos++;
    elements_->erase(i.it_);
    return pos;
  }
  void erase(iterator first, iterator last) {
    while (first != last) {
      first = erase(first);
    }
  }
  void clear() { erase(begin(), end()); }

  // Assign

  // Copy-assignment: clear then deep-copy; self-assignment is a no-op.
  Map& operator=(const Map& other) {
    if (this != &other) {
      clear();
      insert(other.begin(), other.end());
    }
    return *this;
  }

  // O(1) pointer swap when both maps share an arena (or both are heap
  // backed); otherwise falls back to a three-way copy.
  void swap(Map& other) {
    if (arena_ == other.arena_) {
      std::swap(default_enum_value_, other.default_enum_value_);
      std::swap(elements_, other.elements_);
    } else {
      // TODO(zuguang): optimize this. The temporary copy can be allocated
      // in the same arena as the other message, and the "other = copy" can
      // be replaced with the fast-path swap above.
      Map copy = *this;
      *this = other;
      other = copy;
    }
  }

  // Access to hasher. Currently this returns a copy, but it may
  // be modified to return a const reference in the future.
  hasher hash_function() const { return elements_->hash_function(); }

 private:
  // Set default enum value only for proto2 map field whose value is enum type.
  void SetDefaultEnumValue(int default_enum_value) {
    default_enum_value_ = default_enum_value;
  }

  // Allocate and construct a value_type whose key is |key| and whose mapped
  // value is default-constructed.  On the heap this is a plain `new`; on an
  // arena, raw bytes are reserved and each half of the pair is constructed
  // in place so the arena can manage their lifetimes.
  value_type* CreateValueTypeInternal(const Key& key) {
    if (arena_ == NULL) {
      return new value_type(key);
    } else {
      value_type* value = reinterpret_cast<value_type*>(
          Arena::CreateArray<uint8>(arena_, sizeof(value_type)));
      Arena::CreateInArenaStorage(const_cast<Key*>(&value->first), arena_);
      Arena::CreateInArenaStorage(&value->second, arena_);
      // const_cast is needed because value_type's key is const; the object
      // was just constructed above, so this is the initial assignment.
      const_cast<Key&>(value->first) = key;
      return value;
    }
  }

  // Same as above, but copies both the key and the mapped value from |value|.
  value_type* CreateValueTypeInternal(const value_type& value) {
    if (arena_ == NULL) {
      return new value_type(value);
    } else {
      value_type* p = reinterpret_cast<value_type*>(
          Arena::CreateArray<uint8>(arena_, sizeof(value_type)));
      Arena::CreateInArenaStorage(const_cast<Key*>(&p->first), arena_);
      Arena::CreateInArenaStorage(&p->second, arena_);
      const_cast<Key&>(p->first) = value.first;
      p->second = value.second;
      return p;
    }
  }

  Arena* arena_;            // NULL when heap-allocated
  int default_enum_value_;  // used by operator[] for proto2 enum values
  InnerMap* elements_;      // the actual hash table

  friend class Arena;
  // Markers read by Arena: this type may be arena-constructed, and its
  // destructor may be skipped on arena destruction.
  typedef void InternalArenaConstructable_;
  typedef void DestructorSkippable_;
  template <typename Derived, typename K, typename V,
            internal::WireFormatLite::FieldType key_wire_type,
            internal::WireFormatLite::FieldType value_wire_type,
            int default_enum_value>
  friend class internal::MapFieldLite;
};

}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_MAP_H__