Sierra Toolkit Version of the Day
hashtable_eastl.h
1 /*
2 Copyright (C) 2005,2009-2010 Electronic Arts, Inc. All rights reserved.
3 
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions
6 are met:
7 
8 1. Redistributions of source code must retain the above copyright
9  notice, this list of conditions and the following disclaimer.
10 2. Redistributions in binary form must reproduce the above copyright
11  notice, this list of conditions and the following disclaimer in the
12  documentation and/or other materials provided with the distribution.
13 3. Neither the name of Electronic Arts, Inc. ("EA") nor the names of
14  its contributors may be used to endorse or promote products derived
15  from this software without specific prior written permission.
16 
17 THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
18 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
21 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28 
30 // EASTL/internal/hashtable.h
31 //
32 // Written and maintained by Paul Pedriana - 2005.
34 
36 // This file implements a hashtable, much like the proposed C++ TR1
37 // hash_set/hash_map classes.
38 // The primary distinctions between this hashtable and TR1 hash tables are:
39 // - hashtable is savvy to an environment that doesn't have exception handling,
40 // as is sometimes the case with console or embedded environments.
41 // - hashtable is slightly more space-efficient than a conventional std hashtable
42 // implementation on platforms with 64 bit size_t. This is
43 // because std STL uses size_t (64 bits) in data structures where 32 bits
44 // of data would be fine.
45 // - hashtable can contain objects with alignment requirements. TR1 hash tables
46 // cannot do so without a bit of tedious non-portable effort.
47 // - hashtable supports debug memory naming natively.
48 // - hashtable provides a find function that lets you specify a type that is
49 // different from the hash table key type. This is particularly useful for
50 // storing string objects but finding them by char pointers.
52 
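// Example (editorial sketch, not part of the original header): this hashtable is
// normally used through the hash_map/hash_set style wrappers layered on top of it.
// The container and include path below are illustrative assumptions:
//
//     #include <EASTL/hash_map.h>   // wrapper header; the actual path differs per project
//
//     eastl::hash_map<int, const char*> table;
//     table.insert(eastl::make_pair(1, "one"));
//     table[2] = "two";                                      // inserts a default value if absent
//     eastl::hash_map<int, const char*>::iterator it = table.find(2);
//     if(it != table.end())
//         it->second = "TWO";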
54 // This file is currently partially based on the TR1 (technical report 1)
55 // reference implementation of the hash_set/hash_map C++ classes
56 // as of about 4/2005.
58 
59 
60 #ifndef EASTL_INTERNAL_HASHTABLE_H
61 #define EASTL_INTERNAL_HASHTABLE_H
62 
63 
64 #include <stk_util/util/config_eastl.h>
65 #include <stk_util/util/type_traits_eastl.h>
66 #include <stk_util/util/allocator_eastl.h>
67 #include <stk_util/util/iterator_eastl.h>
68 #include <stk_util/util/functional_eastl.h>
69 #include <stk_util/util/utility_eastl.h>
70 #include <stk_util/util/algorithm_eastl.h>
71 #include <string.h>
72 
73 #ifdef _MSC_VER
74  #pragma warning(push, 0)
75  #include <new>
76  #include <stddef.h>
77  #pragma warning(pop)
78 #else
79  #include <new>
80  #include <stddef.h>
81 #endif
82 
83 #ifdef _MSC_VER
84  #pragma warning(push)
85  #pragma warning(disable: 4512) // 'class' : assignment operator could not be generated.
86  #pragma warning(disable: 4530) // C++ exception handler used, but unwind semantics are not enabled. Specify /EHsc
87 #endif
88 
89 
90 namespace eastl
91 {
92 
97  #ifndef EASTL_HASHTABLE_DEFAULT_NAME
98  #define EASTL_HASHTABLE_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " hashtable" // Unless the user overrides something, this is "EASTL hashtable".
99  #endif
100 
101 
104  #ifndef EASTL_HASHTABLE_DEFAULT_ALLOCATOR
105  #define EASTL_HASHTABLE_DEFAULT_ALLOCATOR allocator_type(EASTL_HASHTABLE_DEFAULT_NAME)
106  #endif
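// Editorial note (added): both macros above are guarded with #ifndef, so a project
// can override them before this header is compiled, typically in its EASTL config
// header or on the compiler command line. A minimal sketch (the name is illustrative):
//
//     #define EASTL_HASHTABLE_DEFAULT_NAME "MyProject hashtable"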
107 
108 
109 
116  extern EASTL_API void* gpEmptyBucketArray[2];
117 
118 
119 
128  template <typename Value, bool bCacheHashCode>
129  struct hash_node;
130 
131  template <typename Value>
132  struct hash_node<Value, true>
133  {
134  Value mValue;
135  hash_node* mpNext;
136  eastl_size_t mnHashCode; // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
137  } EASTL_MAY_ALIAS;
138 
139  template <typename Value>
140  struct hash_node<Value, false>
141  {
142  Value mValue;
143  hash_node* mpNext;
144  } EASTL_MAY_ALIAS;
145 
146 
147 
155  template <typename Value, bool bCacheHashCode>
156 struct node_iterator_base
157 {
158  public:
159 typedef hash_node<Value, bCacheHashCode> node_type;
160
161  node_type* mpNode;
162 
163  public:
164 node_iterator_base(node_type* pNode)
165 : mpNode(pNode) { }
166 
167  void increment()
168  { mpNode = mpNode->mpNext; }
169  };
170 
171 
172 
180  template <typename Value, bool bConst, bool bCacheHashCode>
181  struct node_iterator : public node_iterator_base<Value, bCacheHashCode>
182  {
183  public:
184 typedef node_iterator_base<Value, bCacheHashCode> base_type;
185 typedef node_iterator<Value, bConst, bCacheHashCode> this_type;
186 typedef typename base_type::node_type node_type;
187  typedef Value value_type;
188 typedef typename type_select<bConst, const Value*, Value*>::type pointer;
189 typedef typename type_select<bConst, const Value&, Value&>::type reference;
190 typedef ptrdiff_t difference_type;
191  typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
192 
193  public:
194  explicit node_iterator(node_type* pNode = NULL)
195  : base_type(pNode) { }
196 
197 node_iterator(const node_iterator<Value, true, bCacheHashCode>& x)
198 : base_type(x.mpNode) { }
199 
200  reference operator*() const
201  { return base_type::mpNode->mValue; }
202 
203  pointer operator->() const
204  { return &(base_type::mpNode->mValue); }
205 
206  node_iterator& operator++()
207  { base_type::increment(); return *this; }
208 
209  node_iterator operator++(int)
210  { node_iterator temp(*this); base_type::increment(); return temp; }
211 
212  }; // node_iterator
213 
214 
215 
226  template <typename Value, bool bCacheHashCode>
227 struct hashtable_iterator_base
228 {
229  public:
230 typedef hash_node<Value, bCacheHashCode> node_type;
231
232  public:
233  // We use public here because it allows the hashtable class to access
234  // these without a function call, and we are very strongly avoiding
235  // function calls in this library, as our primary goal is performance
236  // over correctness and some compilers (e.g. GCC) are terrible at
237  // inlining and so avoiding function calls is of major importance.
238  node_type* mpNode; // Current node within current bucket.
239  node_type** mpBucket; // Current bucket.
240 
241  public:
242  hashtable_iterator_base(node_type* pNode, node_type** pBucket)
243  : mpNode(pNode), mpBucket(pBucket) { }
244 
245  void increment_bucket()
246  {
247  ++mpBucket;
248  while(*mpBucket == NULL) // We store an extra bucket with some non-NULL value at the end
249  ++mpBucket; // of the bucket array so that finding the end of the bucket
250  mpNode = *mpBucket; // array is quick and simple.
251  }
252 
253  void increment()
254  {
255  mpNode = mpNode->mpNext;
256 
257  while(mpNode == NULL)
258  mpNode = *++mpBucket;
259  }
260 
261  }; // hashtable_iterator_base
262 
263 
264 
265 
276  template <typename Value, bool bConst, bool bCacheHashCode>
277  struct hashtable_iterator : public hashtable_iterator_base<Value, bCacheHashCode>
278  {
279  public:
280 typedef hashtable_iterator_base<Value, bCacheHashCode> base_type;
281 typedef hashtable_iterator<Value, bConst, bCacheHashCode> this_type;
282 typedef hashtable_iterator<Value, false, bCacheHashCode> this_type_non_const;
283 typedef typename base_type::node_type node_type;
284  typedef Value value_type;
285 typedef typename type_select<bConst, const Value*, Value*>::type pointer;
286 typedef typename type_select<bConst, const Value&, Value&>::type reference;
287 typedef ptrdiff_t difference_type;
288  typedef EASTL_ITC_NS::forward_iterator_tag iterator_category;
289 
290  public:
291  hashtable_iterator(node_type* pNode = NULL, node_type** pBucket = NULL)
292  : base_type(pNode, pBucket) { }
293 
294  hashtable_iterator(node_type** pBucket)
295  : base_type(*pBucket, pBucket) { }
296 
297 hashtable_iterator(const this_type_non_const& x)
298 : base_type(x.mpNode, x.mpBucket) { }
299 
300  reference operator*() const
301  { return base_type::mpNode->mValue; }
302 
303  pointer operator->() const
304  { return &(base_type::mpNode->mValue); }
305 
306  hashtable_iterator& operator++()
307  { base_type::increment(); return *this; }
308 
309  hashtable_iterator operator++(int)
310  { hashtable_iterator temp(*this); base_type::increment(); return temp; }
311 
312  const node_type* get_node() const
313  { return base_type::mpNode; }
314 
315  }; // hashtable_iterator
316 
317 
318 
319 
330  template <typename Iterator>
331  inline typename eastl::iterator_traits<Iterator>::difference_type
332  distance_fw_impl(Iterator first, Iterator last, EASTL_ITC_NS::input_iterator_tag)
333  { return 0; }
334 
335  template <typename Iterator>
336  inline typename eastl::iterator_traits<Iterator>::difference_type
337  distance_fw_impl(Iterator first, Iterator last, EASTL_ITC_NS::forward_iterator_tag)
338  { return eastl::distance(first, last); }
339 
340  template <typename Iterator>
341  inline typename eastl::iterator_traits<Iterator>::difference_type
342  ht_distance(Iterator first, Iterator last)
343  {
344  typedef typename eastl::iterator_traits<Iterator>::iterator_category IC;
345  return distance_fw_impl(first, last, IC());
346  }
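// Editorial note (added): ht_distance deliberately reports 0 for single-pass input
// iterators, since counting such a range would consume it. Consequently the range
// insert further below pre-computes a bucket count only for forward or stronger
// iterators; for input-iterator ranges the table simply grows via the normal
// per-element rehash checks. A hypothetical contrast:
//
//     // eastl::vector<int>::iterator             -> random access: distance is computed
//     // std::istream_iterator<int> (for example) -> input iterator: ht_distance returns 0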
347 
348 
349 
350 
356 struct mod_range_hashing
357 {
358  // Defined as eastl_size_t instead of size_t because the latter
359  // wastes memory and is sometimes slower on 64 bit machines.
360  uint32_t operator()(uint32_t r, uint32_t n) const
361  { return r % n; }
362  };
363 
364 
373 struct default_ranged_hash{ };
374
375 
381  struct EASTL_API prime_rehash_policy
382  {
383  public:
384  float mfMaxLoadFactor;
385  float mfGrowthFactor;
386  mutable uint32_t mnNextResize;
387 
388  public:
389  prime_rehash_policy(float fMaxLoadFactor = 1.f)
390  : mfMaxLoadFactor(fMaxLoadFactor), mfGrowthFactor(2.f), mnNextResize(0) { }
391 
392  float GetMaxLoadFactor() const
393  { return mfMaxLoadFactor; }
394 
397  static uint32_t GetPrevBucketCountOnly(uint32_t nBucketCountHint);
398 
401  uint32_t GetPrevBucketCount(uint32_t nBucketCountHint) const;
402 
405  uint32_t GetNextBucketCount(uint32_t nBucketCountHint) const;
406 
409  uint32_t GetBucketCount(uint32_t nElementCount) const;
410 
415 eastl::pair<bool, uint32_t>
416 GetRehashRequired(uint32_t nBucketCount, uint32_t nElementCount, uint32_t nElementAdd) const;
417  };
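// Editorial sketch (added; not original text): GetRehashRequired is conceptually a
// load-factor test. With mfMaxLoadFactor == 1.0 and 11 buckets, asking to add a 12th
// element exceeds 11 * 1.0, so the policy reports that a rehash is needed and returns
// a larger (prime) bucket count; the exact prime selection lives in the corresponding
// .cpp implementation and is not reproduced here. The call sites later in this file
// use it in this shape:
//
//     // const eastl::pair<bool, uint32_t> bRehash =
//     //     mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, 1);
//     // if(bRehash.first)
//     //     DoRehash(bRehash.second);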
418 
419 
420 
421 
422 
424  // Base classes for hashtable. We define these base classes because
425  // in some cases we want to do different things depending on the
426  // value of a policy class. In some cases the policy class affects
427  // which member functions and nested typedefs are defined; we handle that
428  // by specializing base class templates. Several of the base class templates
429  // need to access other members of class template hashtable, so we use
430  // the "curiously recurring template pattern" (parent class is templated
431  // on type of child class) for them.
433 
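// Editorial sketch (added): the "curiously recurring template pattern" mentioned above,
// reduced to its essentials. The base class is parameterized on the derived class so it
// can downcast 'this' and reach the derived class's members without virtual dispatch:
//
//     template <typename Derived>
//     struct crtp_base
//     {
//         float get_max_load_factor() const
//             { return static_cast<const Derived*>(this)->rehash_policy().GetMaxLoadFactor(); }
//     };
//
// rehash_base below has exactly this shape, with the hashtable class as Derived.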
434 
440  template <typename RehashPolicy, typename Hashtable>
441  struct rehash_base { };
442 
443  template <typename Hashtable>
444  struct rehash_base<prime_rehash_policy, Hashtable>
445  {
446  // Returns the max load factor, which is the load factor beyond
447  // which we rebuild the container with a new bucket count.
448  float get_max_load_factor() const
449  {
450  const Hashtable* const pThis = static_cast<const Hashtable*>(this);
451  return pThis->rehash_policy().GetMaxLoadFactor();
452  }
453 
454  // If you want to make the hashtable never rehash (resize),
455  // set the max load factor to be a very high number (e.g. 100000.f).
456  void set_max_load_factor(float fMaxLoadFactor)
457  {
458  Hashtable* const pThis = static_cast<Hashtable*>(this);
459  pThis->rehash_policy(prime_rehash_policy(fMaxLoadFactor));
460  }
461  };
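// Editorial usage note (added): because hashtable inherits rehash_base publicly, these
// functions appear directly on the derived containers. A hedged sketch, assuming the
// hash_map wrapper is available:
//
//     eastl::hash_map<int, int> map;
//     map.set_max_load_factor(4.f);    // allow longer chains before the table rehashes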
462 
463 
464 
465 
480  template <typename Key, typename Value, typename ExtractKey, typename Equal,
481  typename H1, typename H2, typename H, bool bCacheHashCode>
482 struct hash_code_base;
483
484 
490  template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2, typename H>
491  struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, false>
492  {
493  protected:
494  ExtractKey mExtractKey; // To do: Make this member go away entirely, as it never has any data.
495  Equal mEqual; // To do: Make this instance use zero space when it is zero size.
496  H mRangedHash; // To do: Make this instance use zero space when it is zero size
497 
498  public:
499  H1 hash_function() const
500  { return H1(); }
501 
502  Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
503  { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
504 
505  const Equal& key_eq() const
506  { return mEqual; }
507 
508  Equal& key_eq()
509  { return mEqual; }
510 
511  protected:
512  typedef void* hash_code_t;
513  typedef uint32_t bucket_index_t;
514 
515  hash_code_base(const ExtractKey& extractKey, const Equal& eq, const H1&, const H2&, const H& h)
516  : mExtractKey(extractKey), mEqual(eq), mRangedHash(h) { }
517 
518  hash_code_t get_hash_code(const Key& key) const
519  { return NULL; }
520 
521  bucket_index_t bucket_index(hash_code_t, uint32_t) const
522  { return (bucket_index_t)0; }
523 
524  bucket_index_t bucket_index(const Key& key, hash_code_t, uint32_t nBucketCount) const
525  { return (bucket_index_t)mRangedHash(key, nBucketCount); }
526 
527  bucket_index_t bucket_index(const hash_node<Value, false>* pNode, uint32_t nBucketCount) const
528  { return (bucket_index_t)mRangedHash(mExtractKey(pNode->mValue), nBucketCount); }
529 
530  bool compare(const Key& key, hash_code_t, hash_node<Value, false>* pNode) const
531  { return mEqual(key, mExtractKey(pNode->mValue)); }
532 
533  void copy_code(hash_node<Value, false>*, const hash_node<Value, false>*) const
534  { } // Nothing to do.
535 
536  void set_code(hash_node<Value, false>* pDest, hash_code_t c) const
537  { } // Nothing to do.
538 
539  void base_swap(hash_code_base& x)
540  {
541  eastl::swap(mExtractKey, x.mExtractKey);
542  eastl::swap(mEqual, x.mEqual);
543  eastl::swap(mRangedHash, x.mRangedHash);
544  }
545 
546  }; // hash_code_base
547 
548 
549 
550  // No specialization for ranged hash function while caching hash codes.
551  // That combination is meaningless, and trying to do it is an error.
552 
553 
560  template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2, typename H>
561  struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, true>;
562 
563 
564 
571  template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2>
572  struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, false>
573  {
574  protected:
575  ExtractKey mExtractKey;
576  Equal mEqual;
577  H1 m_h1;
578  H2 m_h2;
579 
580  public:
581  typedef H1 hasher;
582 
583  H1 hash_function() const
584  { return m_h1; }
585 
586  Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
587  { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
588 
589  const Equal& key_eq() const
590  { return mEqual; }
591 
592  Equal& key_eq()
593  { return mEqual; }
594 
595  protected:
596  typedef uint32_t hash_code_t;
597  typedef uint32_t bucket_index_t;
598  typedef hash_node<Value, false> node_type;
599 
600  hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&)
601  : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { }
602 
603  hash_code_t get_hash_code(const Key& key) const
604  { return (hash_code_t)m_h1(key); }
605 
606  bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const
607  { return (bucket_index_t)m_h2(c, nBucketCount); }
608 
609  bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const
610  { return (bucket_index_t)m_h2(c, nBucketCount); }
611 
612  bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const
613  { return (bucket_index_t)m_h2((hash_code_t)m_h1(mExtractKey(pNode->mValue)), nBucketCount); }
614 
615  bool compare(const Key& key, hash_code_t, node_type* pNode) const
616  { return mEqual(key, mExtractKey(pNode->mValue)); }
617 
618  void copy_code(node_type*, const node_type*) const
619  { } // Nothing to do.
620 
621  void set_code(node_type*, hash_code_t) const
622  { } // Nothing to do.
623 
624  void base_swap(hash_code_base& x)
625  {
626  eastl::swap(mExtractKey, x.mExtractKey);
627  eastl::swap(mEqual, x.mEqual);
628  eastl::swap(m_h1, x.m_h1);
629  eastl::swap(m_h2, x.m_h2);
630  }
631 
632  }; // hash_code_base
633 
634 
635 
642  template <typename Key, typename Value, typename ExtractKey, typename Equal, typename H1, typename H2>
643  struct hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, default_ranged_hash, true>
644  {
645  protected:
646  ExtractKey mExtractKey;
647  Equal mEqual;
648  H1 m_h1;
649  H2 m_h2;
650 
651  public:
652  typedef H1 hasher;
653 
654  H1 hash_function() const
655  { return m_h1; }
656 
657  Equal equal_function() const // Deprecated. Use key_eq() instead, as key_eq is what the new C++ standard
658  { return mEqual; } // has specified in its hashtable (unordered_*) proposal.
659 
660  const Equal& key_eq() const
661  { return mEqual; }
662 
663  Equal& key_eq()
664  { return mEqual; }
665 
666  protected:
667  typedef uint32_t hash_code_t;
668  typedef uint32_t bucket_index_t;
669  typedef hash_node<Value, true> node_type;
670 
671  hash_code_base(const ExtractKey& ex, const Equal& eq, const H1& h1, const H2& h2, const default_ranged_hash&)
672  : mExtractKey(ex), mEqual(eq), m_h1(h1), m_h2(h2) { }
673 
674  hash_code_t get_hash_code(const Key& key) const
675  { return (hash_code_t)m_h1(key); }
676 
677  bucket_index_t bucket_index(hash_code_t c, uint32_t nBucketCount) const
678  { return (bucket_index_t)m_h2(c, nBucketCount); }
679 
680  bucket_index_t bucket_index(const Key&, hash_code_t c, uint32_t nBucketCount) const
681  { return (bucket_index_t)m_h2(c, nBucketCount); }
682 
683  bucket_index_t bucket_index(const node_type* pNode, uint32_t nBucketCount) const
684  { return (bucket_index_t)m_h2((uint32_t)pNode->mnHashCode, nBucketCount); }
685 
686  bool compare(const Key& key, hash_code_t c, node_type* pNode) const
687  { return (pNode->mnHashCode == c) && mEqual(key, mExtractKey(pNode->mValue)); }
688 
689  void copy_code(node_type* pDest, const node_type* pSource) const
690  { pDest->mnHashCode = pSource->mnHashCode; }
691 
692  void set_code(node_type* pDest, hash_code_t c) const
693  { pDest->mnHashCode = c; }
694 
695  void base_swap(hash_code_base& x)
696  {
697  eastl::swap(mExtractKey, x.mExtractKey);
698  eastl::swap(mEqual, x.mEqual);
699  eastl::swap(m_h1, x.m_h1);
700  eastl::swap(m_h2, x.m_h2);
701  }
702 
703  }; // hash_code_base
704 
705 
706 
707 
708 
785  template <typename Key, typename Value, typename Allocator, typename ExtractKey,
786  typename Equal, typename H1, typename H2, typename H,
787  typename RehashPolicy, bool bCacheHashCode, bool bMutableIterators, bool bUniqueKeys>
788  class hashtable
789  : public rehash_base<RehashPolicy, hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H, RehashPolicy, bCacheHashCode, bMutableIterators, bUniqueKeys> >,
790  public hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, bCacheHashCode>
791  {
792  public:
793  typedef Key key_type;
794  typedef Value value_type;
795  typedef typename ExtractKey::result_type mapped_type;
796 typedef hash_code_base<Key, Value, ExtractKey, Equal, H1, H2, H, bCacheHashCode> hash_code_base_type;
797 typedef typename hash_code_base_type::hash_code_t hash_code_t;
798  typedef Allocator allocator_type;
799  typedef Equal key_equal;
800  typedef ptrdiff_t difference_type;
801  typedef eastl_size_t size_type; // See config.h for the definition of eastl_size_t, which defaults to uint32_t.
802  typedef value_type& reference;
803  typedef const value_type& const_reference;
804 typedef node_iterator<value_type, !bMutableIterators, bCacheHashCode> local_iterator;
805 typedef node_iterator<value_type, true, bCacheHashCode> const_local_iterator;
806 typedef hashtable_iterator<value_type, !bMutableIterators, bCacheHashCode> iterator;
807 typedef hashtable_iterator<value_type, true, bCacheHashCode> const_iterator;
808 typedef eastl::reverse_iterator<iterator> reverse_iterator;
809 typedef eastl::reverse_iterator<const_iterator> const_reverse_iterator;
810 typedef hash_node<value_type, bCacheHashCode> node_type;
811 typedef typename type_select<bUniqueKeys, eastl::pair<iterator, bool>, iterator>::type insert_return_type;
812  typedef hashtable<Key, Value, Allocator, ExtractKey, Equal, H1, H2, H,
813  RehashPolicy, bCacheHashCode, bMutableIterators, bUniqueKeys> this_type;
814  typedef RehashPolicy rehash_policy_type;
815  typedef ExtractKey extract_key_type;
816  typedef H1 h1_type;
817  typedef H2 h2_type;
818  typedef H h_type;
819 
820  using hash_code_base_type::key_eq;
821  using hash_code_base_type::hash_function;
822  using hash_code_base_type::mExtractKey;
823  using hash_code_base_type::get_hash_code;
824  using hash_code_base_type::bucket_index;
825  using hash_code_base_type::compare;
826  using hash_code_base_type::set_code;
827 
828  static const bool kCacheHashCode = bCacheHashCode;
829 
830  enum
831  {
832  kKeyAlignment = EASTL_ALIGN_OF(key_type),
833  kKeyAlignmentOffset = 0, // To do: Make sure this really is zero for all uses of this template.
834  kValueAlignment = EASTL_ALIGN_OF(value_type),
835  kValueAlignmentOffset = 0, // To fix: This offset is zero for sets and >0 for maps. Need to fix this.
836  kAllocFlagBuckets = 0x00400000 // Flag to allocator which indicates that we are allocating buckets and not nodes.
837  };
838 
839  protected:
840  node_type** mpBucketArray;
841  size_type mnBucketCount;
842  size_type mnElementCount;
843  RehashPolicy mRehashPolicy; // To do: Use base class optimization to make this go away.
844  allocator_type mAllocator; // To do: Use base class optimization to make this go away.
845 
846  public:
847  hashtable(size_type nBucketCount, const H1&, const H2&, const H&, const Equal&, const ExtractKey&,
848  const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR);
849 
850  template <typename FowardIterator>
851  hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount,
852  const H1&, const H2&, const H&, const Equal&, const ExtractKey&,
853  const allocator_type& allocator = EASTL_HASHTABLE_DEFAULT_ALLOCATOR); // allocator arg removed because VC7.1 fails on the default arg. To do: Make a second version of this function without a default arg.
854 
855  hashtable(const hashtable& x);
856  ~hashtable();
857 
858  allocator_type& get_allocator();
859  void set_allocator(const allocator_type& allocator);
860 
861  this_type& operator=(const this_type& x);
862 
863  void swap(this_type& x);
864 
865  public:
866  iterator begin()
867  {
868  iterator i(mpBucketArray);
869  if(!i.mpNode)
870  i.increment_bucket();
871  return i;
872  }
873 
874  const_iterator begin() const
875  {
876  const_iterator i(mpBucketArray);
877  if(!i.mpNode)
878  i.increment_bucket();
879  return i;
880  }
881 
882  iterator end()
883  { return iterator(mpBucketArray + mnBucketCount); }
884 
885  const_iterator end() const
886  { return const_iterator(mpBucketArray + mnBucketCount); }
887 
888  local_iterator begin(size_type n)
889  { return local_iterator(mpBucketArray[n]); }
890 
891  local_iterator end(size_type)
892  { return local_iterator(NULL); }
893 
894  const_local_iterator begin(size_type n) const
895  { return const_local_iterator(mpBucketArray[n]); }
896 
897  const_local_iterator end(size_type) const
898  { return const_local_iterator(NULL); }
899 
900  bool empty() const
901  { return mnElementCount == 0; }
902 
903  size_type size() const
904  { return mnElementCount; }
905 
906  size_type bucket_count() const
907  { return mnBucketCount; }
908 
909  size_type bucket_size(size_type n) const
910  { return (size_type)eastl::distance(begin(n), end(n)); }
911 
912  //size_type bucket(const key_type& k) const
913  // { return bucket_index(k, (hash code here), (uint32_t)mnBucketCount); }
914 
915  public:
916  float load_factor() const
917  { return (float)mnElementCount / (float)mnBucketCount; }
918 
919  // Inherited from the base class.
920  // Returns the max load factor, which is the load factor beyond
921  // which we rebuild the container with a new bucket count.
922  // get_max_load_factor comes from rehash_base.
923  // float get_max_load_factor() const;
924 
925  // Inherited from the base class.
926  // If you want to make the hashtable never rehash (resize),
927  // set the max load factor to be a very high number (e.g. 100000.f).
928  // set_max_load_factor comes from rehash_base.
929  // void set_max_load_factor(float fMaxLoadFactor);
930 
933 const rehash_policy_type& rehash_policy() const
934 { return mRehashPolicy; }
935 
938  void rehash_policy(const rehash_policy_type& rehashPolicy);
939 
940  public:
941 
942  insert_return_type insert(const value_type& value);
943  iterator insert(const_iterator, const value_type& value);
944 
945  template <typename InputIterator>
946  void insert(InputIterator first, InputIterator last);
947 
948  public:
949  iterator erase(iterator position);
950  iterator erase(iterator first, iterator last);
951 reverse_iterator erase(reverse_iterator position);
952 reverse_iterator erase(reverse_iterator first, reverse_iterator last);
953  size_type erase(const key_type& k);
954 
955  void clear();
956  void clear(bool clearBuckets);
957  void reset();
958  void rehash(size_type nBucketCount);
959 
960  public:
961  iterator find(const key_type& key);
962  const_iterator find(const key_type& key) const;
963 
979  template <typename U, typename UHash, typename BinaryPredicate>
980  iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate);
981 
982  template <typename U, typename UHash, typename BinaryPredicate>
983  const_iterator find_as(const U& u, UHash uhash, BinaryPredicate predicate) const;
984 
985  template <typename U>
986  iterator find_as(const U& u);
987 
988  template <typename U>
989  const_iterator find_as(const U& u) const;
990 
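// Editorial example for find_as (added; a hedged sketch). The typical use is looking up
// string keys by char pointer without constructing a temporary string. The functor types
// mirror the hashtable_find helper defined later in this file; hash_set and the hash /
// equal_to_2 specializations used here are assumed to be available:
//
//     eastl::hash_set<eastl::string> strings;
//     strings.insert(eastl::string("hello"));
//
//     // One-argument form; uses eastl::hash<U> and eastl::equal_to_2<const key_type, U>.
//     eastl::hash_set<eastl::string>::iterator it = strings.find_as("hello");
//
//     // Fully spelled-out form:
//     eastl::hash_set<eastl::string>::iterator it2 =
//         strings.find_as("hello", eastl::hash<const char*>(),
//                         eastl::equal_to_2<eastl::string, const char*>());
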
993  iterator find_by_hash(hash_code_t c);
994  const_iterator find_by_hash(hash_code_t c) const;
995 
996  size_type count(const key_type& k) const;
997 
998  eastl::pair<iterator, iterator> equal_range(const key_type& k);
999  eastl::pair<const_iterator, const_iterator> equal_range(const key_type& k) const;
1000 
1001  public:
1002  bool validate() const;
1003  int validate_iterator(const_iterator i) const;
1004 
1005  protected:
1006  node_type* DoAllocateNode(const value_type& value);
1007  node_type* DoAllocateNodeFromKey(const key_type& key);
1008  void DoFreeNode(node_type* pNode);
1009  void DoFreeNodes(node_type** pBucketArray, size_type);
1010 
1011  node_type** DoAllocateBuckets(size_type n);
1012  void DoFreeBuckets(node_type** pBucketArray, size_type n);
1013 
1014  eastl::pair<iterator, bool> DoInsertValue(const value_type& value, true_type);
1015  iterator DoInsertValue(const value_type& value, false_type);
1016 
1017  eastl::pair<iterator, bool> DoInsertKey(const key_type& key, true_type);
1018  iterator DoInsertKey(const key_type& key, false_type);
1019 
1020  void DoRehash(size_type nBucketCount);
1021  node_type* DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const;
1022 
1023  template <typename U, typename BinaryPredicate>
1024  node_type* DoFindNode(node_type* pNode, const U& u, BinaryPredicate predicate) const;
1025 
1026  node_type* DoFindNode(node_type* pNode, hash_code_t c) const;
1027 
1028  }; // class hashtable
1029 
1030 
1031 
1032 
1033 
1035  // node_iterator_base
1037 
1038  template <typename Value, bool bCacheHashCode>
1039  inline bool operator==(const node_iterator_base<Value, bCacheHashCode>& a, const node_iterator_base<Value, bCacheHashCode>& b)
1040  { return a.mpNode == b.mpNode; }
1041 
1042  template <typename Value, bool bCacheHashCode>
1043  inline bool operator!=(const node_iterator_base<Value, bCacheHashCode>& a, const node_iterator_base<Value, bCacheHashCode>& b)
1044  { return a.mpNode != b.mpNode; }
1045 
1046 
1047 
1048 
1050  // hashtable_iterator_base
1052 
1053  template <typename Value, bool bCacheHashCode>
1054  inline bool operator==(const hashtable_iterator_base<Value, bCacheHashCode>& a, const hashtable_iterator_base<Value, bCacheHashCode>& b)
1055  { return a.mpNode == b.mpNode; }
1056 
1057  template <typename Value, bool bCacheHashCode>
1058  inline bool operator!=(const hashtable_iterator_base<Value, bCacheHashCode>& a, const hashtable_iterator_base<Value, bCacheHashCode>& b)
1059  { return a.mpNode != b.mpNode; }
1060 
1061 
1062 
1063 
1065  // hashtable
1067 
1068  template <typename K, typename V, typename A, typename EK, typename Eq,
1069  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1070  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>
1071  ::hashtable(size_type nBucketCount, const H1& h1, const H2& h2, const H& h,
1072  const Eq& eq, const EK& ek, const allocator_type& allocator)
1073  : rehash_base<RP, hashtable>(),
1074  hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(ek, eq, h1, h2, h),
1075  mnBucketCount(0),
1076  mnElementCount(0),
1077  mRehashPolicy(),
1078  mAllocator(allocator)
1079  {
1080  if(nBucketCount < 2) // If we are starting in an initially empty state, with no memory allocation done.
1081  reset();
1082  else // Else we are creating a potentially non-empty hashtable...
1083  {
1084  EASTL_ASSERT(nBucketCount < 10000000);
1085  mnBucketCount = (size_type)mRehashPolicy.GetNextBucketCount((uint32_t)nBucketCount);
1086  mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2.
1087  }
1088  }
1089 
1090 
1091 
1092  template <typename K, typename V, typename A, typename EK, typename Eq,
1093  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1094  template <typename FowardIterator>
1095  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(FowardIterator first, FowardIterator last, size_type nBucketCount,
1096  const H1& h1, const H2& h2, const H& h,
1097  const Eq& eq, const EK& ek, const allocator_type& allocator)
1098  : rehash_base<rehash_policy_type, hashtable>(),
1099  hash_code_base<key_type, value_type, extract_key_type, key_equal, h1_type, h2_type, h_type, kCacheHashCode>(ek, eq, h1, h2, h),
1100  //mnBucketCount(0), // This gets re-assigned below.
1101  mnElementCount(0),
1102  mRehashPolicy(),
1103  mAllocator(allocator)
1104  {
1105  if(nBucketCount < 2)
1106  {
1107  const size_type nElementCount = (size_type)eastl::ht_distance(first, last);
1108  mnBucketCount = (size_type)mRehashPolicy.GetBucketCount((uint32_t)nElementCount);
1109  }
1110  else
1111  {
1112  EASTL_ASSERT(nBucketCount < 10000000);
1113  mnBucketCount = nBucketCount;
1114  }
1115 
1116  mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will always be at least 2.
1117 
1118  #if EASTL_EXCEPTIONS_ENABLED
1119  try
1120  {
1121  #endif
1122  for(; first != last; ++first)
1123  insert(*first);
1124  #if EASTL_EXCEPTIONS_ENABLED
1125  }
1126  catch(...)
1127  {
1128  clear();
1129  DoFreeBuckets(mpBucketArray, mnBucketCount);
1130  throw;
1131  }
1132  #endif
1133  }
1134 
1135 
1136 
1137  template <typename K, typename V, typename A, typename EK, typename Eq,
1138  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1139  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::hashtable(const this_type& x)
1140  : rehash_base<RP, hashtable>(x),
1141  hash_code_base<K, V, EK, Eq, H1, H2, H, bC>(x),
1142  mnBucketCount(x.mnBucketCount),
1143  mnElementCount(x.mnElementCount),
1144  mRehashPolicy(x.mRehashPolicy),
1145  mAllocator(x.mAllocator)
1146  {
1147  if(mnElementCount) // If there is anything to copy...
1148  {
1149  mpBucketArray = DoAllocateBuckets(mnBucketCount); // mnBucketCount will be at least 2.
1150 
1151  #if EASTL_EXCEPTIONS_ENABLED
1152  try
1153  {
1154  #endif
1155  for(size_type i = 0; i < x.mnBucketCount; ++i)
1156  {
1157  node_type* pNodeSource = x.mpBucketArray[i];
1158  node_type** ppNodeDest = mpBucketArray + i;
1159 
1160  while(pNodeSource)
1161  {
1162  *ppNodeDest = DoAllocateNode(pNodeSource->mValue);
1163  copy_code(*ppNodeDest, pNodeSource);
1164  ppNodeDest = &(*ppNodeDest)->mpNext;
1165  pNodeSource = pNodeSource->mpNext;
1166  }
1167  }
1168  #if EASTL_EXCEPTIONS_ENABLED
1169  }
1170  catch(...)
1171  {
1172  clear();
1173  DoFreeBuckets(mpBucketArray, mnBucketCount);
1174  throw;
1175  }
1176  #endif
1177  }
1178  else
1179  {
1180 // In this case, instead of allocating memory and copying nothing from x,
1181 // we reset ourselves to a zero-allocation state.
1182  reset();
1183  }
1184  }
1185 
1186 
1187 
1188  template <typename K, typename V, typename A, typename EK, typename Eq,
1189  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1190  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::allocator_type&
1191  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::get_allocator()
1192  {
1193  return mAllocator;
1194  }
1195 
1196 
1197 
1198  template <typename K, typename V, typename A, typename EK, typename Eq,
1199  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1200  inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::set_allocator(const allocator_type& allocator)
1201  {
1202  mAllocator = allocator;
1203  }
1204 
1205 
1206 
1207  template <typename K, typename V, typename A, typename EK, typename Eq,
1208  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1209  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::this_type&
1210  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::operator=(const this_type& x)
1211  {
1212  if(this != &x)
1213  {
1214  clear();
1215 
1216  #if EASTL_ALLOCATOR_COPY_ENABLED
1217  mAllocator = x.mAllocator;
1218  #endif
1219 
1220  insert(x.begin(), x.end());
1221  }
1222  return *this;
1223  }
1224 
1225 
1226 
1227  template <typename K, typename V, typename A, typename EK, typename Eq,
1228  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1229  inline hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::~hashtable()
1230  {
1231  clear();
1232  DoFreeBuckets(mpBucketArray, mnBucketCount);
1233  }
1234 
1235 
1236 
1237  template <typename K, typename V, typename A, typename EK, typename Eq,
1238  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1239  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
1240  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNode(const value_type& value)
1241  {
1242  node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), kValueAlignment, kValueAlignmentOffset);
1243 
1244  #if EASTL_EXCEPTIONS_ENABLED
1245  try
1246  {
1247  #endif
1248  ::new(&pNode->mValue) value_type(value);
1249  pNode->mpNext = NULL;
1250  return pNode;
1251  #if EASTL_EXCEPTIONS_ENABLED
1252  }
1253  catch(...)
1254  {
1255  EASTLFree(mAllocator, pNode, sizeof(node_type));
1256  throw;
1257  }
1258  #endif
1259  }
1260 
1261 
1262 
1263  template <typename K, typename V, typename A, typename EK, typename Eq,
1264  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1265  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
1266  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateNodeFromKey(const key_type& key)
1267  {
1268  node_type* const pNode = (node_type*)allocate_memory(mAllocator, sizeof(node_type), kValueAlignment, kValueAlignmentOffset);
1269 
1270  #if EASTL_EXCEPTIONS_ENABLED
1271  try
1272  {
1273  #endif
1274  ::new(&pNode->mValue) value_type(key);
1275  pNode->mpNext = NULL;
1276  return pNode;
1277  #if EASTL_EXCEPTIONS_ENABLED
1278  }
1279  catch(...)
1280  {
1281  EASTLFree(mAllocator, pNode, sizeof(node_type));
1282  throw;
1283  }
1284  #endif
1285  }
1286 
1287 
1288 
1289  template <typename K, typename V, typename A, typename EK, typename Eq,
1290  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1291  inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeNode(node_type* pNode)
1292  {
1293  pNode->~node_type();
1294  EASTLFree(mAllocator, pNode, sizeof(node_type));
1295  }
1296 
1297 
1298 
1299  template <typename K, typename V, typename A, typename EK, typename Eq,
1300  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1301  void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeNodes(node_type** pNodeArray, size_type n)
1302  {
1303  for(size_type i = 0; i < n; ++i)
1304  {
1305  node_type* pNode = pNodeArray[i];
1306  while(pNode)
1307  {
1308  node_type* const pTempNode = pNode;
1309  pNode = pNode->mpNext;
1310  DoFreeNode(pTempNode);
1311  }
1312  pNodeArray[i] = NULL;
1313  }
1314  }
1315 
1316 
1317 
1318  template <typename K, typename V, typename A, typename EK, typename Eq,
1319  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1320  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type**
1321  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoAllocateBuckets(size_type n)
1322  {
1323  // We allocate one extra bucket to hold a sentinel, an arbitrary
1324  // non-null pointer. Iterator increment relies on this.
1325  EASTL_ASSERT(n > 1); // We reserve an mnBucketCount of 1 for the shared gpEmptyBucketArray.
1326  EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the allocator has a copy of this enum.
1327  node_type** const pBucketArray = (node_type**)EASTLAllocFlags(mAllocator, (n + 1) * sizeof(node_type*), kAllocFlagBuckets);
1328  //eastl::fill(pBucketArray, pBucketArray + n, (node_type*)NULL);
1329  memset(pBucketArray, 0, n * sizeof(node_type*));
1330  pBucketArray[n] = reinterpret_cast<node_type*>((uintptr_t)~0);
1331  return pBucketArray;
1332  }
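// Editorial illustration (added comment): for n == 4 the array built above looks like
// the following; the arbitrary non-NULL value in the last slot is the sentinel that
// hashtable_iterator_base::increment_bucket() stops on, so end() can be found without
// storing a separate bucket count in the iterator:
//
//     mpBucketArray -> [ NULL | NULL | NULL | NULL | (node_type*)~0 ]
//                        bucket 0 ... bucket 3        sentinel (never dereferenced)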
1333 
1334 
1335 
1336  template <typename K, typename V, typename A, typename EK, typename Eq,
1337  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1338  inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFreeBuckets(node_type** pBucketArray, size_type n)
1339  {
1340 // If n <= 1, then pBucketArray is from the shared gpEmptyBucketArray. We don't test
1341 // for pBucketArray == &gpEmptyBucketArray because one library may have a different
1342 // gpEmptyBucketArray than another yet pass a hashtable between them. So we go by the size.
1343  if(n > 1)
1344 EASTLFree(mAllocator, pBucketArray, (n + 1) * sizeof(node_type*)); // '+1' because DoAllocateBuckets allocates nBucketCount + 1 buckets in order to have a non-NULL sentinel at the end.
1345  }
1346 
1347 
1348 
1349  template <typename K, typename V, typename A, typename EK, typename Eq,
1350  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1351  void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::swap(this_type& x)
1352  {
1353  if(mAllocator == x.mAllocator) // If allocators are equivalent...
1354  {
1355  // We leave mAllocator as-is.
1356  hash_code_base<K, V, EK, Eq, H1, H2, H, bC>::base_swap(x); // hash_code_base has multiple implementations, so we let them handle the swap.
1357  eastl::swap(mRehashPolicy, x.mRehashPolicy);
1358  eastl::swap(mpBucketArray, x.mpBucketArray);
1359  eastl::swap(mnBucketCount, x.mnBucketCount);
1360  eastl::swap(mnElementCount, x.mnElementCount);
1361  }
1362  else
1363  {
1364  const this_type temp(*this); // Can't call eastl::swap because that would
1365  *this = x; // itself call this member swap function.
1366  x = temp;
1367  }
1368  }
1369 
1370 
1371  template <typename K, typename V, typename A, typename EK, typename Eq,
1372  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1373 void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::rehash_policy(const rehash_policy_type& rehashPolicy)
1374 {
1375  mRehashPolicy = rehashPolicy;
1376 
1377  const size_type nBuckets = rehashPolicy.GetBucketCount((uint32_t)mnElementCount);
1378 
1379  if(nBuckets > mnBucketCount)
1380  DoRehash(nBuckets);
1381  }
1382 
1383 
1384 
1385  template <typename K, typename V, typename A, typename EK, typename Eq,
1386  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1387 inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1388 hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find(const key_type& k)
1389 {
1390  const hash_code_t c = get_hash_code(k);
1391  const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
1392 
1393  node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
1394  return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
1395  }
1396 
1397 
1398 
1399  template <typename K, typename V, typename A, typename EK, typename Eq,
1400  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1401  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
1402 hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find(const key_type& k) const
1403 {
1404  const hash_code_t c = get_hash_code(k);
1405  const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
1406 
1407  node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
1408  return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
1409  }
1410 
1411 
1412 
1413  template <typename K, typename V, typename A, typename EK, typename Eq,
1414  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1415  template <typename U, typename UHash, typename BinaryPredicate>
1416  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1417  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate)
1418  {
1419  const hash_code_t c = (hash_code_t)uhash(other);
1420  const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy.
1421 
1422  node_type* const pNode = DoFindNode(mpBucketArray[n], other, predicate);
1423  return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
1424  }
1425 
1426 
1427 
1428  template <typename K, typename V, typename A, typename EK, typename Eq,
1429  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1430  template <typename U, typename UHash, typename BinaryPredicate>
1431 inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
1432 hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other, UHash uhash, BinaryPredicate predicate) const
1433  {
1434  const hash_code_t c = (hash_code_t)uhash(other);
1435  const size_type n = (size_type)(c % mnBucketCount); // This assumes we are using the mod range policy.
1436 
1437  node_type* const pNode = DoFindNode(mpBucketArray[n], other, predicate);
1438  return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
1439  }
1440 
1441 
1456  template <typename H, typename U>
1457  inline typename H::iterator hashtable_find(H& hashTable, U u)
1458  { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
1459 
1460  template <typename H, typename U>
1461  inline typename H::const_iterator hashtable_find(const H& hashTable, U u)
1462  { return hashTable.find_as(u, eastl::hash<U>(), eastl::equal_to_2<const typename H::key_type, U>()); }
1463 
1464 
1465 
1466  template <typename K, typename V, typename A, typename EK, typename Eq,
1467  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1468  template <typename U>
1469  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1470  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other)
1471  { return eastl::hashtable_find(*this, other); }
1472  // VC++ doesn't appear to like the following, though it seems correct to me.
1473  // So we implement the workaround above until we can straighten this out.
1474  //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
1475 
1476 
1477  template <typename K, typename V, typename A, typename EK, typename Eq,
1478  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1479  template <typename U>
1480  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
1481  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_as(const U& other) const
1482  { return eastl::hashtable_find(*this, other); }
1483  // VC++ doesn't appear to like the following, though it seems correct to me.
1484  // So we implement the workaround above until we can straighten this out.
1485  //{ return find_as(other, eastl::hash<U>(), eastl::equal_to_2<const key_type, U>()); }
1486 
1487 
1488  template <typename K, typename V, typename A, typename EK, typename Eq,
1489  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1490  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1491 hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_by_hash(hash_code_t c)
1492 {
1493  const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
1494 
1495  node_type* const pNode = DoFindNode(mpBucketArray[n], c);
1496  return pNode ? iterator(pNode, mpBucketArray + n) : iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
1497  }
1498 
1499 
1500  template <typename K, typename V, typename A, typename EK, typename Eq,
1501  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1502 inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator
1503 hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::find_by_hash(hash_code_t c) const
1504 {
1505  const size_type n = (size_type)bucket_index(c, (uint32_t)mnBucketCount);
1506 
1507  node_type* const pNode = DoFindNode(mpBucketArray[n], c);
1508  return pNode ? const_iterator(pNode, mpBucketArray + n) : const_iterator(mpBucketArray + mnBucketCount); // iterator(mpBucketArray + mnBucketCount) == end()
1509  }
1510 
1511 
1512  template <typename K, typename V, typename A, typename EK, typename Eq,
1513  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1514  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::size_type
1515  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::count(const key_type& k) const
1516  {
1517  const hash_code_t c = get_hash_code(k);
1518  const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
1519  size_type result = 0;
1520 
1521  // To do: Make a specialization for bU (unique keys) == true and take
1522  // advantage of the fact that the count will always be zero or one in that case.
1523  for(node_type* pNode = mpBucketArray[n]; pNode; pNode = pNode->mpNext)
1524  {
1525  if(compare(k, c, pNode))
1526  ++result;
1527  }
1528  return result;
1529  }
1530 
1531 
1532 
1533  template <typename K, typename V, typename A, typename EK, typename Eq,
1534  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1535 eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator,
1536 typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator>
1537  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::equal_range(const key_type& k)
1538  {
1539  const hash_code_t c = get_hash_code(k);
1540  const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
1541 
1542  node_type** head = mpBucketArray + n;
1543  node_type* pNode = DoFindNode(*head, k, c);
1544 
1545  if(pNode)
1546  {
1547  node_type* p1 = pNode->mpNext;
1548 
1549  for(; p1; p1 = p1->mpNext)
1550  {
1551  if(!compare(k, c, p1))
1552  break;
1553  }
1554 
1555  iterator first(pNode, head);
1556  iterator last(p1, head);
1557 
1558  if(!p1)
1559  last.increment_bucket();
1560 
1561  return eastl::pair<iterator, iterator>(first, last);
1562  }
1563 
1564  return eastl::pair<iterator, iterator>(iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end()
1565  iterator(mpBucketArray + mnBucketCount));
1566  }
1567 
1568 
1569 
1570 
1571  template <typename K, typename V, typename A, typename EK, typename Eq,
1572  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1573 eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator,
1574 typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::const_iterator>
1575  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::equal_range(const key_type& k) const
1576  {
1577  const hash_code_t c = get_hash_code(k);
1578  const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
1579  node_type** head = mpBucketArray + n;
1580  node_type* pNode = DoFindNode(*head, k, c);
1581 
1582  if(pNode)
1583  {
1584  node_type* p1 = pNode->mpNext;
1585 
1586  for(; p1; p1 = p1->mpNext)
1587  {
1588  if(!compare(k, c, p1))
1589  break;
1590  }
1591 
1592  const_iterator first(pNode, head);
1593  const_iterator last(p1, head);
1594 
1595  if(!p1)
1596  last.increment_bucket();
1597 
1598  return eastl::pair<const_iterator, const_iterator>(first, last);
1599  }
1600 
1601  return eastl::pair<const_iterator, const_iterator>(const_iterator(mpBucketArray + mnBucketCount), // iterator(mpBucketArray + mnBucketCount) == end()
1602  const_iterator(mpBucketArray + mnBucketCount));
1603  }
1604 
1605 
1606 
1607  template <typename K, typename V, typename A, typename EK, typename Eq,
1608  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1609  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
1610  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNode(node_type* pNode, const key_type& k, hash_code_t c) const
1611  {
1612  for(; pNode; pNode = pNode->mpNext)
1613  {
1614  if(compare(k, c, pNode))
1615  return pNode;
1616  }
1617  return NULL;
1618  }
1619 
1620 
1621 
1622  template <typename K, typename V, typename A, typename EK, typename Eq,
1623  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1624  template <typename U, typename BinaryPredicate>
1625  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
1626  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNode(node_type* pNode, const U& other, BinaryPredicate predicate) const
1627  {
1628  for(; pNode; pNode = pNode->mpNext)
1629  {
1630  if(predicate(mExtractKey(pNode->mValue), other)) // Intentionally compare with key as first arg and other as second arg.
1631  return pNode;
1632  }
1633  return NULL;
1634  }
1635 
1636 
1637 
1638  template <typename K, typename V, typename A, typename EK, typename Eq,
1639  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1640  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::node_type*
1641  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoFindNode(node_type* pNode, hash_code_t c) const
1642  {
1643  for(; pNode; pNode = pNode->mpNext)
1644  {
1645  if(pNode->mnHashCode == c)
1646  return pNode;
1647  }
1648  return NULL;
1649  }
1650 
1651 
1652 
1653  template <typename K, typename V, typename A, typename EK, typename Eq,
1654  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1655 eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
1656 hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(const value_type& value, true_type) // true_type means bUniqueKeys is true.
1657  {
1658  const key_type& k = mExtractKey(value);
1659  const hash_code_t c = get_hash_code(k);
1660  size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
1661  node_type* const pNode = DoFindNode(mpBucketArray[n], k, c);
1662 
1663  if(pNode == NULL)
1664  {
1665  const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
1666 
1667  // Allocate the new node before doing the rehash so that we don't
1668  // do a rehash if the allocation throws.
1669  node_type* const pNodeNew = DoAllocateNode(value);
1670  set_code(pNodeNew, c); // This is a no-op for most hashtables.
1671 
1672  #if EASTL_EXCEPTIONS_ENABLED
1673  try
1674  {
1675  #endif
1676  if(bRehash.first)
1677  {
1678  n = (size_type)bucket_index(k, c, (uint32_t)bRehash.second);
1679  DoRehash(bRehash.second);
1680  }
1681 
1682  EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
1683  pNodeNew->mpNext = mpBucketArray[n];
1684  mpBucketArray[n] = pNodeNew;
1685  ++mnElementCount;
1686 
1687  return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
1688  #if EASTL_EXCEPTIONS_ENABLED
1689  }
1690  catch(...)
1691  {
1692  DoFreeNode(pNodeNew);
1693  throw;
1694  }
1695  #endif
1696  }
1697 
1698  return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
1699  }
1700 
1701 
1702 
1703  template <typename K, typename V, typename A, typename EK, typename Eq,
1704  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1705  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1706  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertValue(const value_type& value, false_type) // false_type means bUniqueKeys is false.
1707  {
1708  const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
1709 
1710  if(bRehash.first)
1711  DoRehash(bRehash.second);
1712 
1713  const key_type& k = mExtractKey(value);
1714  const hash_code_t c = get_hash_code(k);
1715  const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
1716 
1717  node_type* const pNodeNew = DoAllocateNode(value);
1718  set_code(pNodeNew, c); // This is a no-op for most hashtables.
1719 
1720  // To consider: Possibly make this insertion not make equal elements contiguous.
1721  // As it stands now, we insert equal values contiguously in the hashtable.
1722  // The benefit is that equal_range can work in a sensible manner and that
1723  // erase(value) can more quickly find equal values. The downside is that
1724 // this insertion operation takes some extra time. How important is it to
1725  // us that equal_range span all equal items?
1726  node_type* const pNodePrev = DoFindNode(mpBucketArray[n], k, c);
1727 
1728  if(pNodePrev == NULL)
1729  {
1730  EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
1731  pNodeNew->mpNext = mpBucketArray[n];
1732  mpBucketArray[n] = pNodeNew;
1733  }
1734  else
1735  {
1736  pNodeNew->mpNext = pNodePrev->mpNext;
1737  pNodePrev->mpNext = pNodeNew;
1738  }
1739 
1740  ++mnElementCount;
1741 
1742  return iterator(pNodeNew, mpBucketArray + n);
1743  }
1744 
1745 
1746 
1747  template <typename K, typename V, typename A, typename EK, typename Eq,
1748  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1749 eastl::pair<typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator, bool>
1750 hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(const key_type& key, true_type) // true_type means bUniqueKeys is true.
1751  {
1752  const hash_code_t c = get_hash_code(key);
1753  size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
1754  node_type* const pNode = DoFindNode(mpBucketArray[n], key, c);
1755 
1756  if(pNode == NULL)
1757  {
1758  const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
1759 
1760  // Allocate the new node before doing the rehash so that we don't
1761  // do a rehash if the allocation throws.
1762  node_type* const pNodeNew = DoAllocateNodeFromKey(key);
1763  set_code(pNodeNew, c); // This is a no-op for most hashtables.
1764 
1765  #if EASTL_EXCEPTIONS_ENABLED
1766  try
1767  {
1768  #endif
1769  if(bRehash.first)
1770  {
1771  n = (size_type)bucket_index(key, c, (uint32_t)bRehash.second);
1772  DoRehash(bRehash.second);
1773  }
1774 
1775  EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
1776  pNodeNew->mpNext = mpBucketArray[n];
1777  mpBucketArray[n] = pNodeNew;
1778  ++mnElementCount;
1779 
1780  return eastl::pair<iterator, bool>(iterator(pNodeNew, mpBucketArray + n), true);
1781  #if EASTL_EXCEPTIONS_ENABLED
1782  }
1783  catch(...)
1784  {
1785  DoFreeNode(pNodeNew);
1786  throw;
1787  }
1788  #endif
1789  }
1790 
1791  return eastl::pair<iterator, bool>(iterator(pNode, mpBucketArray + n), false);
1792  }
1793 
1794 
1795 
1796  template <typename K, typename V, typename A, typename EK, typename Eq,
1797  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1798  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1799  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoInsertKey(const key_type& key, false_type) // false_type means bUniqueKeys is false.
1800  {
1801  const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, (uint32_t)1);
1802 
1803  if(bRehash.first)
1804  DoRehash(bRehash.second);
1805 
1806  const hash_code_t c = get_hash_code(key);
1807  const size_type n = (size_type)bucket_index(key, c, (uint32_t)mnBucketCount);
1808 
1809  node_type* const pNodeNew = DoAllocateNodeFromKey(key);
1810  set_code(pNodeNew, c); // This is a no-op for most hashtables.
1811 
1812  // To consider: Possibly make this insertion not make equal elements contiguous.
1813  // As it stands now, we insert equal values contiguously in the hashtable.
1814  // The benefit is that equal_range can work in a sensible manner and that
1815  // erase(value) can more quickly find equal values. The downside is that
1816 // this insertion operation takes some extra time. How important is it to
1817  // us that equal_range span all equal items?
1818  node_type* const pNodePrev = DoFindNode(mpBucketArray[n], key, c);
1819 
1820  if(pNodePrev == NULL)
1821  {
1822  EASTL_ASSERT((void**)mpBucketArray != &gpEmptyBucketArray[0]);
1823  pNodeNew->mpNext = mpBucketArray[n];
1824  mpBucketArray[n] = pNodeNew;
1825  }
1826  else
1827  {
1828  pNodeNew->mpNext = pNodePrev->mpNext;
1829  pNodePrev->mpNext = pNodeNew;
1830  }
1831 
1832  ++mnElementCount;
1833 
1834  return iterator(pNodeNew, mpBucketArray + n);
1835  }
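// Editor's note: the following usage sketch is not part of the original source.
// It illustrates why keeping equal elements contiguous (as discussed in the comment
// above) matters, assuming the companion eastl::hash_multiset container
// (bUniqueKeys == false) that wraps this hashtable.
//
//     eastl::hash_multiset<int> hms;
//     hms.insert(42);
//     hms.insert(42);   // Linked adjacent to the first 42 within its bucket.
//     hms.insert(7);
//
//     // Because equal elements are contiguous, equal_range can return a single
//     // [first, last) span that covers every 42.
//     eastl::pair<eastl::hash_multiset<int>::iterator,
//                 eastl::hash_multiset<int>::iterator> range = hms.equal_range(42);
//     // eastl::distance(range.first, range.second) == 2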
1836 
1837 
1838 
1839  template <typename K, typename V, typename A, typename EK, typename Eq,
1840  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1841  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert_return_type
1842  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const value_type& value)
1843  {
1844  return DoInsertValue(value, integral_constant<bool, bU>());
1845  }
1846 
1847 
1848 
1849  template <typename K, typename V, typename A, typename EK, typename Eq,
1850  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1851  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1852  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(const_iterator, const value_type& value)
1853  {
1854  // We ignore the first argument (hint iterator). It's not likely to be useful for hashtable containers.
1855 
1856  #ifdef __MWERKS__ // The Metrowerks compiler has a bug.
1857  insert_return_type result = insert(value);
1858  return result.first; // Note by Paul Pedriana while perusing this code: This code will fail to compile when bU is false (i.e. for multiset, multimap).
1859 
1860  #elif defined(__GNUC__) && (__GNUC__ < 3) // If using old GCC (GCC 2.x has a bug which we work around)
1861  EASTL_ASSERT(empty()); // This function cannot return the correct return value on GCC 2.x unless the container is empty.
1862  DoInsertValue(value, integral_constant<bool, bU>());
1863  return begin(); // This is the wrong answer.
1864  #else
1865  insert_return_type result = DoInsertValue(value, integral_constant<bool, bU>());
1866  return result.first; // Note by Paul Pedriana while perusing this code: This code will fail to compile when bU is false (i.e. for multiset, multimap).
1867  #endif
1868  }
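// Editor's note: usage sketch, not part of the original source. The hint overload
// behaves the same as the plain insert; the iterator argument exists only for
// interface compatibility with the ordered associative containers. Assumes the
// companion eastl::hash_set container that wraps this hashtable.
//
//     eastl::hash_set<int> hs;
//     eastl::hash_set<int>::iterator it = hs.insert(hs.begin(), 10); // The hint is ignored.
//     // Equivalent to: hs.insert(10).first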
1869 
1870 
1871 
1872  template <typename K, typename V, typename A, typename EK, typename Eq,
1873  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1874  template <typename InputIterator>
1875  void
1876  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::insert(InputIterator first, InputIterator last)
1877  {
1878  const uint32_t nElementAdd = (uint32_t)eastl::ht_distance(first, last);
1879  const eastl::pair<bool, uint32_t> bRehash = mRehashPolicy.GetRehashRequired((uint32_t)mnBucketCount, (uint32_t)mnElementCount, nElementAdd);
1880 
1881  if(bRehash.first)
1882  DoRehash(bRehash.second);
1883 
1884  for(; first != last; ++first)
1885  {
1886  #ifdef __MWERKS__ // The Metrowerks compiler has a bug.
1887  insert(*first);
1888  #else
1889  DoInsertValue(*first, integral_constant<bool, bU>());
1890  #endif
1891  }
1892  }
1893 
1894 
1895 
1896  template <typename K, typename V, typename A, typename EK, typename Eq,
1897  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1898  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1899  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(iterator i)
1900  {
1901  iterator iNext(i);
1902  ++iNext;
1903 
1904  node_type* pNode = i.mpNode;
1905  node_type* pNodeCurrent = *i.mpBucket;
1906 
1907  if(pNodeCurrent == pNode)
1908  *i.mpBucket = pNodeCurrent->mpNext;
1909  else
1910  {
1911  // We have a singly-linked list, so we have no choice but to
1912  // walk down it till we find the node before the node at 'i'.
1913  node_type* pNodeNext = pNodeCurrent->mpNext;
1914 
1915  while(pNodeNext != pNode)
1916  {
1917  pNodeCurrent = pNodeNext;
1918  pNodeNext = pNodeCurrent->mpNext;
1919  }
1920 
1921  pNodeCurrent->mpNext = pNodeNext->mpNext;
1922  }
1923 
1924  DoFreeNode(pNode);
1925  --mnElementCount;
1926 
1927  return iNext;
1928  }
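// Editor's note: usage sketch, not part of the original source. Because
// erase(iterator) returns the iterator following the erased element, the usual
// erase-while-iterating idiom applies. Assumes the companion eastl::hash_set
// container that wraps this hashtable.
//
//     eastl::hash_set<int> hs;
//     // ... populate hs ...
//     for(eastl::hash_set<int>::iterator it = hs.begin(); it != hs.end(); )
//     {
//         if((*it % 2) == 0)
//             it = hs.erase(it);   // erase returns the next valid iterator.
//         else
//             ++it;
//     }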
1929 
1930 
1931 
1932  template <typename K, typename V, typename A, typename EK, typename Eq,
1933  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1934  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::iterator
1935  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(iterator first, iterator last)
1936  {
1937  while(first != last)
1938  first = erase(first);
1939  return first;
1940  }
1941 
1942 
1943 
1944  template <typename K, typename V, typename A, typename EK, typename Eq,
1945  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1946  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reverse_iterator
1947  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(reverse_iterator position)
1948  {
1949  return reverse_iterator(erase((++position).base()));
1950  }
1951 
1952 
1953 
1954  template <typename K, typename V, typename A, typename EK, typename Eq,
1955  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1956  inline typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reverse_iterator
1957  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(reverse_iterator first, reverse_iterator last)
1958  {
1959  // Version which erases in order from first to last.
1960  // difference_type i(first.base() - last.base());
1961  // while(i--)
1962  // first = erase(first);
1963  // return first;
1964 
1965  // Version which erases in order from last to first, but is slightly more efficient:
1966  return reverse_iterator(erase((++last).base(), (++first).base()));
1967  }
1968 
1969 
1970 
1971  template <typename K, typename V, typename A, typename EK, typename Eq,
1972  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
1973  typename hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::size_type
1974  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::erase(const key_type& k)
1975  {
1976  // To do: Reimplement this function to do a single loop and not try to be
1977  // smart about element contiguity. The mechanism here is only a benefit if the
1978  // buckets are heavily overloaded; otherwise this mechanism may be slightly slower.
1979 
1980  const hash_code_t c = get_hash_code(k);
1981  const size_type n = (size_type)bucket_index(k, c, (uint32_t)mnBucketCount);
1982  const size_type nElementCountSaved = mnElementCount;
1983 
1984  node_type** pBucketArray = mpBucketArray + n;
1985 
1986  while(*pBucketArray && !compare(k, c, *pBucketArray))
1987  pBucketArray = &(*pBucketArray)->mpNext;
1988 
1989  while(*pBucketArray && compare(k, c, *pBucketArray))
1990  {
1991  node_type* const pNode = *pBucketArray;
1992  *pBucketArray = pNode->mpNext;
1993  DoFreeNode(pNode);
1994  --mnElementCount;
1995  }
1996 
1997  return nElementCountSaved - mnElementCount;
1998  }
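// Editor's note: usage sketch, not part of the original source. erase(key) returns
// the number of elements removed, which may exceed one for the multi containers.
// Assumes the companion eastl::hash_multiset container that wraps this hashtable.
//
//     eastl::hash_multiset<int> hms;
//     hms.insert(5);
//     hms.insert(5);
//     eastl::hash_multiset<int>::size_type n = hms.erase(5);   // n == 2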
1999 
2000 
2001 
2002  template <typename K, typename V, typename A, typename EK, typename Eq,
2003  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2004  inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::clear()
2005  {
2006  DoFreeNodes(mpBucketArray, mnBucketCount);
2007  mnElementCount = 0;
2008  }
2009 
2010 
2011 
2012  template <typename K, typename V, typename A, typename EK, typename Eq,
2013  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2014  inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::clear(bool clearBuckets)
2015  {
2016  DoFreeNodes(mpBucketArray, mnBucketCount);
2017  if(clearBuckets)
2018  {
2019  DoFreeBuckets(mpBucketArray, mnBucketCount);
2020  reset();
2021  }
2022  mnElementCount = 0;
2023  }
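// Editor's note: usage sketch, not part of the original source. clear() destroys
// the elements but keeps the bucket array for reuse, while clear(true) also frees
// the buckets and returns the container to its initial empty-bucket-array state.
// Assumes the companion eastl::hash_set container exposes the inherited overloads.
//
//     eastl::hash_set<int> hs;
//     // ... populate hs ...
//     hs.clear();       // Nodes freed; bucket memory retained.
//     hs.clear(true);   // Nodes and bucket memory both freed; container fully reset.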
2024 
2025 
2026 
2027  template <typename K, typename V, typename A, typename EK, typename Eq,
2028  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2029  void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::reset()
2030  {
2031  // The reset function is a special extension function which unilaterally
2032  // resets the container to an empty state without freeing the memory of
2033  // the contained objects. This is useful for very quickly tearing down a
2034  // container built into scratch memory.
2035  mnBucketCount = 1;
2036 
2037  #ifdef _MSC_VER
2038  mpBucketArray = (node_type**)&gpEmptyBucketArray[0];
2039  #else
2040  void* p = &gpEmptyBucketArray[0];
2041  memcpy(&mpBucketArray, &p, sizeof(mpBucketArray)); // Other compilers enforce strict aliasing, so a direct cast would be unsafe.
2042  #endif
2043 
2044  mnElementCount = 0;
2045  mRehashPolicy.mnNextResize = 0;
2046  }
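// Editor's note: usage sketch, not part of the original source. reset() is an
// EASTL extension intended for containers whose nodes live in scratch/arena
// memory that is reclaimed wholesale, so there is no need to free each node.
// The allocator type below is hypothetical and only for illustration.
//
//     typedef eastl::hash_set<int, eastl::hash<int>, eastl::equal_to<int>,
//                             ScratchArenaAllocator> ScratchSet;   // ScratchArenaAllocator is illustrative only.
//     ScratchSet hs;
//     // ... populate hs with nodes allocated from the arena ...
//     hs.reset();   // Forget nodes and buckets without freeing them individually.
//     // ... then rewind or release the whole arena in one step ...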
2047 
2048 
2049 
2050  template <typename K, typename V, typename A, typename EK, typename Eq,
2051  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2052  inline void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::rehash(size_type nBucketCount)
2053  {
2054  // Note that we unilaterally use the passed-in bucket count; we do not attempt to migrate it
2055  // up to the next prime number. We leave that to the user's discretion.
2056  DoRehash(nBucketCount);
2057  }
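// Editor's note: usage sketch, not part of the original source. Because rehash()
// uses the requested bucket count verbatim, a caller who wants prime-sized tables
// can round the count up first, e.g. via the rehash policy's GetBucketCount.
// Assumes the companion eastl::hash_set container that wraps this hashtable.
//
//     eastl::hash_set<int> hs;
//     // Make room for roughly 1000 elements, letting the policy pick the bucket count:
//     hs.rehash(hs.rehash_policy().GetBucketCount(1000));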
2058 
2059 
2060 
2061  template <typename K, typename V, typename A, typename EK, typename Eq,
2062  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2063  void hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::DoRehash(size_type nNewBucketCount)
2064  {
2065  node_type** const pBucketArray = DoAllocateBuckets(nNewBucketCount); // nNewBucketCount should always be >= 2.
2066 
2067  #if EASTL_EXCEPTIONS_ENABLED
2068  try
2069  {
2070  #endif
2071  node_type* pNode;
2072 
2073  for(size_type i = 0; i < mnBucketCount; ++i)
2074  {
2075  while((pNode = mpBucketArray[i]) != NULL) // Using '!=' disables compiler warnings.
2076  {
2077  const size_type nNewBucketIndex = (size_type)bucket_index(pNode, (uint32_t)nNewBucketCount);
2078 
2079  mpBucketArray[i] = pNode->mpNext;
2080  pNode->mpNext = pBucketArray[nNewBucketIndex];
2081  pBucketArray[nNewBucketIndex] = pNode;
2082  }
2083  }
2084 
2085  DoFreeBuckets(mpBucketArray, mnBucketCount);
2086  mnBucketCount = nNewBucketCount;
2087  mpBucketArray = pBucketArray;
2088  #if EASTL_EXCEPTIONS_ENABLED
2089  }
2090  catch(...)
2091  {
2092  // A failure here means that a hash function threw an exception.
2093  // We can't restore the previous state without calling the hash
2094  // function again, so the only sensible recovery is to delete everything.
2095  DoFreeNodes(pBucketArray, nNewBucketCount);
2096  DoFreeBuckets(pBucketArray, nNewBucketCount);
2097  DoFreeNodes(mpBucketArray, mnBucketCount);
2098  mnElementCount = 0;
2099  throw;
2100  }
2101  #endif
2102  }
2103 
2104 
2105  template <typename K, typename V, typename A, typename EK, typename Eq,
2106  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2107  inline bool hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::validate() const
2108  {
2109  // Verify our empty bucket array is unmodified.
2110  if(gpEmptyBucketArray[0] != NULL)
2111  return false;
2112 
2113  if(gpEmptyBucketArray[1] != (void*)uintptr_t(~0))
2114  return false;
2115 
2116  // Verify that we have at least one bucket. Calculations can
2117  // trigger division by zero exceptions otherwise.
2118  if(mnBucketCount == 0)
2119  return false;
2120 
2121  // Verify that gpEmptyBucketArray is used correctly.
2122  // gpEmptyBucketArray is only used when initially empty.
2123  if((void**)mpBucketArray == &gpEmptyBucketArray[0])
2124  {
2125  if(mnElementCount) // gpEmptyBucketArray is used only for empty hash tables.
2126  return false;
2127 
2128  if(mnBucketCount != 1) // gpEmptyBucketArray is used exactly and only for mnBucketCount == 1.
2129  return false;
2130  }
2131  else
2132  {
2133  if(mnBucketCount < 2) // Small bucket counts *must* use gpEmptyBucketArray.
2134  return false;
2135  }
2136 
2137  // Verify that the element count matches mnElementCount.
2138  size_type nElementCount = 0;
2139 
2140  for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
2141  ++nElementCount;
2142 
2143  if(nElementCount != mnElementCount)
2144  return false;
2145 
2146  // To do: Verify that individual elements are in the expected buckets.
2147 
2148  return true;
2149  }
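// Editor's note: usage sketch, not part of the original source. validate() is a
// debugging aid; a typical pattern is to assert container integrity after a batch
// of mutating operations in debug builds. Assumes the companion eastl::hash_set
// container that wraps this hashtable.
//
//     eastl::hash_set<int> hs;
//     // ... many inserts and erases ...
//     EASTL_ASSERT(hs.validate());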
2150 
2151 
2152  template <typename K, typename V, typename A, typename EK, typename Eq,
2153  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2154  int hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>::validate_iterator(const_iterator i) const
2155  {
2156  // To do: Come up with a more efficient mechanism of doing this.
2157 
2158  for(const_iterator temp = begin(), tempEnd = end(); temp != tempEnd; ++temp)
2159  {
2160  if(temp == i)
2161  return (isf_valid | isf_current | isf_can_dereference);
2162  }
2163 
2164  if(i == end())
2165  return (isf_valid | isf_current);
2166 
2167  return isf_none;
2168  }
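// Editor's note: usage sketch, not part of the original source. The returned value
// is a combination of EASTL's iterator_status_flag bits (isf_none, isf_valid,
// isf_current, isf_can_dereference).
//
//     eastl::hash_set<int> hs;
//     hs.insert(1);
//     eastl::hash_set<int>::iterator it = hs.find(1);
//     if(hs.validate_iterator(it) & eastl::isf_can_dereference)
//     {
//         // 'it' refers to a live element of hs and may safely be dereferenced.
//     }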
2169 
2170 
2171 
2173  // global operators
2175 
2176  template <typename K, typename V, typename A, typename EK, typename Eq,
2177  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2178  inline bool operator==(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
2179  const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
2180  {
2181  return (a.size() == b.size()) && eastl::equal(a.begin(), a.end(), b.begin());
2182  }
2183 
2184 
2185  // Comparing hash tables for less-ness is an odd thing to do. We provide it for
2186  // completeness, though the user is advised to be wary of how they use this.
2187  template <typename K, typename V, typename A, typename EK, typename Eq,
2188  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2189  inline bool operator<(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
2190  const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
2191  {
2192  // This requires hash table elements to support operator<. Since the hash table
2193  // doesn't compare elements via less (it does so via equals), we must use the
2194  // globally defined operator less for the elements.
2195  return eastl::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
2196  }
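// Editor's note: not part of the original source. Because the result of operator<
// depends on iteration order, which in turn depends on the hashing and bucket count,
// it is mainly useful for imposing some consistent ordering rather than a
// semantically meaningful one.
//
//     eastl::hash_set<int> a, b;
//     a.insert(1);
//     b.insert(2);
//     bool aBeforeB = (a < b);   // Requires the element type (int) to support operator<.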
2197 
2198 
2199  template <typename K, typename V, typename A, typename EK, typename Eq,
2200  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2201  inline bool operator!=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
2202  const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
2203  {
2204  return !(a == b);
2205  }
2206 
2207 
2208  template <typename K, typename V, typename A, typename EK, typename Eq,
2209  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2210  inline bool operator>(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
2211  const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
2212  {
2213  return b < a;
2214  }
2215 
2216 
2217  template <typename K, typename V, typename A, typename EK, typename Eq,
2218  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2219  inline bool operator<=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
2220  const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
2221  {
2222  return !(b < a);
2223  }
2224 
2225 
2226  template <typename K, typename V, typename A, typename EK, typename Eq,
2227  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2228  inline bool operator>=(const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
2229  const hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
2230  {
2231  return !(a < b);
2232  }
2233 
2234 
2235  template <typename K, typename V, typename A, typename EK, typename Eq,
2236  typename H1, typename H2, typename H, typename RP, bool bC, bool bM, bool bU>
2237  inline void swap(hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& a,
2238  hashtable<K, V, A, EK, Eq, H1, H2, H, RP, bC, bM, bU>& b)
2239  {
2240  a.swap(b);
2241  }
2242 
2243 
2244 } // namespace eastl
2245 
2246 
2247 #ifdef _MSC_VER
2248  #pragma warning(pop)
2249 #endif
2250 
2251 #endif // Header include guard