root/lang/ruby/ruby-bayon/trunk/sparsehash/google/sparsehash/densehashtable.h @ 36827

Revision 36827, 47.2 kB (checked in by winebarrel, 4 years ago)
  • Property svn:eol-style set to native
Line 
1// Copyright (c) 2005, Google Inc.
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8//     * Redistributions of source code must retain the above copyright
9// notice, this list of conditions and the following disclaimer.
10//     * Redistributions in binary form must reproduce the above
11// copyright notice, this list of conditions and the following disclaimer
12// in the documentation and/or other materials provided with the
13// distribution.
14//     * Neither the name of Google Inc. nor the names of its
15// contributors may be used to endorse or promote products derived from
16// this software without specific prior written permission.
17//
18// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30// ---
31// Author: Craig Silverstein
32//
33// A dense hashtable is a particular implementation of
34// a hashtable: one that is meant to minimize memory allocation.
35// It does this by using an array to store all the data.  We
36// steal a value from the key space to indicate "empty" array
37// elements (ie indices where no item lives) and another to indicate
38// "deleted" elements.
39//
40// (Note it is possible to change the value of the delete key
41// on the fly; you can even remove it, though after that point
42// the hashtable is insert_only until you set it again.  The empty
43// value however can't be changed.)
44//
45// To minimize allocation and pointer overhead, we use internal
46// probing, in which the hashtable is a single table, and collisions
47// are resolved by trying to insert again in another bucket.  The
48// most cache-efficient internal probing schemes are linear probing
49// (which suffers, alas, from clumping) and quadratic probing, which
50// is what we implement by default.
51//
52// Type requirements: value_type is required to be Copy Constructible
53// and Default Constructible. It is not required to be (and commonly
54// isn't) Assignable.
55//
56// You probably shouldn't use this code directly.  Use
57// <google/dense_hash_map> or <google/dense_hash_set> instead.
58
59// You can change the following below:
60// HT_OCCUPANCY_FLT      -- how full before we double size
61// HT_EMPTY_FLT          -- how empty before we halve size
62// HT_MIN_BUCKETS        -- default smallest bucket size
63//
64// You can also change enlarge_resize_percent (which defaults to
65// HT_OCCUPANCY_FLT), and shrink_resize_percent (which defaults to
66// HT_EMPTY_FLT) with set_resizing_parameters().
67//
68// How to decide what values to use?
69// shrink_resize_percent's default of .4 * OCCUPANCY_FLT, is probably good.
70// HT_MIN_BUCKETS is probably unnecessary since you can specify
71// (indirectly) the starting number of buckets at construct-time.
72// For enlarge_resize_percent, you can use this chart to try to trade-off
73// expected lookup time to the space taken up.  By default, this
74// code uses quadratic probing, though you can change it to linear
75// via _JUMP below if you really want to.
76//
77// From http://www.augustana.ca/~mohrj/courses/1999.fall/csc210/lecture_notes/hashing.html
78// NUMBER OF PROBES / LOOKUP       Successful            Unsuccessful
79// Quadratic collision resolution   1 - ln(1-L) - L/2    1/(1-L) - L - ln(1-L)
80// Linear collision resolution     [1+1/(1-L)]/2         [1+1/(1-L)2]/2
81//
82// -- enlarge_resize_percent --         0.10  0.50  0.60  0.75  0.80  0.90  0.99
83// QUADRATIC COLLISION RES.
84//    probes/successful lookup    1.05  1.44  1.62  2.01  2.21  2.85  5.11
85//    probes/unsuccessful lookup  1.11  2.19  2.82  4.64  5.81  11.4  103.6
86// LINEAR COLLISION RES.
87//    probes/successful lookup    1.06  1.5   1.75  2.5   3.0   5.5   50.5
88//    probes/unsuccessful lookup  1.12  2.5   3.6   8.5   13.0  50.0  5000.0
89
90#ifndef _DENSEHASHTABLE_H_
91#define _DENSEHASHTABLE_H_
92
93// The probing method
94// Linear probing
95// #define JUMP_(key, num_probes)    ( 1 )
96// Quadratic-ish probing
97#define JUMP_(key, num_probes)    ( num_probes )
98
99
100#include <google/sparsehash/sparseconfig.h>
101#include <assert.h>
102#include <stdio.h>
103#include <stdlib.h>             // for abort()
104#include <algorithm>            // For swap(), eg
105#include <stdexcept>            // For length_error
106#include <iostream>             // For cerr
107#include <memory>               // For uninitialized_fill, uninitialized_copy
108#include <utility>              // for pair<>
109#include <iterator>             // for facts about iterator tags
110#include <google/type_traits.h> // for true_type, integral_constant, etc.
111
112_START_GOOGLE_NAMESPACE_
113
114using STL_NAMESPACE::pair;
115
116// Hashtable class, used to implement the hashed associative containers
117// hash_set and hash_map.
118
119// Value: what is stored in the table (each bucket is a Value).
120// Key: something in a 1-to-1 correspondence to a Value, that can be used
121//      to search for a Value in the table (find() takes a Key).
122// HashFcn: Takes a Key and returns an integer, the more unique the better.
123// ExtractKey: given a Value, returns the unique Key associated with it.
124// SetKey: given a Value* and a Key, modifies the value such that
125//         ExtractKey(value) == key.  We guarantee this is only called
126//         with key == deleted_key or key == empty_key.
127// EqualKey: Given two Keys, says whether they are the same (that is,
128//           if they are both associated with the same Value).
129// Alloc: STL allocator to use to allocate memory.  Currently ignored.
130
131template <class Value, class Key, class HashFcn,
132          class ExtractKey, class SetKey, class EqualKey, class Alloc>
133class dense_hashtable;
134
135template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
136struct dense_hashtable_iterator;
137
138template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
139struct dense_hashtable_const_iterator;
140
// We're just an array, but we need to skip over empty and deleted elements
template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
struct dense_hashtable_iterator {
 public:
  typedef dense_hashtable_iterator<V,K,HF,ExK,SetK,EqK,A>       iterator;
  typedef dense_hashtable_const_iterator<V,K,HF,ExK,SetK,EqK,A> const_iterator;

  typedef STL_NAMESPACE::forward_iterator_tag iterator_category;
  typedef V value_type;
  typedef ptrdiff_t difference_type;
  typedef size_t size_type;
  typedef V& reference;                // Value
  typedef V* pointer;

  // "Real" constructor and default constructor.
  // 'h' is the owning table (consulted to test buckets for empty/deleted);
  // [it, it_end) is the raw bucket range to walk.  When 'advance' is true
  // we immediately skip forward to the first bucket holding a live value.
  dense_hashtable_iterator(const dense_hashtable<V,K,HF,ExK,SetK,EqK,A> *h,
                           pointer it, pointer it_end, bool advance)
    : ht(h), pos(it), end(it_end)   {
    if (advance)  advance_past_empty_and_deleted();
  }
  dense_hashtable_iterator() { }  // members deliberately left uninitialized
  // The default destructor is fine; we don't define one
  // The default operator= is fine; we don't define one

  // Happy dereferencer
  reference operator*() const { return *pos; }
  pointer operator->() const { return &(operator*()); }

  // Arithmetic.  The only hard part is making sure that
  // we're not on an empty or marked-deleted array element
  void advance_past_empty_and_deleted() {
    while ( pos != end && (ht->test_empty(*this) || ht->test_deleted(*this)) )
      ++pos;
  }
  iterator& operator++()   {
    assert(pos != end); ++pos; advance_past_empty_and_deleted(); return *this;
  }
  iterator operator++(int) { iterator tmp(*this); ++*this; return tmp; }

  // Comparison.  Only 'pos' participates; 'ht' and 'end' are ignored, so
  // comparing iterators from different tables is meaningless.
  bool operator==(const iterator& it) const { return pos == it.pos; }
  bool operator!=(const iterator& it) const { return pos != it.pos; }


  // The actual data
  const dense_hashtable<V,K,HF,ExK,SetK,EqK,A> *ht;  // owning table (not owned)
  pointer pos, end;       // current bucket, and one past the last bucket
};
189
190
// Now do it all again, but with const-ness!
template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
struct dense_hashtable_const_iterator {
 public:
  typedef dense_hashtable_iterator<V,K,HF,ExK,SetK,EqK,A>       iterator;
  typedef dense_hashtable_const_iterator<V,K,HF,ExK,SetK,EqK,A> const_iterator;

  typedef STL_NAMESPACE::forward_iterator_tag iterator_category;
  typedef V value_type;
  typedef ptrdiff_t difference_type;
  typedef size_t size_type;
  typedef const V& reference;                // Value
  typedef const V* pointer;

  // "Real" constructor and default constructor.
  // Same contract as the non-const iterator: walk [it, it_end) within
  // table 'h', optionally advancing to the first live bucket.
  dense_hashtable_const_iterator(
      const dense_hashtable<V,K,HF,ExK,SetK,EqK,A> *h,
      pointer it, pointer it_end, bool advance)
    : ht(h), pos(it), end(it_end)   {
    if (advance)  advance_past_empty_and_deleted();
  }
  dense_hashtable_const_iterator() { }  // members deliberately uninitialized
  // This lets us convert regular iterators to const iterators
  dense_hashtable_const_iterator(const iterator &it)
    : ht(it.ht), pos(it.pos), end(it.end) { }
  // The default destructor is fine; we don't define one
  // The default operator= is fine; we don't define one

  // Happy dereferencer
  reference operator*() const { return *pos; }
  pointer operator->() const { return &(operator*()); }

  // Arithmetic.  The only hard part is making sure that
  // we're not on an empty or marked-deleted array element
  void advance_past_empty_and_deleted() {
    while ( pos != end && (ht->test_empty(*this) || ht->test_deleted(*this)) )
      ++pos;
  }
  const_iterator& operator++()   {
    assert(pos != end); ++pos; advance_past_empty_and_deleted(); return *this;
  }
  const_iterator operator++(int) { const_iterator tmp(*this); ++*this; return tmp; }

  // Comparison.  Only 'pos' participates; 'ht' and 'end' are ignored.
  bool operator==(const const_iterator& it) const { return pos == it.pos; }
  bool operator!=(const const_iterator& it) const { return pos != it.pos; }


  // The actual data
  const dense_hashtable<V,K,HF,ExK,SetK,EqK,A> *ht;  // owning table (not owned)
  pointer pos, end;       // current bucket, and one past the last bucket
};
243
template <class Value, class Key, class HashFcn,
          class ExtractKey, class SetKey, class EqualKey, class Alloc>
class dense_hashtable {
 public:
  // Types required by the STL hashed-associative-container interface.
  typedef Key key_type;
  typedef Value value_type;
  typedef HashFcn hasher;
  typedef EqualKey key_equal;

  typedef size_t            size_type;
  typedef ptrdiff_t         difference_type;
  typedef value_type*       pointer;
  typedef const value_type* const_pointer;
  typedef value_type&       reference;
  typedef const value_type& const_reference;
  typedef dense_hashtable_iterator<Value, Key, HashFcn,
                                   ExtractKey, SetKey, EqualKey, Alloc>
  iterator;

  typedef dense_hashtable_const_iterator<Value, Key, HashFcn,
                                         ExtractKey, SetKey, EqualKey, Alloc>
  const_iterator;

  // These come from tr1.  For us they're the same as regular iterators.
  typedef iterator local_iterator;
  typedef const_iterator const_local_iterator;

  // How full we let the table get before we resize, by default.
  // Knuth says .8 is good -- higher causes us to probe too much,
  // though it saves memory.
  static const float HT_OCCUPANCY_FLT; // = 0.5;  (defined out-of-line)

  // How empty we let the table get before we resize lower, by default.
  // (0.0 means never resize lower.)
  // It should be less than OCCUPANCY_FLT / 2 or we thrash resizing
  static const float HT_EMPTY_FLT; // = 0.4 * HT_OCCUPANCY_FLT

  // Minimum size we're willing to let hashtables be.
  // Must be a power of two, and at least 4.
  // Note, however, that for a given hashtable, the initial size is a
  // function of the first constructor arg, and may be >HT_MIN_BUCKETS.
  static const size_t HT_MIN_BUCKETS = 4;

  // By default, if you don't specify a hashtable size at
  // construction-time, we use this size.  Must be a power of two, and
  // at least HT_MIN_BUCKETS.
  static const size_t HT_DEFAULT_STARTING_BUCKETS = 32;
292
293  // ITERATOR FUNCTIONS
294  iterator begin()             { return iterator(this, table,
295                                                 table + num_buckets, true); }
296  iterator end()               { return iterator(this, table + num_buckets,
297                                                 table + num_buckets, true); }
298  const_iterator begin() const { return const_iterator(this, table,
299                                                       table+num_buckets,true);}
300  const_iterator end() const   { return const_iterator(this, table + num_buckets,
301                                                       table+num_buckets,true);}
302
303  // These come from tr1 unordered_map.  They iterate over 'bucket' n.
304  // For sparsehashtable, we could consider each 'group' to be a bucket,
305  // I guess, but I don't really see the point.  We'll just consider
306  // bucket n to be the n-th element of the sparsetable, if it's occupied,
307  // or some empty element, otherwise.
308  local_iterator begin(size_type i) {
309    return local_iterator(this, table + i, table + i+1, false);
310  }
311  local_iterator end(size_type i) {
312    local_iterator it = begin(i);
313    if (!test_empty(i) && !test_deleted(i))
314      ++it;
315    return it;
316  }
317  const_local_iterator begin(size_type i) const {
318    return const_local_iterator(this, table + i, table + i+1, false);
319  }
320  const_local_iterator end(size_type i) const {
321    const_local_iterator it = begin(i);
322    if (!test_empty(i) && !test_deleted(i))
323      ++it;
324    return it;
325  }
326
  // ACCESSOR FUNCTIONS for the things we templatize on, basically
  hasher hash_funct() const { return hash; }   // copy of the hash functor
  key_equal key_eq() const  { return equals; } // copy of the key-equality functor

  // Accessor function for statistics gathering: number of times this
  // table has been copied/rehashed (incremented in copy_from()).
  int num_table_copies() const { return num_ht_copies; }
333
 private:
  // Annoyingly, we can't copy values around, because they might have
  // const components (they're probably pair<const X, Y>).  We use
  // explicit destructor invocation and placement new to get around
  // this.  Arg.
  void set_value(value_type* dst, const value_type& src) {
    dst->~value_type();        // destroy the old value in place...
    new(dst) value_type(src);  // ...then copy-construct the new one over it
  }

  // Run the destructor on every bucket in [first, last).  The storage
  // itself stays allocated (and holds raw bytes afterwards).
  void destroy_buckets(size_type first, size_type last) {
    for ( ; first != last; ++first)
      table[first].~value_type();
  }
348
  // DELETE HELPER FUNCTIONS
  // This lets the user describe a key that will indicate deleted
  // table entries.  This key should be an "impossible" entry --
  // if you try to insert it for real, you won't be able to retrieve it!
  // (NB: while you pass in an entire value, only the key part is looked
  // at.  This is just because I don't know how to assign just a key.)
 private:
  void squash_deleted() {           // gets rid of any deleted entries we have
    if ( num_deleted ) {            // get rid of deleted before writing
      dense_hashtable tmp(*this);   // copying will get rid of deleted
      swap(tmp);                    // now we are tmp
    }
    assert(num_deleted == 0);
  }

  // True iff 'key' is the deleted-marker key.
  bool test_deleted_key(const key_type& key) const {
    // The num_deleted test is crucial for read(): after read(), the ht values
    // are garbage, and we don't want to think some of them are deleted.
    // Invariant: !use_deleted implies num_deleted is 0.
    assert(use_deleted || num_deleted == 0);
    return num_deleted > 0 && equals(delkey, key);
  }
371
 public:
  // Designates the key that marks deleted buckets.  Any existing deleted
  // entries are purged first, since their marker is about to change meaning.
  void set_deleted_key(const key_type &key) {
    // the empty indicator (if specified) and the deleted indicator
    // must be different
    assert(!use_empty || !equals(key, get_key(emptyval)));
    // It's only safe to change what "deleted" means if we purge deleted guys
    squash_deleted();
    use_deleted = true;
    delkey = key;
  }
  // After this, the table is insert-only until set_deleted_key() is called
  // again (erase needs a marker value to write into vacated buckets).
  void clear_deleted_key() {
    squash_deleted();
    use_deleted = false;
  }
  key_type deleted_key() const {
    assert(use_deleted);    // meaningless unless set_deleted_key() was called
    return delkey;
  }
390
  // These are public so the iterators can use them
  // True if the item at position bucknum is "deleted" marker
  bool test_deleted(size_type bucknum) const {
    return test_deleted_key(get_key(table[bucknum]));
  }
  bool test_deleted(const iterator &it) const {
    return test_deleted_key(get_key(*it));
  }
  bool test_deleted(const const_iterator &it) const {
    return test_deleted_key(get_key(*it));
  }

  // Set it so test_deleted is true.  true if object didn't used to be deleted.
  bool set_deleted(iterator &it) {
    assert(use_deleted);             // bad if set_deleted_key() wasn't called
    bool retval = !test_deleted(it);
    // &* converts from iterator to value-type.
    set_key(&(*it), delkey);         // overwrite only the key part in place
    return retval;
  }
  // Set it so test_deleted is false.  true if object used to be deleted
  bool clear_deleted(iterator &it) {
    assert(use_deleted);             // bad if set_deleted_key() wasn't called
    // Happens automatically when we assign something else in its place.
    return test_deleted(it);
  }

  // We also allow to set/clear the deleted bit on a const iterator.
  // We allow a const_iterator for the same reason you can delete a
  // const pointer: it's convenient, and semantically you can't use
  // 'it' after it's been deleted anyway, so its const-ness doesn't
  // really matter.
  bool set_deleted(const_iterator &it) {
    assert(use_deleted);             // bad if set_deleted_key() wasn't called
    bool retval = !test_deleted(it);
    // const_cast is legitimate: the underlying bucket belongs to this
    // (non-const) table, so it was never a truly const object.
    set_key(const_cast<value_type*>(&(*it)), delkey);
    return retval;
  }
  // Set it so test_deleted is false.  true if object used to be deleted
  bool clear_deleted(const_iterator &it) {
    assert(use_deleted);             // bad if set_deleted_key() wasn't called
    return test_deleted(it);
  }
434
  // EMPTY HELPER FUNCTIONS
  // This lets the user describe a key that will indicate empty (unused)
  // table entries.  This key should be an "impossible" entry --
  // if you try to insert it for real, you won't be able to retrieve it!
  // (NB: while you pass in an entire value, only the key part is looked
  // at.  This is just because I don't know how to assign just a key.)
 public:
  // These are public so the iterators can use them
  // True if the item at position bucknum is "empty" marker
  bool test_empty(size_type bucknum) const {
    assert(use_empty);              // we always need to know what's empty!
    return equals(get_key(emptyval), get_key(table[bucknum]));
  }
  bool test_empty(const iterator &it) const {
    assert(use_empty);              // we always need to know what's empty!
    return equals(get_key(emptyval), get_key(*it));
  }
  bool test_empty(const const_iterator &it) const {
    assert(use_empty);              // we always need to know what's empty!
    return equals(get_key(emptyval), get_key(*it));
  }
456
 private:
  // You can either set a range empty or an individual element
  void set_empty(size_type bucknum) {
    assert(use_empty);
    set_value(&table[bucknum], emptyval);   // destroys the old value first
  }
  // Copy-construct emptyval into each raw bucket of [table_start, table_end).
  void fill_range_with_empty(value_type* table_start, value_type* table_end) {
    // Like set_empty(range), but doesn't destroy previous contents
    STL_NAMESPACE::uninitialized_fill(table_start, table_end, emptyval);
  }
  void set_empty(size_type buckstart, size_type buckend) {
    assert(use_empty);
    destroy_buckets(buckstart, buckend);    // old values destroyed here...
    fill_range_with_empty(table + buckstart, table + buckend);
  }
472
473 public:
474  // TODO(csilvers): change all callers of this to pass in a key instead,
475  //                 and take a const key_type instead of const value_type.
476  void set_empty_key(const value_type &val) {
477    // Once you set the empty key, you can't change it
478    assert(!use_empty);
479    // The deleted indicator (if specified) and the empty indicator
480    // must be different.
481    assert(!use_deleted || !equals(get_key(val), delkey));
482    use_empty = true;
483    set_value(&emptyval, val);
484
485    assert(!table);                  // must set before first use
486    // num_buckets was set in constructor even though table was NULL
487    table = (value_type *) malloc(num_buckets * sizeof(*table));
488    assert(table);
489    fill_range_with_empty(table, table + num_buckets);
490  }
491  // TODO(sjackman): return a key_type rather than a value_type
492  value_type empty_key() const {
493    assert(use_empty);
494    return emptyval;
495  }
496
497  // FUNCTIONS CONCERNING SIZE
498 public:
499  size_type size() const      { return num_elements - num_deleted; }
500  // Buckets are always a power of 2
501  size_type max_size() const  { return (size_type(-1) >> 1U) + 1; }
502  bool empty() const          { return size() == 0; }
503  size_type bucket_count() const      { return num_buckets; }
504  size_type max_bucket_count() const  { return max_size(); }
505  size_type nonempty_bucket_count() const { return num_elements; }
506  // These are tr1 methods.  Their idea of 'bucket' doesn't map well to
507  // what we do.  We just say every bucket has 0 or 1 items in it.
508  size_type bucket_size(size_type i) const {
509    return begin(i) == end(i) ? 0 : 1;
510  }
511
512
513
 private:
  // Because of the above, size_type(-1) is never legal; use it for errors
  static const size_type ILLEGAL_BUCKET = size_type(-1);

 private:
  // This is the smallest size a hashtable can be without being too crowded
  // If you like, you can give a min #buckets as well as a min #elts
  //
  // Returns the smallest power-of-two bucket count (>= HT_MIN_BUCKETS and
  // >= min_buckets_wanted) that keeps num_elts under the enlarge threshold.
  // Throws std::length_error if doubling would overflow size_type.
  size_type min_size(size_type num_elts, size_type min_buckets_wanted) {
    size_type sz = HT_MIN_BUCKETS;             // min buckets allowed
    while ( sz < min_buckets_wanted ||
            num_elts >= static_cast<size_type>(sz * enlarge_resize_percent) ) {
      if (sz * 2 < sz)
        throw std::length_error("resize overflow");  // protect against overflow
      sz *= 2;
    }
    return sz;
  }
531
  // Used after a string of deletes.  Halves the bucket count (repeatedly)
  // if live occupancy has fallen below shrink_threshold, rebuilding the
  // table via the sizing copy-constructor + swap.
  void maybe_shrink() {
    assert(num_elements >= num_deleted);
    assert((bucket_count() & (bucket_count()-1)) == 0); // is a power of two
    assert(bucket_count() >= HT_MIN_BUCKETS);

    // If you construct a hashtable with < HT_DEFAULT_STARTING_BUCKETS,
    // we'll never shrink until you get relatively big, and we'll never
    // shrink below HT_DEFAULT_STARTING_BUCKETS.  Otherwise, something
    // like "dense_hash_set<int> x; x.insert(4); x.erase(4);" will
    // shrink us down to HT_MIN_BUCKETS buckets, which is too small.
    if (shrink_threshold > 0 &&
        (num_elements-num_deleted) < shrink_threshold &&
        bucket_count() > HT_DEFAULT_STARTING_BUCKETS ) {
      size_type sz = bucket_count() / 2;    // find how much we should shrink
      while ( sz > HT_DEFAULT_STARTING_BUCKETS &&
              (num_elements - num_deleted) < sz * shrink_resize_percent )
        sz /= 2;                            // stay a power of 2
      dense_hashtable tmp(*this, sz);       // Do the actual resizing
      swap(tmp);                            // now we are tmp
    }
    consider_shrink = false;                // because we just considered it
  }
555
  // We'll let you resize a hashtable -- though this makes us copy all!
  // When you resize, you say, "make it big enough for this many more elements"
  // Grows (or shrinks, via maybe_shrink) so that 'delta' more elements fit
  // without crossing the enlarge threshold.  May rebuild the table.
  void resize_delta(size_type delta) {
    if ( consider_shrink )                   // see if lots of deletes happened
      maybe_shrink();
    if ( bucket_count() > HT_MIN_BUCKETS &&
         (num_elements + delta) <= enlarge_threshold )
      return;                                // we're ok as we are

    // Sometimes, we need to resize just to get rid of all the
    // "deleted" buckets that are clogging up the hashtable.  So when
    // deciding whether to resize, count the deleted buckets (which
    // are currently taking up room).  But later, when we decide what
    // size to resize to, *don't* count deleted buckets, since they
    // get discarded during the resize.
    const size_type needed_size = min_size(num_elements + delta, 0);
    if ( needed_size > bucket_count() ) {      // we don't have enough buckets
      size_type resize_to = min_size(num_elements - num_deleted + delta,
                                     bucket_count());
      if (resize_to < needed_size) {
        // This situation means that we have enough deleted elements,
        // that once we purge them, we won't actually have needed to
        // grow.  But we may want to grow anyway: if we just purge one
        // element, say, we'll have to grow anyway next time we
        // insert.  Might as well grow now, since we're already going
        // through the trouble of copying (in order to purge the
        // deleted elements).
        if (num_elements - num_deleted + delta >=
            static_cast<size_type>(resize_to*2 * shrink_resize_percent)) {
          // Good, we won't be below the shrink threshhold even if we double.
          resize_to *= 2;
        }
      }
      dense_hashtable tmp(*this, resize_to);   // rebuild at the new size
      swap(tmp);                             // now we are tmp
    }
  }
593
594  // Increase number of buckets, assuming value_type has trivial copy
595  // constructor and destructor.  (Really, we want it to have "trivial
596  // move", because that's what realloc does.  But there's no way to
597  // capture that using type_traits, so we pretend that move(x, y) is
598  // equivalent to "x.~T(); new(x) T(y);" which is pretty much
599  // correct, if a bit conservative.)
600  void expand_array(size_t resize_to, true_type) {
601    table = (value_type *) realloc(table, resize_to * sizeof(value_type));
602    assert(table);
603    fill_range_with_empty(table + num_buckets, table + resize_to);
604  }
605
606  // Increase number of buckets, without special assumptions about value_type.
607  // TODO(austern): make this exception safe. Handle exceptions from
608  // value_type's copy constructor.
609  void expand_array(size_t resize_to, false_type) {
610    value_type* new_table =
611      (value_type *) malloc(resize_to * sizeof(value_type));
612    assert(new_table);
613    STL_NAMESPACE::uninitialized_copy(table, table + num_buckets, new_table);
614    fill_range_with_empty(new_table + num_buckets, new_table + resize_to);
615    destroy_buckets(0, num_buckets);
616    free(table);
617    table = new_table;
618  }
619
620  // Used to actually do the rehashing when we grow/shrink a hashtable
621  void copy_from(const dense_hashtable &ht, size_type min_buckets_wanted) {
622    clear();            // clear table, set num_deleted to 0
623
624    // If we need to change the size of our table, do it now
625    const size_type resize_to = min_size(ht.size(), min_buckets_wanted);
626    if ( resize_to > bucket_count() ) { // we don't have enough buckets
627      typedef integral_constant<bool,
628          (has_trivial_copy<value_type>::value &&
629           has_trivial_destructor<value_type>::value)>
630          realloc_ok; // we pretend mv(x,y) == "x.~T(); new(x) T(y)"
631      expand_array(resize_to, realloc_ok());
632      num_buckets = resize_to;
633      reset_thresholds();
634    }
635
636    // We use a normal iterator to get non-deleted bcks from ht
637    // We could use insert() here, but since we know there are
638    // no duplicates and no deleted items, we can be more efficient
639    assert((bucket_count() & (bucket_count()-1)) == 0);      // a power of two
640    for ( const_iterator it = ht.begin(); it != ht.end(); ++it ) {
641      size_type num_probes = 0;              // how many times we've probed
642      size_type bucknum;
643      const size_type bucket_count_minus_one = bucket_count() - 1;
644      for (bucknum = hash(get_key(*it)) & bucket_count_minus_one;
645           !test_empty(bucknum);                               // not empty
646           bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one) {
647        ++num_probes;
648        assert(num_probes < bucket_count()); // or else the hashtable is full
649      }
650      set_value(&table[bucknum], *it);       // copies the value to here
651      num_elements++;
652    }
653    num_ht_copies++;
654  }
655
656  // Required by the spec for hashed associative container
657 public:
658  // Though the docs say this should be num_buckets, I think it's much
659  // more useful as req_elements.  As a special feature, calling with
660  // req_elements==0 will cause us to shrink if we can, saving space.
661  void resize(size_type req_elements) {       // resize to this or larger
662    if ( consider_shrink || req_elements == 0 )
663      maybe_shrink();
664    if ( req_elements > num_elements )
665      return resize_delta(req_elements - num_elements);
666  }
667
668  // Get and change the value of shrink_resize_percent and
669  // enlarge_resize_percent.  The description at the beginning of this
670  // file explains how to choose the values.  Setting the shrink
671  // parameter to 0.0 ensures that the table never shrinks.
672  void get_resizing_parameters(float* shrink, float* grow) const {
673    *shrink = shrink_resize_percent;
674    *grow = enlarge_resize_percent;
675  }
676  void set_resizing_parameters(float shrink, float grow) {
677    assert(shrink >= 0.0);
678    assert(grow <= 1.0);
679    if (shrink > grow/2.0f)
680      shrink = grow / 2.0f;     // otherwise we thrash hashtable size
681    shrink_resize_percent = shrink;
682    enlarge_resize_percent = grow;
683    reset_thresholds();
684  }
685
  // CONSTRUCTORS -- as required by the specs, we take a size,
  // but also let you specify a hashfunction, key comparator,
  // and key extractor.  We also define a copy constructor and =.
  // DESTRUCTOR -- needs to free the table
  //
  // 'expected_max_items_in_table' sizes the initial bucket array
  // (0 means use HT_DEFAULT_STARTING_BUCKETS).  No memory is allocated
  // here: allocation is deferred until set_empty_key() is called.
  explicit dense_hashtable(size_type expected_max_items_in_table = 0,
                           const HashFcn& hf = HashFcn(),
                           const EqualKey& eql = EqualKey(),
                           const ExtractKey& ext = ExtractKey(),
                           const SetKey& set = SetKey())
    : hash(hf), equals(eql), get_key(ext), set_key(set), num_deleted(0),
      use_deleted(false), use_empty(false),
      delkey(), emptyval(), enlarge_resize_percent(HT_OCCUPANCY_FLT),
      shrink_resize_percent(HT_EMPTY_FLT), table(NULL),
      num_buckets(expected_max_items_in_table == 0
                  ? HT_DEFAULT_STARTING_BUCKETS
                  : min_size(expected_max_items_in_table, 0)),
      num_elements(0), num_ht_copies(0) {
    // table is NULL until emptyval is set.  However, we set num_buckets
    // here so we know how much space to allocate once emptyval is set
    reset_thresholds();
  }
707
  // As a convenience for resize(), we allow an optional second argument
  // which lets you make this new hashtable a different size than ht.
  // Deleted entries in ht are not copied (copy_from() skips them).
  dense_hashtable(const dense_hashtable& ht,
                  size_type min_buckets_wanted = HT_DEFAULT_STARTING_BUCKETS)
    : hash(ht.hash), equals(ht.equals),
      get_key(ht.get_key), set_key(ht.set_key), num_deleted(0),
      use_deleted(ht.use_deleted), use_empty(ht.use_empty),
      delkey(ht.delkey), emptyval(ht.emptyval),
      enlarge_resize_percent(ht.enlarge_resize_percent),
      shrink_resize_percent(ht.shrink_resize_percent), table(NULL),
      num_buckets(0), num_elements(0), num_ht_copies(ht.num_ht_copies) {
    if (!ht.use_empty) {
      // If use_empty isn't set, copy_from will crash, so we do our own copying.
      // ht must be logically empty in that case, so only the size/threshold
      // parameters need carrying over.
      assert(ht.empty());
      num_buckets = min_size(ht.size(), min_buckets_wanted);
      reset_thresholds();
      return;
    }
    reset_thresholds();
    copy_from(ht, min_buckets_wanted);   // copy_from() ignores deleted entries
  }
729
  // Assignment: copies everything from ht, including its resizing
  // parameters and sentinel keys.  Deleted entries are dropped.
  dense_hashtable& operator= (const dense_hashtable& ht) {
    if (&ht == this)  return *this;        // don't copy onto ourselves
    if (!ht.use_empty) {
      // Without an empty key set, copy_from() cannot run; ht must be
      // empty, so swapping in a freshly-built empty copy suffices.
      assert(ht.empty());
      dense_hashtable empty_table(ht);  // empty table with ht's thresholds
      this->swap(empty_table);
      return *this;
    }
    hash = ht.hash;
    equals = ht.equals;
    get_key = ht.get_key;
    set_key = ht.set_key;
    use_deleted = ht.use_deleted;
    use_empty = ht.use_empty;
    delkey = ht.delkey;
    // set_value() rather than operator=: value_type may not be
    // directly assignable (e.g. pairs with const members).
    set_value(&emptyval, ht.emptyval);
    enlarge_resize_percent = ht.enlarge_resize_percent;
    shrink_resize_percent = ht.shrink_resize_percent;
    copy_from(ht, HT_MIN_BUCKETS);  // calls clear and sets num_deleted to 0 too
    return *this;
  }
751
752  ~dense_hashtable() {
753    if (table) {
754      destroy_buckets(0, num_buckets);
755      free(table);
756    }
757  }
758
  // Many STL algorithms use swap instead of copy constructors
  void swap(dense_hashtable& ht) {
    STL_NAMESPACE::swap(hash, ht.hash);
    STL_NAMESPACE::swap(equals, ht.equals);
    STL_NAMESPACE::swap(get_key, ht.get_key);
    STL_NAMESPACE::swap(set_key, ht.set_key);
    STL_NAMESPACE::swap(num_deleted, ht.num_deleted);
    STL_NAMESPACE::swap(use_deleted, ht.use_deleted);
    STL_NAMESPACE::swap(use_empty, ht.use_empty);
    STL_NAMESPACE::swap(enlarge_resize_percent, ht.enlarge_resize_percent);
    STL_NAMESPACE::swap(shrink_resize_percent, ht.shrink_resize_percent);
    STL_NAMESPACE::swap(delkey, ht.delkey);
    // emptyval is swapped manually via set_value() through a temporary,
    // since std::swap requires assignability that value_type may lack.
    { value_type tmp;     // for annoying reasons, swap() doesn't work
      set_value(&tmp, emptyval);
      set_value(&emptyval, ht.emptyval);
      set_value(&ht.emptyval, tmp);
    }
    STL_NAMESPACE::swap(table, ht.table);
    STL_NAMESPACE::swap(num_buckets, ht.num_buckets);
    STL_NAMESPACE::swap(num_elements, ht.num_elements);
    STL_NAMESPACE::swap(num_ht_copies, ht.num_ht_copies);
    // Both tables now have the other's bucket count, so the cached
    // thresholds (and consider_shrink) must be recomputed on each.
    reset_thresholds();
    ht.reset_thresholds();
  }
783
  // It's always nice to be able to clear a table without deallocating it
  void clear() {
    const size_type new_num_buckets = min_size(0,0);  // smallest legal size
    if (num_elements == 0 &&
        num_deleted == 0 &&
        new_num_buckets == num_buckets) {
      // Table is already empty, and the number of buckets is already as we
      // desire, so nothing to do.
      return;
    }
    if (table)
      destroy_buckets(0, num_buckets);
    if (!table || (new_num_buckets != num_buckets)) {
      // Recompute the resize thresholds and realloc the table only if we're
      // actually changing its size.
      num_buckets = new_num_buckets;          // our new size
      reset_thresholds();
      // NOTE(review): a failed realloc returns NULL and leaks the old
      // block; the assert below is the only failure check.
      table = (value_type *) realloc(table, num_buckets * sizeof(*table));
    }
    assert(table);
    // Buckets hold raw memory at this point: fill them all with emptyval.
    fill_range_with_empty(table, table + num_buckets);
    num_elements = 0;
    num_deleted = 0;
  }
808
809  // Clear the table without resizing it.
810  // Mimicks the stl_hashtable's behaviour when clear()-ing in that it
811  // does not modify the bucket count
812  void clear_no_resize() {
813    if (table) {
814      set_empty(0, num_buckets);
815    }
816    // don't consider to shrink before another erase()
817    reset_thresholds();
818    num_elements = 0;
819    num_deleted = 0;
820  }
821
  // LOOKUP ROUTINES
 private:
  // Returns a pair of positions: 1st where the object is, 2nd where
  // it would go if you wanted to insert it.  1st is ILLEGAL_BUCKET
  // if object is not found; 2nd is ILLEGAL_BUCKET if it is.
  // Note: because of deletions where-to-insert is not trivial: it's the
  // first deleted bucket we see, as long as we don't find the key later
  // The "& (bucket_count()-1)" masking below relies on the bucket count
  // being a power of two.
  pair<size_type, size_type> find_position(const key_type &key) const {
    size_type num_probes = 0;              // how many times we've probed
    const size_type bucket_count_minus_one = bucket_count() - 1;
    size_type bucknum = hash(key) & bucket_count_minus_one;
    size_type insert_pos = ILLEGAL_BUCKET; // where we would insert
    while ( 1 ) {                          // probe until something happens
      if ( test_empty(bucknum) ) {         // bucket is empty
        if ( insert_pos == ILLEGAL_BUCKET )   // found no prior place to insert
          return pair<size_type,size_type>(ILLEGAL_BUCKET, bucknum);
        else
          return pair<size_type,size_type>(ILLEGAL_BUCKET, insert_pos);

      } else if ( test_deleted(bucknum) ) {// keep searching, but mark to insert
        if ( insert_pos == ILLEGAL_BUCKET )
          insert_pos = bucknum;

      } else if ( equals(key, get_key(table[bucknum])) ) {
        return pair<size_type,size_type>(bucknum, ILLEGAL_BUCKET);
      }
      ++num_probes;                        // we're doing another probe
      // JUMP_ (a macro defined earlier in this file) supplies the probe
      // increment for this step.
      bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one;
      assert(num_probes < bucket_count()); // don't probe too many times!
    }
  }
853
854 public:
855  iterator find(const key_type& key) {
856    if ( size() == 0 ) return end();
857    pair<size_type, size_type> pos = find_position(key);
858    if ( pos.first == ILLEGAL_BUCKET )     // alas, not there
859      return end();
860    else
861      return iterator(this, table + pos.first, table + num_buckets, false);
862  }
863
864  const_iterator find(const key_type& key) const {
865    if ( size() == 0 ) return end();
866    pair<size_type, size_type> pos = find_position(key);
867    if ( pos.first == ILLEGAL_BUCKET )     // alas, not there
868      return end();
869    else
870      return const_iterator(this, table + pos.first, table+num_buckets, false);
871  }
872
873  // This is a tr1 method: the bucket a given key is in, or what bucket
874  // it would be put in, if it were to be inserted.  Shrug.
875  size_type bucket(const key_type& key) const {
876    pair<size_type, size_type> pos = find_position(key);
877    return pos.first == ILLEGAL_BUCKET ? pos.second : pos.first;
878  }
879
880  // Counts how many elements have key key.  For maps, it's either 0 or 1.
881  size_type count(const key_type &key) const {
882    pair<size_type, size_type> pos = find_position(key);
883    return pos.first == ILLEGAL_BUCKET ? 0 : 1;
884  }
885
886  // Likewise, equal_range doesn't really make sense for us.  Oh well.
887  pair<iterator,iterator> equal_range(const key_type& key) {
888    iterator pos = find(key);      // either an iterator or end
889    if (pos == end()) {
890      return pair<iterator,iterator>(pos, pos);
891    } else {
892      const iterator startpos = pos++;
893      return pair<iterator,iterator>(startpos, pos);
894    }
895  }
896  pair<const_iterator,const_iterator> equal_range(const key_type& key) const {
897    const_iterator pos = find(key);      // either an iterator or end
898    if (pos == end()) {
899      return pair<const_iterator,const_iterator>(pos, pos);
900    } else {
901      const const_iterator startpos = pos++;
902      return pair<const_iterator,const_iterator>(startpos, pos);
903    }
904  }
905
906
  // INSERTION ROUTINES
 private:
  // If you know *this is big enough to hold obj, use this routine.
  // Returns (iterator to obj, true) if obj was inserted, or (iterator
  // to the already-present equal element, false) otherwise.
  pair<iterator, bool> insert_noresize(const value_type& obj) {
    // First, double-check we're not inserting delkey or emptyval
    assert(!use_empty || !equals(get_key(obj), get_key(emptyval)));
    assert(!use_deleted || !equals(get_key(obj), delkey));
    const pair<size_type,size_type> pos = find_position(get_key(obj));
    if ( pos.first != ILLEGAL_BUCKET) {      // object was already there
      return pair<iterator,bool>(iterator(this, table + pos.first,
                                          table + num_buckets, false),
                                 false);          // false: we didn't insert
    } else {                                 // pos.second says where to put it
      if ( test_deleted(pos.second) ) {      // just replace if it's been del.
        // Reusing a deleted bucket: num_elements was never decremented
        // on deletion (see erase()), so it is not incremented here.
        const_iterator delpos(this, table + pos.second,              // shrug:
                              table + num_buckets, false);// shouldn't need const
        clear_deleted(delpos);
        assert( num_deleted > 0);
        --num_deleted;                       // used to be, now it isn't
      } else {
        ++num_elements;                      // replacing an empty bucket
      }
      set_value(&table[pos.second], obj);
      return pair<iterator,bool>(iterator(this, table + pos.second,
                                          table + num_buckets, false),
                                 true);           // true: we did insert
    }
  }
935
936 public:
937  // This is the normal insert routine, used by the outside world
938  pair<iterator, bool> insert(const value_type& obj) {
939    resize_delta(1);                      // adding an object, grow if need be
940    return insert_noresize(obj);
941  }
942
943  // When inserting a lot at a time, we specialize on the type of iterator
944  template <class InputIterator>
945  void insert(InputIterator f, InputIterator l) {
946    // specializes on iterator type
947    insert(f, l, typename STL_NAMESPACE::iterator_traits<InputIterator>::iterator_category());
948  }
949
950  // Iterator supports operator-, resize before inserting
951  template <class ForwardIterator>
952  void insert(ForwardIterator f, ForwardIterator l,
953              STL_NAMESPACE::forward_iterator_tag) {
954    size_type n = STL_NAMESPACE::distance(f, l);   // TODO(csilvers): standard?
955    resize_delta(n);
956    for ( ; n > 0; --n, ++f)
957      insert_noresize(*f);
958  }
959
960  // Arbitrary iterator, can't tell how much to resize
961  template <class InputIterator>
962  void insert(InputIterator f, InputIterator l,
963              STL_NAMESPACE::input_iterator_tag) {
964    for ( ; f != l; ++f)
965      insert(*f);
966  }
967
968
969  // DELETION ROUTINES
970  size_type erase(const key_type& key) {
971    // First, double-check we're not trying to erase delkey or emptyval
972    assert(!use_empty || !equals(key, get_key(emptyval)));
973    assert(!use_deleted || !equals(key, delkey));
974    const_iterator pos = find(key);   // shrug: shouldn't need to be const
975    if ( pos != end() ) {
976      assert(!test_deleted(pos));  // or find() shouldn't have returned it
977      set_deleted(pos);
978      ++num_deleted;
979      consider_shrink = true;      // will think about shrink after next insert
980      return 1;                    // because we deleted one thing
981    } else {
982      return 0;                    // because we deleted nothing
983    }
984  }
985
986  // We return the iterator past the deleted item.
987  void erase(iterator pos) {
988    if ( pos == end() ) return;    // sanity check
989    if ( set_deleted(pos) ) {      // true if object has been newly deleted
990      ++num_deleted;
991      consider_shrink = true;      // will think about shrink after next insert
992    }
993  }
994
995  void erase(iterator f, iterator l) {
996    for ( ; f != l; ++f) {
997      if ( set_deleted(f)  )       // should always be true
998        ++num_deleted;
999    }
1000    consider_shrink = true;        // will think about shrink after next insert
1001  }
1002
1003  // We allow you to erase a const_iterator just like we allow you to
1004  // erase an iterator.  This is in parallel to 'delete': you can delete
1005  // a const pointer just like a non-const pointer.  The logic is that
1006  // you can't use the object after it's erased anyway, so it doesn't matter
1007  // if it's const or not.
1008  void erase(const_iterator pos) {
1009    if ( pos == end() ) return;    // sanity check
1010    if ( set_deleted(pos) ) {      // true if object has been newly deleted
1011      ++num_deleted;
1012      consider_shrink = true;      // will think about shrink after next insert
1013    }
1014  }
1015  void erase(const_iterator f, const_iterator l) {
1016    for ( ; f != l; ++f) {
1017      if ( set_deleted(f)  )       // should always be true
1018        ++num_deleted;
1019    }
1020    consider_shrink = true;        // will think about shrink after next insert
1021  }
1022
1023
1024  // COMPARISON
1025  bool operator==(const dense_hashtable& ht) const {
1026    if (size() != ht.size()) {
1027      return false;
1028    } else if (this == &ht) {
1029      return true;
1030    } else {
1031      // Iterate through the elements in "this" and see if the
1032      // corresponding element is in ht
1033      for ( const_iterator it = begin(); it != end(); ++it ) {
1034        const_iterator it2 = ht.find(get_key(*it));
1035        if ((it2 == ht.end()) || (*it != *it2)) {
1036          return false;
1037        }
1038      }
1039      return true;
1040    }
1041  }
1042  bool operator!=(const dense_hashtable& ht) const {
1043    return !(*this == ht);
1044  }
1045
1046
  // I/O
  // We support reading and writing hashtables to disk.  Alas, since
  // I don't know how to write a hasher or key_equal, you have to make
  // sure everything but the table is the same.  We compact before writing
  //
  // NOTE: These functions are currently TODO.  They've not been implemented.
  bool write_metadata(FILE *fp) {
    squash_deleted();           // so we don't have to worry about delkey
    return false;               // TODO: unimplemented -- always reports failure
  }
1057
  // Counterpart to write_metadata().  The caller must have set the
  // empty key first (use_empty), since freshly allocated buckets are
  // filled with emptyval.  NOTE: currently a TODO stub -- the reads are
  // not implemented and it always returns false.
  bool read_metadata(FILE *fp) {
    num_deleted = 0;            // since we got rid before writing
    assert(use_empty);          // have to set this before calling us
    if (table)  free(table);    // we'll make our own
    // TODO: read magic number
    // TODO: read num_buckets
    reset_thresholds();
    table = (value_type *) malloc(num_buckets * sizeof(*table));
    assert(table);
    fill_range_with_empty(table, table + num_buckets);
    // TODO: read num_elements
    for ( size_type i = 0; i < num_elements; ++i ) {
      // TODO: read bucket_num
      // TODO: set with non-empty, non-deleted value
    }
    return false;               // TODO
  }
1075
1076  // If your keys and values are simple enough, we can write them to
1077  // disk for you.  "simple enough" means value_type is a POD type
1078  // that contains no pointers.  However, we don't try to normalize
1079  // endianness
1080  bool write_nopointer_data(FILE *fp) const {
1081    for ( const_iterator it = begin(); it != end(); ++it ) {
1082      // TODO: skip empty/deleted values
1083      if ( !fwrite(&*it, sizeof(*it), 1, fp) )  return false;
1084    }
1085    return false;
1086  }
1087
1088  // When reading, we have to override the potential const-ness of *it
1089  bool read_nopointer_data(FILE *fp) {
1090    for ( iterator it = begin(); it != end(); ++it ) {
1091      // TODO: skip empty/deleted values
1092      if ( !fread(reinterpret_cast<void*>(&(*it)), sizeof(*it), 1, fp) )
1093        return false;
1094    }
1095    return false;
1096  }
1097
 private:
  // The actual data
  hasher hash;                      // required by hashed_associative_container
  key_equal equals;
  ExtractKey get_key;
  SetKey set_key;
  size_type num_deleted;        // how many occupied buckets are marked deleted
  bool use_deleted;                          // false until delkey has been set
  bool use_empty;                          // you must do this before you start
  // TODO(csilvers): make a pointer, and get rid of use_deleted (benchmark!)
  key_type delkey;                           // which key marks deleted entries
  value_type emptyval;                        // which key marks unused entries
  float enlarge_resize_percent;                       // how full before resize
  float shrink_resize_percent;                       // how empty before resize
  size_type shrink_threshold;            // num_buckets * shrink_resize_percent
  size_type enlarge_threshold;          // num_buckets * enlarge_resize_percent
  value_type *table;        // the bucket array; stays NULL until emptyval set
  size_type num_buckets;    // length of table (all buckets, incl. empty ones)
  size_type num_elements;   // occupied buckets (deleted ones still counted)
  int num_ht_copies;             // a statistics counter incremented every Copy
  bool consider_shrink;   // true if we should try to shrink before next insert
1119
1120  void reset_thresholds() {
1121    enlarge_threshold = static_cast<size_type>(num_buckets
1122                                               * enlarge_resize_percent);
1123    shrink_threshold = static_cast<size_type>(num_buckets
1124                                              * shrink_resize_percent);
1125    consider_shrink = false;   // whatever caused us to reset already considered
1126  }
1127};
1128
1129// We need a global swap as well
1130template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
1131inline void swap(dense_hashtable<V,K,HF,ExK,SetK,EqK,A> &x,
1132                 dense_hashtable<V,K,HF,ExK,SetK,EqK,A> &y) {
1133  x.swap(y);
1134}
1135
1136#undef JUMP_
1137
// Out-of-class definition for the static constant declared in the class
// body; required so the constant has storage if its address is taken.
template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
const typename dense_hashtable<V,K,HF,ExK,SetK,EqK,A>::size_type
dense_hashtable<V,K,HF,ExK,SetK,EqK,A>::ILLEGAL_BUCKET;

// How full we let the table get before we resize.  Knuth says .8 is
// good -- higher causes us to probe too much, though saves memory.
// However, we go with .5, getting better performance at the cost of
// more space (a trade-off densehashtable explicitly chooses to make).
// Feel free to play around with different values, though.
template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
const float dense_hashtable<V,K,HF,ExK,SetK,EqK,A>::HT_OCCUPANCY_FLT = 0.5f;

// How empty we let the table get before we resize lower.
// It should be less than OCCUPANCY_FLT / 2 or we thrash resizing
// (see set_resizing_parameters(), which enforces that cap).
template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
const float dense_hashtable<V,K,HF,ExK,SetK,EqK,A>::HT_EMPTY_FLT
    = 0.4f * dense_hashtable<V,K,HF,ExK,SetK,EqK,A>::HT_OCCUPANCY_FLT;
1155
1156_END_GOOGLE_NAMESPACE_
1157
1158#endif /* _DENSEHASHTABLE_H_ */
Note: See TracBrowser for help on using the browser.