Fix: hash table growth (for small tables) should be limited
author Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Tue, 27 Aug 2013 21:58:22 +0000 (17:58 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Fri, 30 Aug 2013 18:24:34 +0000 (14:24 -0400)
Buckets with many entries could cause the hash table to grow to a large
size, beyond the scope for which this chain-length heuristic is
expected to play a role when node accounting is available. Indeed, once
the hash table grows to a larger size, split-counter node accounting is
expected to handle resize/shrink decisions, rather than relying on a
heuristic based on the largest bucket size.
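
As a rough sketch of the policy described above (not the library code
itself; the constants mirror rculfhash.c but are assumptions here, and
the plain log2 helper stands in for cds_lfht_get_count_order_*()):

#include <stdio.h>

#define COUNT_COMMIT_ORDER              10      /* assumed, as in rculfhash.c */
#define CHAIN_LEN_TARGET                1
#define CHAIN_LEN_RESIZE_THRESHOLD      3

/* ceil(log2(v)), stand-in for cds_lfht_get_count_order_ulong(). */
static int count_order_ulong(unsigned long v)
{
        int order = 0;

        while ((1UL << order) < v)
                order++;
        return order;
}

/* Returns the growth order to apply, or 0 to skip resizing. */
static int capped_growth(unsigned long size, unsigned int chain_len,
                int split_count_order, int accounting)
{
        int growth;
        int max_order = COUNT_COMMIT_ORDER + split_count_order;

        if (chain_len < CHAIN_LEN_RESIZE_THRESHOLD)
                return 0;
        /* Ideal growth, calculated based on chain length. */
        growth = count_order_ulong(chain_len - (CHAIN_LEN_TARGET - 1));
        if (accounting && (size << growth) >= (1UL << max_order)) {
                /*
                 * Ideal growth would expand beyond the "small hash
                 * table" sizes: cap at the maximum small table size.
                 */
                growth = max_order - count_order_ulong(size);
                if (growth <= 0)
                        return 0;
        }
        return growth;
}

int main(void)
{
        /* E.g. 8 possible CPUs -> split_count_order = 3. */
        printf("growth = %d\n", capped_growth(4096, 8, 3, 1));
        return 0;
}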

This fixes an issue where hash tables were sometimes observed to reach
a 65536-entry bucket index (65536 * 8 = 524288 bytes) for a workload
limited to adding 1000 entries with random keys and then removing all
of them, in a loop.
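
For illustration (assumed values; the exact numbers depend on the
machine): with COUNT_COMMIT_ORDER = 10 and 8 possible CPUs
(split_count_mask = 7, hence split_count_order = 3), the chain-length
heuristic can now only grow the bucket index up to 2^(10 + 3) = 8192
entries; expanding beyond that is left to split-counter node
accounting, instead of letting the heuristic drive the index toward
the 65536 entries observed above.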

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
rculfhash.c

index 423609d6c188dd79f6392763ad7b8082f2180def..7d39388f494700a1b06a5df885d5a5350cdf42a8 100644 (file)
@@ -563,6 +563,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 static long nr_cpus_mask = -1;
 static long split_count_mask = -1;
+static int split_count_order = -1;
 
 #if defined(HAVE_SYSCONF)
 static void ht_init_nr_cpus_mask(void)
@@ -597,6 +598,8 @@ void alloc_split_items_count(struct cds_lfht *ht)
                        split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
                else
                        split_count_mask = nr_cpus_mask;
+               split_count_order =
+                       cds_lfht_get_count_order_ulong(split_count_mask + 1);
        }
 
        assert(split_count_mask >= 0);
@@ -713,14 +716,39 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
         * Use bucket-local length for small table expand and for
         * environments lacking per-cpu data support.
         */
-       if (count >= (1UL << COUNT_COMMIT_ORDER))
+       if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
                return;
        if (chain_len > 100)
                dbg_printf("WARNING: large chain length: %u.\n",
                           chain_len);
-       if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
-               cds_lfht_resize_lazy_grow(ht, size,
-                       cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+       if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) {
+               int growth;
+
+               /*
+                * Ideal growth calculated based on chain length.
+                */
+               growth = cds_lfht_get_count_order_u32(chain_len
+                               - (CHAIN_LEN_TARGET - 1));
+               if ((ht->flags & CDS_LFHT_ACCOUNTING)
+                               && (size << growth)
+                                       >= (1UL << (COUNT_COMMIT_ORDER
+                                               + split_count_order))) {
+                       /*
+                        * If ideal growth expands the hash table size
+                        * beyond the "small hash table" sizes, use the
+                        * maximum small hash table size to attempt
+                        * expanding the hash table. This only applies
+                        * when node accounting is available, otherwise
+                        * the chain length is used to expand the hash
+                        * table in every case.
+                        */
+                       growth = COUNT_COMMIT_ORDER + split_count_order
+                               - cds_lfht_get_count_order_ulong(size);
+                       if (growth <= 0)
+                               return;
+               }
+               cds_lfht_resize_lazy_grow(ht, size, growth);
+       }
 }
 
 static