rhashtable: Add immediate rehash during insertion
This patch reintroduces immediate rehash during insertion.  If
we find during insertion that the table is full or the chain
length exceeds a set limit (currently 16 but may be disabled
with insecure_elasticity) then we will force an immediate rehash.
The rehash will include an expansion if the table utilisation
exceeds 75%.

If this rehash fails then the insertion will fail.  Otherwise the
insertion will be reattempted in the new hash table.

Signed-off-by: Herbert Xu <[email protected]>
Acked-by: Thomas Graf <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
herbertx authored and davem330 committed Mar 24, 2015
1 parent b9ecfda commit ccd57b1
Showing 2 changed files with 96 additions and 6 deletions.
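
For context, a minimal caller-side sketch of how the new insecure_elasticity knob and the changed insertion error handling are meant to be used. rhashtable_params, rhashtable_insert_fast() and the head_offset/key_offset/key_len fields are the existing API this patch extends; everything named example_* is hypothetical, and the table is assumed to have been set up with rhashtable_init(ht, &example_params).

/* Hypothetical u32-keyed object; names are illustrative only. */
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct example_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.head_offset = offsetof(struct example_obj, node),
	.key_offset  = offsetof(struct example_obj, key),
	.key_len     = sizeof(u32),
	/* Left false: keep the default chain-length limit of 16.
	 * Setting it to true disables the check added by this patch.
	 */
	.insecure_elasticity = false,
};

static int example_insert(struct rhashtable *ht, u32 key)
{
	struct example_obj *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);
	int err;

	if (!obj)
		return -ENOMEM;
	obj->key = key;

	/* With this patch an insert may force an immediate rehash; if that
	 * rehash fails (-ENOMEM, or -EBUSY when repeated same-size rehashes
	 * are detected) the insert fails and the error is returned here.
	 */
	err = rhashtable_insert_fast(ht, &obj->node, example_params);
	if (err)
		kfree(obj);
	return err;
}
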
42 changes: 37 additions & 5 deletions include/linux/rhashtable.h
@@ -103,6 +103,7 @@ struct rhashtable;
* @max_size: Maximum size while expanding
* @min_size: Minimum size while shrinking
* @nulls_base: Base value to generate nulls marker
* @insecure_elasticity: Set to true to disable chain length checks
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
@@ -116,6 +117,7 @@ struct rhashtable_params {
unsigned int max_size;
unsigned int min_size;
u32 nulls_base;
bool insecure_elasticity;
size_t locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
@@ -127,6 +129,7 @@ struct rhashtable_params {
* @tbl: Bucket table
* @nelems: Number of elements in table
* @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
@@ -137,6 +140,7 @@ struct rhashtable {
atomic_t nelems;
bool being_destroyed;
unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p;
struct work_struct run_work;
struct mutex mutex;
@@ -266,6 +270,17 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht,
tbl->size > ht->p.min_size;
}

/**
* rht_grow_above_100 - returns true if nelems > table-size
* @ht: hash table
* @tbl: current table
*/
static inline bool rht_grow_above_100(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
return atomic_read(&ht->nelems) > tbl->size;
}

/* The bucket lock is selected based on the hash and protects mutations
* on a group of hash buckets.
*
@@ -307,6 +322,7 @@ int rhashtable_init(struct rhashtable *ht,
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
struct rhash_head *obj,
struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -529,12 +545,14 @@ static inline int __rhashtable_insert_fast(
.ht = ht,
.key = key,
};
int err = -EEXIST;
struct bucket_table *tbl, *new_tbl;
struct rhash_head *head;
spinlock_t *lock;
unsigned elasticity;
unsigned hash;
int err;

restart:
rcu_read_lock();

tbl = rht_dereference_rcu(ht->tbl, ht);
@@ -557,20 +575,34 @@ static inline int __rhashtable_insert_fast(
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(new_tbl)) {
err = rhashtable_insert_slow(ht, key, obj, new_tbl);
if (err == -EAGAIN)
goto slow_path;
goto out;
}

if (!key)
goto skip_lookup;
if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
spin_unlock_bh(lock);
rcu_read_unlock();
err = rhashtable_insert_rehash(ht);
if (err)
return err;

goto restart;
}

err = -EEXIST;
elasticity = ht->elasticity;
rht_for_each(head, tbl, hash) {
if (unlikely(!(params.obj_cmpfn ?
if (key &&
unlikely(!(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
rhashtable_compare(&arg, rht_obj(ht, head)))))
goto out;
if (!--elasticity)
goto slow_path;
}

skip_lookup:
err = 0;

head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
60 changes: 59 additions & 1 deletion lib/rhashtable.c
@@ -375,21 +375,76 @@ static void rht_deferred_worker(struct work_struct *work)
schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
struct bucket_table *tbl,
unsigned hash)
{
unsigned elasticity = ht->elasticity;
struct rhash_head *head;

rht_for_each(head, tbl, hash)
if (!--elasticity)
return true;

return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
struct bucket_table *old_tbl;
struct bucket_table *new_tbl;
struct bucket_table *tbl;
unsigned int size;
int err;

old_tbl = rht_dereference_rcu(ht->tbl, ht);
tbl = rhashtable_last_table(ht, old_tbl);

size = tbl->size;

if (rht_grow_above_75(ht, tbl))
size *= 2;
/* More than two rehashes (not resizes) detected. */
else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
return -EBUSY;

new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
if (new_tbl == NULL)
return -ENOMEM;

err = rhashtable_rehash_attach(ht, tbl, new_tbl);
if (err) {
bucket_table_free(new_tbl);
if (err == -EEXIST)
err = 0;
} else
schedule_work(&ht->run_work);

return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
struct rhash_head *obj,
struct bucket_table *tbl)
{
struct rhash_head *head;
unsigned hash;
int err = -EEXIST;
int err;

tbl = rhashtable_last_table(ht, tbl);
hash = head_hashfn(ht, tbl, obj);
spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

err = -EEXIST;
if (key && rhashtable_lookup_fast(ht, key, ht->p))
goto exit;

err = -EAGAIN;
if (rhashtable_check_elasticity(ht, tbl, hash) ||
rht_grow_above_100(ht, tbl))
goto exit;

err = 0;

head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
@@ -678,6 +733,9 @@ int rhashtable_init(struct rhashtable *ht,

ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

if (!params->insecure_elasticity)
ht->elasticity = 16;

if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
else
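
A worked illustration of the sizing rule in rhashtable_insert_rehash() above (the numbers are illustrative, not from the commit): with a 64-bucket table, rht_grow_above_75() fires once nelems exceeds 48, so the forced rehash allocates a 128-bucket table; if the rehash was triggered only by a long chain and utilisation is still at or below 75%, the replacement table keeps size 64. If a same-size rehash is already in flight (old_tbl != tbl yet the sizes match), the WARN_ON() branch refuses to chain another one and the insertion fails with -EBUSY.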
