rhashtable: allow rht_bucket_var to return NULL.

Rather than returning a pointer to a static nulls, rht_bucket_var()
now returns NULL if the bucket doesn't exist.
This will make the next patch, which stores a bitlock in the
bucket pointer, somewhat cleaner.

This change involves introducing __rht_bucket_nested() which is
like rht_bucket_nested(), but doesn't provide the static nulls,
and changing rht_bucket_nested() to call this and possibly
provide a static nulls - as is still needed for the non-var case.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
NeilBrown 2019-04-02 10:07:45 +11:00 committed by David S. Miller
parent 7a41c294c1
commit ff302db965
2 changed files with 29 additions and 11 deletions

View File

@ -265,6 +265,8 @@ void rhashtable_destroy(struct rhashtable *ht);
struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash); unsigned int hash);
struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
struct bucket_table *tbl, struct bucket_table *tbl,
unsigned int hash); unsigned int hash);
@ -294,7 +296,7 @@ static inline struct rhash_head __rcu *const *rht_bucket(
static inline struct rhash_head __rcu **rht_bucket_var( static inline struct rhash_head __rcu **rht_bucket_var(
struct bucket_table *tbl, unsigned int hash) struct bucket_table *tbl, unsigned int hash)
{ {
return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash]; &tbl->buckets[hash];
} }
@ -890,6 +892,8 @@ static inline int __rhashtable_remove_fast_one(
spin_lock_bh(lock); spin_lock_bh(lock);
pprev = rht_bucket_var(tbl, hash); pprev = rht_bucket_var(tbl, hash);
if (!pprev)
goto out;
rht_for_each_from(he, *pprev, tbl, hash) { rht_for_each_from(he, *pprev, tbl, hash) {
struct rhlist_head *list; struct rhlist_head *list;
@ -934,6 +938,7 @@ static inline int __rhashtable_remove_fast_one(
break; break;
} }
out:
spin_unlock_bh(lock); spin_unlock_bh(lock);
if (err > 0) { if (err > 0) {
@ -1042,6 +1047,8 @@ static inline int __rhashtable_replace_fast(
spin_lock_bh(lock); spin_lock_bh(lock);
pprev = rht_bucket_var(tbl, hash); pprev = rht_bucket_var(tbl, hash);
if (!pprev)
goto out;
rht_for_each_from(he, *pprev, tbl, hash) { rht_for_each_from(he, *pprev, tbl, hash) {
if (he != obj_old) { if (he != obj_old) {
pprev = &he->next; pprev = &he->next;
@ -1053,7 +1060,7 @@ static inline int __rhashtable_replace_fast(
err = 0; err = 0;
break; break;
} }
out:
spin_unlock_bh(lock); spin_unlock_bh(lock);
return err; return err;

View File

@ -237,8 +237,10 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
goto out; goto out;
err = -ENOENT; err = -ENOENT;
if (!pprev)
goto out;
rht_for_each(entry, old_tbl, old_hash) { rht_for_each_from(entry, *pprev, old_tbl, old_hash) {
err = 0; err = 0;
next = rht_dereference_bucket(entry->next, old_tbl, old_hash); next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@ -496,6 +498,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
elasticity = RHT_ELASTICITY; elasticity = RHT_ELASTICITY;
pprev = rht_bucket_var(tbl, hash); pprev = rht_bucket_var(tbl, hash);
if (!pprev)
return ERR_PTR(-ENOENT);
rht_for_each_from(head, *pprev, tbl, hash) { rht_for_each_from(head, *pprev, tbl, hash) {
struct rhlist_head *list; struct rhlist_head *list;
struct rhlist_head *plist; struct rhlist_head *plist;
@ -1161,11 +1165,10 @@ void rhashtable_destroy(struct rhashtable *ht)
} }
EXPORT_SYMBOL_GPL(rhashtable_destroy); EXPORT_SYMBOL_GPL(rhashtable_destroy);
struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash) unsigned int hash)
{ {
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
static struct rhash_head __rcu *rhnull;
unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest; unsigned int size = tbl->size >> tbl->nest;
unsigned int subhash = hash; unsigned int subhash = hash;
@ -1183,15 +1186,23 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
subhash >>= shift; subhash >>= shift;
} }
if (!ntbl) { if (!ntbl)
if (!rhnull) return NULL;
INIT_RHT_NULLS_HEAD(rhnull);
return &rhnull;
}
return &ntbl[subhash].bucket; return &ntbl[subhash].bucket;
} }
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash)
{
static struct rhash_head __rcu *rhnull;
if (!rhnull)
INIT_RHT_NULLS_HEAD(rhnull);
return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested); EXPORT_SYMBOL_GPL(rht_bucket_nested);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,