mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 17:25:38 +00:00
c302378bc1
An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values. This
simplifies many use cases in libbpf, as hashmaps there are mostly
integer to integer. Perf copies the hashmap implementation from libbpf
and has to be updated as well.

Changes to libbpf, selftests/bpf and perf are packed as a single commit
to avoid compilation issues with any future bisect.

The polymorphic interface is achieved by hiding the hashmap interface
functions behind auxiliary macros that take care of the necessary type
casts, for example:

    #define hashmap_cast_ptr(p)                                          \
        ({                                                               \
            _Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),  \
                           #p " pointee should be a long-sized integer or a pointer"); \
            (long *)(p);                                                 \
        })

    bool hashmap_find(const struct hashmap *map, long key, long *value);

    #define hashmap__find(map, key, value) \
        hashmap_find((map), (long)(key), hashmap_cast_ptr(value))

- the hashmap__find macro casts the key and value parameters to long
  and long* respectively;
- hashmap_cast_ptr ensures that the value pointer points to memory of
  the appropriate size.

This hack was suggested by Andrii Nakryiko in [1]. This is a follow-up
for [2].

[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com
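For illustration, a minimal sketch of how the updated interface reads
with plain integer keys and values. The id_hash/id_equal callbacks and
the example() function are illustrative helpers, not part of this
commit:

    #include <stdio.h>
    #include "hashmap.h"

    /* identity hash/equality callbacks for plain integer keys */
    static size_t id_hash(long key, void *ctx)
    {
            return key;
    }

    static bool id_equal(long a, long b, void *ctx)
    {
            return a == b;
    }

    void example(void)
    {
            struct hashmap *m = hashmap__new(id_hash, id_equal, NULL);
            long val;

            /* real code should check IS_ERR(m) here */
            hashmap__add(m, 42, 4242);       /* no void * casts needed */
            if (hashmap__find(m, 42, &val))  /* val receives 4242 */
                    printf("found: %ld\n", val);
            hashmap__free(m);
    }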
241 lines
4.9 KiB
C
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Generic non-thread safe hash map implementation.
 *
 * Copyright (c) 2019 Facebook
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <linux/err.h>
#include "hashmap.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/* prevent accidental re-addition of reallocarray() */
#pragma GCC poison reallocarray

/* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2

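/*
 * Collisions are handled by chaining: each bucket is a singly-linked
 * list of entries. The helpers below push an entry at the head of a
 * chain and unlink it again through a pointer to the preceding
 * next-pointer.
 */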
static void hashmap_add_entry(struct hashmap_entry **pprev,
			      struct hashmap_entry *entry)
{
	entry->next = *pprev;
	*pprev = entry;
}

static void hashmap_del_entry(struct hashmap_entry **pprev,
			      struct hashmap_entry *entry)
{
	*pprev = entry->next;
	entry->next = NULL;
}

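/*
 * hashmap__init() prepares a caller-allocated map; hashmap__new()
 * below allocates one and returns ERR_PTR(-ENOMEM) on failure, so
 * callers should check the result with IS_ERR() rather than against
 * NULL.
 */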
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
		   hashmap_equal_fn equal_fn, void *ctx)
{
	map->hash_fn = hash_fn;
	map->equal_fn = equal_fn;
	map->ctx = ctx;

	map->buckets = NULL;
	map->cap = 0;
	map->cap_bits = 0;
	map->sz = 0;
}

struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
			     hashmap_equal_fn equal_fn,
			     void *ctx)
{
	struct hashmap *map = malloc(sizeof(struct hashmap));

	if (!map)
		return ERR_PTR(-ENOMEM);
	hashmap__init(map, hash_fn, equal_fn, ctx);
	return map;
}

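/*
 * Free every entry and the bucket array itself; the map stays
 * initialized (just empty) and can be reused without another
 * hashmap__init().
 */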
void hashmap__clear(struct hashmap *map)
{
	struct hashmap_entry *cur, *tmp;
	size_t bkt;

	hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
		free(cur);
	}
	free(map->buckets);
	map->buckets = NULL;
	map->cap = map->cap_bits = map->sz = 0;
}

void hashmap__free(struct hashmap *map)
{
	if (IS_ERR_OR_NULL(map))
		return;

	hashmap__clear(map);
	free(map);
}

size_t hashmap__size(const struct hashmap *map)
{
	return map->sz;
}

size_t hashmap__capacity(const struct hashmap *map)
{
	return map->cap;
}

static bool hashmap_needs_to_grow(struct hashmap *map)
{
	/* grow if empty or more than 75% filled */
	return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
}

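/*
 * Double the number of buckets (starting from 2^HASHMAP_MIN_CAP_BITS)
 * and rehash every existing entry into the new bucket array before
 * releasing the old one.
 */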
static int hashmap_grow(struct hashmap *map)
{
	struct hashmap_entry **new_buckets;
	struct hashmap_entry *cur, *tmp;
	size_t new_cap_bits, new_cap;
	size_t h, bkt;

	new_cap_bits = map->cap_bits + 1;
	if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
		new_cap_bits = HASHMAP_MIN_CAP_BITS;

	new_cap = 1UL << new_cap_bits;
	new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
	if (!new_buckets)
		return -ENOMEM;

	hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
		h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
		hashmap_add_entry(&new_buckets[h], cur);
	}

	map->cap = new_cap;
	map->cap_bits = new_cap_bits;
	free(map->buckets);
	map->buckets = new_buckets;

	return 0;
}

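/*
 * Walk the bucket chain selected by hash looking for key. On a match
 * the entry is returned through *entry and, if pprev is non-NULL, the
 * next-pointer linking to it is returned as well, which lets
 * hashmap_delete() unlink the entry without rescanning the chain.
 */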
static bool hashmap_find_entry(const struct hashmap *map,
			       const long key, size_t hash,
			       struct hashmap_entry ***pprev,
			       struct hashmap_entry **entry)
{
	struct hashmap_entry *cur, **prev_ptr;

	if (!map->buckets)
		return false;

	for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
	     cur;
	     prev_ptr = &cur->next, cur = cur->next) {
		if (map->equal_fn(cur->key, key, map->ctx)) {
			if (pprev)
				*pprev = prev_ptr;
			*entry = cur;
			return true;
		}
	}

	return false;
}

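/*
 * Common back-end for the hashmap__add/set/update/append macros:
 * HASHMAP_ADD fails with -EEXIST if the key is already present,
 * HASHMAP_UPDATE fails with -ENOENT if it is not, HASHMAP_SET always
 * succeeds, and HASHMAP_APPEND skips the duplicate-key lookup so the
 * same key may be stored multiple times.
 */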
int hashmap_insert(struct hashmap *map, long key, long value,
		   enum hashmap_insert_strategy strategy,
		   long *old_key, long *old_value)
{
	struct hashmap_entry *entry;
	size_t h;
	int err;

	if (old_key)
		*old_key = 0;
	if (old_value)
		*old_value = 0;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (strategy != HASHMAP_APPEND &&
	    hashmap_find_entry(map, key, h, NULL, &entry)) {
		if (old_key)
			*old_key = entry->key;
		if (old_value)
			*old_value = entry->value;

		if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
			entry->key = key;
			entry->value = value;
			return 0;
		} else if (strategy == HASHMAP_ADD) {
			return -EEXIST;
		}
	}

	if (strategy == HASHMAP_UPDATE)
		return -ENOENT;

	if (hashmap_needs_to_grow(map)) {
		err = hashmap_grow(map);
		if (err)
			return err;
		h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	}

	entry = malloc(sizeof(struct hashmap_entry));
	if (!entry)
		return -ENOMEM;

	entry->key = key;
	entry->value = value;
	hashmap_add_entry(&map->buckets[h], entry);
	map->sz++;

	return 0;
}

bool hashmap_find(const struct hashmap *map, long key, long *value)
{
	struct hashmap_entry *entry;
	size_t h;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (!hashmap_find_entry(map, key, h, NULL, &entry))
		return false;

	if (value)
		*value = entry->value;
	return true;
}

bool hashmap_delete(struct hashmap *map, long key,
		    long *old_key, long *old_value)
{
	struct hashmap_entry **pprev, *entry;
	size_t h;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (!hashmap_find_entry(map, key, h, &pprev, &entry))
		return false;

	if (old_key)
		*old_key = entry->key;
	if (old_value)
		*old_value = entry->value;

	hashmap_del_entry(pprev, entry);
	free(entry);
	map->sz--;

	return true;
}