wireguard: peer: allocate in kmem_cache
With deployments having upwards of 600k peers now, this somewhat heavy
structure could benefit from more fine-grained allocations.
Specifically, instead of using a 2048-byte slab for a 1544-byte object,
we can now use 1544-byte objects directly, thus saving almost 25%
per-peer, or with 600k peers, that's a savings of 303 MiB. This also
makes wireguard's memory usage more transparent in tools like slabtop
and /proc/slabinfo.
Fixes: 8b5553ace8 ("wireguard: queueing: get rid of per-peer ring buffers")
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Cc: stable@vger.kernel.org
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a4e9f8e328 (parent 24b70eeeb4)
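For readers unfamiliar with the pattern, here is a minimal sketch of what the patch does: a dedicated slab cache is created once at module init, every object is allocated from and returned to that cache, and the cache is destroyed at module exit. The struct example_obj and example_* names below are hypothetical stand-ins for struct wg_peer and the wg_peer_* helpers shown in the diff; this is an illustration of the technique, not the patch itself.

/* Minimal sketch of the kmem_cache pattern this patch adopts.
 * "example_obj" is a hypothetical stand-in for struct wg_peer.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_obj {
	u64 id;
	u8 key[32];
};

static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	/* KMEM_CACHE() sizes the slab to sizeof(struct example_obj), so
	 * allocations are not rounded up to the next kmalloc bucket. */
	example_cache = KMEM_CACHE(example_obj, 0);
	return example_cache ? 0 : -ENOMEM;
}

static struct example_obj *example_obj_create(void)
{
	/* Zeroed allocation from the dedicated cache, the kzalloc() analogue. */
	return kmem_cache_zalloc(example_cache, GFP_KERNEL);
}

static void example_obj_destroy(struct example_obj *obj)
{
	/* Scrub potentially sensitive contents, then return the object to its
	 * cache; kfree_sensitive() is only valid for kmalloc()-backed memory. */
	memzero_explicit(obj, sizeof(*obj));
	kmem_cache_free(example_cache, obj);
}

static void example_cache_uninit(void)
{
	kmem_cache_destroy(example_cache);
}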
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -28,6 +28,10 @@ static int __init mod_init(void)
 #endif
 	wg_noise_init();
 
+	ret = wg_peer_init();
+	if (ret < 0)
+		goto err_peer;
+
 	ret = wg_device_init();
 	if (ret < 0)
 		goto err_device;
@@ -44,6 +48,8 @@ static int __init mod_init(void)
 err_netlink:
 	wg_device_uninit();
 err_device:
+	wg_peer_uninit();
+err_peer:
 	return ret;
 }
 
@@ -51,6 +57,7 @@ static void __exit mod_exit(void)
 {
 	wg_genetlink_uninit();
 	wg_device_uninit();
+	wg_peer_uninit();
 }
 
 module_init(mod_init);
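Design note: wg_peer_init() runs before wg_device_init() so the peer cache exists before any device, and hence any peer, can be created; the new err_peer label slots into the existing unwind chain so a failed init tears everything down in reverse order.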
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 
+static struct kmem_cache *peer_cache;
 static atomic64_t peer_counter = ATOMIC64_INIT(0);
 
 struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
 	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
 		return ERR_PTR(ret);
 
-	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+	peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
 	if (unlikely(!peer))
 		return ERR_PTR(ret);
-	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+	if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
 		goto err;
 
 	peer->device = wg;
@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
 	return peer;
 
 err:
-	kfree(peer);
+	kmem_cache_free(peer_cache, peer);
 	return ERR_PTR(ret);
 }
 
@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
 	/* The final zeroing takes care of clearing any remaining handshake key
 	 * material and other potentially sensitive information.
 	 */
-	kfree_sensitive(peer);
+	memzero_explicit(peer, sizeof(*peer));
+	kmem_cache_free(peer_cache, peer);
 }
 
 static void kref_release(struct kref *refcount)
@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
 		return;
 	kref_put(&peer->refcount, kref_release);
 }
+
+int __init wg_peer_init(void)
+{
+	peer_cache = KMEM_CACHE(wg_peer, 0);
+	return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+	kmem_cache_destroy(peer_cache);
+}
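Design note: kfree_sensitive() only works for kmalloc()-backed memory, so rcu_release() now scrubs the peer with memzero_explicit() before handing it back to the cache with kmem_cache_free(), preserving the original guarantee that handshake key material never lingers after a peer is freed.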
--- a/drivers/net/wireguard/peer.h
+++ b/drivers/net/wireguard/peer.h
@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
 void wg_peer_remove(struct wg_peer *peer);
 void wg_peer_remove_all(struct wg_device *wg);
 
+int wg_peer_init(void);
+void wg_peer_uninit(void);
+
 #endif /* _WG_PEER_H */