mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
c846f732b9
This patch moves the xarray lookup of the lkb out of the ls_lkbxa_lock read-lock section. We can do that because the xarray can be read locklessly by readers such as xa_load(). After the lookup we confirm under ls_lkbxa_lock that the lkb is still part of the data structure, and only then take a reference, so the lkb cannot be freed out from under us after the lookup. To check whether the lkb is still part of ls_lkbxa we use kref_read(): the last put removes the lkb from ls_lkbxa, so any remaining reference means it is still part of the data structure. A similar approach was taken for the DLM rsb rhashtable, just with a flag instead of the refcounter, because the rsb refcounter has a slightly different meaning. Signed-off-by: Alexander Aring <aahringo@redhat.com> Signed-off-by: David Teigland <teigland@redhat.com>
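For reference, a minimal sketch of the lookup pattern this message describes, modeled on a find_lkb()-style helper. The names ls_lkbxa and ls_lkbxa_lock come from the message itself; the helper name, the lkb_ref refcount field, and the rwlock/bh locking style are illustrative assumptions, not necessarily the final code:

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	rcu_read_lock();
	/* lockless lookup; safe because lkb frees are deferred by call_rcu() */
	lkb = xa_load(&ls->ls_lkbxa, lkid);
	if (lkb) {
		/* recheck under ls_lkbxa_lock: a nonzero refcount means the
		 * last put has not happened yet, so the lkb is still part of
		 * ls_lkbxa and it is safe to take a reference
		 */
		read_lock_bh(&ls->ls_lkbxa_lock);
		if (kref_read(&lkb->lkb_ref))
			kref_get(&lkb->lkb_ref);
		else
			lkb = NULL;
		read_unlock_bh(&ls->ls_lkbxa_lock);
	}
	rcu_read_unlock();

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}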
178 lines
3.8 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "midcomms.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "ast.h"

static struct kmem_cache *writequeue_cache;
static struct kmem_cache *mhandle_cache;
static struct kmem_cache *msg_cache;
static struct kmem_cache *lkb_cache;
static struct kmem_cache *rsb_cache;
static struct kmem_cache *cb_cache;

int __init dlm_memory_init(void)
{
	writequeue_cache = dlm_lowcomms_writequeue_cache_create();
	if (!writequeue_cache)
		goto out;

	mhandle_cache = dlm_midcomms_cache_create();
	if (!mhandle_cache)
		goto mhandle;

	lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
				      __alignof__(struct dlm_lkb), 0, NULL);
	if (!lkb_cache)
		goto lkb;

	msg_cache = dlm_lowcomms_msg_cache_create();
	if (!msg_cache)
		goto msg;

	rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb),
				      __alignof__(struct dlm_rsb), 0, NULL);
	if (!rsb_cache)
		goto rsb;

	cb_cache = kmem_cache_create("dlm_cb", sizeof(struct dlm_callback),
				     __alignof__(struct dlm_callback), 0,
				     NULL);
	if (!cb_cache)
		goto cb;

	return 0;

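/* error unwind: each label destroys the caches that were successfully
 * created before the allocation that failed
 */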
cb:
	kmem_cache_destroy(rsb_cache);
rsb:
	kmem_cache_destroy(msg_cache);
msg:
	kmem_cache_destroy(lkb_cache);
lkb:
	kmem_cache_destroy(mhandle_cache);
mhandle:
	kmem_cache_destroy(writequeue_cache);
out:
	return -ENOMEM;
}

void dlm_memory_exit(void)
{
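	/* wait for all outstanding __free_rsb_rcu()/__free_lkb_rcu()
	 * callbacks queued via call_rcu() to complete before the
	 * caches they free into are destroyed
	 */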
	rcu_barrier();

	kmem_cache_destroy(writequeue_cache);
	kmem_cache_destroy(mhandle_cache);
	kmem_cache_destroy(msg_cache);
	kmem_cache_destroy(lkb_cache);
	kmem_cache_destroy(rsb_cache);
	kmem_cache_destroy(cb_cache);
}

char *dlm_allocate_lvb(struct dlm_ls *ls)
{
	return kzalloc(ls->ls_lvblen, GFP_ATOMIC);
}

void dlm_free_lvb(char *p)
{
	kfree(p);
}

struct dlm_rsb *dlm_allocate_rsb(void)
{
	return kmem_cache_zalloc(rsb_cache, GFP_ATOMIC);
}

static void __free_rsb_rcu(struct rcu_head *rcu)
{
	struct dlm_rsb *r = container_of(rcu, struct dlm_rsb, rcu);

	if (r->res_lvbptr)
		dlm_free_lvb(r->res_lvbptr);
	kmem_cache_free(rsb_cache, r);
}

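/* the free is deferred by an RCU grace period so that lockless readers
 * still walking the rsb rhashtable under rcu_read_lock() cannot see the
 * rsb memory disappear underneath them
 */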
void dlm_free_rsb(struct dlm_rsb *r)
{
	call_rcu(&r->rcu, __free_rsb_rcu);
}

struct dlm_lkb *dlm_allocate_lkb(void)
{
	return kmem_cache_zalloc(lkb_cache, GFP_ATOMIC);
}

static void __free_lkb_rcu(struct rcu_head *rcu)
{
	struct dlm_lkb *lkb = container_of(rcu, struct dlm_lkb, rcu);

	if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
		struct dlm_user_args *ua;

		ua = lkb->lkb_ua;
		if (ua) {
			kfree(ua->lksb.sb_lvbptr);
			kfree(ua);
		}
	}

	kmem_cache_free(lkb_cache, lkb);
}

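/* deferred by an RCU grace period so that the lockless xa_load()
 * lookup described in the commit message above can never hit
 * already-freed lkb memory
 */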
void dlm_free_lkb(struct dlm_lkb *lkb)
{
	call_rcu(&lkb->rcu, __free_lkb_rcu);
}

struct dlm_mhandle *dlm_allocate_mhandle(void)
{
	return kmem_cache_alloc(mhandle_cache, GFP_ATOMIC);
}

void dlm_free_mhandle(struct dlm_mhandle *mhandle)
{
	kmem_cache_free(mhandle_cache, mhandle);
}

struct writequeue_entry *dlm_allocate_writequeue(void)
{
	return kmem_cache_alloc(writequeue_cache, GFP_ATOMIC);
}

void dlm_free_writequeue(struct writequeue_entry *writequeue)
{
	kmem_cache_free(writequeue_cache, writequeue);
}

struct dlm_msg *dlm_allocate_msg(void)
{
	return kmem_cache_alloc(msg_cache, GFP_ATOMIC);
}

void dlm_free_msg(struct dlm_msg *msg)
{
	kmem_cache_free(msg_cache, msg);
}

struct dlm_callback *dlm_allocate_cb(void)
{
	return kmem_cache_alloc(cb_cache, GFP_ATOMIC);
}

void dlm_free_cb(struct dlm_callback *cb)
{
	kmem_cache_free(cb_cache, cb);
}