gfs2: Faster gfs2_upgrade_iopen_glock wakeups

Move function needs_demote() to glock.h and rename it to
glock_needs_demote().  In request_demote(), wake up the glock when
setting the GLF_PENDING_DEMOTE flag as well.  (Setting the GLF_DEMOTE
flag already triggered a wake-up.)
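
For reference, the waker side after this change looks roughly as follows
(a sketch condensed from the glock.c hunks below, with the surrounding
code omitted):

/* Set the given demote flag and wake up waiters on sd_async_glock_wait. */
static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	set_bit(nr, &gl->gl_flags);
	smp_mb();	/* make the flag visible before waking up waiters */
	wake_up(&sdp->sd_async_glock_wait);
}

/* request_demote() now wakes up waiters in the delayed case as well: */
gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);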

With that, use glock_needs_demote() in gfs2_upgrade_iopen_glock() so
that we wake up when either of those flags is set on the inode glock:
the faster we can react to contention, the better.
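
Concretely, the helper and the adjusted wait condition look roughly like
this (condensed from the glock.h and super.c hunks below):

static inline bool glock_needs_demote(struct gfs2_glock *gl)
{
	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
}

/* in gfs2_upgrade_iopen_glock(): stop waiting as soon as either flag is set */
wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
	!test_bit(HIF_WAIT, &gh->gh_iflags) ||
	glock_needs_demote(ip->i_gl),
	5 * HZ);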

The GLF_PENDING_DEMOTE flag is only used for inode glocks (see
gfs2_glock_cb()) so it's okay to only check for the GLF_DEMOTE flag in
gfs2_drop_inode().  Still, using glock_needs_demote() there as well
makes the code a little easier to read.
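
For illustration, the resulting check in gfs2_drop_inode(), again
condensed from the super.c hunk below:

if (inode->i_nlink &&
    gfs2_holder_initialized(&ip->i_iopen_gh)) {
	struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

	/* Demote requested on the iopen glock: treat the inode as
	 * unlinked so that it gets evicted instead of staying cached. */
	if (glock_needs_demote(gl))
		clear_nlink(inode);
}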

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Author: Andreas Gruenbacher <agruenba@redhat.com>
Date:   2024-09-09 23:38:59 +02:00
parent f9417fcfca
commit ee51baa817
3 changed files with 15 additions and 18 deletions

--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c

@@ -563,11 +563,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 	gl->gl_tchange = jiffies;
 }
 
-static void gfs2_set_demote(struct gfs2_glock *gl)
+static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
-	set_bit(GLF_DEMOTE, &gl->gl_flags);
+	set_bit(nr, &gl->gl_flags);
 	smp_mb();
 	wake_up(&sdp->sd_async_glock_wait);
 }
@@ -1101,7 +1101,7 @@ static void glock_work_func(struct work_struct *work)
 
 		if (!delay) {
 			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-			gfs2_set_demote(gl);
+			gfs2_set_demote(GLF_DEMOTE, gl);
 		}
 	}
 	run_queue(gl, 0);
@@ -1443,10 +1443,7 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
 static void request_demote(struct gfs2_glock *gl, unsigned int state,
 			   unsigned long delay, bool remote)
 {
-	if (delay)
-		set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
-	else
-		gfs2_set_demote(gl);
+	gfs2_set_demote(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, gl);
 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
 		gl->gl_demote_state = state;
 		gl->gl_demote_time = jiffies;
@@ -1636,12 +1633,6 @@ int gfs2_glock_poll(struct gfs2_holder *gh)
 	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
 }
 
-static inline bool needs_demote(struct gfs2_glock *gl)
-{
-	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
-		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
-}
-
 static void __gfs2_glock_dq(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
@@ -1650,8 +1641,8 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
 
 	/*
 	 * This holder should not be cached, so mark it for demote.
-	 * Note: this should be done before the check for needs_demote
-	 * below.
+	 * Note: this should be done before the glock_needs_demote
+	 * check below.
 	 */
 	if (gh->gh_flags & GL_NOCACHE)
 		request_demote(gl, LM_ST_UNLOCKED, 0, false);
@@ -1664,7 +1655,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
 	 * If there hasn't been a demote request we are done.
 	 * (Let the remaining holders, if any, keep holding it.)
 	 */
-	if (!needs_demote(gl)) {
+	if (!glock_needs_demote(gl)) {
 		if (list_empty(&gl->gl_holders))
 			fast_path = 1;
 	}

--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h

@@ -285,4 +285,10 @@ static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
 
+static inline bool glock_needs_demote(struct gfs2_glock *gl)
+{
+	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
+		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
+}
+
 #endif /* __GLOCK_DOT_H__ */

--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c

@@ -1030,7 +1030,7 @@ static int gfs2_drop_inode(struct inode *inode)
 	if (inode->i_nlink &&
 	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
-		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+		if (glock_needs_demote(gl))
 			clear_nlink(inode);
 	}
 
@@ -1294,7 +1294,7 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
 
 	wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
 		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
-		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
+		glock_needs_demote(ip->i_gl),
 		5 * HZ);
 	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
 		gfs2_glock_dq(gh);