Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-03 19:55:31 +00:00)
gfs2 fixes
Merge tag 'gfs2-v6.5-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Fix a glock state (non-)transition bug when a dlm request times out
   and is canceled, and we have locking requests that can now be granted
   immediately

 - Various fixes and cleanups in how the logd and quotad daemons are
   woken up and terminated

 - Fix several bugs in the quota data reference counting and shrinking.
   Free quota data objects synchronously in put_super() instead of
   letting call_rcu() run wild

 - Make sure not to deallocate quota data during a withdraw; rather,
   defer quota data deallocation to put_super(). Withdraws can happen in
   contexts in which callers on the stack are holding quota data
   references

 - Many minor quota fixes and cleanups by Bob

 - Update the mailing list address for gfs2 and dlm. (It's the same list
   for both and we are moving it to gfs2@lists.linux.dev)

 - Various other minor cleanups

* tag 'gfs2-v6.5-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (51 commits)
  MAINTAINERS: Update dlm mailing list
  MAINTAINERS: Update gfs2 mailing list
  gfs2: change qd_slot_count to qd_slot_ref
  gfs2: check for no eligible quota changes
  gfs2: Remove useless assignment
  gfs2: simplify slot_get
  gfs2: Simplify qd2offset
  gfs2: introduce qd_bh_get_or_undo
  gfs2: Remove quota allocation info from quota file
  gfs2: use constant for array size
  gfs2: Set qd_sync_gen in do_sync
  gfs2: Remove useless err set
  gfs2: Small gfs2_quota_lock cleanup
  gfs2: move qdsb_put and reduce redundancy
  gfs2: improvements to sysfs status
  gfs2: Don't try to sync non-changes
  gfs2: Simplify function need_sync
  gfs2: remove unneeded pg_oflow variable
  gfs2: remove unneeded variable done
  gfs2: pass sdp to gfs2_write_buf_to_page
  ...
This commit is contained in commit 65d6e954e3.
@@ -20,8 +20,7 @@ The gl_holders list contains all the queued lock requests (not
 just the holders) associated with the glock. If there are any
 held locks, then they will be contiguous entries at the head
 of the list. Locks are granted in strictly the order that they
-are queued, except for those marked LM_FLAG_PRIORITY which are
-used only during recovery, and even then only for journal locks.
+are queued.
 
 There are three lock states that users of the glock layer can request,
 namely shared (SH), deferred (DF) and exclusive (EX). Those translate
@@ -6118,7 +6118,7 @@ F: include/video/udlfb.h
 DISTRIBUTED LOCK MANAGER (DLM)
 M:	Christine Caulfield <ccaulfie@redhat.com>
 M:	David Teigland <teigland@redhat.com>
-L:	cluster-devel@redhat.com
+L:	gfs2@lists.linux.dev
 S:	Supported
 W:	http://sources.redhat.com/cluster/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm.git
@@ -8774,7 +8774,7 @@ F: scripts/get_maintainer.pl
 GFS2 FILE SYSTEM
 M:	Bob Peterson <rpeterso@redhat.com>
 M:	Andreas Gruenbacher <agruenba@redhat.com>
-L:	cluster-devel@redhat.com
+L:	gfs2@lists.linux.dev
 S:	Supported
 B:	https://bugzilla.kernel.org/enter_bug.cgi?product=File%20System&component=gfs2
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
@@ -183,13 +183,13 @@ static int gfs2_writepages(struct address_space *mapping,
 	int ret;
 
 	/*
-	 * Even if we didn't write any pages here, we might still be holding
+	 * Even if we didn't write enough pages here, we might still be holding
 	 * dirty pages in the ail. We forcibly flush the ail because we don't
 	 * want balance_dirty_pages() to loop indefinitely trying to write out
 	 * pages held in the ail that it can't find.
 	 */
 	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
-	if (ret == 0)
+	if (ret == 0 && wbc->nr_to_write > 0)
 		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
 	return ret;
 }
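The added `wbc->nr_to_write > 0` test distinguishes "nothing was dirty" from "ran out of writeback budget": only when iomap_writepages() wrote nothing while budget remained can dirty pages be stuck in the ail, so only then is a forced ail flush requested. A condensed sketch of the resulting flag handshake between the writer and gfs2_logd, assembled from the hunks in this series with surrounding detail elided:

	/* gfs2_writepages(): writeback made no progress with budget left,
	 * so ask logd for a forced ail flush. */
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	/* gfs2_logd(): the flag is consumed as a one-shot request. */
	if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
	    gfs2_ail_flush_reqd(sdp)) {
		clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
		gfs2_ail1_start(sdp);
		gfs2_ail1_wait(sdp);
		gfs2_ail1_empty(sdp, 0);
	}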
@@ -272,8 +272,7 @@ static int gfs2_write_jdata_batch(struct address_space *mapping,
 				 * not be suitable for data integrity
 				 * writeout).
 				 */
-				*done_index = folio->index +
-					folio_nr_pages(folio);
+				*done_index = folio_next_index(folio);
 				ret = 1;
 				break;
 			}
@@ -161,7 +161,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip)
 	int error;
 
 	down_write(&ip->i_rw_mutex);
-	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+	page = grab_cache_page(inode->i_mapping, 0);
 	error = -ENOMEM;
 	if (!page)
 		goto out;
@@ -176,7 +176,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
 	wake_up_glock(gl);
 	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
-		wake_up(&sdp->sd_glock_wait);
+		wake_up(&sdp->sd_kill_wait);
 }
 
 /**
@@ -468,10 +468,10 @@ int gfs2_instantiate(struct gfs2_holder *gh)
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  *
- * Returns: 1 if there is a blocked holder at the head of the list
+ * Returns true on success (i.e., progress was made or there are no waiters).
  */
 
-static int do_promote(struct gfs2_glock *gl)
+static bool do_promote(struct gfs2_glock *gl)
 {
 	struct gfs2_holder *gh, *current_gh;
 
@@ -484,10 +484,10 @@ static int do_promote(struct gfs2_glock *gl)
 			 * If we get here, it means we may not grant this
 			 * holder for some reason. If this holder is at the
 			 * head of the list, it means we have a blocked holder
-			 * at the head, so return 1.
+			 * at the head, so return false.
 			 */
 			if (list_is_first(&gh->gh_list, &gl->gl_holders))
-				return 1;
+				return false;
 			do_error(gl, 0);
 			break;
 		}
@@ -497,7 +497,7 @@ static int do_promote(struct gfs2_glock *gl)
 		if (!current_gh)
 			current_gh = gh;
 	}
-	return 0;
+	return true;
 }
 
 /**
@@ -591,10 +591,11 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
 			/* move to back of queue and try next entry */
 			if (ret & LM_OUT_CANCELED) {
-				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
-					list_move_tail(&gh->gh_list, &gl->gl_holders);
+				list_move_tail(&gh->gh_list, &gl->gl_holders);
 				gh = find_first_waiter(gl);
 				gl->gl_target = gh->gh_state;
+				if (do_promote(gl))
+					goto out;
 				goto retry;
 			}
 			/* Some error or failed "try lock" - report it */
@@ -679,8 +680,7 @@ __acquires(&gl->gl_lockref.lock)
 	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
 		goto skip_inval;
 
-	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
-		      LM_FLAG_PRIORITY);
+	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
 	GLOCK_BUG_ON(gl, gl->gl_state == target);
 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
@@ -834,7 +834,7 @@ __acquires(&gl->gl_lockref.lock)
 	} else {
 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
 			gfs2_demote_wake(gl);
-		if (do_promote(gl) == 0)
+		if (do_promote(gl))
 			goto out_unlock;
 		gh = find_first_waiter(gl);
 		gl->gl_target = gh->gh_state;
@@ -1022,7 +1022,7 @@ static void delete_work_func(struct work_struct *work)
 		 * step entirely.
 		 */
 		if (gfs2_try_evict(gl)) {
-			if (test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+			if (test_bit(SDF_KILL, &sdp->sd_flags))
 				goto out;
 			if (gfs2_queue_verify_evict(gl))
 				return;
@@ -1035,7 +1035,7 @@ static void delete_work_func(struct work_struct *work)
 					    GFS2_BLKST_UNLINKED);
 		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -EAGAIN &&
-			    !test_bit(SDF_DEACTIVATING, &sdp->sd_flags) &&
+			    !test_bit(SDF_KILL, &sdp->sd_flags) &&
 			    gfs2_queue_verify_evict(gl))
 				return;
 		} else {
@@ -1231,7 +1231,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 out_free:
 	gfs2_glock_dealloc(&gl->gl_rcu);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
-		wake_up(&sdp->sd_glock_wait);
+		wake_up(&sdp->sd_kill_wait);
 
 out:
 	return ret;
@@ -1515,27 +1515,20 @@ __acquires(&gl->gl_lockref.lock)
 		}
 		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
 			continue;
-		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
-			insert_pt = &gh2->gh_list;
 	}
 	trace_gfs2_glock_queue(gh, 1);
 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
 	if (likely(insert_pt == NULL)) {
 		list_add_tail(&gh->gh_list, &gl->gl_holders);
-		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
-			goto do_cancel;
 		return;
 	}
 	list_add_tail(&gh->gh_list, insert_pt);
-do_cancel:
 	gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
-	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
-		spin_unlock(&gl->gl_lockref.lock);
-		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
-			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
-		spin_lock(&gl->gl_lockref.lock);
-	}
+	spin_unlock(&gl->gl_lockref.lock);
+	if (sdp->sd_lockstruct.ls_ops->lm_cancel)
+		sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
+	spin_lock(&gl->gl_lockref.lock);
 	return;
 
 trap_recursive:
@@ -2195,7 +2188,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 	flush_workqueue(glock_workqueue);
 	glock_hash_walk(clear_glock, sdp);
 	flush_workqueue(glock_workqueue);
-	wait_event_timeout(sdp->sd_glock_wait,
+	wait_event_timeout(sdp->sd_kill_wait,
 			   atomic_read(&sdp->sd_glock_disposal) == 0,
 			   HZ * 600);
 	glock_hash_walk(dump_glock_func, sdp);
@@ -2227,8 +2220,6 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
 		*p++ = 'e';
 	if (flags & LM_FLAG_ANY)
 		*p++ = 'A';
-	if (flags & LM_FLAG_PRIORITY)
-		*p++ = 'p';
 	if (flags & LM_FLAG_NODE_SCOPE)
 		*p++ = 'n';
 	if (flags & GL_ASYNC)
@@ -68,14 +68,6 @@ enum {
  * also be granted in SHARED. The preferred state is whichever is compatible
  * with other granted locks, or the specified state if no other locks exist.
  *
- * LM_FLAG_PRIORITY
- * Override fairness considerations. Suppose a lock is held in a shared state
- * and there is a pending request for the deferred state. A shared lock
- * request with the priority flag would be allowed to bypass the deferred
- * request and directly join the other shared lock. A shared lock request
- * without the priority flag might be forced to wait until the deferred
- * requested had acquired and released the lock.
- *
  * LM_FLAG_NODE_SCOPE
  * This holder agrees to share the lock within this node. In other words,
  * the glock is held in EX mode according to DLM, but local holders on the
@@ -86,7 +78,6 @@ enum {
 #define LM_FLAG_TRY_1CB		0x0002
 #define LM_FLAG_NOEXP		0x0004
 #define LM_FLAG_ANY		0x0008
-#define LM_FLAG_PRIORITY	0x0010
 #define LM_FLAG_NODE_SCOPE	0x0020
 #define GL_ASYNC		0x0040
 #define GL_EXACT		0x0080
@@ -637,7 +637,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
 	if (!remote || sb_rdonly(sdp->sd_vfs) ||
-	    test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+	    test_bit(SDF_KILL, &sdp->sd_flags))
 		return;
 
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
@@ -452,7 +452,7 @@ struct gfs2_quota_data {
 	s64 qd_change_sync;
 
 	unsigned int qd_slot;
-	unsigned int qd_slot_count;
+	unsigned int qd_slot_ref;
 
 	struct buffer_head *qd_bh;
 	struct gfs2_quota_change *qd_bh_qc;
@@ -537,6 +537,7 @@ struct gfs2_statfs_change_host {
 #define GFS2_QUOTA_OFF		0
 #define GFS2_QUOTA_ACCOUNT	1
 #define GFS2_QUOTA_ON		2
+#define GFS2_QUOTA_QUIET	3 /* on but not complaining */
 
 #define GFS2_DATA_DEFAULT	GFS2_DATA_ORDERED
 #define GFS2_DATA_WRITEBACK	1
@@ -606,7 +607,7 @@ enum {
 	SDF_REMOTE_WITHDRAW	= 13, /* Performing remote recovery */
 	SDF_WITHDRAW_RECOVERY	= 14, /* Wait for journal recovery when we are
 					 withdrawing */
-	SDF_DEACTIVATING	= 15,
+	SDF_KILL		= 15,
 	SDF_EVICTING		= 16,
 	SDF_FROZEN		= 17,
 };
@@ -716,7 +717,7 @@ struct gfs2_sbd {
 	struct gfs2_glock *sd_rename_gl;
 	struct gfs2_glock *sd_freeze_gl;
 	struct work_struct sd_freeze_work;
-	wait_queue_head_t sd_glock_wait;
+	wait_queue_head_t sd_kill_wait;
 	wait_queue_head_t sd_async_glock_wait;
 	atomic_t sd_glock_disposal;
 	struct completion sd_locking_init;
@@ -276,10 +276,16 @@ struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
 	 * gfs2_lookup_simple callers expect ENOENT
 	 * and do not check for NULL.
 	 */
-	if (inode == NULL)
-		return ERR_PTR(-ENOENT);
-	else
-		return inode;
+	if (IS_ERR_OR_NULL(inode))
+		return inode ? inode : ERR_PTR(-ENOENT);
+
+	/*
+	 * Must not call back into the filesystem when allocating
+	 * pages in the metadata inode's address space.
+	 */
+	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+
+	return inode;
 }
 
 
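The mapping_set_gfp_mask() call is what makes the grab_cache_page() conversions elsewhere in this series safe: grab_cache_page() takes no GFP argument because it uses the mapping's own mask. A sketch of the long-standing <linux/pagemap.h> definition this relies on (quoted from memory, so treat as illustrative rather than authoritative):

static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

With GFP_NOFS set on the metadata inode's mapping, dropping the explicit GFP_NOFS argument at the call sites does not change allocation behavior.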
@@ -222,11 +222,6 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
 			lkf |= DLM_LKF_NOQUEUEBAST;
 	}
 
-	if (gfs_flags & LM_FLAG_PRIORITY) {
-		lkf |= DLM_LKF_NOORDER;
-		lkf |= DLM_LKF_HEADQUE;
-	}
-
 	if (gfs_flags & LM_FLAG_ANY) {
 		if (req == DLM_LOCK_PR)
 			lkf |= DLM_LKF_ALTCW;
@@ -1227,6 +1227,21 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	gfs2_log_unlock(sdp);
 }
 
+static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
+{
+	return atomic_read(&sdp->sd_log_pinned) +
+	       atomic_read(&sdp->sd_log_blks_needed) >=
+	       atomic_read(&sdp->sd_log_thresh1);
+}
+
+static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
+{
+	return sdp->sd_jdesc->jd_blocks -
+	       atomic_read(&sdp->sd_log_blks_free) +
+	       atomic_read(&sdp->sd_log_blks_needed) >=
+	       atomic_read(&sdp->sd_log_thresh2);
+}
+
 /**
  * gfs2_log_commit - Commit a transaction to the log
  * @sdp: the filesystem
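Both helpers compare a demand figure against a tunable threshold. With hypothetical numbers (not taken from the patch): thresh1 = 1000, 600 pinned buffers and 500 reserved blocks give 600 + 500 >= 1000, so a journal flush is requested; thresh2 = 4096 with an 8192-block journal and 2000 free blocks gives 8192 - 2000 + 500 = 6692 >= 4096, so an ail flush is requested. As a standalone arithmetic check:

	/* Illustrative values only; names mirror the fields used above. */
	int sd_log_pinned = 600, sd_log_blks_needed = 500, sd_log_thresh1 = 1000;
	int jd_blocks = 8192, sd_log_blks_free = 2000, sd_log_thresh2 = 4096;

	int jrnl_flush = sd_log_pinned + sd_log_blks_needed >= sd_log_thresh1;
			/* 1100 >= 1000 -> true */
	int ail_flush = jd_blocks - sd_log_blks_free + sd_log_blks_needed >=
			sd_log_thresh2;
			/* 6692 >= 4096 -> true */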
@@ -1246,9 +1261,7 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
 	log_refund(sdp, tr);
 
-	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
-	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
-	    atomic_read(&sdp->sd_log_thresh2)))
+	if (gfs2_ail_flush_reqd(sdp) || gfs2_jrnl_flush_reqd(sdp))
 		wake_up(&sdp->sd_logd_waitq);
 }
 
@@ -1271,24 +1284,6 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
 	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
 }
 
-static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
-{
-	return (atomic_read(&sdp->sd_log_pinned) +
-		atomic_read(&sdp->sd_log_blks_needed) >=
-		atomic_read(&sdp->sd_log_thresh1));
-}
-
-static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
-{
-	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
-
-	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
-		return 1;
-
-	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
-		atomic_read(&sdp->sd_log_thresh2);
-}
-
 /**
  * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
  * @data: Pointer to GFS2 superblock
@@ -1301,14 +1296,11 @@ int gfs2_logd(void *data)
 {
 	struct gfs2_sbd *sdp = data;
 	unsigned long t = 1;
-	DEFINE_WAIT(wait);
 
 	while (!kthread_should_stop()) {
-		if (gfs2_withdrawn(sdp)) {
-			msleep_interruptible(HZ);
-			continue;
-		}
+		if (gfs2_withdrawn(sdp))
+			break;
 
 		/* Check for errors writing to the journal */
 		if (sdp->sd_log_error) {
 			gfs2_lm(sdp,
@@ -1317,7 +1309,7 @@ int gfs2_logd(void *data)
 				"prevent further damage.\n",
 				sdp->sd_fsname, sdp->sd_log_error);
 			gfs2_withdraw(sdp);
-			continue;
+			break;
 		}
 
 		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
@@ -1326,7 +1318,9 @@ int gfs2_logd(void *data)
 					      GFS2_LFC_LOGD_JFLUSH_REQD);
 		}
 
-		if (gfs2_ail_flush_reqd(sdp)) {
+		if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
+		    gfs2_ail_flush_reqd(sdp)) {
+			clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
 			gfs2_ail1_start(sdp);
 			gfs2_ail1_wait(sdp);
 			gfs2_ail1_empty(sdp, 0);
@@ -1338,17 +1332,14 @@ int gfs2_logd(void *data)
 
 		try_to_freeze();
 
-		do {
-			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
-					TASK_INTERRUPTIBLE);
-			if (!gfs2_ail_flush_reqd(sdp) &&
-			    !gfs2_jrnl_flush_reqd(sdp) &&
-			    !kthread_should_stop())
-				t = schedule_timeout(t);
-		} while(t && !gfs2_ail_flush_reqd(sdp) &&
-			!gfs2_jrnl_flush_reqd(sdp) &&
-			!kthread_should_stop());
-		finish_wait(&sdp->sd_logd_waitq, &wait);
+		t = wait_event_interruptible_timeout(sdp->sd_logd_waitq,
+				test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
+				gfs2_ail_flush_reqd(sdp) ||
+				gfs2_jrnl_flush_reqd(sdp) ||
+				sdp->sd_log_error ||
+				gfs2_withdrawn(sdp) ||
+				kthread_should_stop(),
+				t);
 	}
 
 	return 0;
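The open-coded prepare_to_wait()/schedule_timeout() loop is replaced by wait_event_interruptible_timeout(), which re-checks the condition race-free around each sleep and returns the remaining timeout (0 on expiry, negative on a pending signal). The general shape of the idiom, as a kernel-style sketch with a hypothetical example_work_pending() condition:

#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static bool example_work_pending(void);	/* hypothetical wake-up condition */

static int example_daemon(void *data)
{
	long t = HZ;	/* sleep up to one second per iteration */

	while (!kthread_should_stop()) {
		/* ... do the periodic work ... */

		/* Sleeps until woken with the condition true, the timeout
		 * expires (t == 0), or a signal is pending (t < 0). The
		 * condition is re-evaluated after every wake-up, so a
		 * wake-up between the test and the sleep cannot be lost. */
		t = wait_event_interruptible_timeout(example_waitq,
				example_work_pending() ||
				kthread_should_stop(),
				t);
		if (t <= 0)
			t = HZ;
	}
	return 0;
}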
@@ -456,7 +456,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
  * Find the folio with 'index' in the journal's mapping. Search the folio for
  * the journal head if requested (cleanup == false). Release refs on the
  * folio so the page cache can reclaim it. We grabbed a
- * reference on this folio twice, first when we did a find_or_create_page()
+ * reference on this folio twice, first when we did a grab_cache_page()
  * to obtain the folio to add it to the bio and second when we do a
  * filemap_get_folio() here to get the folio to wait on while I/O on it is being
  * completed.
@@ -481,7 +481,7 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
 	if (!*done)
 		*done = gfs2_jhead_pg_srch(jd, head, &folio->page);
 
-	/* filemap_get_folio() and the earlier find_or_create_page() */
+	/* filemap_get_folio() and the earlier grab_cache_page() */
 	folio_put_refs(folio, 2);
 }
 
@@ -535,8 +535,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
 
 	for (; block < je->lblock + je->blocks; block++, dblock++) {
 		if (!page) {
-			page = find_or_create_page(mapping,
-					block >> shift, GFP_NOFS);
+			page = grab_cache_page(mapping, block >> shift);
 			if (!page) {
 				ret = -ENOMEM;
 				done = true;
@@ -152,9 +152,9 @@ static int __init init_gfs2_fs(void)
 		goto fail_shrinker;
 
 	error = -ENOMEM;
-	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
+	gfs2_recovery_wq = alloc_workqueue("gfs2_recovery",
 					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
-	if (!gfs_recovery_wq)
+	if (!gfs2_recovery_wq)
 		goto fail_wq1;
 
 	gfs2_control_wq = alloc_workqueue("gfs2_control",
@@ -162,7 +162,7 @@ static int __init init_gfs2_fs(void)
 	if (!gfs2_control_wq)
 		goto fail_wq2;
 
-	gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0);
+	gfs2_freeze_wq = alloc_workqueue("gfs2_freeze", 0, 0);
 
 	if (!gfs2_freeze_wq)
 		goto fail_wq3;
@@ -194,7 +194,7 @@ static int __init init_gfs2_fs(void)
 fail_wq3:
 	destroy_workqueue(gfs2_control_wq);
 fail_wq2:
-	destroy_workqueue(gfs_recovery_wq);
+	destroy_workqueue(gfs2_recovery_wq);
 fail_wq1:
 	unregister_shrinker(&gfs2_qd_shrinker);
 fail_shrinker:
@@ -234,7 +234,7 @@ static void __exit exit_gfs2_fs(void)
 	gfs2_unregister_debugfs();
 	unregister_filesystem(&gfs2_fs_type);
 	unregister_filesystem(&gfs2meta_fs_type);
-	destroy_workqueue(gfs_recovery_wq);
+	destroy_workqueue(gfs2_recovery_wq);
 	destroy_workqueue(gfs2_control_wq);
 	destroy_workqueue(gfs2_freeze_wq);
 	list_lru_destroy(&gfs2_qd_lru);
@@ -87,7 +87,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
 	gfs2_tune_init(&sdp->sd_tune);
 
-	init_waitqueue_head(&sdp->sd_glock_wait);
+	init_waitqueue_head(&sdp->sd_kill_wait);
 	init_waitqueue_head(&sdp->sd_async_glock_wait);
 	atomic_set(&sdp->sd_glock_disposal, 0);
 	init_completion(&sdp->sd_locking_init);
@@ -1103,29 +1103,49 @@ static int init_threads(struct gfs2_sbd *sdp)
 	struct task_struct *p;
 	int error = 0;
 
-	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
+	p = kthread_create(gfs2_logd, sdp, "gfs2_logd/%s", sdp->sd_fsname);
 	if (IS_ERR(p)) {
 		error = PTR_ERR(p);
-		fs_err(sdp, "can't start logd thread: %d\n", error);
+		fs_err(sdp, "can't create logd thread: %d\n", error);
 		return error;
 	}
+	get_task_struct(p);
 	sdp->sd_logd_process = p;
 
-	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
+	p = kthread_create(gfs2_quotad, sdp, "gfs2_quotad/%s", sdp->sd_fsname);
 	if (IS_ERR(p)) {
 		error = PTR_ERR(p);
-		fs_err(sdp, "can't start quotad thread: %d\n", error);
+		fs_err(sdp, "can't create quotad thread: %d\n", error);
 		goto fail;
 	}
+	get_task_struct(p);
 	sdp->sd_quotad_process = p;
+
+	wake_up_process(sdp->sd_logd_process);
+	wake_up_process(sdp->sd_quotad_process);
 	return 0;
 
 fail:
 	kthread_stop(sdp->sd_logd_process);
+	put_task_struct(sdp->sd_logd_process);
 	sdp->sd_logd_process = NULL;
 	return error;
 }
 
+void gfs2_destroy_threads(struct gfs2_sbd *sdp)
+{
+	if (sdp->sd_logd_process) {
+		kthread_stop(sdp->sd_logd_process);
+		put_task_struct(sdp->sd_logd_process);
+		sdp->sd_logd_process = NULL;
+	}
+	if (sdp->sd_quotad_process) {
+		kthread_stop(sdp->sd_quotad_process);
+		put_task_struct(sdp->sd_quotad_process);
+		sdp->sd_quotad_process = NULL;
+	}
+}
+
 /**
  * gfs2_fill_super - Read in superblock
  * @sb: The VFS superblock
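Two changes here follow a standard kthread lifecycle pattern: kthread_create() plus a later wake_up_process() keeps the thread from running before setup completes (kthread_run() would start it immediately), and get_task_struct() pins the task_struct so a later kthread_stop() has a valid target even if the thread has already exited on its own. The lifecycle in isolation, assuming a hypothetical example_daemon() thread function:

#include <linux/kthread.h>
#include <linux/sched/task.h>

	struct task_struct *p;

	p = kthread_create(example_daemon, sdp, "example/%s", sdp->sd_fsname);
	if (IS_ERR(p))
		return PTR_ERR(p);
	get_task_struct(p);		/* pin the task_struct */
	sdp->sd_logd_process = p;
	/* ... any remaining setup the thread must not race with ... */
	wake_up_process(p);		/* the thread starts only now */

	/* teardown, typically much later: */
	kthread_stop(sdp->sd_logd_process);
	put_task_struct(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;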
@@ -1276,12 +1296,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 
 		if (error) {
 			gfs2_freeze_unlock(&sdp->sd_freeze_gh);
-			if (sdp->sd_quotad_process)
-				kthread_stop(sdp->sd_quotad_process);
-			sdp->sd_quotad_process = NULL;
-			if (sdp->sd_logd_process)
-				kthread_stop(sdp->sd_logd_process);
-			sdp->sd_logd_process = NULL;
+			gfs2_destroy_threads(sdp);
 			fs_err(sdp, "can't make FS RW: %d\n", error);
 			goto fail_per_node;
 		}
@@ -1381,6 +1396,7 @@ static const struct constant_table gfs2_param_quota[] = {
 	{"off",          GFS2_QUOTA_OFF},
 	{"account",      GFS2_QUOTA_ACCOUNT},
 	{"on",           GFS2_QUOTA_ON},
+	{"quiet",        GFS2_QUOTA_QUIET},
 	{}
 };
 
@@ -1786,9 +1802,9 @@ static void gfs2_kill_sb(struct super_block *sb)
 	/*
 	 * Flush and then drain the delete workqueue here (via
 	 * destroy_workqueue()) to ensure that any delete work that
-	 * may be running will also see the SDF_DEACTIVATING flag.
+	 * may be running will also see the SDF_KILL flag.
 	 */
-	set_bit(SDF_DEACTIVATING, &sdp->sd_flags);
+	set_bit(SDF_KILL, &sdp->sd_flags);
 	gfs2_flush_delete_work(sdp);
 	destroy_workqueue(sdp->sd_delete_wq);
 
fs/gfs2/quota.c
@@ -109,38 +109,44 @@ static inline void spin_unlock_bucket(unsigned int hash)
 static void gfs2_qd_dealloc(struct rcu_head *rcu)
 {
 	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
+	struct gfs2_sbd *sdp = qd->qd_sbd;
+
 	kmem_cache_free(gfs2_quotad_cachep, qd);
+	if (atomic_dec_and_test(&sdp->sd_quota_count))
+		wake_up(&sdp->sd_kill_wait);
 }
 
-static void gfs2_qd_dispose(struct list_head *list)
+static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
+{
+	struct gfs2_sbd *sdp = qd->qd_sbd;
+
+	spin_lock(&qd_lock);
+	list_del(&qd->qd_list);
+	spin_unlock(&qd_lock);
+
+	spin_lock_bucket(qd->qd_hash);
+	hlist_bl_del_rcu(&qd->qd_hlist);
+	spin_unlock_bucket(qd->qd_hash);
+
+	if (!gfs2_withdrawn(sdp)) {
+		gfs2_assert_warn(sdp, !qd->qd_change);
+		gfs2_assert_warn(sdp, !qd->qd_slot_ref);
+		gfs2_assert_warn(sdp, !qd->qd_bh_count);
+	}
+
+	gfs2_glock_put(qd->qd_gl);
+	call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+}
+
+static void gfs2_qd_list_dispose(struct list_head *list)
 {
 	struct gfs2_quota_data *qd;
-	struct gfs2_sbd *sdp;
 
 	while (!list_empty(list)) {
 		qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
-		sdp = qd->qd_gl->gl_name.ln_sbd;
 
 		list_del(&qd->qd_lru);
 
-		/* Free from the filesystem-specific list */
-		spin_lock(&qd_lock);
-		list_del(&qd->qd_list);
-		spin_unlock(&qd_lock);
-
-		spin_lock_bucket(qd->qd_hash);
-		hlist_bl_del_rcu(&qd->qd_hlist);
-		spin_unlock_bucket(qd->qd_hash);
-
-		gfs2_assert_warn(sdp, !qd->qd_change);
-		gfs2_assert_warn(sdp, !qd->qd_slot_count);
-		gfs2_assert_warn(sdp, !qd->qd_bh_count);
-
-		gfs2_glock_put(qd->qd_gl);
-		atomic_dec(&sdp->sd_quota_count);
-
-		/* Delete it from the common reclaim list */
-		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+		gfs2_qd_dispose(qd);
 	}
 }
 
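This restructuring is the heart of the "free quota data synchronously" fix from the cover letter: gfs2_qd_dispose() unlinks the object from all lists immediately, and only the final kmem_cache_free() is deferred to RCU. The new sd_quota_count decrement plus sd_kill_wait wake-up lets teardown wait for the last RCU callback instead of racing with it. The counter-and-wake pattern in isolation (types and the waiting side are a sketch of what put_super() is expected to do; the exact call site and any timeout are not shown in this diff):

/* Free side: runs after a grace period, counts itself out. */
static void example_dealloc(struct rcu_head *rcu)
{
	struct example_obj *obj = container_of(rcu, struct example_obj, rcu);
	struct example_fs *fs = obj->fs;	/* hypothetical types */

	kmem_cache_free(example_cachep, obj);
	if (atomic_dec_and_test(&fs->obj_count))
		wake_up(&fs->kill_wait);
}

	/* Teardown side: block until every deferred free has run. */
	wait_event(fs->kill_wait, atomic_read(&fs->obj_count) == 0);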
@@ -149,18 +155,22 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
 		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 {
 	struct list_head *dispose = arg;
-	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
+	struct gfs2_quota_data *qd =
+		list_entry(item, struct gfs2_quota_data, qd_lru);
+	enum lru_status status;
 
 	if (!spin_trylock(&qd->qd_lockref.lock))
 		return LRU_SKIP;
 
+	status = LRU_SKIP;
 	if (qd->qd_lockref.count == 0) {
 		lockref_mark_dead(&qd->qd_lockref);
 		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
+		status = LRU_REMOVED;
 	}
 
 	spin_unlock(&qd->qd_lockref.lock);
-	return LRU_REMOVED;
+	return status;
 }
 
 static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
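The isolate callback previously returned LRU_REMOVED even when it had only trylocked the object and left it in place, over-counting reclaimed objects in the shrinker's accounting. The corrected contract is to report exactly what happened. In general form (helper names are hypothetical):

static enum lru_status example_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	enum lru_status status = LRU_SKIP;	/* default: nothing happened */

	if (!example_trylock(item))		/* hypothetical */
		return LRU_SKIP;
	if (example_is_unreferenced(item)) {	/* hypothetical */
		list_lru_isolate_move(lru, item, dispose);
		status = LRU_REMOVED;		/* counted as reclaimed */
	}
	example_unlock(item);			/* hypothetical */
	return status;
}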
@@ -175,7 +185,7 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
 	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
 				     gfs2_qd_isolate, &dispose);
 
-	gfs2_qd_dispose(&dispose);
+	gfs2_qd_list_dispose(&dispose);
 
 	return freed;
 }
@@ -203,12 +213,7 @@ static u64 qd2index(struct gfs2_quota_data *qd)
 
 static u64 qd2offset(struct gfs2_quota_data *qd)
 {
-	u64 offset;
-
-	offset = qd2index(qd);
-	offset *= sizeof(struct gfs2_quota);
-
-	return offset;
+	return qd2index(qd) * sizeof(struct gfs2_quota);
 }
 
 static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
@@ -221,7 +226,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
 		return NULL;
 
 	qd->qd_sbd = sdp;
-	qd->qd_lockref.count = 1;
+	qd->qd_lockref.count = 0;
 	spin_lock_init(&qd->qd_lockref.lock);
 	qd->qd_id = qid;
 	qd->qd_slot = -1;
@@ -283,6 +288,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 	spin_lock_bucket(hash);
 	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
 	if (qd == NULL) {
+		new_qd->qd_lockref.count++;
 		*qdp = new_qd;
 		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
 		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
@@ -302,20 +308,31 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 
 static void qd_hold(struct gfs2_quota_data *qd)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+	struct gfs2_sbd *sdp = qd->qd_sbd;
 	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
 	lockref_get(&qd->qd_lockref);
 }
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
+	struct gfs2_sbd *sdp;
+
 	if (lockref_put_or_lock(&qd->qd_lockref))
 		return;
 
+	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
+	sdp = qd->qd_sbd;
+	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+		lockref_mark_dead(&qd->qd_lockref);
+		spin_unlock(&qd->qd_lockref.lock);
+
+		gfs2_qd_dispose(qd);
+		return;
+	}
+
 	qd->qd_lockref.count = 0;
 	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
 	spin_unlock(&qd->qd_lockref.lock);
 
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
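qd_put() now follows the full lockref release idiom: lockref_put_or_lock() decrements locklessly unless this is the last reference, in which case it returns with the embedded spinlock held and the count still at 1, letting the caller decide the object's fate under the lock: park it on the LRU for reuse, or, when the filesystem is no longer live, mark it dead and dispose of it immediately. The shape of the idiom (object type and predicates are hypothetical):

void example_put(struct example_obj *obj)
{
	if (lockref_put_or_lock(&obj->lockref))
		return;		/* fast path: not the last reference */

	/* Last reference: obj->lockref.lock is held, count is still 1. */
	if (example_unusable(obj)) {		/* hypothetical predicate */
		lockref_mark_dead(&obj->lockref);
		spin_unlock(&obj->lockref.lock);
		example_dispose(obj);		/* hypothetical */
		return;
	}
	obj->lockref.count = 0;
	example_add_to_lru(obj);		/* hypothetical */
	spin_unlock(&obj->lockref.lock);
}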
@@ -325,20 +342,19 @@ static int slot_get(struct gfs2_quota_data *qd)
 	int error = 0;
 
 	spin_lock(&sdp->sd_bitmap_lock);
-	if (qd->qd_slot_count != 0)
-		goto out;
-
-	error = -ENOSPC;
-	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
-	if (bit < sdp->sd_quota_slots) {
+	if (qd->qd_slot_ref == 0) {
+		bit = find_first_zero_bit(sdp->sd_quota_bitmap,
+					  sdp->sd_quota_slots);
+		if (bit >= sdp->sd_quota_slots) {
+			error = -ENOSPC;
+			goto out;
+		}
 		set_bit(bit, sdp->sd_quota_bitmap);
 		qd->qd_slot = bit;
-		error = 0;
-out:
-		qd->qd_slot_count++;
 	}
+	qd->qd_slot_ref++;
+out:
 	spin_unlock(&sdp->sd_bitmap_lock);
 
 	return error;
 }
 
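The rewritten slot_get() makes the refcount structure explicit: the bitmap search happens only on the first reference (qd_slot_ref == 0), every success path takes a reference, and the relocated out: label ensures the -ENOSPC path takes none. The pattern in isolation (all names here are hypothetical):

static int example_slot_get(struct example_obj *obj)
{
	int error = 0;
	unsigned int bit;

	spin_lock(&example_bitmap_lock);
	if (obj->slot_ref == 0) {	/* first reference allocates a slot */
		bit = find_first_zero_bit(example_bitmap, example_nr_slots);
		if (bit >= example_nr_slots) {
			error = -ENOSPC;
			goto out;	/* no slot, no reference */
		}
		set_bit(bit, example_bitmap);
		obj->slot = bit;
	}
	obj->slot_ref++;		/* later callers only take a ref */
out:
	spin_unlock(&example_bitmap_lock);
	return error;
}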
@@ -347,8 +363,8 @@ static void slot_hold(struct gfs2_quota_data *qd)
 	struct gfs2_sbd *sdp = qd->qd_sbd;
 
 	spin_lock(&sdp->sd_bitmap_lock);
-	gfs2_assert(sdp, qd->qd_slot_count);
-	qd->qd_slot_count++;
+	gfs2_assert(sdp, qd->qd_slot_ref);
+	qd->qd_slot_ref++;
 	spin_unlock(&sdp->sd_bitmap_lock);
 }
 
@@ -357,8 +373,8 @@ static void slot_put(struct gfs2_quota_data *qd)
 	struct gfs2_sbd *sdp = qd->qd_sbd;
 
 	spin_lock(&sdp->sd_bitmap_lock);
-	gfs2_assert(sdp, qd->qd_slot_count);
-	if (!--qd->qd_slot_count) {
+	gfs2_assert(sdp, qd->qd_slot_ref);
+	if (!--qd->qd_slot_ref) {
 		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
 		qd->qd_slot = -1;
 	}
@@ -367,7 +383,7 @@ static void slot_put(struct gfs2_quota_data *qd)
 
 static int bh_get(struct gfs2_quota_data *qd)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+	struct gfs2_sbd *sdp = qd->qd_sbd;
 	struct inode *inode = sdp->sd_qc_inode;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	unsigned int block, offset;
@@ -421,7 +437,7 @@ static int bh_get(struct gfs2_quota_data *qd)
 
 static void bh_put(struct gfs2_quota_data *qd)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+	struct gfs2_sbd *sdp = qd->qd_sbd;
 
 	mutex_lock(&sdp->sd_quota_mutex);
 	gfs2_assert(sdp, qd->qd_bh_count);
@@ -451,6 +467,20 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 	return 1;
 }
 
+static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
+{
+	int error;
+
+	error = bh_get(qd);
+	if (!error)
+		return 0;
+
+	clear_bit(QDF_LOCKED, &qd->qd_flags);
+	slot_put(qd);
+	qd_put(qd);
+	return error;
+}
+
 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 {
 	struct gfs2_quota_data *qd = NULL, *iter;
@@ -473,30 +503,29 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	spin_unlock(&qd_lock);
 
 	if (qd) {
-		error = bh_get(qd);
-		if (error) {
-			clear_bit(QDF_LOCKED, &qd->qd_flags);
-			slot_put(qd);
-			qd_put(qd);
+		error = qd_bh_get_or_undo(sdp, qd);
+		if (error)
 			return error;
-		}
+		*qdp = qd;
 	}
 
-	*qdp = qd;
-
 	return 0;
 }
 
-static void qd_unlock(struct gfs2_quota_data *qd)
+static void qdsb_put(struct gfs2_quota_data *qd)
 {
-	gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
-			 test_bit(QDF_LOCKED, &qd->qd_flags));
-	clear_bit(QDF_LOCKED, &qd->qd_flags);
 	bh_put(qd);
 	slot_put(qd);
 	qd_put(qd);
 }
 
+static void qd_unlock(struct gfs2_quota_data *qd)
+{
+	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
+	clear_bit(QDF_LOCKED, &qd->qd_flags);
+	qdsb_put(qd);
+}
+
 static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 		    struct gfs2_quota_data **qdp)
 {
@@ -523,13 +552,6 @@ static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
 	return error;
 }
 
-static void qdsb_put(struct gfs2_quota_data *qd)
-{
-	bh_put(qd);
-	slot_put(qd);
-	qd_put(qd);
-}
-
 /**
  * gfs2_qa_get - make sure we have a quota allocations data structure,
  *               if necessary
@@ -666,7 +688,7 @@ static int sort_qd(const void *a, const void *b)
 
 static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
 {
-	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+	struct gfs2_sbd *sdp = qd->qd_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 	struct gfs2_quota_change *qc = qd->qd_bh_qc;
 	s64 x;
@@ -708,30 +730,29 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
 	mutex_unlock(&sdp->sd_quota_mutex);
 }
 
-static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
+static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
 				  unsigned off, void *buf, unsigned bytes)
 {
+	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct inode *inode = &ip->i_inode;
-	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 	struct buffer_head *bh;
 	u64 blk;
 	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 	unsigned to_write = bytes, pg_off = off;
-	int done = 0;
 
 	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
 	boff = off % bsize;
 
-	page = find_or_create_page(mapping, index, GFP_NOFS);
+	page = grab_cache_page(mapping, index);
 	if (!page)
 		return -ENOMEM;
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, bsize, 0);
 
 	bh = page_buffers(page);
-	while (!done) {
+	for(;;) {
 		/* Find the beginning block within the page */
 		if (pg_off >= ((bnum * bsize) + bsize)) {
 			bh = bh->b_this_page;
@@ -751,10 +772,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 			set_buffer_uptodate(bh);
 		if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
 			goto unlock_out;
-		if (gfs2_is_jdata(ip))
-			gfs2_trans_add_data(ip->i_gl, bh);
-		else
-			gfs2_ordered_add_inode(ip);
+		gfs2_trans_add_data(ip->i_gl, bh);
 
 		/* If we need to write to the next block as well */
 		if (to_write > (bsize - boff)) {
@@ -763,7 +781,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 			boff = pg_off % bsize;
 			continue;
 		}
-		done = 1;
+		break;
 	}
 
 	/* Write to the page, now that we have setup the buffer(s) */
@@ -780,12 +798,12 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 	return -EIO;
 }
 
-static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
+static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
 				 loff_t loc)
 {
 	unsigned long pg_beg;
 	unsigned pg_off, nbytes, overflow = 0;
-	int pg_oflow = 0, error;
+	int error;
 	void *ptr;
 
 	nbytes = sizeof(struct gfs2_quota);
@@ -794,17 +812,15 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
 	pg_off = offset_in_page(loc);
 
 	/* If the quota straddles a page boundary, split the write in two */
-	if ((pg_off + nbytes) > PAGE_SIZE) {
-		pg_oflow = 1;
+	if ((pg_off + nbytes) > PAGE_SIZE)
 		overflow = (pg_off + nbytes) - PAGE_SIZE;
-	}
 
 	ptr = qp;
-	error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
+	error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
 				       nbytes - overflow);
 	/* If there's an overflow, write the remaining bytes to the next page */
-	if (!error && pg_oflow)
-		error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
+	if (!error && overflow)
+		error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
 					       ptr + nbytes - overflow,
 					       overflow);
 	return error;
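Here `overflow` doubles as both the byte count for the second write and the "is a second write needed" test, which is what makes the separate pg_oflow flag redundant. A worked example with hypothetical sizes (PAGE_SIZE = 4096; the real record size is sizeof(struct gfs2_quota)):

	/* Hypothetical sizes, for illustration only. */
	unsigned pg_off = 4000, nbytes = 200, overflow = 0;

	if ((pg_off + nbytes) > PAGE_SIZE)		/* 4200 > 4096 */
		overflow = (pg_off + nbytes) - PAGE_SIZE;	/* 104 */

	/* First write: nbytes - overflow = 96 bytes, ending exactly at the
	 * page boundary. Second write: only when overflow != 0; 104 bytes
	 * at offset 0 of the following page (pg_beg + 1). With nbytes = 64
	 * at the same offset, overflow stays 0 and no second write happens. */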
@@ -812,7 +828,7 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
 
 /**
  * gfs2_adjust_quota - adjust record of current block usage
- * @ip: The quota inode
+ * @sdp: The superblock
  * @loc: Offset of the entry in the quota file
  * @change: The amount of usage change to record
  * @qd: The quota data
@@ -824,12 +840,12 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
  * Returns: 0 or -ve on error
  */
 
-static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
+static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
 			     s64 change, struct gfs2_quota_data *qd,
 			     struct qc_dqblk *fdq)
 {
+	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct inode *inode = &ip->i_inode;
-	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_quota q;
 	int err;
 	u64 size;
@@ -846,7 +862,6 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 		return err;
 
 	loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
-	err = -EIO;
 	be64_add_cpu(&q.qu_value, change);
 	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 		q.qu_value = 0; /* Never go negative on quota usage */
@ -866,7 +881,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = gfs2_write_disk_quota(ip, &q, loc);
|
err = gfs2_write_disk_quota(sdp, &q, loc);
|
||||||
if (!err) {
|
if (!err) {
|
||||||
size = loc + sizeof(struct gfs2_quota);
|
size = loc + sizeof(struct gfs2_quota);
|
||||||
if (size > inode->i_size)
|
if (size > inode->i_size)
|
||||||
@ -881,7 +896,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
|
|||||||
|
|
||||||
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
|
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
|
||||||
{
|
{
|
||||||
struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
|
struct gfs2_sbd *sdp = (*qda)->qd_sbd;
|
||||||
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
|
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
|
||||||
struct gfs2_alloc_parms ap = { .aflags = 0, };
|
struct gfs2_alloc_parms ap = { .aflags = 0, };
|
||||||
unsigned int data_blocks, ind_blocks;
|
unsigned int data_blocks, ind_blocks;
|
||||||
@ -893,18 +908,12 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
|
|||||||
unsigned int nalloc = 0, blocks;
|
unsigned int nalloc = 0, blocks;
|
||||||
int error;
|
int error;
|
||||||
|
|
||||||
error = gfs2_qa_get(ip);
|
|
||||||
if (error)
|
|
||||||
return error;
|
|
||||||
|
|
||||||
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
|
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
|
||||||
&data_blocks, &ind_blocks);
|
&data_blocks, &ind_blocks);
|
||||||
|
|
||||||
ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
|
ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
|
||||||
if (!ghs) {
|
if (!ghs)
|
||||||
error = -ENOMEM;
|
return -ENOMEM;
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
|
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
|
||||||
inode_lock(&ip->i_inode);
|
inode_lock(&ip->i_inode);
|
||||||
@ -953,7 +962,8 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
|
|||||||
for (x = 0; x < num_qd; x++) {
|
for (x = 0; x < num_qd; x++) {
|
||||||
qd = qda[x];
|
qd = qda[x];
|
||||||
offset = qd2offset(qd);
|
offset = qd2offset(qd);
|
||||||
error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
|
error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
|
||||||
|
NULL);
|
||||||
if (error)
|
if (error)
|
||||||
goto out_end_trans;
|
goto out_end_trans;
|
||||||
|
|
||||||
@ -961,8 +971,6 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
|
|||||||
set_bit(QDF_REFRESH, &qd->qd_flags);
|
set_bit(QDF_REFRESH, &qd->qd_flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
error = 0;
|
|
||||||
|
|
||||||
out_end_trans:
|
out_end_trans:
|
||||||
gfs2_trans_end(sdp);
|
gfs2_trans_end(sdp);
|
||||||
out_ipres:
|
out_ipres:
|
||||||
@ -976,8 +984,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
|
|||||||
kfree(ghs);
|
kfree(ghs);
|
||||||
gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
|
gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
|
||||||
GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
|
GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
|
||||||
out:
|
if (!error) {
|
||||||
gfs2_qa_put(ip);
|
for (x = 0; x < num_qd; x++)
|
||||||
|
qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;
|
||||||
|
}
|
||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1009,11 +1019,12 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
|
|||||||
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
|
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
|
||||||
struct gfs2_holder *q_gh)
|
struct gfs2_holder *q_gh)
|
||||||
{
|
{
|
||||||
struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
|
struct gfs2_sbd *sdp = qd->qd_sbd;
|
||||||
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
|
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
|
||||||
struct gfs2_holder i_gh;
|
struct gfs2_holder i_gh;
|
||||||
int error;
|
int error;
|
||||||
|
|
||||||
|
gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
|
||||||
restart:
|
restart:
|
||||||
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
|
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
|
||||||
if (error)
|
if (error)
|
||||||
@ -1059,9 +1070,10 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
|
|||||||
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
|
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
|
||||||
struct gfs2_quota_data *qd;
|
struct gfs2_quota_data *qd;
|
||||||
u32 x;
|
u32 x;
|
||||||
int error = 0;
|
int error;
|
||||||
|
|
||||||
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
|
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
|
||||||
|
sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
error = gfs2_quota_hold(ip, uid, gid);
|
error = gfs2_quota_hold(ip, uid, gid);
|
||||||
@ -1089,16 +1101,15 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
|
|||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int need_sync(struct gfs2_quota_data *qd)
|
static bool need_sync(struct gfs2_quota_data *qd)
|
||||||
{
|
{
|
||||||
struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
|
struct gfs2_sbd *sdp = qd->qd_sbd;
|
||||||
struct gfs2_tune *gt = &sdp->sd_tune;
|
struct gfs2_tune *gt = &sdp->sd_tune;
|
||||||
s64 value;
|
s64 value;
|
||||||
unsigned int num, den;
|
unsigned int num, den;
|
||||||
int do_sync = 1;
|
|
||||||
|
|
||||||
if (!qd->qd_qb.qb_limit)
|
if (!qd->qd_qb.qb_limit)
|
||||||
return 0;
|
return false;
|
||||||
|
|
||||||
spin_lock(&qd_lock);
|
spin_lock(&qd_lock);
|
||||||
value = qd->qd_change;
|
value = qd->qd_change;
|
||||||
@ -1109,26 +1120,26 @@ static int need_sync(struct gfs2_quota_data *qd)
|
|||||||
den = gt->gt_quota_scale_den;
|
den = gt->gt_quota_scale_den;
|
||||||
spin_unlock(>->gt_spin);
|
spin_unlock(>->gt_spin);
|
||||||
|
|
||||||
if (value < 0)
|
if (value <= 0)
|
||||||
do_sync = 0;
|
return false;
|
||||||
else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
|
else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
|
||||||
(s64)be64_to_cpu(qd->qd_qb.qb_limit))
|
(s64)be64_to_cpu(qd->qd_qb.qb_limit))
|
||||||
do_sync = 0;
|
return false;
|
||||||
else {
|
else {
|
||||||
value *= gfs2_jindex_size(sdp) * num;
|
value *= gfs2_jindex_size(sdp) * num;
|
||||||
value = div_s64(value, den);
|
value = div_s64(value, den);
|
||||||
value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
|
value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
|
||||||
if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
|
if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
|
||||||
do_sync = 0;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
return do_sync;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void gfs2_quota_unlock(struct gfs2_inode *ip)
|
void gfs2_quota_unlock(struct gfs2_inode *ip)
|
||||||
{
|
{
|
||||||
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
|
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
|
||||||
struct gfs2_quota_data *qda[4];
|
struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
|
||||||
unsigned int count = 0;
|
unsigned int count = 0;
|
||||||
u32 x;
|
u32 x;
|
||||||
int found;
|
int found;
|
||||||
@ -1138,7 +1149,7 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
|
|||||||
|
|
||||||
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
|
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
|
||||||
struct gfs2_quota_data *qd;
|
struct gfs2_quota_data *qd;
|
||||||
int sync;
|
bool sync;
|
||||||
|
|
||||||
qd = ip->i_qadata->qa_qd[x];
|
qd = ip->i_qadata->qa_qd[x];
|
||||||
sync = need_sync(qd);
|
sync = need_sync(qd);
|
||||||
@ -1154,15 +1165,8 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
|
|||||||
if (!found)
|
if (!found)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
gfs2_assert_warn(sdp, qd->qd_change_sync);
|
if (!qd_bh_get_or_undo(sdp, qd))
|
||||||
if (bh_get(qd)) {
|
qda[count++] = qd;
|
||||||
clear_bit(QDF_LOCKED, &qd->qd_flags);
|
|
||||||
slot_put(qd);
|
|
||||||
qd_put(qd);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
qda[count++] = qd;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (count) {
|
if (count) {
|
||||||
@ -1178,12 +1182,13 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
|
|||||||
|
|
||||||
static int print_message(struct gfs2_quota_data *qd, char *type)
|
static int print_message(struct gfs2_quota_data *qd, char *type)
|
||||||
{
|
{
|
||||||
struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
|
struct gfs2_sbd *sdp = qd->qd_sbd;
|
||||||
|
|
||||||
fs_info(sdp, "quota %s for %s %u\n",
|
if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
|
||||||
type,
|
fs_info(sdp, "quota %s for %s %u\n",
|
||||||
(qd->qd_id.type == USRQUOTA) ? "user" : "group",
|
type,
|
||||||
from_kqid(&init_user_ns, qd->qd_id));
|
(qd->qd_id.type == USRQUOTA) ? "user" : "group",
|
||||||
|
from_kqid(&init_user_ns, qd->qd_id));
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -1269,7 +1274,8 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
|
|||||||
u32 x;
|
u32 x;
|
||||||
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
|
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
|
||||||
|
|
||||||
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
|
if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
|
||||||
|
sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) ||
|
||||||
gfs2_assert_warn(sdp, change))
|
gfs2_assert_warn(sdp, change))
|
||||||
return;
|
return;
|
||||||
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
|
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
|
||||||
@ -1288,6 +1294,24 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool qd_changed(struct gfs2_sbd *sdp)
|
||||||
|
{
|
||||||
|
struct gfs2_quota_data *qd;
|
||||||
|
bool changed = false;
|
||||||
|
|
||||||
|
spin_lock(&qd_lock);
|
||||||
|
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
|
||||||
|
if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
|
||||||
|
!test_bit(QDF_CHANGE, &qd->qd_flags))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
changed = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
spin_unlock(&qd_lock);
|
||||||
|
return changed;
|
||||||
|
}
|
||||||
|
|
||||||
int gfs2_quota_sync(struct super_block *sb, int type)
|
int gfs2_quota_sync(struct super_block *sb, int type)
|
||||||
{
|
{
|
||||||
struct gfs2_sbd *sdp = sb->s_fs_info;
|
struct gfs2_sbd *sdp = sb->s_fs_info;
|
||||||
@ -1297,6 +1321,9 @@ int gfs2_quota_sync(struct super_block *sb, int type)
|
|||||||
unsigned int x;
|
unsigned int x;
|
||||||
int error = 0;
|
int error = 0;
|
||||||
|
|
||||||
|
if (!qd_changed(sdp))
|
||||||
|
return 0;
|
||||||
|
|
||||||
qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
|
qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
|
||||||
if (!qda)
|
if (!qda)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
@ -1318,10 +1345,6 @@ int gfs2_quota_sync(struct super_block *sb, int type)
|
|||||||
if (num_qd) {
|
if (num_qd) {
|
||||||
if (!error)
|
if (!error)
|
||||||
error = do_sync(num_qd, qda);
|
error = do_sync(num_qd, qda);
|
||||||
if (!error)
|
|
||||||
for (x = 0; x < num_qd; x++)
|
|
||||||
qda[x]->qd_sync_gen =
|
|
||||||
sdp->sd_quota_sync_gen;
|
|
||||||
|
|
||||||
for (x = 0; x < num_qd; x++)
|
for (x = 0; x < num_qd; x++)
|
||||||
qd_unlock(qda[x]);
|
qd_unlock(qda[x]);
|
||||||
@ -1423,7 +1446,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
|
|||||||
set_bit(QDF_CHANGE, &qd->qd_flags);
|
set_bit(QDF_CHANGE, &qd->qd_flags);
|
||||||
qd->qd_change = qc_change;
|
qd->qd_change = qc_change;
|
||||||
qd->qd_slot = slot;
|
qd->qd_slot = slot;
|
||||||
qd->qd_slot_count = 1;
|
qd->qd_slot_ref = 1;
|
||||||
|
|
||||||
spin_lock(&qd_lock);
|
spin_lock(&qd_lock);
|
||||||
BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
|
BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
|
||||||
@ -1455,36 +1478,35 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
|
|||||||
|
|
||||||
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
|
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
|
||||||
{
|
{
|
||||||
struct list_head *head = &sdp->sd_quota_list;
|
|
||||||
struct gfs2_quota_data *qd;
|
struct gfs2_quota_data *qd;
|
||||||
|
LIST_HEAD(dispose);
|
||||||
|
int count;
|
||||||
|
|
||||||
|
BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
|
||||||
|
|
||||||
spin_lock(&qd_lock);
|
spin_lock(&qd_lock);
|
||||||
while (!list_empty(head)) {
|
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
|
||||||
qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
|
spin_lock(&qd->qd_lockref.lock);
|
||||||
|
if (qd->qd_lockref.count != 0) {
|
||||||
|
spin_unlock(&qd->qd_lockref.lock);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
lockref_mark_dead(&qd->qd_lockref);
|
||||||
|
spin_unlock(&qd->qd_lockref.lock);
|
||||||
|
|
||||||
list_del(&qd->qd_list);
|
|
||||||
|
|
||||||
/* Also remove if this qd exists in the reclaim list */
|
|
||||||
list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
|
list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
|
||||||
atomic_dec(&sdp->sd_quota_count);
|
list_add(&qd->qd_lru, &dispose);
|
||||||
spin_unlock(&qd_lock);
|
|
||||||
|
|
||||||
spin_lock_bucket(qd->qd_hash);
|
|
||||||
hlist_bl_del_rcu(&qd->qd_hlist);
|
|
||||||
spin_unlock_bucket(qd->qd_hash);
|
|
||||||
|
|
||||||
gfs2_assert_warn(sdp, !qd->qd_change);
|
|
||||||
gfs2_assert_warn(sdp, !qd->qd_slot_count);
|
|
||||||
gfs2_assert_warn(sdp, !qd->qd_bh_count);
|
|
||||||
|
|
||||||
gfs2_glock_put(qd->qd_gl);
|
|
||||||
call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
|
|
||||||
|
|
||||||
spin_lock(&qd_lock);
|
|
||||||
}
|
}
|
||||||
spin_unlock(&qd_lock);
|
spin_unlock(&qd_lock);
|
||||||
|
|
||||||
gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
|
gfs2_qd_list_dispose(&dispose);
|
||||||
|
|
||||||
|
wait_event_timeout(sdp->sd_kill_wait,
|
||||||
|
(count = atomic_read(&sdp->sd_quota_count)) == 0,
|
||||||
|
HZ * 60);
|
||||||
|
|
||||||
|
if (count != 0)
|
||||||
|
fs_err(sdp, "%d left-over quota data objects\n", count);
|
||||||
|
|
||||||
kvfree(sdp->sd_quota_bitmap);
|
kvfree(sdp->sd_quota_bitmap);
|
||||||
sdp->sd_quota_bitmap = NULL;
|
sdp->sd_quota_bitmap = NULL;
|
||||||
@ -1536,12 +1558,11 @@ int gfs2_quotad(void *data)
|
|||||||
unsigned long statfs_timeo = 0;
|
unsigned long statfs_timeo = 0;
|
||||||
unsigned long quotad_timeo = 0;
|
unsigned long quotad_timeo = 0;
|
||||||
unsigned long t = 0;
|
unsigned long t = 0;
|
||||||
DEFINE_WAIT(wait);
|
|
||||||
|
|
||||||
while (!kthread_should_stop()) {
|
while (!kthread_should_stop()) {
|
||||||
|
|
||||||
if (gfs2_withdrawn(sdp))
|
if (gfs2_withdrawn(sdp))
|
||||||
goto bypass;
|
break;
|
||||||
|
|
||||||
/* Update the master statfs file */
|
/* Update the master statfs file */
|
||||||
if (sdp->sd_statfs_force_sync) {
|
if (sdp->sd_statfs_force_sync) {
|
||||||
int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
|
int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
|
||||||
@ -1559,15 +1580,16 @@ int gfs2_quotad(void *data)
|
|||||||
|
|
||||||
try_to_freeze();
|
try_to_freeze();
|
||||||
|
|
||||||
bypass:
|
|
||||||
t = min(quotad_timeo, statfs_timeo);
|
t = min(quotad_timeo, statfs_timeo);
|
||||||
|
|
||||||
prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
|
t = wait_event_interruptible_timeout(sdp->sd_quota_wait,
|
||||||
if (!sdp->sd_statfs_force_sync)
|
sdp->sd_statfs_force_sync ||
|
||||||
t -= schedule_timeout(t);
|
gfs2_withdrawn(sdp) ||
|
||||||
else
|
kthread_should_stop(),
|
||||||
|
t);
|
||||||
|
|
||||||
|
if (sdp->sd_statfs_force_sync)
|
||||||
t = 0;
|
t = 0;
|
||||||
finish_wait(&sdp->sd_quota_wait, &wait);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -1580,6 +1602,8 @@ static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
|
|||||||
memset(state, 0, sizeof(*state));
|
memset(state, 0, sizeof(*state));
|
||||||
|
|
||||||
switch (sdp->sd_args.ar_quota) {
|
switch (sdp->sd_args.ar_quota) {
|
||||||
|
case GFS2_QUOTA_QUIET:
|
||||||
|
fallthrough;
|
||||||
case GFS2_QUOTA_ON:
|
case GFS2_QUOTA_ON:
|
||||||
state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
|
state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
|
||||||
state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
|
state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
|
||||||
@ -1726,7 +1750,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
|
|||||||
goto out_release;
|
goto out_release;
|
||||||
|
|
||||||
/* Apply changes */
|
/* Apply changes */
|
||||||
error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
|
error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
|
||||||
if (!error)
|
if (!error)
|
||||||
clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
|
clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
|
||||||
|
|
||||||
|
@ -27,7 +27,7 @@
|
|||||||
#include "util.h"
|
#include "util.h"
|
||||||
#include "dir.h"
|
#include "dir.h"
|
||||||
|
|
||||||
struct workqueue_struct *gfs_recovery_wq;
|
struct workqueue_struct *gfs2_recovery_wq;
|
||||||
|
|
||||||
int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
|
int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
|
||||||
struct buffer_head **bh)
|
struct buffer_head **bh)
|
||||||
@ -570,7 +570,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
|
|||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
|
|
||||||
/* we have JDF_RECOVERY, queue should always succeed */
|
/* we have JDF_RECOVERY, queue should always succeed */
|
||||||
rv = queue_work(gfs_recovery_wq, &jd->jd_work);
|
rv = queue_work(gfs2_recovery_wq, &jd->jd_work);
|
||||||
BUG_ON(!rv);
|
BUG_ON(!rv);
|
||||||
|
|
||||||
if (wait)
|
if (wait)
|
||||||
|
@ -9,7 +9,7 @@
|
|||||||
|
|
||||||
#include "incore.h"
|
#include "incore.h"
|
||||||
|
|
||||||
extern struct workqueue_struct *gfs_recovery_wq;
|
extern struct workqueue_struct *gfs2_recovery_wq;
|
||||||
|
|
||||||
static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk)
|
static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk)
|
||||||
{
|
{
|
||||||
|
@ -546,20 +546,10 @@ void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
|
|||||||
{
|
{
|
||||||
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
|
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
|
||||||
|
|
||||||
if (!test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
|
if (!test_bit(SDF_KILL, &sdp->sd_flags))
|
||||||
gfs2_flush_delete_work(sdp);
|
gfs2_flush_delete_work(sdp);
|
||||||
|
|
||||||
if (!log_write_allowed && current == sdp->sd_quotad_process)
|
gfs2_destroy_threads(sdp);
|
||||||
fs_warn(sdp, "The quotad daemon is withdrawing.\n");
|
|
||||||
else if (sdp->sd_quotad_process)
|
|
||||||
kthread_stop(sdp->sd_quotad_process);
|
|
||||||
sdp->sd_quotad_process = NULL;
|
|
||||||
|
|
||||||
if (!log_write_allowed && current == sdp->sd_logd_process)
|
|
||||||
fs_warn(sdp, "The logd daemon is withdrawing.\n");
|
|
||||||
else if (sdp->sd_logd_process)
|
|
||||||
kthread_stop(sdp->sd_logd_process);
|
|
||||||
sdp->sd_logd_process = NULL;
|
|
||||||
|
|
||||||
if (log_write_allowed) {
|
if (log_write_allowed) {
|
||||||
gfs2_quota_sync(sdp->sd_vfs, 0);
|
gfs2_quota_sync(sdp->sd_vfs, 0);
|
||||||
@ -580,15 +570,8 @@ void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
|
|||||||
gfs2_log_is_empty(sdp),
|
gfs2_log_is_empty(sdp),
|
||||||
HZ * 5);
|
HZ * 5);
|
||||||
gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
|
gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
|
||||||
} else {
|
|
||||||
wait_event_timeout(sdp->sd_log_waitq,
|
|
||||||
gfs2_log_is_empty(sdp),
|
|
||||||
HZ * 5);
|
|
||||||
}
|
}
|
||||||
gfs2_quota_cleanup(sdp);
|
gfs2_quota_cleanup(sdp);
|
||||||
|
|
||||||
if (!log_write_allowed)
|
|
||||||
sdp->sd_vfs->s_flags |= SB_RDONLY;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -622,6 +605,10 @@ static void gfs2_put_super(struct super_block *sb)
|
|||||||
if (!sb_rdonly(sb)) {
|
if (!sb_rdonly(sb)) {
|
||||||
gfs2_make_fs_ro(sdp);
|
gfs2_make_fs_ro(sdp);
|
||||||
}
|
}
|
||||||
|
if (gfs2_withdrawn(sdp)) {
|
||||||
|
gfs2_destroy_threads(sdp);
|
||||||
|
gfs2_quota_cleanup(sdp);
|
||||||
|
}
|
||||||
WARN_ON(gfs2_withdrawing(sdp));
|
WARN_ON(gfs2_withdrawing(sdp));
|
||||||
|
|
||||||
/* At this point, we're through modifying the disk */
|
/* At this point, we're through modifying the disk */
|
||||||
@ -1134,6 +1121,9 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
|
|||||||
case GFS2_QUOTA_ON:
|
case GFS2_QUOTA_ON:
|
||||||
state = "on";
|
state = "on";
|
||||||
break;
|
break;
|
||||||
|
case GFS2_QUOTA_QUIET:
|
||||||
|
state = "quiet";
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
state = "unknown";
|
state = "unknown";
|
||||||
break;
|
break;
|
||||||
|
@ -36,6 +36,7 @@ extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
|
|||||||
extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
|
extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
|
||||||
extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
|
extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
|
||||||
extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
|
extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
|
||||||
|
extern void gfs2_destroy_threads(struct gfs2_sbd *sdp);
|
||||||
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
|
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
|
||||||
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
|
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
|
||||||
s64 dinodes);
|
s64 dinodes);
|
||||||
|
@ -98,7 +98,10 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
|
|||||||
"sd_log_flush_head: %d\n"
|
"sd_log_flush_head: %d\n"
|
||||||
"sd_log_flush_tail: %d\n"
|
"sd_log_flush_tail: %d\n"
|
||||||
"sd_log_blks_reserved: %d\n"
|
"sd_log_blks_reserved: %d\n"
|
||||||
"sd_log_revokes_available: %d\n",
|
"sd_log_revokes_available: %d\n"
|
||||||
|
"sd_log_pinned: %d\n"
|
||||||
|
"sd_log_thresh1: %d\n"
|
||||||
|
"sd_log_thresh2: %d\n",
|
||||||
test_bit(SDF_JOURNAL_CHECKED, &f),
|
test_bit(SDF_JOURNAL_CHECKED, &f),
|
||||||
test_bit(SDF_JOURNAL_LIVE, &f),
|
test_bit(SDF_JOURNAL_LIVE, &f),
|
||||||
(sdp->sd_jdesc ? sdp->sd_jdesc->jd_jid : 0),
|
(sdp->sd_jdesc ? sdp->sd_jdesc->jd_jid : 0),
|
||||||
@ -118,7 +121,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
|
|||||||
test_bit(SDF_WITHDRAW_IN_PROG, &f),
|
test_bit(SDF_WITHDRAW_IN_PROG, &f),
|
||||||
test_bit(SDF_REMOTE_WITHDRAW, &f),
|
test_bit(SDF_REMOTE_WITHDRAW, &f),
|
||||||
test_bit(SDF_WITHDRAW_RECOVERY, &f),
|
test_bit(SDF_WITHDRAW_RECOVERY, &f),
|
||||||
test_bit(SDF_DEACTIVATING, &f),
|
test_bit(SDF_KILL, &f),
|
||||||
sdp->sd_log_error,
|
sdp->sd_log_error,
|
||||||
rwsem_is_locked(&sdp->sd_log_flush_lock),
|
rwsem_is_locked(&sdp->sd_log_flush_lock),
|
||||||
sdp->sd_log_num_revoke,
|
sdp->sd_log_num_revoke,
|
||||||
@ -128,7 +131,10 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
|
|||||||
sdp->sd_log_flush_head,
|
sdp->sd_log_flush_head,
|
||||||
sdp->sd_log_flush_tail,
|
sdp->sd_log_flush_tail,
|
||||||
sdp->sd_log_blks_reserved,
|
sdp->sd_log_blks_reserved,
|
||||||
atomic_read(&sdp->sd_log_revokes_available));
|
atomic_read(&sdp->sd_log_revokes_available),
|
||||||
|
atomic_read(&sdp->sd_log_pinned),
|
||||||
|
atomic_read(&sdp->sd_log_thresh1),
|
||||||
|
atomic_read(&sdp->sd_log_thresh2));
|
||||||
return s;
|
return s;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9,6 +9,7 @@
|
|||||||
#include <linux/spinlock.h>
|
#include <linux/spinlock.h>
|
||||||
#include <linux/completion.h>
|
#include <linux/completion.h>
|
||||||
#include <linux/buffer_head.h>
|
#include <linux/buffer_head.h>
|
||||||
|
#include <linux/kthread.h>
|
||||||
#include <linux/crc32.h>
|
#include <linux/crc32.h>
|
||||||
#include <linux/gfs2_ondisk.h>
|
#include <linux/gfs2_ondisk.h>
|
||||||
#include <linux/delay.h>
|
#include <linux/delay.h>
|
||||||
@ -150,7 +151,14 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
|
|||||||
if (!sb_rdonly(sdp->sd_vfs)) {
|
if (!sb_rdonly(sdp->sd_vfs)) {
|
||||||
bool locked = mutex_trylock(&sdp->sd_freeze_mutex);
|
bool locked = mutex_trylock(&sdp->sd_freeze_mutex);
|
||||||
|
|
||||||
gfs2_make_fs_ro(sdp);
|
wake_up(&sdp->sd_logd_waitq);
|
||||||
|
wake_up(&sdp->sd_quota_wait);
|
||||||
|
|
||||||
|
wait_event_timeout(sdp->sd_log_waitq,
|
||||||
|
gfs2_log_is_empty(sdp),
|
||||||
|
HZ * 5);
|
||||||
|
|
||||||
|
sdp->sd_vfs->s_flags |= SB_RDONLY;
|
||||||
|
|
||||||
if (locked)
|
if (locked)
|
||||||
mutex_unlock(&sdp->sd_freeze_mutex);
|
mutex_unlock(&sdp->sd_freeze_mutex);
|
||||||
@ -315,19 +323,19 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
|
|||||||
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
|
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
|
||||||
const struct lm_lockops *lm = ls->ls_ops;
|
const struct lm_lockops *lm = ls->ls_ops;
|
||||||
|
|
||||||
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW &&
|
|
||||||
test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
|
|
||||||
if (!test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags))
|
|
||||||
return -1;
|
|
||||||
|
|
||||||
wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG,
|
|
||||||
TASK_UNINTERRUPTIBLE);
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
set_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
|
|
||||||
|
|
||||||
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
|
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
|
||||||
|
unsigned long old = READ_ONCE(sdp->sd_flags), new;
|
||||||
|
|
||||||
|
do {
|
||||||
|
if (old & BIT(SDF_WITHDRAWN)) {
|
||||||
|
wait_on_bit(&sdp->sd_flags,
|
||||||
|
SDF_WITHDRAW_IN_PROG,
|
||||||
|
TASK_UNINTERRUPTIBLE);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
new = old | BIT(SDF_WITHDRAWN) | BIT(SDF_WITHDRAW_IN_PROG);
|
||||||
|
} while (unlikely(!try_cmpxchg(&sdp->sd_flags, &old, new)));
|
||||||
|
|
||||||
fs_err(sdp, "about to withdraw this file system\n");
|
fs_err(sdp, "about to withdraw this file system\n");
|
||||||
BUG_ON(sdp->sd_args.ar_debug);
|
BUG_ON(sdp->sd_args.ar_debug);
|
||||||
|
|
||||||
|