Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-01 10:43:43 +00:00)
gfs2 fixes
Merge tag 'gfs2-v6.4-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

- Move the freeze/thaw logic from glock callback context to process / worker thread context to prevent deadlocks
- Fix a quota reference counting bug in do_qc()
- Carry on deallocating inodes even when gfs2_rindex_update() fails
- Retry filesystem-internal reads when they are interrupted by a signal
- Eliminate kmap_atomic() in favor of kmap_local_page() / memcpy_{from,to}_page()
- Get rid of noop_direct_IO
- And a few more minor fixes and cleanups

* tag 'gfs2-v6.4-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (23 commits)
  gfs2: Add quota_change type
  gfs2: Use memcpy_{from,to}_page where appropriate
  gfs2: Convert remaining kmap_atomic calls to kmap_local_page
  gfs2: Replace deprecated kmap_atomic with kmap_local_page
  gfs: Get rid of unnucessary locking in inode_go_dump
  gfs2: gfs2_freeze_lock_shared cleanup
  gfs2: Replace sd_freeze_state with SDF_FROZEN flag
  gfs2: Rework freeze / thaw logic
  gfs2: Rename SDF_{FS_FROZEN => FREEZE_INITIATOR}
  gfs2: Reconfiguring frozen filesystem already rejected
  gfs2: Rename gfs2_freeze_lock{ => _shared }
  gfs2: Rename the {freeze,thaw}_super callbacks
  gfs2: Rename remaining "transaction" glock references
  gfs2: retry interrupted internal reads
  gfs2: Fix possible data races in gfs2_show_options()
  gfs2: Fix duplicate should_fault_in_pages() call
  gfs2: set FMODE_CAN_ODIRECT instead of a dummy direct_IO method
  gfs2: Don't remember delete unless it's successful
  gfs2: Update rl_unlinked before releasing rgrp lock
  gfs2: Fix gfs2_qa_get imbalance in gfs2_quota_hold
  ...
This commit is contained in: commit 94c76955e8
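The recurring change in this series is the replacement of kmap_atomic() with kmap_local_page() or with the memcpy_{from,to}_page() helpers. As a minimal sketch (hypothetical helper functions, not code from fs/gfs2), the conversion pattern looks like this; kmap_local_page() mappings are per-thread and preemptible, while kmap_atomic() disables pagefaults and preemption:

/*
 * Sketch only: hypothetical helpers illustrating the kmap conversion
 * pattern applied by several commits in this merge; not fs/gfs2 code.
 */
#include <linux/highmem.h>
#include <linux/mm.h>

/* Before: kmap_atomic() disables pagefaults and preemption. */
static void fill_page_atomic(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_atomic(page);

	memcpy(kaddr, src, len);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

/* After: kmap_local_page() keeps the mapping thread-local but preemptible. */
static void fill_page_local(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap_local_page(page);

	memcpy(kaddr, src, len);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	kunmap_local(kaddr);
	flush_dcache_page(page);
}

When only a single copy is needed, memcpy_from_page() and memcpy_to_page() replace the whole map/copy/unmap sequence in one call, which is what the quota and log hunks below do.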
@@ -432,10 +432,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 	if (error)
 		return error;
 
-	kaddr = kmap_atomic(page);
+	kaddr = kmap_local_page(page);
 	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
 	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
-	kunmap_atomic(kaddr);
+	kunmap_local(kaddr);
 	flush_dcache_page(page);
 	brelse(dibh);
 	SetPageUptodate(page);
@@ -489,18 +489,18 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
 	unsigned copied = 0;
 	unsigned amt;
 	struct page *page;
-	void *p;
 
 	do {
+		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
+		if (IS_ERR(page)) {
+			if (PTR_ERR(page) == -EINTR)
+				continue;
+			return PTR_ERR(page);
+		}
 		amt = size - copied;
 		if (offset + size > PAGE_SIZE)
 			amt = PAGE_SIZE - offset;
-		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-		p = kmap_atomic(page);
-		memcpy(buf + copied, p + offset, amt);
-		kunmap_atomic(p);
+		memcpy_from_page(buf + copied, page, offset, amt);
 		put_page(page);
 		copied += amt;
 		index++;
@@ -751,7 +751,6 @@ static const struct address_space_operations gfs2_aops = {
 	.release_folio = iomap_release_folio,
 	.invalidate_folio = iomap_invalidate_folio,
 	.bmap = gfs2_bmap,
-	.direct_IO = noop_direct_IO,
 	.migrate_folio = filemap_migrate_folio,
 	.is_partially_uptodate = iomap_is_partially_uptodate,
 	.error_remove_page = generic_error_remove_page,
@@ -1729,8 +1729,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
 
 	if (offset >= maxsize) {
 		/*
-		 * The starting point lies beyond the allocated meta-data;
-		 * there are no blocks do deallocate.
+		 * The starting point lies beyond the allocated metadata;
+		 * there are no blocks to deallocate.
 		 */
 		return 0;
 	}
@@ -630,6 +630,9 @@ int gfs2_open_common(struct inode *inode, struct file *file)
 		ret = generic_file_open(inode, file);
 		if (ret)
 			return ret;
+
+		if (!gfs2_is_jdata(GFS2_I(inode)))
+			file->f_mode |= FMODE_CAN_ODIRECT;
 	}
 
 	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
@@ -1030,8 +1033,8 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
 	}
 
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
-retry:
 	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+retry:
 		window_size -= fault_in_iov_iter_readable(from, window_size);
 		if (!window_size) {
 			ret = -EFAULT;
@@ -145,8 +145,8 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
  *
  * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
  * when we're withdrawn. For example, to maintain metadata integrity, we should
- * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
- * iopen or the transaction glocks may be safely used because none of their
+ * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
+ * the iopen or freeze glock may be safely used because none of their
  * metadata goes through the journal. So in general, we should disallow all
  * glocks that are journaled, and allow all the others. One exception is:
  * we need to allow our active journal to be promoted and demoted so others
@@ -236,7 +236,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 		truncate_inode_pages_range(mapping, start, end);
 }
 
-static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
 			      const char *fs_id_buf)
 {
 	struct gfs2_rgrpd *rgd = gl->gl_object;
@@ -536,72 +536,53 @@ static int inode_go_held(struct gfs2_holder *gh)
  *
  */
 
-static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
+static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
 			  const char *fs_id_buf)
 {
 	struct gfs2_inode *ip = gl->gl_object;
-	struct inode *inode;
-	unsigned long nrpages;
+	const struct inode *inode = &ip->i_inode;
 
 	if (ip == NULL)
 		return;
 
-	inode = &ip->i_inode;
-	xa_lock_irq(&inode->i_data.i_pages);
-	nrpages = inode->i_data.nrpages;
-	xa_unlock_irq(&inode->i_data.i_pages);
-
 	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
 		       "p:%lu\n", fs_id_buf,
 		       (unsigned long long)ip->i_no_formal_ino,
 		       (unsigned long long)ip->i_no_addr,
-		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
+		       IF2DT(inode->i_mode), ip->i_flags,
 		       (unsigned int)ip->i_diskflags,
-		       (unsigned long long)i_size_read(inode), nrpages);
+		       (unsigned long long)i_size_read(inode),
+		       inode->i_data.nrpages);
 }
 
 /**
- * freeze_go_sync - promote/demote the freeze glock
+ * freeze_go_callback - A cluster node is requesting a freeze
  * @gl: the glock
+ * @remote: true if this came from a different cluster node
  */
 
-static int freeze_go_sync(struct gfs2_glock *gl)
+static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
 {
-	int error = 0;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct super_block *sb = sdp->sd_vfs;
+
+	if (!remote ||
+	    gl->gl_state != LM_ST_SHARED ||
+	    gl->gl_demote_state != LM_ST_UNLOCKED)
+		return;
 
 	/*
-	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
-	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
-	 * all the nodes should have the freeze glock in SH mode and they all
-	 * call do_xmote: One for EX and the others for UN. They ALL must
-	 * freeze locally, and they ALL must queue freeze work. The freeze_work
-	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
-	 * effectively waiting for the thaw on the node who holds it in EX.
-	 * Once thawed, the work func acquires the freeze glock in
-	 * SH and everybody goes back to thawed.
+	 * Try to get an active super block reference to prevent racing with
+	 * unmount (see trylock_super()). But note that unmount isn't the only
+	 * place where a write lock on s_umount is taken, and we can fail here
+	 * because of things like remount as well.
 	 */
-	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
-	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
-		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
-		error = freeze_super(sdp->sd_vfs);
-		if (error) {
-			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
-				error);
-			if (gfs2_withdrawn(sdp)) {
-				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
-				return 0;
-			}
-			gfs2_assert_withdraw(sdp, 0);
-		}
-		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
-		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
-			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
-				       GFS2_LFC_FREEZE_GO_SYNC);
-		else /* read-only mounts */
-			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+	if (down_read_trylock(&sb->s_umount)) {
+		atomic_inc(&sb->s_active);
+		up_read(&sb->s_umount);
+		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
+			deactivate_super(sb);
 	}
-	return 0;
 }
 
 /**
@@ -761,9 +742,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 };
 
 const struct gfs2_glock_operations gfs2_freeze_glops = {
-	.go_sync = freeze_go_sync,
 	.go_xmote_bh = freeze_go_xmote_bh,
-	.go_demote_ok = freeze_go_demote_ok,
+	.go_callback = freeze_go_callback,
 	.go_type = LM_TYPE_NONDISK,
 	.go_flags = GLOF_NONDISK,
 };
@@ -221,7 +221,7 @@ struct gfs2_glock_operations {
 	int (*go_demote_ok) (const struct gfs2_glock *gl);
 	int (*go_instantiate) (struct gfs2_glock *gl);
 	int (*go_held)(struct gfs2_holder *gh);
-	void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
+	void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl,
 			const char *fs_id_buf);
 	void (*go_callback)(struct gfs2_glock *gl, bool remote);
 	void (*go_free)(struct gfs2_glock *gl);
@@ -600,7 +600,7 @@ enum {
 	SDF_RORECOVERY = 7, /* read only recovery */
 	SDF_SKIP_DLM_UNLOCK = 8,
 	SDF_FORCE_AIL_FLUSH = 9,
-	SDF_FS_FROZEN = 10,
+	SDF_FREEZE_INITIATOR = 10,
 	SDF_WITHDRAWING = 11, /* Will withdraw eventually */
 	SDF_WITHDRAW_IN_PROG = 12, /* Withdraw is in progress */
 	SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */
@@ -608,12 +608,7 @@ enum {
 				      withdrawing */
 	SDF_DEACTIVATING = 15,
 	SDF_EVICTING = 16,
-};
-
-enum gfs2_freeze_state {
-	SFS_UNFROZEN = 0,
-	SFS_STARTING_FREEZE = 1,
-	SFS_FROZEN = 2,
+	SDF_FROZEN = 17,
 };
 
 #define GFS2_FSNAME_LEN 256
@@ -841,7 +836,6 @@ struct gfs2_sbd {
 
 	/* For quiescing the filesystem */
 	struct gfs2_holder sd_freeze_gh;
-	atomic_t sd_freeze_state;
 	struct mutex sd_freeze_mutex;
 
 	char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
@@ -296,10 +296,8 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	int error;
 
-	if (gl->gl_lksb.sb_lkid == 0) {
-		gfs2_glock_free(gl);
-		return;
-	}
+	if (gl->gl_lksb.sb_lkid == 0)
+		goto out_free;
 
 	clear_bit(GLF_BLOCKING, &gl->gl_flags);
 	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
@@ -307,17 +305,13 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	gfs2_update_request_times(gl);
 
 	/* don't want to call dlm if we've unmounted the lock protocol */
-	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
-		gfs2_glock_free(gl);
-		return;
-	}
+	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
+		goto out_free;
 	/* don't want to skip dlm_unlock writing the lvb when lock has one */
 
 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-	    !gl->gl_lksb.sb_lvbptr) {
-		gfs2_glock_free(gl);
-		return;
-	}
+	    !gl->gl_lksb.sb_lvbptr)
+		goto out_free;
 
 again:
 	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
@@ -331,8 +325,11 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
 		       gl->gl_name.ln_type,
 		       (unsigned long long)gl->gl_name.ln_number, error);
-		return;
 	}
+	return;
+
+out_free:
+	gfs2_glock_free(gl);
 }
 
 static void gdlm_cancel(struct gfs2_glock *gl)
@@ -914,9 +914,8 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 {
 	blk_opf_t op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
-	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 
-	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
+	gfs2_assert_withdraw(sdp, !test_bit(SDF_FROZEN, &sdp->sd_flags));
 
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 		gfs2_ordered_wait(sdp);
@@ -1036,7 +1035,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 {
 	struct gfs2_trans *tr = NULL;
 	unsigned int reserved_blocks = 0, used_blocks = 0;
-	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+	bool frozen = test_bit(SDF_FROZEN, &sdp->sd_flags);
 	unsigned int first_log_head;
 	unsigned int reserved_revokes = 0;
 
@@ -1067,7 +1066,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	if (tr) {
 		sdp->sd_log_tr = NULL;
 		tr->tr_first = first_log_head;
-		if (unlikely (state == SFS_FROZEN)) {
+		if (unlikely(frozen)) {
 			if (gfs2_assert_withdraw_delayed(sdp,
 			       !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
 				goto out_withdraw;
@@ -1092,7 +1091,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
 		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 
-	if (unlikely(state == SFS_FROZEN))
+	if (unlikely(frozen))
 		if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
 			goto out_withdraw;
 
@@ -1136,8 +1135,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
 			     GFS2_LOG_HEAD_FLUSH_FREEZE))
 			gfs2_log_shutdown(sdp);
-		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
-			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
 	}
 
 out_end:
@@ -427,10 +427,11 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
 {
 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 	struct gfs2_log_header_host lh;
-	void *kaddr = kmap_atomic(page);
+	void *kaddr;
 	unsigned int offset;
 	bool ret = false;
 
+	kaddr = kmap_local_page(page);
 	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
 		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
 			if (lh.lh_sequence >= head->lh_sequence)
@@ -441,7 +442,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
 			}
 		}
 	}
-	kunmap_atomic(kaddr);
+	kunmap_local(kaddr);
 	return ret;
 }
 
@@ -626,11 +627,11 @@ static void gfs2_check_magic(struct buffer_head *bh)
 	__be32 *ptr;
 
 	clear_buffer_escaped(bh);
-	kaddr = kmap_atomic(bh->b_page);
+	kaddr = kmap_local_page(bh->b_page);
 	ptr = kaddr + bh_offset(bh);
 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
 		set_buffer_escaped(bh);
-	kunmap_atomic(kaddr);
+	kunmap_local(kaddr);
 }
 
 static int blocknr_cmp(void *priv, const struct list_head *a,
@@ -696,14 +697,12 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
 		lock_buffer(bd2->bd_bh);
 
 		if (buffer_escaped(bd2->bd_bh)) {
-			void *kaddr;
+			void *p;
+
 			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
-			ptr = page_address(page);
-			kaddr = kmap_atomic(bd2->bd_bh->b_page);
-			memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
-			       bd2->bd_bh->b_size);
-			kunmap_atomic(kaddr);
-			*(__be32 *)ptr = 0;
+			p = page_address(page);
+			memcpy_from_page(p, page, bh_offset(bd2->bd_bh), bd2->bd_bh->b_size);
+			*(__be32 *)p = 0;
 			clear_buffer_escaped(bd2->bd_bh);
 			unlock_buffer(bd2->bd_bh);
 			brelse(bd2->bd_bh);
@@ -135,7 +135,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	init_rwsem(&sdp->sd_log_flush_lock);
 	atomic_set(&sdp->sd_log_in_flight, 0);
 	init_waitqueue_head(&sdp->sd_log_flush_wait);
-	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 	mutex_init(&sdp->sd_freeze_mutex);
 
 	return sdp;
@@ -434,7 +433,7 @@ static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
 	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
 			       CREATE, &sdp->sd_freeze_gl);
 	if (error) {
-		fs_err(sdp, "can't create transaction glock: %d\n", error);
+		fs_err(sdp, "can't create freeze glock: %d\n", error);
 		goto fail_rename;
 	}
 
@@ -1140,7 +1139,6 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	int silent = fc->sb_flags & SB_SILENT;
 	struct gfs2_sbd *sdp;
 	struct gfs2_holder mount_gh;
-	struct gfs2_holder freeze_gh;
 	int error;
 
 	sdp = init_sbd(sb);
@@ -1269,15 +1267,15 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 		}
 	}
 
-	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+	error = gfs2_freeze_lock_shared(sdp);
 	if (error)
 		goto fail_per_node;
 
 	if (!sb_rdonly(sb))
 		error = gfs2_make_fs_rw(sdp);
 
-	gfs2_freeze_unlock(&freeze_gh);
 	if (error) {
+		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
 		if (sdp->sd_quotad_process)
 			kthread_stop(sdp->sd_quotad_process);
 		sdp->sd_quotad_process = NULL;
@@ -1590,12 +1588,6 @@ static int gfs2_reconfigure(struct fs_context *fc)
 		fc->sb_flags |= SB_RDONLY;
 
 	if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
-		struct gfs2_holder freeze_gh;
-
-		error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
-		if (error)
-			return -EINVAL;
-
 		if (fc->sb_flags & SB_RDONLY) {
 			gfs2_make_fs_ro(sdp);
 		} else {
@@ -1603,7 +1595,6 @@ static int gfs2_reconfigure(struct fs_context *fc)
 			if (error)
 				errorfc(fc, "unable to remount read-write");
 		}
-		gfs2_freeze_unlock(&freeze_gh);
 	}
 	sdp->sd_args = *newargs;
 
@@ -75,6 +75,9 @@
 #define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
 #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
 
+#define QC_CHANGE 0
+#define QC_SYNC 1
+
 /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
 /* -> sd_bitmap_lock */
 static DEFINE_SPINLOCK(qd_lock);
@@ -470,7 +473,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	spin_unlock(&qd_lock);
 
 	if (qd) {
-		gfs2_assert_warn(sdp, qd->qd_change_sync);
 		error = bh_get(qd);
 		if (error) {
 			clear_bit(QDF_LOCKED, &qd->qd_flags);
@@ -591,6 +593,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
 	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
 		error = -EIO;
+		gfs2_qa_put(ip);
 		goto out;
 	}
 
@@ -661,7 +664,7 @@ static int sort_qd(const void *a, const void *b)
 	return 0;
 }
 
-static void do_qc(struct gfs2_quota_data *qd, s64 change)
+static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
@@ -686,16 +689,18 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	qd->qd_change = x;
 	spin_unlock(&qd_lock);
 
-	if (!x) {
+	if (qc_type == QC_CHANGE) {
+		if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
+			qd_hold(qd);
+			slot_hold(qd);
+		}
+	} else {
 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
 		clear_bit(QDF_CHANGE, &qd->qd_flags);
 		qc->qc_flags = 0;
 		qc->qc_id = 0;
 		slot_put(qd);
 		qd_put(qd);
-	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
-		qd_hold(qd);
-		slot_hold(qd);
 	}
 
 	if (change < 0) /* Reset quiet flag if we freed some blocks */
@@ -711,7 +716,6 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 	struct buffer_head *bh;
-	void *kaddr;
 	u64 blk;
 	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
 	unsigned to_write = bytes, pg_off = off;
@@ -763,10 +767,8 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
 	}
 
 	/* Write to the page, now that we have setup the buffer(s) */
-	kaddr = kmap_atomic(page);
-	memcpy(kaddr + off, buf, bytes);
+	memcpy_to_page(page, off, buf, bytes);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr);
 	unlock_page(page);
 	put_page(page);
 
@@ -955,7 +957,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 		if (error)
 			goto out_end_trans;
 
-		do_qc(qd, -qd->qd_change_sync);
+		do_qc(qd, -qd->qd_change_sync, QC_SYNC);
 		set_bit(QDF_REFRESH, &qd->qd_flags);
 	}
 
@@ -1281,7 +1283,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
 
 		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
 		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
-			do_qc(qd, change);
+			do_qc(qd, change, QC_CHANGE);
 		}
 	}
 }
@@ -404,7 +404,7 @@ void gfs2_recover_func(struct work_struct *work)
 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 	struct gfs2_log_header_host head;
-	struct gfs2_holder j_gh, ji_gh, thaw_gh;
+	struct gfs2_holder j_gh, ji_gh;
 	ktime_t t_start, t_jlck, t_jhd, t_tlck, t_rep;
 	int ro = 0;
 	unsigned int pass;
@@ -420,10 +420,10 @@ void gfs2_recover_func(struct work_struct *work)
 	if (sdp->sd_args.ar_spectator)
 		goto fail;
 	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
-		fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
+		fs_info(sdp, "jid=%u: Trying to acquire journal glock...\n",
 			jd->jd_jid);
 		jlocked = 1;
-		/* Acquire the journal lock so we can do recovery */
+		/* Acquire the journal glock so we can do recovery */
 
 		error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
 					  LM_ST_EXCLUSIVE,
@@ -465,14 +465,14 @@ void gfs2_recover_func(struct work_struct *work)
 		ktime_ms_delta(t_jhd, t_jlck));
 
 	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
-		fs_info(sdp, "jid=%u: Acquiring the transaction lock...\n",
-			jd->jd_jid);
+		mutex_lock(&sdp->sd_freeze_mutex);
 
-		/* Acquire a shared hold on the freeze lock */
-
-		error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
-		if (error)
+		if (test_bit(SDF_FROZEN, &sdp->sd_flags)) {
+			mutex_unlock(&sdp->sd_freeze_mutex);
+			fs_warn(sdp, "jid=%u: Can't replay: filesystem "
+				"is frozen\n", jd->jd_jid);
 			goto fail_gunlock_ji;
+		}
 
 		if (test_bit(SDF_RORECOVERY, &sdp->sd_flags)) {
 			ro = 1;
@@ -496,7 +496,7 @@ void gfs2_recover_func(struct work_struct *work)
 			fs_warn(sdp, "jid=%u: Can't replay: read-only block "
 				"device\n", jd->jd_jid);
 			error = -EROFS;
-			goto fail_gunlock_thaw;
+			goto fail_gunlock_nofreeze;
 		}
 
 		t_tlck = ktime_get();
@@ -514,7 +514,7 @@ void gfs2_recover_func(struct work_struct *work)
 			lops_after_scan(jd, error, pass);
 			if (error) {
 				up_read(&sdp->sd_log_flush_lock);
-				goto fail_gunlock_thaw;
+				goto fail_gunlock_nofreeze;
 			}
 		}
 
@@ -522,7 +522,7 @@ void gfs2_recover_func(struct work_struct *work)
 		clean_journal(jd, &head);
 		up_read(&sdp->sd_log_flush_lock);
 
-		gfs2_freeze_unlock(&thaw_gh);
+		mutex_unlock(&sdp->sd_freeze_mutex);
 		t_rep = ktime_get();
 		fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
 			"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
@@ -543,8 +543,8 @@ void gfs2_recover_func(struct work_struct *work)
 	fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
 	goto done;
 
-fail_gunlock_thaw:
-	gfs2_freeze_unlock(&thaw_gh);
+fail_gunlock_nofreeze:
+	mutex_unlock(&sdp->sd_freeze_mutex);
 fail_gunlock_ji:
 	if (jlocked) {
 		gfs2_glock_dq_uninit(&ji_gh);
@@ -2584,8 +2584,8 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
 
 	gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
 	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
-	rgrp_unlock_local(rgd);
 	be32_add_cpu(&rgd->rd_rgl->rl_unlinked, -1);
+	rgrp_unlock_local(rgd);
 
 	gfs2_statfs_change(sdp, 0, +1, -1);
 	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
fs/gfs2/super.c (215 changed lines)
@@ -332,7 +332,12 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 	struct lfcc *lfcc;
 	LIST_HEAD(list);
 	struct gfs2_log_header_host lh;
-	int error;
+	int error, error2;
 
+	/*
+	 * Grab all the journal glocks in SH mode. We are *probably* doing
+	 * that to prevent recovery.
+	 */
+
 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
@@ -349,11 +354,13 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 		list_add(&lfcc->list, &list);
 	}
 
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
 				   LM_FLAG_NOEXP | GL_NOPID,
 				   &sdp->sd_freeze_gh);
 	if (error)
-		goto out;
+		goto relock_shared;
 
 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 		error = gfs2_jdesc_check(jd);
@@ -368,8 +375,14 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 		}
 	}
 
-	if (error)
-		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+	if (!error)
+		goto out; /* success */
+
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
+relock_shared:
+	error2 = gfs2_freeze_lock_shared(sdp);
+	gfs2_assert_withdraw(sdp, !error2);
 
 out:
 	while (!list_empty(&list)) {
@@ -463,7 +476,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
  * @flags: The type of dirty
  *
  * Unfortunately it can be called under any combination of inode
- * glock and transaction lock, so we have to check carefully.
+ * glock and freeze glock, so we have to check carefully.
  *
  * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
@@ -615,6 +628,8 @@ static void gfs2_put_super(struct super_block *sb)
 
 	/* Release stuff */
 
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
 	iput(sdp->sd_jindex);
 	iput(sdp->sd_statfs_inode);
 	iput(sdp->sd_rindex);
@@ -669,59 +684,109 @@ static int gfs2_sync_fs(struct super_block *sb, int wait)
 	return sdp->sd_log_error;
 }
 
+static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
+{
+	struct super_block *sb = sdp->sd_vfs;
+	int error;
+
+	error = freeze_super(sb);
+	if (error)
+		return error;
+
+	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+			       GFS2_LFC_FREEZE_GO_SYNC);
+		if (gfs2_withdrawn(sdp)) {
+			thaw_super(sb);
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int gfs2_do_thaw(struct gfs2_sbd *sdp)
+{
+	struct super_block *sb = sdp->sd_vfs;
+	int error;
+
+	error = gfs2_freeze_lock_shared(sdp);
+	if (error)
+		goto fail;
+	error = thaw_super(sb);
+	if (!error)
+		return 0;
+
+fail:
+	fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
+	gfs2_assert_withdraw(sdp, 0);
+	return error;
+}
+
 void gfs2_freeze_func(struct work_struct *work)
 {
-	int error;
-	struct gfs2_holder freeze_gh;
 	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
 	struct super_block *sb = sdp->sd_vfs;
+	int error;
 
-	atomic_inc(&sb->s_active);
-	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
-	if (error) {
-		gfs2_assert_withdraw(sdp, 0);
-	} else {
-		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
-		error = thaw_super(sb);
-		if (error) {
-			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
-				error);
-			gfs2_assert_withdraw(sdp, 0);
-		}
-		gfs2_freeze_unlock(&freeze_gh);
-	}
+	mutex_lock(&sdp->sd_freeze_mutex);
+	error = -EBUSY;
+	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
+		goto freeze_failed;
+
+	error = gfs2_freeze_locally(sdp);
+	if (error)
+		goto freeze_failed;
+
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+	set_bit(SDF_FROZEN, &sdp->sd_flags);
+
+	error = gfs2_do_thaw(sdp);
+	if (error)
+		goto out;
+
+	clear_bit(SDF_FROZEN, &sdp->sd_flags);
+	goto out;
+
+freeze_failed:
+	fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);
+
+out:
+	mutex_unlock(&sdp->sd_freeze_mutex);
 	deactivate_super(sb);
-	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
-	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
 	return;
 }
 
 /**
- * gfs2_freeze - prevent further writes to the filesystem
+ * gfs2_freeze_super - prevent further writes to the filesystem
  * @sb: the VFS structure for the filesystem
  *
  */
 
-static int gfs2_freeze(struct super_block *sb)
+static int gfs2_freeze_super(struct super_block *sb)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	int error;
 
-	mutex_lock(&sdp->sd_freeze_mutex);
-	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
-		error = -EBUSY;
+	if (!mutex_trylock(&sdp->sd_freeze_mutex))
+		return -EBUSY;
+	error = -EBUSY;
+	if (test_bit(SDF_FROZEN, &sdp->sd_flags))
 		goto out;
-	}
 
 	for (;;) {
-		if (gfs2_withdrawn(sdp)) {
-			error = -EINVAL;
+		error = gfs2_freeze_locally(sdp);
+		if (error) {
+			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
+				error);
 			goto out;
 		}
 
 		error = gfs2_lock_fs_check_clean(sdp);
 		if (!error)
-			break;
+			break; /* success */
 
+		error = gfs2_do_thaw(sdp);
+		if (error)
+			goto out;
 		if (error == -EBUSY)
 			fs_err(sdp, "waiting for recovery before freeze\n");
@@ -735,32 +800,58 @@ static int gfs2_freeze(struct super_block *sb)
 		fs_err(sdp, "retrying...\n");
 		msleep(1000);
 	}
-	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
+
 out:
+	if (!error) {
+		set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
+		set_bit(SDF_FROZEN, &sdp->sd_flags);
+	}
 	mutex_unlock(&sdp->sd_freeze_mutex);
 	return error;
 }
 
 /**
- * gfs2_unfreeze - reallow writes to the filesystem
+ * gfs2_thaw_super - reallow writes to the filesystem
  * @sb: the VFS structure for the filesystem
  *
 */
 
-static int gfs2_unfreeze(struct super_block *sb)
+static int gfs2_thaw_super(struct super_block *sb)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
+	int error;
 
+	if (!mutex_trylock(&sdp->sd_freeze_mutex))
+		return -EBUSY;
+	error = -EINVAL;
+	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
+		goto out;
+
+	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
+	error = gfs2_do_thaw(sdp);
+
+	if (!error) {
+		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
+		clear_bit(SDF_FROZEN, &sdp->sd_flags);
+	}
+out:
+	mutex_unlock(&sdp->sd_freeze_mutex);
+	return error;
+}
+
+void gfs2_thaw_freeze_initiator(struct super_block *sb)
+{
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+
 	mutex_lock(&sdp->sd_freeze_mutex);
-	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
-	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
-		mutex_unlock(&sdp->sd_freeze_mutex);
-		return -EINVAL;
-	}
+	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
		goto out;
 
 	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+
+out:
 	mutex_unlock(&sdp->sd_freeze_mutex);
-	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
 }
 
 /**
@@ -1004,7 +1095,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
 {
 	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
 	struct gfs2_args *args = &sdp->sd_args;
-	int val;
+	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
 
+	spin_lock(&sdp->sd_tune.gt_spin);
+	logd_secs = sdp->sd_tune.gt_logd_secs;
+	quota_quantum = sdp->sd_tune.gt_quota_quantum;
+	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
+	statfs_slow = sdp->sd_tune.gt_statfs_slow;
+	spin_unlock(&sdp->sd_tune.gt_spin);
+
 	if (is_ancestor(root, sdp->sd_master_dir))
 		seq_puts(s, ",meta");
@@ -1059,17 +1157,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
 	}
 	if (args->ar_discard)
 		seq_puts(s, ",discard");
-	val = sdp->sd_tune.gt_logd_secs;
-	if (val != 30)
-		seq_printf(s, ",commit=%d", val);
-	val = sdp->sd_tune.gt_statfs_quantum;
-	if (val != 30)
-		seq_printf(s, ",statfs_quantum=%d", val);
-	else if (sdp->sd_tune.gt_statfs_slow)
+	if (logd_secs != 30)
+		seq_printf(s, ",commit=%d", logd_secs);
+	if (statfs_quantum != 30)
+		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
+	else if (statfs_slow)
 		seq_puts(s, ",statfs_quantum=0");
-	val = sdp->sd_tune.gt_quota_quantum;
-	if (val != 60)
-		seq_printf(s, ",quota_quantum=%d", val);
+	if (quota_quantum != 60)
+		seq_printf(s, ",quota_quantum=%d", quota_quantum);
 	if (args->ar_statfs_percent)
 		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
 	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
@@ -1131,9 +1226,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
 		return -EIO;
 	}
 
-	error = gfs2_rindex_update(sdp);
-	if (error)
-		return error;
+	gfs2_rindex_update(sdp);
 
 	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
 	if (error)
@@ -1334,9 +1427,6 @@ static int evict_unlinked_inode(struct inode *inode)
 		goto out;
 	}
 
-	if (ip->i_gl)
-		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
-
 	/*
 	 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
 	 * can get called to recreate it, or even gfs2_inode_lookup() if the
@@ -1350,6 +1440,9 @@ static int evict_unlinked_inode(struct inode *inode)
 	 */
 
 	ret = gfs2_dinode_dealloc(ip);
+	if (!ret && ip->i_gl)
+		gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
+
 out:
 	return ret;
 }
@@ -1528,8 +1621,8 @@ const struct super_operations gfs2_super_ops = {
 	.evict_inode = gfs2_evict_inode,
 	.put_super = gfs2_put_super,
 	.sync_fs = gfs2_sync_fs,
-	.freeze_super = gfs2_freeze,
-	.thaw_super = gfs2_unfreeze,
+	.freeze_super = gfs2_freeze_super,
+	.thaw_super = gfs2_thaw_super,
 	.statfs = gfs2_statfs,
 	.drop_inode = gfs2_drop_inode,
 	.show_options = gfs2_show_options,
@@ -46,6 +46,7 @@ extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
 extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
 extern int gfs2_statfs_sync(struct super_block *sb, int type);
 extern void gfs2_freeze_func(struct work_struct *work);
+extern void gfs2_thaw_freeze_initiator(struct super_block *sb);
 
 extern void free_local_statfs_inodes(struct gfs2_sbd *sdp);
 extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
@@ -82,6 +82,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
 		     "RO Recovery: %d\n"
 		     "Skip DLM Unlock: %d\n"
 		     "Force AIL Flush: %d\n"
+		     "FS Freeze Initiator: %d\n"
 		     "FS Frozen: %d\n"
 		     "Withdrawing: %d\n"
 		     "Withdraw In Prog: %d\n"
@@ -111,7 +112,8 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
 		     test_bit(SDF_RORECOVERY, &f),
 		     test_bit(SDF_SKIP_DLM_UNLOCK, &f),
 		     test_bit(SDF_FORCE_AIL_FLUSH, &f),
-		     test_bit(SDF_FS_FROZEN, &f),
+		     test_bit(SDF_FREEZE_INITIATOR, &f),
+		     test_bit(SDF_FROZEN, &f),
 		     test_bit(SDF_WITHDRAWING, &f),
 		     test_bit(SDF_WITHDRAW_IN_PROG, &f),
 		     test_bit(SDF_REMOTE_WITHDRAW, &f),
@@ -233,7 +233,6 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 	struct gfs2_bufdata *bd;
 	struct gfs2_meta_header *mh;
 	struct gfs2_trans *tr = current->journal_info;
-	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 
 	lock_buffer(bh);
 	if (buffer_pinned(bh)) {
@@ -267,7 +266,7 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 			(unsigned long long)bd->bd_bh->b_blocknr);
 		BUG();
 	}
-	if (unlikely(state == SFS_FROZEN)) {
+	if (unlikely(test_bit(SDF_FROZEN, &sdp->sd_flags))) {
 		fs_info(sdp, "GFS2:adding buf while frozen\n");
 		gfs2_assert_withdraw(sdp, 0);
 	}
@@ -93,21 +93,18 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 }
 
 /**
- * gfs2_freeze_lock - hold the freeze glock
+ * gfs2_freeze_lock_shared - hold the freeze glock
  * @sdp: the superblock
- * @freeze_gh: pointer to the requested holder
- * @caller_flags: any additional flags needed by the caller
 */
-int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
-		     int caller_flags)
+int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
 {
-	int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
 	int error;
 
-	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
-				   freeze_gh);
-	if (error && error != GLR_TRYFAILED)
-		fs_err(sdp, "can't lock the freeze lock: %d\n", error);
+	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+				   LM_FLAG_NOEXP | GL_EXACT,
+				   &sdp->sd_freeze_gh);
+	if (error)
+		fs_err(sdp, "can't lock the freeze glock: %d\n", error);
 	return error;
 }
 
@@ -124,7 +121,6 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
 	struct gfs2_inode *ip;
 	struct gfs2_glock *i_gl;
 	u64 no_formal_ino;
-	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 	int ret = 0;
 	int tries;
 
@@ -152,24 +148,18 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
 	 */
 	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 	if (!sb_rdonly(sdp->sd_vfs)) {
-		struct gfs2_holder freeze_gh;
+		bool locked = mutex_trylock(&sdp->sd_freeze_mutex);
 
+		gfs2_make_fs_ro(sdp);
+
+		if (locked)
+			mutex_unlock(&sdp->sd_freeze_mutex);
+
-		gfs2_holder_mark_uninitialized(&freeze_gh);
-		if (sdp->sd_freeze_gl &&
-		    !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
-			ret = gfs2_freeze_lock(sdp, &freeze_gh,
-				       log_write_allowed ? 0 : LM_FLAG_TRY);
-			if (ret == GLR_TRYFAILED)
-				ret = 0;
-		}
-		if (!ret)
-			gfs2_make_fs_ro(sdp);
 		/*
 		 * Dequeue any pending non-system glock holders that can no
 		 * longer be granted because the file system is withdrawn.
 		 */
 		gfs2_gl_dq_holders(sdp);
-		gfs2_freeze_unlock(&freeze_gh);
 	}
 
 	if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
@@ -187,15 +177,8 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
 	}
 	sdp->sd_jinode_gh.gh_flags |= GL_NOCACHE;
 	gfs2_glock_dq(&sdp->sd_jinode_gh);
-	if (test_bit(SDF_FS_FROZEN, &sdp->sd_flags)) {
-		/* Make sure gfs2_unfreeze works if partially-frozen */
-		flush_work(&sdp->sd_freeze_work);
-		atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
-		thaw_super(sdp->sd_vfs);
-	} else {
-		wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE,
-			    TASK_UNINTERRUPTIBLE);
-	}
+	gfs2_thaw_freeze_initiator(sdp->sd_vfs);
+	wait_on_bit(&i_gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
 
 	/*
 	 * holder_uninit to force glock_put, to force dlm to let go
@@ -149,8 +149,7 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
 
 extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 			       bool verbose);
-extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
-			    struct gfs2_holder *freeze_gh, int caller_flags);
+extern int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
 extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
 
 #define gfs2_io_error(sdp) \