gfs2 fixes
- Clean up the allocation of glocks that have an address space attached.
- Quota locking fix and quota iomap conversion.
- Fix the FITRIM error reporting.
- Some list iterator cleanups.

-----BEGIN PGP SIGNATURE-----

iQJIBAABCAAyFiEEJZs3krPW0xkhLMTc1b+f6wMTZToFAmKNMukUHGFncnVlbmJh
QHJlZGhhdC5jb20ACgkQ1b+f6wMTZTqKig/8DV+EjY6hLX7Olq5EL+TgPvUJ6fyy
UQMRLL0wZerC9ArIv0yGTAtSpU6mDJTI4TbNS69LnQWfNYiP3WhZg1mrO4vEAP7o
1h1yE4MCIKV0oDRzENqshzhaKFcvCJvcoY8ToLWYRgwgtuCZ9GLlQ8v38j+yZmQk
hD+Z5LWNaQ51L8cl2A+yDfw8EFlaQgcJfcU3GUPdo8WDj+BXzTM0gI6Awrrwtg16
y0NyEJiCRn5Cx4h/HBhqoMrOIfz+wWgtYov2OQowlVGzgDsNQ7TG7a4qKCOKEBww
bZYVlet6zUt7csAweTcYXbCzL075dYOEiJ25tIpn2WXtDrwjTNNO3e78UXqy85Fb
PPp4iESAuvqvQwByNazH4Nc0sJdDvKto8cDpvenoJtr6cEDBqSBAibFmojGde4e1
X7VKBSyREnkjZ+FOr24wYTD0ztkfs0M91Ax5Ha22DQOmtmt1XNIEynGQYT+VDqJL
LDnvObANIZ6h3+bKXB/ddBhCKkV3POI1VojWNwywOD2yryKVVVFu51S+dbLaeyaz
W4hz6pqTl1fnFwWv9WPpYS/0kShQrHndhK8eCBZW32+0MpNbDlLfnjSwE7PoO4gV
ZzqjfJYDYeRW5rq0CSIvbXvNy5EH9Av4fjuZ2/JOQRDG5LHa5Ov+5Yk+XrWcQ8fN
duiMl/gvm8DKMUU=
=7hCN
-----END PGP SIGNATURE-----

Merge tag 'gfs2-v5.18-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Clean up the allocation of glocks that have an address space attached

 - Quota locking fix and quota iomap conversion

 - Fix the FITRIM error reporting

 - Some list iterator cleanups

* tag 'gfs2-v5.18-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: Convert function bh_get to use iomap
  gfs2: use i_lock spin_lock for inode qadata
  gfs2: Return more useful errors from gfs2_rgrp_send_discards()
  gfs2: Use container_of() for gfs2_glock(aspace)
  gfs2: Explain some direct I/O oddities
  gfs2: replace 'found' with dedicated list iterator variable
commit 7208c9842c
fs/gfs2/file.c
@@ -840,6 +840,7 @@ retry:
 	pagefault_enable();
 	if (ret <= 0 && ret != -EFAULT)
 		goto out_unlock;
+	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
 	if (ret > 0)
 		read = ret;
 
@@ -854,6 +855,7 @@ out_unlock:
 	gfs2_glock_dq(gh);
 out_uninit:
 	gfs2_holder_uninit(gh);
+	/* User space doesn't expect partial success. */
 	if (ret < 0)
 		return ret;
 	return read;
@@ -906,6 +908,7 @@ retry:
 		if (ret != -EFAULT)
 			goto out_unlock;
 	}
+	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
 	if (ret > 0)
 		written = ret;
 
@@ -920,6 +923,7 @@ out_unlock:
 	gfs2_glock_dq(gh);
 out_uninit:
 	gfs2_holder_uninit(gh);
+	/* User space doesn't expect partial success. */
 	if (ret < 0)
 		return ret;
 	return written;
fs/gfs2/glock.c
@@ -127,9 +127,11 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 
 	kfree(gl->gl_lksb.sb_lvbptr);
-	if (gl->gl_ops->go_flags & GLOF_ASPACE)
-		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
-	else
+	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+		struct gfs2_glock_aspace *gla =
+			container_of(gl, struct gfs2_glock_aspace, glock);
+		kmem_cache_free(gfs2_glock_aspace_cachep, gla);
+	} else
 		kmem_cache_free(gfs2_glock_cachep, gl);
 }
 
@@ -1159,7 +1161,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		  .ln_sbd = sdp };
 	struct gfs2_glock *gl, *tmp;
 	struct address_space *mapping;
-	struct kmem_cache *cachep;
 	int ret = 0;
 
 	gl = find_insert_glock(&name, NULL);
@@ -1170,20 +1171,24 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (!create)
 		return -ENOENT;
 
-	if (glops->go_flags & GLOF_ASPACE)
-		cachep = gfs2_glock_aspace_cachep;
-	else
-		cachep = gfs2_glock_cachep;
-	gl = kmem_cache_alloc(cachep, GFP_NOFS);
-	if (!gl)
-		return -ENOMEM;
-
+	if (glops->go_flags & GLOF_ASPACE) {
+		struct gfs2_glock_aspace *gla =
+			kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
+		if (!gla)
+			return -ENOMEM;
+		gl = &gla->glock;
+	} else {
+		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
+		if (!gl)
+			return -ENOMEM;
+	}
 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+	gl->gl_ops = glops;
 
 	if (glops->go_flags & GLOF_LVB) {
 		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
 		if (!gl->gl_lksb.sb_lvbptr) {
-			kmem_cache_free(cachep, gl);
+			gfs2_glock_dealloc(&gl->gl_rcu);
 			return -ENOMEM;
 		}
 	}
@@ -1197,7 +1202,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_target = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
-	gl->gl_ops = glops;
 	gl->gl_dstamp = 0;
 	preempt_disable();
 	/* We use the global stats to estimate the initial per-glock stats */
@@ -1234,8 +1238,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	*glp = tmp;
 
 out_free:
-	kfree(gl->gl_lksb.sb_lvbptr);
-	kmem_cache_free(cachep, gl);
+	gfs2_glock_dealloc(&gl->gl_rcu);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 		wake_up(&sdp->sd_glock_wait);
 
fs/gfs2/glock.h
@@ -138,6 +138,11 @@ struct lm_lockops {
 	const match_table_t *lm_tokens;
 };
 
+struct gfs2_glock_aspace {
+	struct gfs2_glock glock;
+	struct address_space mapping;
+};
+
 extern struct workqueue_struct *gfs2_delete_workqueue;
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
@@ -179,8 +184,11 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
 
 static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 {
-	if (gl->gl_ops->go_flags & GLOF_ASPACE)
-		return (struct address_space *)(gl + 1);
+	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+		struct gfs2_glock_aspace *gla =
+			container_of(gl, struct gfs2_glock_aspace, glock);
+		return &gla->mapping;
+	}
 	return NULL;
 }
 
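The glock.h and glock.c changes above replace the old pointer arithmetic — `(struct address_space *)(gl + 1)` on one side and `((struct gfs2_glock *)mapping) - 1` on the other — with a named `struct gfs2_glock_aspace` and `container_of()`. A minimal userspace sketch of that pattern follows; the structs are simplified stand-ins (not the kernel's) and `container_of` is defined locally so the demo builds standalone:

/*
 * Embed two objects in one named struct and recover the container
 * type-safely from a pointer to either member, instead of relying on
 * the two allocations happening to sit back to back in memory.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct glock { int id; };
struct mapping { int flags; };

struct glock_aspace {
	struct glock glock;
	struct mapping mapping;
};

int main(void)
{
	struct glock_aspace gla = { .glock = { .id = 42 } };
	struct glock *gl = &gla.glock;

	/* Recover the embedding struct from the inner member... */
	struct glock_aspace *back = container_of(gl, struct glock_aspace, glock);

	/* ...and hop from the glock to its sibling mapping. */
	struct mapping *m = &back->mapping;
	m->flags = 1;

	printf("id=%d flags=%d same=%d\n",
	       back->glock.id, m->flags, back == &gla);
	return 0;
}

Making the layout explicit also lets the slab cache size itself from `sizeof(struct gfs2_glock_aspace)` rather than a hand-summed `sizeof(a) + sizeof(b)`, as the main.c hunk below does.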
fs/gfs2/main.c
@@ -62,11 +62,10 @@ static void gfs2_init_glock_once(void *foo)
 
 static void gfs2_init_gl_aspace_once(void *foo)
 {
-	struct gfs2_glock *gl = foo;
-	struct address_space *mapping = (struct address_space *)(gl + 1);
+	struct gfs2_glock_aspace *gla = foo;
 
-	gfs2_init_glock_once(gl);
-	address_space_init_once(mapping);
+	gfs2_init_glock_once(&gla->glock);
+	address_space_init_once(&gla->mapping);
 }
 
 /**
@@ -104,8 +103,7 @@ static int __init init_gfs2_fs(void)
 		goto fail_cachep1;
 
 	gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
-					sizeof(struct gfs2_glock) +
-					sizeof(struct address_space),
+					sizeof(struct gfs2_glock_aspace),
 					0, 0, gfs2_init_gl_aspace_once);
 
 	if (!gfs2_glock_aspace_cachep)
fs/gfs2/meta_io.h
@@ -40,9 +40,11 @@ extern const struct address_space_operations gfs2_rgrp_aops;
 static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
 {
 	struct inode *inode = mapping->host;
-	if (mapping->a_ops == &gfs2_meta_aops)
-		return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd;
-	else if (mapping->a_ops == &gfs2_rgrp_aops)
+	if (mapping->a_ops == &gfs2_meta_aops) {
+		struct gfs2_glock_aspace *gla =
+			container_of(mapping, struct gfs2_glock_aspace, mapping);
+		return gla->glock.gl_name.ln_sbd;
+	} else if (mapping->a_ops == &gfs2_rgrp_aops)
 		return container_of(mapping, struct gfs2_sbd, sd_aspace);
 	else
 		return inode->i_sb->s_fs_info;
fs/gfs2/quota.c
@@ -365,11 +365,12 @@ static void slot_put(struct gfs2_quota_data *qd)
 static int bh_get(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
-	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
+	struct inode *inode = sdp->sd_qc_inode;
+	struct gfs2_inode *ip = GFS2_I(inode);
 	unsigned int block, offset;
 	struct buffer_head *bh;
+	struct iomap iomap = { };
 	int error;
-	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 
 	mutex_lock(&sdp->sd_quota_mutex);
 
@@ -381,11 +382,17 @@
 	block = qd->qd_slot / sdp->sd_qc_per_block;
 	offset = qd->qd_slot % sdp->sd_qc_per_block;
 
-	bh_map.b_size = BIT(ip->i_inode.i_blkbits);
-	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
+	error = gfs2_iomap_get(inode,
+			       (loff_t)block << inode->i_blkbits,
+			       i_blocksize(inode), &iomap);
 	if (error)
 		goto fail;
-	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
+	error = -ENOENT;
+	if (iomap.type != IOMAP_MAPPED)
+		goto fail;
+
+	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
+			       DIO_WAIT, 0, &bh);
 	if (error)
 		goto fail;
 	error = -EIO;
@@ -443,9 +450,8 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 
 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 {
-	struct gfs2_quota_data *qd = NULL;
+	struct gfs2_quota_data *qd = NULL, *iter;
 	int error;
-	int found = 0;
 
 	*qdp = NULL;
 
@@ -454,15 +460,13 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 
 	spin_lock(&qd_lock);
 
-	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
-		if (found)
+	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
+		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
+			qd = iter;
 			break;
+		}
 	}
 
-	if (!found)
-		qd = NULL;
-
 	spin_unlock(&qd_lock);
 
 	if (qd) {
@@ -531,34 +535,42 @@ static void qdsb_put(struct gfs2_quota_data *qd)
  */
 int gfs2_qa_get(struct gfs2_inode *ip)
 {
-	int error = 0;
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	struct inode *inode = &ip->i_inode;
 
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return 0;
 
-	down_write(&ip->i_rw_mutex);
+	spin_lock(&inode->i_lock);
 	if (ip->i_qadata == NULL) {
-		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
-		if (!ip->i_qadata) {
-			error = -ENOMEM;
-			goto out;
-		}
+		struct gfs2_qadata *tmp;
+
+		spin_unlock(&inode->i_lock);
+		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
+		if (!tmp)
+			return -ENOMEM;
+
+		spin_lock(&inode->i_lock);
+		if (ip->i_qadata == NULL)
+			ip->i_qadata = tmp;
+		else
+			kmem_cache_free(gfs2_qadata_cachep, tmp);
 	}
 	ip->i_qadata->qa_ref++;
-out:
-	up_write(&ip->i_rw_mutex);
-	return error;
+	spin_unlock(&inode->i_lock);
+	return 0;
 }
 
 void gfs2_qa_put(struct gfs2_inode *ip)
 {
-	down_write(&ip->i_rw_mutex);
+	struct inode *inode = &ip->i_inode;
+
+	spin_lock(&inode->i_lock);
 	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 		ip->i_qadata = NULL;
 	}
-	up_write(&ip->i_rw_mutex);
+	spin_unlock(&inode->i_lock);
 }
 
 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
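The gfs2_qa_get() rework above trades i_rw_mutex for the inode's i_lock spinlock. Since a spinlock cannot be held across a sleeping GFP_NOFS allocation, the patch drops the lock, allocates, retakes the lock, and re-checks, freeing the extra object if another task raced in. A userspace sketch of that shape, with a pthread mutex standing in for i_lock and illustrative names throughout (not the kernel API):

/*
 * Drop-allocate-retake-recheck: allocate outside the lock, then
 * re-check under the lock and discard the allocation on a lost race.
 * Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct qadata { int qa_ref; };

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static struct qadata *i_qadata;

static int qa_get(void)
{
	pthread_mutex_lock(&i_lock);
	if (i_qadata == NULL) {
		struct qadata *tmp;

		/* Drop the lock: the allocation may sleep. */
		pthread_mutex_unlock(&i_lock);
		tmp = calloc(1, sizeof(*tmp));
		if (!tmp)
			return -1;

		/* Retake and re-check: another task may have won the race. */
		pthread_mutex_lock(&i_lock);
		if (i_qadata == NULL)
			i_qadata = tmp;
		else
			free(tmp);
	}
	i_qadata->qa_ref++;
	pthread_mutex_unlock(&i_lock);
	return 0;
}

int main(void)
{
	qa_get();
	qa_get();
	printf("qa_ref=%d\n", i_qadata->qa_ref);	/* prints qa_ref=2 */
	return 0;
}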
fs/gfs2/recovery.c
@@ -55,17 +55,16 @@ int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
 int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
 {
 	struct list_head *head = &jd->jd_revoke_list;
-	struct gfs2_revoke_replay *rr;
-	int found = 0;
+	struct gfs2_revoke_replay *rr = NULL, *iter;
 
-	list_for_each_entry(rr, head, rr_list) {
-		if (rr->rr_blkno == blkno) {
-			found = 1;
+	list_for_each_entry(iter, head, rr_list) {
+		if (iter->rr_blkno == blkno) {
+			rr = iter;
 			break;
 		}
 	}
 
-	if (found) {
+	if (rr) {
 		rr->rr_where = where;
 		return 0;
 	}
@@ -83,18 +82,17 @@ int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
 
 int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
 {
-	struct gfs2_revoke_replay *rr;
+	struct gfs2_revoke_replay *rr = NULL, *iter;
 	int wrap, a, b, revoke;
-	int found = 0;
 
-	list_for_each_entry(rr, &jd->jd_revoke_list, rr_list) {
-		if (rr->rr_blkno == blkno) {
-			found = 1;
+	list_for_each_entry(iter, &jd->jd_revoke_list, rr_list) {
+		if (iter->rr_blkno == blkno) {
+			rr = iter;
 			break;
 		}
 	}
 
-	if (!found)
+	if (!rr)
 		return 0;
 
 	wrap = (rr->rr_where < jd->jd_replay_tail);
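The 'found' removals above follow the tree-wide list-iterator cleanup: a list_for_each_entry() cursor that runs off the end points at offset-adjusted list-head storage, not a real entry, so it must never be dereferenced after the loop. Publishing the hit through a separate NULL-initialized pointer makes the post-loop state self-describing. A standalone sketch of the same idiom, with a plain singly linked list standing in for the kernel's list_head:

/*
 * Dedicated iterator pattern: the cursor stays loop-local, and the
 * result pointer is NULL unless the loop explicitly published a hit,
 * so no separate 'found' flag is needed.
 */
#include <stddef.h>
#include <stdio.h>

struct revoke_replay {
	unsigned long long rr_blkno;
	struct revoke_replay *next;
};

static struct revoke_replay *revoke_find(struct revoke_replay *head,
					 unsigned long long blkno)
{
	struct revoke_replay *rr = NULL, *iter;

	for (iter = head; iter; iter = iter->next) {
		if (iter->rr_blkno == blkno) {
			rr = iter;	/* publish the hit... */
			break;
		}
	}
	return rr;	/* ...NULL unambiguously means "not found" */
}

int main(void)
{
	struct revoke_replay c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	printf("found 2: %s\n", revoke_find(&a, 2) ? "yes" : "no");
	printf("found 9: %s\n", revoke_find(&a, 9) ? "yes" : "no");
	return 0;
}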
fs/gfs2/rgrp.c
@@ -1315,7 +1315,7 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 	u64 blk;
 	sector_t start = 0;
 	sector_t nr_blks = 0;
-	int rv;
+	int rv = -EIO;
 	unsigned int x;
 	u32 trimmed = 0;
 	u8 diff;
@@ -1371,7 +1371,7 @@ fail:
 	if (sdp->sd_args.ar_discard)
 		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
 	sdp->sd_args.ar_discard = 0;
-	return -EIO;
+	return rv;
 }
 
 /**