four cifs.ko client fixes

-----BEGIN PGP SIGNATURE-----

iQGzBAABCgAdFiEE6fsu8pdIjtWE/DpLiiy9cAdyT1EFAmbTuKMACgkQiiy9cAdy
T1GsHwwAnrVfxJ+ZiAH0wbfyFcgRLOAePeADcedn4QWQaPbmyjqqQbHfiwRwDa5X
sICpnxCS+3MM9aahA7G4FOZNle/DexmFUODScESmYMfdqt4hMGzGbi9KhA4l7TY8
rcewHNpbAiPW3S0y/VtOBoXXskURMEL6+KCaBwE3u990jimJtCxPie4PQbfI/V6O
4Qjqc8qjryPo70ru4g72h/LfJdaDKxV/JYymDyhhu5/Gf7PPbv0QKZ9hhxhpc6Y4
81IcJ7S4JnLA8V9nrglrbV3ymvOCXNH0UQRHOa4Hc6H7MmrVj1aE5nu0/nfgVaOh
iaaKfuuv6ItDQBWqUg6tHqM8DSPONJkbhuFkXqL/rOmrl7B0G5T1UBlt3ZqNZEy5
bEX1VCqCDQRsr1nUCxC7t5r03teXeNq59nWg/JWBBbLohWLp4Dw4eKW0xlKyo3VT
Oxho3E8DnVXRu8MdTF/OeFJllp71KY3ujt2wm8uu+f5H45vz9mBN0UEUAx6hoh3c
SsxufLuG
=l4NV
-----END PGP SIGNATURE-----

Merge tag 'v6.11-rc5-smb-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull smb client fixes from Steve French:

 - copy_file_range fix

 - two read fixes including read past end of file rc fix and read retry
   crediting fix

 - falloc zero range fix

* tag 'v6.11-rc5-smb-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: Fix FALLOC_FL_ZERO_RANGE to preflush buffered part of target region
  cifs: Fix copy offload to flush destination region
  netfs, cifs: Fix handling of short DIO read
  cifs: Fix lack of credit renegotiation on read retry
This commit is contained in:
commit 6b9ffc4595
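The four fixes all sit behind ordinary VFS entry points, so the affected paths can be exercised without any cifs-specific tooling. The program below is a hypothetical, reproducer-style sketch, not part of the patch set; the path, offsets and sizes are arbitrary. It drives the three interfaces the pull touches against a file on an SMB mount: fallocate(2) with FALLOC_FL_ZERO_RANGE, copy_file_range(2), and an O_DIRECT read that runs past end of file.

/* Hypothetical exerciser for the paths touched by this pull; not part of
 * the kernel patches. Build: cc -D_GNU_SOURCE -o smbpaths smbpaths.c
 * Run with a path on a cifs mount, e.g. ./smbpaths /mnt/share/testfile
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <file-on-cifs-mount>\n", argv[0]);
                return 1;
        }

        char buf[4096];
        memset(buf, 'A', sizeof(buf));

        /* Create a small file with dirty page cache. */
        int fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0644);
        if (fd < 0) { perror("open"); return 1; }
        if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) { perror("write"); return 1; }

        /* 1. FALLOC_FL_ZERO_RANGE over a region that is still only buffered:
         *    the zero-range fix flushes that buffered data to the server first. */
        if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 1024, 1024) < 0)
                perror("fallocate(FALLOC_FL_ZERO_RANGE)");

        /* 2. copy_file_range() within the same file: the copy-offload fix
         *    flushes and invalidates the whole destination region. */
        off64_t src_off = 0, dst_off = 8192;
        if (copy_file_range(fd, &src_off, fd, &dst_off, 4096, 0) < 0)
                perror("copy_file_range");

        fsync(fd);
        close(fd);

        /* 3. O_DIRECT read larger than the file: the short-DIO-read fix makes
         *    this return the bytes that exist instead of erroring or retrying
         *    forever. */
        fd = open(argv[1], O_RDONLY | O_DIRECT);
        if (fd < 0) { perror("open(O_DIRECT)"); return 1; }
        void *dbuf;
        if (posix_memalign(&dbuf, 4096, 65536)) { perror("posix_memalign"); return 1; }
        ssize_t n = read(fd, dbuf, 65536);      /* larger than the file */
        printf("O_DIRECT read returned %zd\n", n);
        free(dbuf);
        close(fd);
        return 0;
}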
@@ -306,6 +306,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
                                 break;
                         subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
                         subreq->error = 0;
+                        __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
                         netfs_stat(&netfs_n_rh_download_instead);
                         trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
                         netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
@@ -313,6 +314,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
                         netfs_reset_subreq_iter(rreq, subreq);
                         netfs_read_from_server(rreq, subreq);
                 } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
+                        __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
                         netfs_reset_subreq_iter(rreq, subreq);
                         netfs_rreq_short_read(rreq, subreq);
                 }
@@ -366,7 +368,8 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
                         if (subreq->error || subreq->transferred == 0)
                                 break;
                         transferred += subreq->transferred;
-                        if (subreq->transferred < subreq->len)
+                        if (subreq->transferred < subreq->len ||
+                            test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags))
                                 break;
                 }
 
@@ -501,7 +504,8 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
 
         subreq->error = 0;
         subreq->transferred += transferred_or_error;
-        if (subreq->transferred < subreq->len)
+        if (subreq->transferred < subreq->len &&
+            !test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags))
                 goto incomplete;
 
 complete:
@@ -780,10 +784,13 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
                             TASK_UNINTERRUPTIBLE);
 
                 ret = rreq->error;
-                if (ret == 0 && rreq->submitted < rreq->len &&
-                    rreq->origin != NETFS_DIO_READ) {
-                        trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
-                        ret = -EIO;
+                if (ret == 0) {
+                        if (rreq->origin == NETFS_DIO_READ) {
+                                ret = rreq->transferred;
+                        } else if (rreq->submitted < rreq->len) {
+                                trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+                                ret = -EIO;
+                        }
                 }
         } else {
                 /* If we decrement nr_outstanding to 0, the ref belongs to us. */
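The netfs hunks above change two decisions: a subrequest that transferred less than it asked for no longer counts as incomplete if it hit EOF, and a synchronous direct-I/O read now returns the number of bytes transferred rather than being flagged as a short read. The following standalone userspace model mirrors the field names used in the diff but is otherwise simplified and is not kernel code.

/* Standalone model (userspace, not kernel code) of the completion decision
 * made in netfs_subreq_terminated() and of the final return value chosen in
 * netfs_begin_read() after this change. */
#include <stdbool.h>
#include <stdio.h>

struct model_subreq {
        size_t len;             /* bytes this subrequest was asked to read */
        size_t transferred;     /* bytes actually obtained so far */
        bool hit_eof;           /* NETFS_SREQ_HIT_EOF analogue */
};

/* Old rule: anything shorter than requested is "incomplete" and is retried,
 * which could loop on a DIO read that legitimately crossed EOF.
 * New rule: a short transfer that hit EOF counts as complete. */
static bool subreq_is_incomplete(const struct model_subreq *s)
{
        return s->transferred < s->len && !s->hit_eof;
}

/* New return-value selection for a synchronous request: a direct-I/O read
 * reports how much it actually read; a buffered read still treats a short
 * submission as -EIO. */
static long begin_read_result(bool dio, size_t submitted, size_t len,
                              size_t transferred, int error)
{
        if (error)
                return error;
        if (dio)
                return (long)transferred;
        if (submitted < len)
                return -5;      /* -EIO */
        return 0;
}

int main(void)
{
        struct model_subreq s = { .len = 65536, .transferred = 12288, .hit_eof = true };

        printf("incomplete? %s\n", subreq_is_incomplete(&s) ? "yes" : "no");
        printf("DIO read result: %ld\n",
               begin_read_result(true, s.len, s.len, s.transferred, 0));
        return 0;
}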
@@ -1341,7 +1341,6 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
         struct cifsFileInfo *smb_file_target;
         struct cifs_tcon *src_tcon;
         struct cifs_tcon *target_tcon;
-        unsigned long long destend, fstart, fend;
         ssize_t rc;
 
         cifs_dbg(FYI, "copychunk range\n");
@@ -1391,25 +1390,13 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
                         goto unlock;
         }
 
-        destend = destoff + len - 1;
-
-        /* Flush the folios at either end of the destination range to prevent
-         * accidental loss of dirty data outside of the range.
+        /* Flush and invalidate all the folios in the destination region. If
+         * the copy was successful, then some of the flush is extra overhead,
+         * but we need to allow for the copy failing in some way (eg. ENOSPC).
         */
-        fstart = destoff;
-        fend = destend;
-
-        rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
+        rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
        if (rc)
                goto unlock;
-        rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
-        if (rc)
-                goto unlock;
-        if (fend > target_cifsi->netfs.zero_point)
-                target_cifsi->netfs.zero_point = fend + 1;
-
-        /* Discard all the folios that overlap the destination region. */
-        truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
 
         fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
                            i_size_read(target_inode), 0);
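The copy-offload hunks replace the flush of just the first and last folios of the destination (plus a cache truncate) with a single write-back-and-invalidate of the whole destination region, so dirty cached data cannot be lost if the server-side copy fails partway (e.g. ENOSPC). The sketch below is a hypothetical userspace illustration of that situation, not part of the patches; the paths are arbitrary and it assumes both files live on a cifs mount.

/* Hypothetical illustration (not from the patch set): the destination range
 * already has dirty data in the page cache when copy_file_range() triggers a
 * server-side copy. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        if (argc != 3) {
                fprintf(stderr, "usage: %s <src-on-cifs> <dst-on-cifs>\n", argv[0]);
                return 1;
        }

        char block[4096];

        int src = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0644);
        int dst = open(argv[2], O_RDWR | O_CREAT | O_TRUNC, 0644);
        if (src < 0 || dst < 0) { perror("open"); return 1; }

        memset(block, 'S', sizeof(block));
        if (write(src, block, sizeof(block)) != (ssize_t)sizeof(block)) perror("write");
        fsync(src);                             /* source must exist on the server */

        /* Dirty the destination region but do not flush it: this is the cached
         * data that used to be silently truncated away. */
        memset(block, 'D', sizeof(block));
        if (pwrite(dst, block, sizeof(block), 0) != (ssize_t)sizeof(block)) perror("pwrite");

        off64_t in = 0, out = 0;
        ssize_t n = copy_file_range(src, &in, dst, &out, sizeof(block), 0);
        if (n < 0)
                perror("copy_file_range");
        else
                printf("offloaded copy of %zd bytes\n", n);

        /* With the fix, the dirty data is written back before the cache is
         * dropped, so a failed copy can no longer lose it; a read here sees
         * either the copied data or what was flushed to the server. */
        char check[8];
        if (pread(dst, check, sizeof(check), 0) > 0)
                printf("dst starts with '%c'\n", check[0]);

        close(src);
        close(dst);
        return 0;
}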
@@ -1485,6 +1485,7 @@ struct cifs_io_subrequest {
                 struct cifs_io_request *req;
         };
         ssize_t                         got_bytes;
+        size_t                          actual_len;
         unsigned int                    xid;
         int                             result;
         bool                            have_xid;
@@ -111,6 +111,7 @@ static void cifs_issue_write(struct netfs_io_subrequest *subreq)
                         goto fail;
         }
 
+        wdata->actual_len = wdata->subreq.len;
         rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
         if (rc)
                 goto fail;
@@ -153,7 +154,7 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
         struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
         struct TCP_Server_Info *server = req->server;
         struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
-        size_t rsize = 0;
+        size_t rsize;
         int rc;
 
         rdata->xid = get_xid();
@@ -166,8 +167,8 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
                                                 cifs_sb->ctx);
 
 
-        rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
-                                           &rdata->credits);
+        rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
+                                           &rsize, &rdata->credits);
         if (rc) {
                 subreq->error = rc;
                 return false;
@@ -183,7 +184,8 @@ static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
                               server->credits, server->in_flight, 0,
                               cifs_trace_rw_credits_read_submit);
 
-        subreq->len = min_t(size_t, subreq->len, rsize);
+        subreq->len = umin(subreq->len, rsize);
+        rdata->actual_len = subreq->len;
 
 #ifdef CONFIG_CIFS_SMB_DIRECT
         if (server->smbd_conn)
@@ -203,12 +205,39 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
         struct netfs_io_request *rreq = subreq->rreq;
         struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
         struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
         struct TCP_Server_Info *server = req->server;
+        struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
         int rc = 0;
 
         cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
                  __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
                  subreq->transferred, subreq->len);
 
+        if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
+                /*
+                 * As we're issuing a retry, we need to negotiate some new
+                 * credits otherwise the server may reject the op with
+                 * INVALID_PARAMETER. Note, however, we may get back less
+                 * credit than we need to complete the op, in which case, we
+                 * shorten the op and rely on additional rounds of retry.
+                 */
+                size_t rsize = umin(subreq->len - subreq->transferred,
+                                    cifs_sb->ctx->rsize);
+
+                rc = server->ops->wait_mtu_credits(server, rsize, &rdata->actual_len,
+                                                   &rdata->credits);
+                if (rc)
+                        goto out;
+
+                rdata->credits.in_flight_check = 1;
+
+                trace_smb3_rw_credits(rdata->rreq->debug_id,
+                                      rdata->subreq.debug_index,
+                                      rdata->credits.value,
+                                      server->credits, server->in_flight, 0,
+                                      cifs_trace_rw_credits_read_resubmit);
+        }
+
         if (req->cfile->invalidHandle) {
                 do {
                         rc = cifs_reopen_file(req->cfile, true);
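The cifs_req_issue_read() hunk is the heart of the credit-renegotiation fix: a retried read must obtain fresh credits, may be granted fewer than it needs, and then shortens the operation to rdata->actual_len and relies on further retry rounds. The following standalone model (stub "server", arbitrary sizes, not kernel code) shows that shorten-and-loop behaviour.

/* Standalone model (not kernel code) of the retry-crediting idea above:
 * each round renegotiates credits, may be granted less than it wants,
 * shortens the I/O accordingly and relies on further rounds to finish. */
#include <stddef.h>
#include <stdio.h>

#define MAX_RSIZE       (4u * 1024 * 1024)      /* rsize cap, like cifs_sb->ctx->rsize */

/* Stub standing in for a credit grant: pretend the server hands out at most
 * 1 MiB per round, so a large retried read needs several rounds. */
static size_t grant_credits(size_t wanted)
{
        const size_t granted_cap = 1024 * 1024;
        return wanted < granted_cap ? wanted : granted_cap;
}

int main(void)
{
        size_t len = 3u * 1024 * 1024;  /* bytes the subrequest still needs */
        size_t transferred = 0;
        int round = 0;

        while (transferred < len) {
                size_t want = len - transferred;
                if (want > MAX_RSIZE)
                        want = MAX_RSIZE;

                /* Renegotiate: the grant may be smaller than the remainder. */
                size_t actual_len = grant_credits(want);

                /* Issue a read of actual_len bytes (assume it succeeds). */
                transferred += actual_len;
                printf("round %d: asked %zu, granted %zu, total %zu/%zu\n",
                       ++round, want, actual_len, transferred, len);
        }
        return 0;
}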
@@ -301,7 +301,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
                     unsigned int /*enum smb3_rw_credits_trace*/ trace)
 {
         struct cifs_credits *credits = &subreq->credits;
-        int new_val = DIV_ROUND_UP(subreq->subreq.len, SMB2_MAX_BUFFER_SIZE);
+        int new_val = DIV_ROUND_UP(subreq->actual_len, SMB2_MAX_BUFFER_SIZE);
         int scredits, in_flight;
 
         if (!credits->value || credits->value == new_val)
@@ -3237,13 +3237,15 @@ static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
 }
 
 static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
-                            loff_t offset, loff_t len, bool keep_size)
+                            unsigned long long offset, unsigned long long len,
+                            bool keep_size)
 {
         struct cifs_ses *ses = tcon->ses;
         struct inode *inode = file_inode(file);
         struct cifsInodeInfo *cifsi = CIFS_I(inode);
         struct cifsFileInfo *cfile = file->private_data;
-        unsigned long long new_size;
+        struct netfs_inode *ictx = netfs_inode(inode);
+        unsigned long long i_size, new_size, remote_size;
         long rc;
         unsigned int xid;
 
@@ -3255,6 +3257,16 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
         inode_lock(inode);
         filemap_invalidate_lock(inode->i_mapping);
 
+        i_size = i_size_read(inode);
+        remote_size = ictx->remote_i_size;
+        if (offset + len >= remote_size && offset < i_size) {
+                unsigned long long top = umin(offset + len, i_size);
+
+                rc = filemap_write_and_wait_range(inode->i_mapping, offset, top - 1);
+                if (rc < 0)
+                        goto zero_range_exit;
+        }
+
         /*
          * We zero the range through ioctl, so we need remove the page caches
          * first, otherwise the data may be inconsistent with the server.
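smb3_zero_range() now writes back any buffered data in the target range that extends past the server's idea of the file size before asking the server to zero the range. Below is a hypothetical reproducer-style sketch of the triggering situation, not from the patch set; the path and offsets are arbitrary: the file is extended by a buffered write and the zeroed range overlaps that still-unflushed tail.

/* Hypothetical sketch (not from the patch set) of the FALLOC_FL_ZERO_RANGE
 * case the fix covers. Run against a file on a cifs mount. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <file-on-cifs-mount>\n", argv[0]);
                return 1;
        }

        int fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0644);
        if (fd < 0) { perror("open"); return 1; }

        char buf[4096];
        memset(buf, 'X', sizeof(buf));

        /* Establish some data on the server, then extend the file with a
         * buffered write that stays in the local page cache. */
        if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) { perror("write"); return 1; }
        fsync(fd);                              /* server-side size is now 4096 */
        if (pwrite(fd, buf, sizeof(buf), 4096) != (ssize_t)sizeof(buf)) { perror("pwrite"); return 1; }
        /* local i_size is 8192, but the tail only exists client-side */

        /* Zero a range that reaches past the server's EOF into the buffered
         * tail. With the fix, the buffered part is flushed to the server
         * first, so the server-side zeroing operates on the file the client
         * actually sees. */
        if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 2048, 4096) < 0)
                perror("fallocate(FALLOC_FL_ZERO_RANGE)");

        close(fd);
        return 0;
}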
@@ -4507,6 +4507,7 @@ static void
 smb2_readv_callback(struct mid_q_entry *mid)
 {
         struct cifs_io_subrequest *rdata = mid->callback_data;
+        struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
         struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
         struct TCP_Server_Info *server = rdata->server;
         struct smb2_hdr *shdr =
@@ -4529,9 +4530,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
                           "rdata server %p != mid server %p",
                           rdata->server, mid->server);
 
-        cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
+        cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
                  __func__, mid->mid, mid->mid_state, rdata->result,
-                 rdata->subreq.len);
+                 rdata->actual_len, rdata->subreq.len - rdata->subreq.transferred);
 
         switch (mid->mid_state) {
         case MID_RESPONSE_RECEIVED:
@@ -4585,22 +4586,29 @@ smb2_readv_callback(struct mid_q_entry *mid)
                                     rdata->subreq.debug_index,
                                     rdata->xid,
                                     rdata->req->cfile->fid.persistent_fid,
-                                    tcon->tid, tcon->ses->Suid, rdata->subreq.start,
-                                    rdata->subreq.len, rdata->result);
+                                    tcon->tid, tcon->ses->Suid,
+                                    rdata->subreq.start + rdata->subreq.transferred,
+                                    rdata->actual_len,
+                                    rdata->result);
         } else
                 trace_smb3_read_done(rdata->rreq->debug_id,
                                      rdata->subreq.debug_index,
                                      rdata->xid,
                                      rdata->req->cfile->fid.persistent_fid,
                                      tcon->tid, tcon->ses->Suid,
-                                     rdata->subreq.start, rdata->got_bytes);
+                                     rdata->subreq.start + rdata->subreq.transferred,
+                                     rdata->got_bytes);
 
         if (rdata->result == -ENODATA) {
-                /* We may have got an EOF error because fallocate
-                 * failed to enlarge the file.
-                 */
-                if (rdata->subreq.start < rdata->subreq.rreq->i_size)
+                __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
+                rdata->result = 0;
+        } else {
+                if (rdata->got_bytes < rdata->actual_len &&
+                    rdata->subreq.start + rdata->subreq.transferred + rdata->got_bytes ==
+                    ictx->remote_i_size) {
+                        __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
                         rdata->result = 0;
+                }
         }
         trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
                               server->credits, server->in_flight,
@@ -4621,6 +4629,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
 {
         int rc, flags = 0;
         char *buf;
+        struct netfs_io_subrequest *subreq = &rdata->subreq;
         struct smb2_hdr *shdr;
         struct cifs_io_parms io_parms;
         struct smb_rqst rqst = { .rq_iov = rdata->iov,
@@ -4631,15 +4640,15 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
         int credit_request;
 
         cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
-                 __func__, rdata->subreq.start, rdata->subreq.len);
+                 __func__, subreq->start, subreq->len);
 
         if (!rdata->server)
                 rdata->server = cifs_pick_channel(tcon->ses);
 
         io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
         io_parms.server = server = rdata->server;
-        io_parms.offset = rdata->subreq.start;
-        io_parms.length = rdata->subreq.len;
+        io_parms.offset = subreq->start + subreq->transferred;
+        io_parms.length = rdata->actual_len;
         io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
         io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
         io_parms.pid = rdata->req->pid;
@@ -4654,11 +4663,13 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
 
         rdata->iov[0].iov_base = buf;
         rdata->iov[0].iov_len = total_len;
+        rdata->got_bytes = 0;
+        rdata->result = 0;
 
         shdr = (struct smb2_hdr *)buf;
 
         if (rdata->credits.value > 0) {
-                shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->subreq.len,
+                shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->actual_len,
                                                 SMB2_MAX_BUFFER_SIZE));
                 credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
                 if (server->credits >= server->max_credits)
@@ -4682,11 +4693,11 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
         if (rc) {
                 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
                 trace_smb3_read_err(rdata->rreq->debug_id,
-                                    rdata->subreq.debug_index,
+                                    subreq->debug_index,
                                     rdata->xid, io_parms.persistent_fid,
                                     io_parms.tcon->tid,
                                     io_parms.tcon->ses->Suid,
-                                    io_parms.offset, io_parms.length, rc);
+                                    io_parms.offset, rdata->actual_len, rc);
         }
 
 async_readv_out:
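In smb2_readv_callback() the client now infers EOF in two cases: the server answered STATUS_END_OF_FILE (-ENODATA), or it returned fewer bytes than this round asked for and the read landed exactly on the server's file size; either way NETFS_SREQ_HIT_EOF is set and the result cleared so netfs does not keep retrying. Below is a standalone model of that check, using the same field names as the diff but otherwise simplified; it is not kernel code and the values in main() are arbitrary.

/* Standalone model (not kernel code) of the EOF inference added to
 * smb2_readv_callback() above. */
#include <stdbool.h>
#include <stdio.h>

struct model_read {
        unsigned long long start;       /* subreq.start */
        unsigned long long transferred; /* subreq.transferred before this reply */
        unsigned long long got_bytes;   /* bytes in this reply */
        unsigned long long actual_len;  /* bytes this round asked for */
        bool enodata;                   /* server returned STATUS_END_OF_FILE */
};

/* Returns true when the reply should be treated as hitting EOF (result
 * cleared to 0) rather than as a failure to be retried. */
static bool read_hit_eof(const struct model_read *r,
                         unsigned long long remote_i_size)
{
        if (r->enodata)
                return true;
        return r->got_bytes < r->actual_len &&
               r->start + r->transferred + r->got_bytes == remote_i_size;
}

int main(void)
{
        /* A 64 KiB read starting at 0 against a 12 KiB file: the server hands
         * back 12 KiB, which lands exactly on remote EOF. */
        struct model_read r = {
                .start = 0, .transferred = 0,
                .got_bytes = 12288, .actual_len = 65536,
                .enodata = false,
        };

        printf("hit EOF? %s\n", read_hit_eof(&r, 12288) ? "yes" : "no");
        return 0;
}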
@@ -30,6 +30,7 @@
         EM(cifs_trace_rw_credits_old_session,           "old-session")  \
         EM(cifs_trace_rw_credits_read_response_add,     "rd-resp-add")  \
         EM(cifs_trace_rw_credits_read_response_clear,   "rd-resp-clr")  \
+        EM(cifs_trace_rw_credits_read_resubmit,         "rd-resubmit")  \
         EM(cifs_trace_rw_credits_read_submit,           "rd-submit ")   \
         EM(cifs_trace_rw_credits_write_prepare,         "wr-prepare ")  \
         EM(cifs_trace_rw_credits_write_response_add,    "wr-resp-add")  \
@@ -198,6 +198,7 @@ struct netfs_io_subrequest {
 #define NETFS_SREQ_NEED_RETRY           9       /* Set if the filesystem requests a retry */
 #define NETFS_SREQ_RETRYING             10      /* Set if we're retrying */
 #define NETFS_SREQ_FAILED               11      /* Set if the subreq failed unretryably */
+#define NETFS_SREQ_HIT_EOF              12      /* Set if we hit the EOF */
 };
 
 enum netfs_io_origin {