mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
io_uring: get rid of struct io_rw_state
A separate state struct is not needed anymore, just fold it in with
io_async_rw.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a9165b83c1
commit 0d10bd77a1
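For orientation before reading the diff: the patch folds the three members of struct io_rw_state directly into struct io_async_rw. The sketch below is reconstructed from the io_uring/rw.h hunk at the end of the patch (member alignment is approximate; the referenced types come from the kernel's <linux/uio.h>, <linux/pagemap.h> and io_uring headers); comments mark the members that used to live in io_rw_state.

struct io_async_rw {
	union {
		size_t			bytes_done;
		struct io_cache_entry	cache;
	};
	struct iov_iter			iter;		/* was io_rw_state.iter */
	struct iov_iter_state		iter_state;	/* was io_rw_state.iter_state */
	struct iovec			fast_iov[UIO_FASTIOV];	/* was io_rw_state.fast_iov */
	struct iovec			*free_iovec;
	struct wait_page_queue		wpq;
};

Call sites that previously went through the embedded state (io->s.iter, io->s.iter_state, io->s.fast_iov) now dereference the fields directly (io->iter, io->iter_state, io->fast_iov), which is what the bulk of the io_uring/rw.c hunks below do.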
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -96,12 +96,12 @@ static int __io_import_iovec(int ddir, struct io_kiocb *req,
 			rw->len = sqe_len;
 		}
 
-		return import_ubuf(ddir, buf, sqe_len, &io->s.iter);
+		return import_ubuf(ddir, buf, sqe_len, &io->iter);
 	}
 
-	io->free_iovec = io->s.fast_iov;
+	io->free_iovec = io->fast_iov;
 	return __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &io->free_iovec,
-				&io->s.iter, req->ctx->compat);
+				&io->iter, req->ctx->compat);
 }
 
 static inline int io_import_iovec(int rw, struct io_kiocb *req,
@@ -114,7 +114,7 @@ static inline int io_import_iovec(int rw, struct io_kiocb *req,
 	if (unlikely(ret < 0))
 		return ret;
 
-	iov_iter_save_state(&io->s.iter, &io->s.iter_state);
+	iov_iter_save_state(&io->iter, &io->iter_state);
 	return 0;
 }
 
@@ -216,7 +216,7 @@ static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
 	if (unlikely(ret < 0))
 		return ret;
 
-	iov_iter_save_state(&rw->s.iter, &rw->s.iter_state);
+	iov_iter_save_state(&rw->iter, &rw->iter_state);
 	return 0;
 }
 
@@ -308,8 +308,8 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	io_req_set_rsrc_node(req, ctx, 0);
 
 	io = req->async_data;
-	ret = io_import_fixed(ddir, &io->s.iter, req->imu, rw->addr, rw->len);
-	iov_iter_save_state(&io->s.iter, &io->s.iter_state);
+	ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len);
+	iov_iter_save_state(&io->iter, &io->iter_state);
 	return ret;
 }
 
@@ -374,7 +374,7 @@ static void io_resubmit_prep(struct io_kiocb *req)
 {
 	struct io_async_rw *io = req->async_data;
 
-	iov_iter_restore(&io->s.iter, &io->s.iter_state);
+	iov_iter_restore(&io->iter, &io->iter_state);
 }
 
 static bool io_rw_should_reissue(struct io_kiocb *req)
@@ -808,7 +808,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_rw_init_file(req, FMODE_READ);
 	if (unlikely(ret))
 		return ret;
-	req->cqe.res = iov_iter_count(&io->s.iter);
+	req->cqe.res = iov_iter_count(&io->iter);
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
@@ -826,7 +826,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(ret))
 		return ret;
 
-	ret = io_iter_do_read(rw, &io->s.iter);
+	ret = io_iter_do_read(rw, &io->iter);
 
 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 		req->flags &= ~REQ_F_REISSUE;
@@ -853,7 +853,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	 * untouched in case of error. Restore it and we'll advance it
 	 * manually if we need to.
 	 */
-	iov_iter_restore(&io->s.iter, &io->s.iter_state);
+	iov_iter_restore(&io->iter, &io->iter_state);
 
 	do {
 		/*
@@ -861,11 +861,11 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 		 * above or inside this loop. Advance the iter by the bytes
 		 * that were consumed.
 		 */
-		iov_iter_advance(&io->s.iter, ret);
-		if (!iov_iter_count(&io->s.iter))
+		iov_iter_advance(&io->iter, ret);
+		if (!iov_iter_count(&io->iter))
 			break;
 		io->bytes_done += ret;
-		iov_iter_save_state(&io->s.iter, &io->s.iter_state);
+		iov_iter_save_state(&io->iter, &io->iter_state);
 
 		/* if we can retry, do so with the callbacks armed */
 		if (!io_rw_should_retry(req)) {
@@ -873,19 +873,19 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 			return -EAGAIN;
 		}
 
-		req->cqe.res = iov_iter_count(&io->s.iter);
+		req->cqe.res = iov_iter_count(&io->iter);
 		/*
 		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 		 * we get -EIOCBQUEUED, then we'll get a notification when the
 		 * desired page gets unlocked. We can also get a partial read
 		 * here, and if we do, then just retry at the new offset.
 		 */
-		ret = io_iter_do_read(rw, &io->s.iter);
+		ret = io_iter_do_read(rw, &io->iter);
 		if (ret == -EIOCBQUEUED)
 			return IOU_ISSUE_SKIP_COMPLETE;
 		/* we got some bytes, but not all. retry. */
 		kiocb->ki_flags &= ~IOCB_WAITQ;
-		iov_iter_restore(&io->s.iter, &io->s.iter_state);
+		iov_iter_restore(&io->iter, &io->iter_state);
 	} while (ret > 0);
 done:
 	/* it's faster to check here then delegate to kfree */
@@ -989,7 +989,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_rw_init_file(req, FMODE_WRITE);
 	if (unlikely(ret))
 		return ret;
-	req->cqe.res = iov_iter_count(&io->s.iter);
+	req->cqe.res = iov_iter_count(&io->iter);
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
@@ -1019,9 +1019,9 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	kiocb->ki_flags |= IOCB_WRITE;
 
 	if (likely(req->file->f_op->write_iter))
-		ret2 = call_write_iter(req->file, kiocb, &io->s.iter);
+		ret2 = call_write_iter(req->file, kiocb, &io->iter);
 	else if (req->file->f_op->write)
-		ret2 = loop_rw_iter(WRITE, rw, &io->s.iter);
+		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
 	else
 		ret2 = -EINVAL;
 
@@ -1053,7 +1053,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 		 * in the worker. Also update bytes_done to account for
 		 * the bytes already written.
 		 */
-		iov_iter_save_state(&io->s.iter, &io->s.iter_state);
+		iov_iter_save_state(&io->iter, &io->iter_state);
 		io->bytes_done += ret2;
 
 		if (kiocb->ki_flags & IOCB_WRITE)
@@ -1064,7 +1064,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 		ret = kiocb_done(req, ret2, issue_flags);
 	} else {
 ret_eagain:
-		iov_iter_restore(&io->s.iter, &io->s.iter_state);
+		iov_iter_restore(&io->iter, &io->iter_state);
 		if (kiocb->ki_flags & IOCB_WRITE)
 			io_req_end_write(req);
 		return -EAGAIN;
@@ -1164,5 +1164,6 @@ void io_rw_cache_free(struct io_cache_entry *entry)
 	struct io_async_rw *rw;
 
 	rw = container_of(entry, struct io_async_rw, cache);
+	kfree(rw->free_iovec);
 	kfree(rw);
 }
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -2,18 +2,14 @@
 
 #include <linux/pagemap.h>
 
-struct io_rw_state {
-	struct iov_iter			iter;
-	struct iov_iter_state		iter_state;
-	struct iovec			fast_iov[UIO_FASTIOV];
-};
-
 struct io_async_rw {
 	union {
 		size_t			bytes_done;
 		struct io_cache_entry	cache;
 	};
-	struct io_rw_state		s;
+	struct iov_iter			iter;
+	struct iov_iter_state		iter_state;
+	struct iovec			fast_iov[UIO_FASTIOV];
 	struct iovec			*free_iovec;
 	struct wait_page_queue		wpq;
 };