Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git, synced 2024-12-29 17:25:38 +00:00
io_uring-6.12-20241018
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmcSk5EQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpix8EACerYNC/5U1MjZvJ9sFtH3i4WtGIx3AOpxk
UZyzvrmZh8+LIT2Xfvs50/SfC4LAOLuJBWr5bjkJxtNWFc02R3n/1spOIV7PV5xc
FL2mBfnViW9OxCiOOdG7hRNvhCGZ9Rb5HIo1c8OoXslXCEPFDLMNJ/PTrzm1mjHz
VbxLfNOXPCHprRwi70UK4mdAE6VJkMXRgh5ysjDtL3Ux4mA0fQ4x/hifGIBTbJri
BPVFQlRmWHuo9DKuimn3h6asYlygD15CJS/XIfXWyans6E/3vgVy918XlvH/4kMT
zN0nl2HQgaEl+4b31pxzrELWkYgMo1MM3ENWiqfClDk5k0YCf1MH5DzrMAhrg6I/
GFQF1XPOYMld/vwsM1xE+75/xKkKCOrV2EZfBfPTb11v6zePEl1F6dHST7sZ5hd2
iNSuohL2MdAODlG+itqNZpuNUgc6gqdNOCUFwli+XgBzkaiZScS5lkssq1EKhoVi
1MjkIsQNx9eXfVLa/aiDZveu3YJNeLD2XVwqjtIXSxgaCZBNa4oa4rLvOCr+xnst
wM0kI7EBIDQMzaupfs0hQ07iatkw/n2ILRjrs8bM2K0hSxMEuAlj+cJkWbVI7nCR
/VeHeloS6f8OwSvRF1S1zHGNmA9UR7m2nbL5HfuurrMITUg4L4sNXexnfeYEKpA9
yaCaw41USA==
=gi8g
-----END PGP SIGNATURE-----

Merge tag 'io_uring-6.12-20241018' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 - Fix a regression this merge window where cloning of registered buffers
   didn't take into account the dummy_ubuf

 - Fix a race with reading how many SQRING entries are available, causing
   userspace to need to loop around io_uring_sqring_wait() rather than
   being able to rely on SQEs being available when it returned

 - Ensure that the SQPOLL thread is TASK_RUNNING before running task_work
   off the cancelation exit path

* tag 'io_uring-6.12-20241018' of git://git.kernel.dk/linux:
  io_uring/sqpoll: ensure task state is TASK_RUNNING when running task_work
  io_uring/rsrc: ignore dummy_ubuf for buffer cloning
  io_uring/sqpoll: close race on waiting for sqring entries
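The io_uring_sqring_wait() fix is the one most visible to applications. As a minimal sketch (assuming liburing and a ring created with IORING_SETUP_SQPOLL; get_sqe_blocking is an illustrative helper, not a liburing API), this is the kind of loop userspace needed because the wait could return before SQEs were actually available; with the fix, one successful wait should be enough:

#include <liburing.h>
#include <stddef.h>

/* Get an SQE, waiting for the SQPOLL thread to free up ring space if the
 * SQ ring is currently full.  Before the fix, io_uring_sqring_wait() could
 * return while the kernel's cached head still made the ring look full, so
 * this loop could go around more than once. */
static struct io_uring_sqe *get_sqe_blocking(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;

	while (!(sqe = io_uring_get_sqe(ring))) {
		if (io_uring_sqring_wait(ring) < 0)
			return NULL;	/* wait failed, e.g. -errno from io_uring_enter() */
	}
	return sqe;
}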
commit a041f47898
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -284,7 +284,14 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;
 
-	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
+	/*
+	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
+	 * is race prone if the SQPOLL thread has grabbed entries but not yet
+	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
+	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
+	 * just read the actual sqring head unconditionally.
+	 */
+	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
 }
 
 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
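For context, the head the fixed check now reads is the same one the kernel publishes to the shared SQ ring, which is also what userspace bases its own space calculation on. A liburing-style sketch of that calculation (sq_space_left, khead and sqe_tail are illustrative names for the mmap'ed ring fields, not exact liburing code):

#include <stdatomic.h>

/* khead points at the SQ ring head the kernel publishes; sqe_tail is the
 * application's local tail.  The kernel-side fix makes io_sqring_full()
 * agree with this view instead of the internally cached head. */
static unsigned sq_space_left(const _Atomic unsigned *khead,
			      unsigned sqe_tail, unsigned ring_entries)
{
	unsigned head = atomic_load_explicit(khead, memory_order_acquire);

	return ring_entries - (sqe_tail - head);
}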
@@ -320,6 +327,7 @@ static inline int io_run_task_work(void)
 	if (current->io_uring) {
 		unsigned int count = 0;
 
+		__set_current_state(TASK_RUNNING);
 		tctx_task_work_run(current->io_uring, UINT_MAX, &count);
 		if (count)
 			ret = true;
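This hunk is the task-state fix: running task_work can block, so it must not happen while the task is still in the sleeping state a wait loop has set up. A rough sketch of the general pattern being respected (stop_requested() is hypothetical; this is not the actual SQPOLL exit path):

	/* Once the task state is TASK_INTERRUPTIBLE, anything that may sleep
	 * must first restore TASK_RUNNING, which io_run_task_work() now does
	 * itself via __set_current_state(TASK_RUNNING). */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!stop_requested()) {		/* stop_requested(): hypothetical */
		if (task_work_pending(current)) {
			__set_current_state(TASK_RUNNING);
			io_run_task_work();	/* may block */
		} else {
			schedule();		/* sleep until woken */
		}
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);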
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1176,7 +1176,8 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
 	for (i = 0; i < nbufs; i++) {
 		struct io_mapped_ubuf *src = src_ctx->user_bufs[i];
 
-		refcount_inc(&src->refs);
+		if (src != &dummy_ubuf)
+			refcount_inc(&src->refs);
 		user_bufs[i] = src;
 	}
 
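This last hunk is the dummy_ubuf fix: unregistered buffer slots point at a shared static placeholder, and cloning a table must not take a reference on it. A small, self-contained illustration of that sentinel pattern in plain C (generic names, not the io_uring structures):

#include <stdatomic.h>
#include <stddef.h>

struct obj {
	atomic_int refs;
};

/* Shared placeholder marking "no buffer registered in this slot". */
static struct obj slot_sentinel;

static void clone_table(struct obj **dst, struct obj **src, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		/* Only real objects are refcounted; bumping the sentinel's
		 * count (as the pre-fix clone path effectively did with
		 * dummy_ubuf) corrupts an object that is never "put". */
		if (src[i] != &slot_sentinel)
			atomic_fetch_add(&src[i]->refs, 1);
		dst[i] = src[i];
	}
}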