mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 17:22:07 +00:00
io_uring: rename ->resize_lock
->resize_lock is used for resizing rings, but it's a good idea to reuse it in other cases as well. Rename it to mmap_lock, as it protects against races with mmap. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/68f705306f3ac4d2fb999eb80ea1615015ce9f7f.1732886067.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
78d4f34e21
commit
bc4062d81c
@@ -424,7 +424,7 @@ struct io_ring_ctx {
|
||||
* side will need to grab this lock, to prevent either side from
|
||||
* being run concurrently with the other.
|
||||
*/
|
||||
struct mutex resize_lock;
|
||||
struct mutex mmap_lock;
|
||||
|
||||
/*
|
||||
* If IORING_SETUP_NO_MMAP is used, then the below holds
|
||||
|
@@ -350,7 +350,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
|
||||
INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
|
||||
INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
|
||||
io_napi_init(ctx);
|
||||
mutex_init(&ctx->resize_lock);
|
||||
mutex_init(&ctx->mmap_lock);
|
||||
|
||||
return ctx;
|
||||
|
||||
|
@@ -329,7 +329,7 @@ __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
unsigned int npages;
|
||||
void *ptr;
|
||||
|
||||
guard(mutex)(&ctx->resize_lock);
|
||||
guard(mutex)(&ctx->mmap_lock);
|
||||
|
||||
ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
|
||||
if (IS_ERR(ptr))
|
||||
@@ -365,7 +365,7 @@ unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
|
||||
if (addr)
|
||||
return -EINVAL;
|
||||
|
||||
guard(mutex)(&ctx->resize_lock);
|
||||
guard(mutex)(&ctx->mmap_lock);
|
||||
|
||||
ptr = io_uring_validate_mmap_request(filp, pgoff, len);
|
||||
if (IS_ERR(ptr))
|
||||
@@ -415,7 +415,7 @@ unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
|
||||
struct io_ring_ctx *ctx = file->private_data;
|
||||
void *ptr;
|
||||
|
||||
guard(mutex)(&ctx->resize_lock);
|
||||
guard(mutex)(&ctx->mmap_lock);
|
||||
|
||||
ptr = io_uring_validate_mmap_request(file, pgoff, len);
|
||||
if (IS_ERR(ptr))
|
||||
|
@@ -486,15 +486,15 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
|
||||
}
|
||||
|
||||
/*
|
||||
* We'll do the swap. Grab the ctx->resize_lock, which will exclude
|
||||
* We'll do the swap. Grab the ctx->mmap_lock, which will exclude
|
||||
* any new mmap's on the ring fd. Clear out existing mappings to prevent
|
||||
* mmap from seeing them, as we'll unmap them. Any attempt to mmap
|
||||
* existing rings beyond this point will fail. Not that it could proceed
|
||||
* at this point anyway, as the io_uring mmap side needs go grab the
|
||||
* ctx->resize_lock as well. Likewise, hold the completion lock over the
|
||||
* ctx->mmap_lock as well. Likewise, hold the completion lock over the
|
||||
* duration of the actual swap.
|
||||
*/
|
||||
mutex_lock(&ctx->resize_lock);
|
||||
mutex_lock(&ctx->mmap_lock);
|
||||
spin_lock(&ctx->completion_lock);
|
||||
o.rings = ctx->rings;
|
||||
ctx->rings = NULL;
|
||||
@@ -561,7 +561,7 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
|
||||
ret = 0;
|
||||
out:
|
||||
spin_unlock(&ctx->completion_lock);
|
||||
mutex_unlock(&ctx->resize_lock);
|
||||
mutex_unlock(&ctx->mmap_lock);
|
||||
io_register_free_rings(&p, to_free);
|
||||
|
||||
if (ctx->sq_data)
|
||||
|
Loading…
Reference in New Issue
Block a user