Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2025-01-01 18:55:12 +00:00
io_uring-6.7-2023-12-08
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmVzOJkQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpmYVD/0cx0TTIrip959DLqF/V8sl2BIrt/mjAvS4
oeVUe5OmqyR2gjjEYewf21MUyzE4tSMO/LFTEr0744zENKNTL84YhIIq30ga+Gue
n61c4WfPnhpvj8NQHuEf65cPosPSvKi6NSMLRJZCLqHtn8SrTQyCg8zk8GwjN/nl
fJScbrj4XKfZNlizKfbHQexfi78DZ2braTds0pPZ+uFXDTIrOKAfixfV39qwTFYZ
zI4FYKH8KzZzuMyyu2B+F3xCMdelUg26i2KMImKBaOsamnucIlyNvr/uWGs2G8tu
Z7sWGXdY9bFlWfAFxGZeFRWbmqpFz15Mmi2Uqx8wiiYxBAaJKL+Qaq358KbTD0hB
ZBKdy3AUw5J/445pwIepGp5XVxqn/qJFxGXzLAlncdhf9mXrjmFwNC/Yp5lnyDYy
S3YhUsjpGX3Mymjd/gWkn1BTZh7zzpKI6LmWJjn89jmTpOzlWmfPu/uM/c/vKvE8
KajCkZ3nUCmr56GUxvSZcon7vwc8pLUyrF8Vo1vwEEVgiN+IjJVk3dMAz0hyGhtO
2HxSwOAHllAIyqjmazqESQnWEf1p8idnoR9qZXAiLzbwUFbUY/a/YrCul6vHM4yE
czat+EGWdfJi0EX0z/bMUVRz05UbNt0JtKf3BnqxWtQlT8yKwCvMgHXuPJbY4y5g
yXi7ep37JQ==
=Xta7
-----END PGP SIGNATURE-----

Merge tag 'io_uring-6.7-2023-12-08' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:
 "Two minor fixes for issues introduced in this release cycle, and two
  fixes for issues or potential issues that are heading to stable.

  One of these ends up disabling passing io_uring file descriptors via
  SCM_RIGHTS. There really shouldn't be an overlap between that kind of
  historic use case and modern usage of io_uring, which is why this was
  deemed appropriate"

* tag 'io_uring-6.7-2023-12-08' of git://git.kernel.dk/linux:
  io_uring/af_unix: disable sending io_uring over sockets
  io_uring/kbuf: check for buffer list readiness after NULL check
  io_uring/kbuf: Fix an NULL vs IS_ERR() bug in io_alloc_pbuf_ring()
  io_uring: fix mutex_unlock with unreferenced ctx
This commit is contained in commit 689659c988.
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -271,6 +271,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 	struct io_kiocb *req, *tmp;
 	struct io_tw_state ts = { .locked = true, };
 
+	percpu_ref_get(&ctx->refs);
 	mutex_lock(&ctx->uring_lock);
 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
 		req->io_task_work.func(req, &ts);
@@ -278,6 +279,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
 		return;
 	io_submit_flush_completions(ctx);
 	mutex_unlock(&ctx->uring_lock);
+	percpu_ref_put(&ctx->refs);
 }
 
 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
@@ -3146,12 +3148,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
 	init_completion(&exit.completion);
 	init_task_work(&exit.task_work, io_tctx_exit_cb);
 	exit.ctx = ctx;
-	/*
-	 * Some may use context even when all refs and requests have been put,
-	 * and they are free to do so while still holding uring_lock or
-	 * completion_lock, see io_req_task_submit(). Apart from other work,
-	 * this lock/unlock section also waits them to finish.
-	 */
+
 	mutex_lock(&ctx->uring_lock);
 	while (!list_empty(&ctx->tctx_list)) {
 		WARN_ON_ONCE(time_after(jiffies, timeout));
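A note on the "mutex_unlock with unreferenced ctx" fix above: the exit path can drop the final ctx reference while the fallback worker is still inside the critical section, so unlocking a mutex embedded in ctx after that point would touch freed memory. Holding a private reference across the lock/unlock pair closes the race. Below is a minimal userspace sketch of the same pattern, using pthreads and C11 atomics in place of the kernel's percpu_ref; the names are illustrative, not the kernel's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ctx {
	atomic_int refs;
	pthread_mutex_t lock;
};

/* Drop a reference; the last holder frees the object. */
static void ctx_put(struct ctx *c)
{
	if (atomic_fetch_sub(&c->refs, 1) == 1) {
		pthread_mutex_destroy(&c->lock);
		free(c);
	}
}

static void fallback_work(struct ctx *c)
{
	atomic_fetch_add(&c->refs, 1);  /* like percpu_ref_get(&ctx->refs) */
	pthread_mutex_lock(&c->lock);
	/* ... run the pending fallback task work ... */
	pthread_mutex_unlock(&c->lock); /* safe: our ref keeps c alive */
	ctx_put(c);                     /* like percpu_ref_put(&ctx->refs) */
}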
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -636,8 +636,8 @@ static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
 	ibf = io_lookup_buf_free_entry(ctx, ring_size);
 	if (!ibf) {
 		ptr = io_mem_alloc(ring_size);
-		if (!ptr)
-			return -ENOMEM;
+		if (IS_ERR(ptr))
+			return PTR_ERR(ptr);
 
 		/* Allocate and store deferred free entry */
 		ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
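The hunk above fixes a mismatched error convention: io_mem_alloc() reports failure as an ERR_PTR-encoded pointer rather than NULL, so the old `if (!ptr)` check could never fire and an error pointer would be treated as a valid allocation. A simplified sketch of the <linux/err.h> encoding this relies on:

/* Simplified from <linux/err.h>: the top 4095 pointer values encode
 * negative errno values, so an error pointer is never NULL. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     /* e.g. ERR_PTR(-ENOMEM) */
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr) /* recover the errno */
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

Against this convention, IS_ERR()/PTR_ERR() is the matching check for any function that returns ERR_PTR values, which is exactly what the hunk switches to.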
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -756,6 +756,8 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
 
 	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
 
+	if (!bl || !bl->is_mmap)
+		return NULL;
 	/*
 	 * Ensure the list is fully setup. Only strictly needed for RCU lookup
 	 * via mmap, and in that case only for the array indexed groups. For
@@ -763,8 +765,6 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
 	 */
 	if (!smp_load_acquire(&bl->is_ready))
 		return NULL;
-	if (!bl || !bl->is_mmap)
-		return NULL;
 
 	return bl->buf_ring;
 }
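The reordering above matters because the old code loaded bl->is_ready before the `!bl` test, dereferencing a pointer that had not yet been NULL-checked. The is_ready flag itself is the usual store-release/load-acquire publication idiom. A minimal C11 sketch of that idiom, with the check order the fix establishes; struct and function names here are ours, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct buf_list {
	void *ring;           /* payload, initialized first ...           */
	atomic_bool is_ready; /* ... then published with a release store */
};

static void publish(struct buf_list *bl, void *ring)
{
	bl->ring = ring;
	/* Pairs with the acquire load below: a reader that observes
	 * is_ready == true also observes a fully set up ->ring. */
	atomic_store_explicit(&bl->is_ready, true, memory_order_release);
}

static void *lookup(struct buf_list *bl)
{
	if (!bl) /* validate the pointer before any dereference */
		return NULL;
	if (!atomic_load_explicit(&bl->is_ready, memory_order_acquire))
		return NULL;
	return bl->ring;
}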
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -77,17 +77,10 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 
 int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
 
-#if defined(CONFIG_UNIX)
-static inline bool io_file_need_scm(struct file *filp)
-{
-	return !!unix_get_socket(filp);
-}
-#else
 static inline bool io_file_need_scm(struct file *filp)
 {
 	return false;
 }
-#endif
 
 static inline int io_scm_file_account(struct io_ring_ctx *ctx,
 				      struct file *file)
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -26,6 +26,7 @@
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
 #include <linux/errqueue.h>
+#include <linux/io_uring.h>
 
 #include <linux/uaccess.h>
 
@@ -103,6 +104,11 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 
 		if (fd < 0 || !(file = fget_raw(fd)))
 			return -EBADF;
+		/* don't allow io_uring files */
+		if (io_uring_get_socket(file)) {
+			fput(file);
+			return -EINVAL;
+		}
 		*fpp++ = file;
 		fpl->count++;
 	}
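For context, this is the SCM_RIGHTS descriptor passing that the new check rejects for io_uring files. A minimal userspace sketch (the helper name is ours); with this fix applied, the sendmsg() call fails with EINVAL when fd refers to an io_uring instance:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Pass one file descriptor over a connected AF_UNIX socket. */
static ssize_t send_fd(int sock, int fd)
{
	char byte = 0;
	struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align; /* force cmsghdr alignment */
	} u;
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	memset(u.buf, 0, sizeof(u.buf));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	/* After this fix: fails with errno == EINVAL for io_uring fds. */
	return sendmsg(sock, &msg, 0);
}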