io_uring/rsrc: export io_check_coalesce_buffer

io_try_coalesce_buffer() is a handy helper that collects useful info about
a set of pages; I want to reuse it for analysing ring/etc. mappings. I
don't need the entire thing and am only interested in whether the pages can
be coalesced into a single page, but reusing it is better than duplicating
the parsing.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/353b447953cd5d34c454a7d909bb6024c391d6e2.1732886067.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2024-11-29 13:34:23 +00:00 committed by Jens Axboe
parent bc4062d81c
commit b82a795d7d
2 changed files with 16 additions and 10 deletions

View File

@ -626,11 +626,12 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
return ret; return ret;
} }
static bool io_do_coalesce_buffer(struct page ***pages, int *nr_pages, static bool io_coalesce_buffer(struct page ***pages, int *nr_pages,
struct io_imu_folio_data *data, int nr_folios) struct io_imu_folio_data *data)
{ {
struct page **page_array = *pages, **new_array = NULL; struct page **page_array = *pages, **new_array = NULL;
int nr_pages_left = *nr_pages, i, j; int nr_pages_left = *nr_pages, i, j;
int nr_folios = data->nr_folios;
/* Store head pages only*/ /* Store head pages only*/
new_array = kvmalloc_array(nr_folios, sizeof(struct page *), new_array = kvmalloc_array(nr_folios, sizeof(struct page *),
@ -667,15 +668,14 @@ static bool io_do_coalesce_buffer(struct page ***pages, int *nr_pages,
return true; return true;
} }
static bool io_try_coalesce_buffer(struct page ***pages, int *nr_pages, bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
struct io_imu_folio_data *data) struct io_imu_folio_data *data)
{ {
struct page **page_array = *pages;
struct folio *folio = page_folio(page_array[0]); struct folio *folio = page_folio(page_array[0]);
unsigned int count = 1, nr_folios = 1; unsigned int count = 1, nr_folios = 1;
int i; int i;
if (*nr_pages <= 1) if (nr_pages <= 1)
return false; return false;
data->nr_pages_mid = folio_nr_pages(folio); data->nr_pages_mid = folio_nr_pages(folio);
@ -687,7 +687,7 @@ static bool io_try_coalesce_buffer(struct page ***pages, int *nr_pages,
* Check if pages are contiguous inside a folio, and all folios have * Check if pages are contiguous inside a folio, and all folios have
* the same page count except for the head and tail. * the same page count except for the head and tail.
*/ */
for (i = 1; i < *nr_pages; i++) { for (i = 1; i < nr_pages; i++) {
if (page_folio(page_array[i]) == folio && if (page_folio(page_array[i]) == folio &&
page_array[i] == page_array[i-1] + 1) { page_array[i] == page_array[i-1] + 1) {
count++; count++;
@ -715,7 +715,8 @@ static bool io_try_coalesce_buffer(struct page ***pages, int *nr_pages,
if (nr_folios == 1) if (nr_folios == 1)
data->nr_pages_head = count; data->nr_pages_head = count;
return io_do_coalesce_buffer(pages, nr_pages, data, nr_folios); data->nr_folios = nr_folios;
return true;
} }
static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx, static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
@ -729,7 +730,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
size_t size; size_t size;
int ret, nr_pages, i; int ret, nr_pages, i;
struct io_imu_folio_data data; struct io_imu_folio_data data;
bool coalesced; bool coalesced = false;
if (!iov->iov_base) if (!iov->iov_base)
return NULL; return NULL;
@ -749,7 +750,8 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
} }
/* If it's huge page(s), try to coalesce them into fewer bvec entries */ /* If it's huge page(s), try to coalesce them into fewer bvec entries */
coalesced = io_try_coalesce_buffer(&pages, &nr_pages, &data); if (io_check_coalesce_buffer(pages, nr_pages, &data))
coalesced = io_coalesce_buffer(&pages, &nr_pages, &data);
imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL); imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
if (!imu) if (!imu)

View File

@ -40,6 +40,7 @@ struct io_imu_folio_data {
/* For non-head/tail folios, has to be fully included */ /* For non-head/tail folios, has to be fully included */
unsigned int nr_pages_mid; unsigned int nr_pages_mid;
unsigned int folio_shift; unsigned int folio_shift;
unsigned int nr_folios;
}; };
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type); struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
@ -66,6 +67,9 @@ int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
unsigned int size, unsigned int type); unsigned int size, unsigned int type);
bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
struct io_imu_folio_data *data);
static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data, static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
int index) int index)
{ {