block: shrink rq_map_data a bit
We don't need full ints for several of these members. Change the
page_order and nr_entries to unsigned shorts, and the true/false
from_user and null_mapped to booleans. This shrinks the struct from
32 to 24 bytes on 64-bit archs.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d322f355e9
commit f5d632d15e
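The 32 -> 24 byte claim in the message follows from ordinary struct layout rules. Below is a minimal userspace sketch, not the kernel header: struct rq_map_data_old/_new are mock-ups that mirror the old and new field orders from the hunks further down, void ** stands in for struct page **, and the printed sizes assume a typical 64-bit LP64 ABI.

#include <stdbool.h>
#include <stdio.h>

/* Old layout: pointer + int + int + unsigned long + int + int
 * = 8+4+4+8+4+4 = 32 bytes on LP64, with no padding needed. */
struct rq_map_data_old {
	void **pages;		/* stand-in for struct page **pages */
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

/* New layout: offset moves up next to the pointer, the counts become
 * unsigned shorts and the flags become bools -> 22 bytes of members,
 * rounded up to 24 for 8-byte alignment. */
struct rq_map_data_new {
	void **pages;
	unsigned long offset;
	unsigned short page_order;
	unsigned short nr_entries;
	bool null_mapped;
	bool from_user;
};

int main(void)
{
	printf("old: %zu bytes\n", sizeof(struct rq_map_data_old));	/* 32 */
	printf("new: %zu bytes\n", sizeof(struct rq_map_data_new));	/* 24 */
	return 0;
}

Note that keeping offset in the middle would have left the new struct padded back up to 32 bytes, which is presumably why the diff below also reorders it.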
@@ -158,7 +158,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
 
 	if (map_data) {
-		nr_pages = 1 << map_data->page_order;
+		nr_pages = 1U << map_data->page_order;
 		i = map_data->offset / PAGE_SIZE;
 	}
 	while (len) {
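The commit message doesn't call out the 1U change above; presumably it keeps the page-count computation in unsigned arithmetic now that page_order is an unsigned short. A shift takes its type from the promoted left operand, so the width of page_order alone doesn't change the result type. A minimal userspace illustration (variable names are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned short page_order = 3;	/* only the shift count; doesn't set the result type */

	int nr_signed = 1 << page_order;		/* 1 is an int, so the shift is signed */
	unsigned int nr_unsigned = 1U << page_order;	/* 1U keeps the whole expression unsigned */

	printf("%d %u\n", nr_signed, nr_unsigned);	/* prints: 8 8 */
	return 0;
}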
@@ -963,11 +963,11 @@ blk_status_t blk_insert_cloned_request(struct request *rq);
 
 struct rq_map_data {
 	struct page **pages;
-	int page_order;
-	int nr_entries;
 	unsigned long offset;
-	int null_mapped;
-	int from_user;
+	unsigned short page_order;
+	unsigned short nr_entries;
+	bool null_mapped;
+	bool from_user;
 };
 
 int blk_rq_map_user(struct request_queue *, struct request *,
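One consequence of the narrower types for code filling rq_map_data, not stated in the commit but implied by C's conversion rules: bool preserves the old "nonzero means set" behaviour of the int flags, while unsigned short wraps entry counts above 65535. A small userspace illustration (the values are hypothetical, and this is not kernel code):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Any nonzero value still reads back as set, so int -> bool is
	 * behaviour-preserving for flag-style use. */
	bool from_user = 2;

	/* unsigned short wraps modulo 65536, so counts must stay <= USHRT_MAX. */
	unsigned int requested = 70000;
	unsigned short nr_entries = (unsigned short)requested;

	printf("from_user=%d nr_entries=%u (USHRT_MAX=%u)\n",
	       from_user, nr_entries, (unsigned int)USHRT_MAX);
	/* prints: from_user=1 nr_entries=4464 (USHRT_MAX=65535) */
	return 0;
}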