mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-07 14:32:23 +00:00
copy-struct-from-user-v5.4-rc4
-----BEGIN PGP SIGNATURE----- iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCXacV8gAKCRCRxhvAZXjc oqaZAQDG+ziyN6umUemQPEX1Ar+FOJPIwDrEJdMRmoz3ozTFQAEA0RxquU3LkVnR Rx9wX07ObZB5nMi/V4yANpuH7Vbzrg4= =7JJk -----END PGP SIGNATURE----- Merge tag 'copy-struct-from-user-v5.4-rc4' of gitolite.kernel.org:pub/scm/linux/kernel/git/brauner/linux Pull usercopy test fixlets from Christian Brauner: "This contains two improvements for the copy_struct_from_user() tests: - a coding style change to get rid of the ugly "if ((ret |= test()))" pointed out when pulling the original patchset. - avoid soft lockups when running the usercopy tests on machines with large page sizes by scanning only a 1024 byte region" * tag 'copy-struct-from-user-v5.4-rc4' of gitolite.kernel.org:pub/scm/linux/kernel/git/brauner/linux: usercopy: Avoid soft lockups in test_check_nonzero_user() lib: test_user_copy: style cleanup
This commit is contained in:
commit
8eb4b3b0dd
@ -47,18 +47,35 @@ static bool is_zeroed(void *from, size_t size)
|
||||
static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
|
||||
{
|
||||
int ret = 0;
|
||||
size_t start, end, i;
|
||||
size_t zero_start = size / 4;
|
||||
size_t zero_end = size - zero_start;
|
||||
size_t start, end, i, zero_start, zero_end;
|
||||
|
||||
if (test(size < 2 * PAGE_SIZE, "buffer too small"))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* We conduct a series of check_nonzero_user() tests on a block of memory
|
||||
* with the following byte-pattern (trying every possible [start,end]
|
||||
* pair):
|
||||
* We want to cross a page boundary to exercise the code more
|
||||
* effectively. We also don't want to make the size we scan too large,
|
||||
* otherwise the test can take a long time and cause soft lockups. So
|
||||
* scan a 1024 byte region across the page boundary.
|
||||
*/
|
||||
size = 1024;
|
||||
start = PAGE_SIZE - (size / 2);
|
||||
|
||||
kmem += start;
|
||||
umem += start;
|
||||
|
||||
zero_start = size / 4;
|
||||
zero_end = size - zero_start;
|
||||
|
||||
/*
|
||||
* We conduct a series of check_nonzero_user() tests on a block of
|
||||
* memory with the following byte-pattern (trying every possible
|
||||
* [start,end] pair):
|
||||
*
|
||||
* [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
|
||||
*
|
||||
* And we verify that check_nonzero_user() acts identically to memchr_inv().
|
||||
* And we verify that check_nonzero_user() acts identically to
|
||||
* memchr_inv().
|
||||
*/
|
||||
|
||||
memset(kmem, 0x0, size);
|
||||
@ -93,11 +110,13 @@ static int test_copy_struct_from_user(char *kmem, char __user *umem,
|
||||
size_t ksize, usize;
|
||||
|
||||
umem_src = kmalloc(size, GFP_KERNEL);
|
||||
if ((ret |= test(umem_src == NULL, "kmalloc failed")))
|
||||
ret = test(umem_src == NULL, "kmalloc failed");
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
expected = kmalloc(size, GFP_KERNEL);
|
||||
if ((ret |= test(expected == NULL, "kmalloc failed")))
|
||||
ret = test(expected == NULL, "kmalloc failed");
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
/* Fill umem with a fixed byte pattern. */
|
||||
|
Loading…
Reference in New Issue
Block a user