mm: use aligned address in copy_user_gigantic_page()
In the current kernel, hugetlb_wp() calls copy_user_large_folio() with the
fault address, which may not be aligned to the huge page size.
copy_user_large_folio() may then call copy_user_gigantic_page() with that
address, even though copy_user_gigantic_page() requires the address to be
huge-page-size aligned, so this may cause memory corruption or an
information leak. Fix it by aligning the address down inside
copy_user_gigantic_page(); additionally, use the more descriptive name
'addr_hint' instead of 'addr' for its parameter.
Link: https://lkml.kernel.org/r/20241028145656.932941-2-wangkefeng.wang@huawei.com
Fixes: 530dd9926d ("mm: memory: improve copy_user_large_folio()")
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
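
The core of the fix is that copy_user_gigantic_page() now derives the
huge-page base address itself via ALIGN_DOWN(addr_hint, folio_size(dst)),
so callers may pass the raw fault address. Below is a minimal userspace
sketch of that arithmetic; the ALIGN_DOWN macro here mirrors the kernel's
power-of-two alignment semantics, and the 2 MiB huge page size and sample
fault address are assumptions for illustration only:

/*
 * Minimal sketch of the alignment copy_user_gigantic_page() now performs.
 * ALIGN_DOWN mirrors the kernel macro for power-of-two alignments;
 * the huge page size and fault address below are illustrative assumptions.
 */
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* 'a' must be a power of two */

int main(void)
{
	unsigned long huge_sz = 2UL << 20;		 /* assumed 2 MiB huge page */
	unsigned long addr_hint = 0x40000000UL + 0x1000; /* unaligned fault address */
	unsigned long addr = ALIGN_DOWN(addr_hint, huge_sz);

	/* The copy must start at the base of the huge page, not at the hint. */
	printf("hint: %#lx -> base: %#lx\n", addr_hint, addr);
	return 0;
}

Aligning once inside copy_user_gigantic_page() means no caller has to
remember to do it, which is exactly how hugetlb_wp() slipped through.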
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5340,7 +5340,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 					break;
 				}
 				ret = copy_user_large_folio(new_folio, pte_folio,
-							    ALIGN_DOWN(addr, sz), dst_vma);
+							    addr, dst_vma);
 				folio_put(pte_folio);
 				if (ret) {
 					folio_put(new_folio);
@@ -6643,8 +6643,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			*foliop = NULL;
 			goto out;
 		}
-		ret = copy_user_large_folio(folio, *foliop,
-					    ALIGN_DOWN(dst_addr, size), dst_vma);
+		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
 		folio_put(*foliop);
 		*foliop = NULL;
 		if (ret) {
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6852,13 +6852,14 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
 }
 
 static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
-				   unsigned long addr,
+				   unsigned long addr_hint,
 				   struct vm_area_struct *vma,
 				   unsigned int nr_pages)
 {
-	int i;
+	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst));
 	struct page *dst_page;
 	struct page *src_page;
+	int i;
 
 	for (i = 0; i < nr_pages; i++) {
 		dst_page = folio_page(dst, i);