Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
24 hotfixes. 17 are cc:stable. 15 are MM and 9 are non-MM.

The usual bunch of singletons - please see the relevant changelogs for details.
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZ1U/QwAKCRDdBJ7gKXxA
jnE7AQC0eyNNvaL5pLCIxN/Vmr8YeuWP1dldgI29TjrH/JKjSQEAihZNqVZYjoIT
Gf7Y+IKnc4LbfAXcTe+MfJFeDexM5AU=
=U5LQ
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2024-12-07-22-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "24 hotfixes. 17 are cc:stable. 15 are MM and 9 are non-MM.

  The usual bunch of singletons - please see the relevant changelogs
  for details"

* tag 'mm-hotfixes-stable-2024-12-07-22-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (24 commits)
  iio: magnetometer: yas530: use signed integer type for clamp limits
  sched/numa: fix memory leak due to the overwritten vma->numab_state
  mm/damon: fix order of arguments in damos_before_apply tracepoint
  lib: stackinit: hide never-taken branch from compiler
  mm/filemap: don't call folio_test_locked() without a reference in next_uptodate_folio()
  scatterlist: fix incorrect func name in kernel-doc
  mm: correct typo in MMAP_STATE() macro
  mm: respect mmap hint address when aligning for THP
  mm: memcg: declare do_memsw_account inline
  mm/codetag: swap tags when migrate pages
  ocfs2: update seq_file index in ocfs2_dlm_seq_next
  stackdepot: fix stack_depot_save_flags() in NMI context
  mm: open-code page_folio() in dump_page()
  mm: open-code PageTail in folio_flags() and const_folio_flags()
  mm: fix vrealloc()'s KASAN poisoning logic
  Revert "readahead: properly shorten readahead when falling back to do_page_cache_ra()"
  selftests/damon: add _damon_sysfs.py to TEST_FILES
  selftest: hugetlb_dio: fix test naming
  ocfs2: free inode when ocfs2_get_init_inode() fails
  nilfs2: fix potential out-of-bounds memory access in nilfs_find_entry()
  ...
commit 553c89ec31
@@ -372,6 +372,7 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
         u8 data[8];
         u16 xy1y2[3];
         s32 h[3], s[3];
+        int half_range = BIT(13);
         int i, ret;

         mutex_lock(&yas5xx->lock);
@@ -406,13 +407,13 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y

         /* The second version of YAS537 needs to include calibration coefficients */
         if (yas5xx->version == YAS537_VERSION_1) {
                 for (i = 0; i < 3; i++)
-                        s[i] = xy1y2[i] - BIT(13);
-                h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13);
-                h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13);
-                h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13);
+                        s[i] = xy1y2[i] - half_range;
+                h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / half_range;
+                h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / half_range;
+                h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / half_range;
                 for (i = 0; i < 3; i++) {
-                        clamp_val(h[i], -BIT(13), BIT(13) - 1);
-                        xy1y2[i] = h[i] + BIT(13);
+                        h[i] = clamp(h[i], -half_range, half_range - 1);
+                        xy1y2[i] = h[i] + half_range;
                 }
         }

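Note on the yas537 change above: BIT(13) expands to an unsigned constant, so using it directly as a clamp limit mixes signedness with the s32 values, and the old clamp_val() call also discarded its return value. The fix keeps the limit in a signed int half_range and assigns the clamped result back. A minimal userspace sketch of the intended signed clamping (plain C, not the kernel's clamp() implementation):

#include <stdio.h>

/* Hedged sketch (not the kernel's clamp()): clamp a signed value into
 * [lo, hi] using plain signed ints, mirroring the fix's use of an
 * `int half_range` instead of the unsigned BIT(13) constant. */
static int clamp_int(int val, int lo, int hi)
{
        if (val < lo)
                return lo;
        if (val > hi)
                return hi;
        return val;
}

int main(void)
{
        int half_range = 1 << 13;       /* same value as BIT(13), but signed */
        int h = -9000;                  /* below the valid range */

        /* The result must be assigned back; the original code dropped it. */
        h = clamp_int(h, -half_range, half_range - 1);
        printf("%d\n", h);              /* prints -8192 */
        return 0;
}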
@@ -70,7 +70,7 @@ static inline unsigned int nilfs_chunk_size(struct inode *inode)
  */
 static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
 {
-        unsigned int last_byte = inode->i_size;
+        u64 last_byte = inode->i_size;

         last_byte -= page_nr << PAGE_SHIFT;
         if (last_byte > PAGE_SIZE)
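The nilfs2 change widens last_byte because inode->i_size is a 64-bit quantity; storing it in an unsigned int truncates sizes above 4 GiB before the page offset is subtracted. A small, self-contained illustration of the truncation (the size value is hypothetical, not nilfs code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical file size just above 4 GiB. */
        uint64_t i_size = (1ULL << 32) + 123;

        unsigned int truncated = i_size;   /* keeps only the low 32 bits */
        uint64_t widened = i_size;         /* preserves the full value */

        printf("truncated: %u\n", truncated);             /* 123 */
        printf("widened:   %llu\n",
               (unsigned long long)widened);              /* 4294967419 */
        return 0;
}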
@@ -3110,6 +3110,7 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
         struct ocfs2_lock_res *iter = v;
         struct ocfs2_lock_res *dummy = &priv->p_iter_res;

+        (*pos)++;
         spin_lock(&ocfs2_dlm_tracking_lock);
         iter = ocfs2_dlm_next_res(iter, priv);
         list_del_init(&dummy->l_debug_list);
@@ -200,8 +200,10 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
         mode = mode_strip_sgid(&nop_mnt_idmap, dir, mode);
         inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
         status = dquot_initialize(inode);
-        if (status)
+        if (status) {
+                iput(inode);
                 return ERR_PTR(status);
+        }

         return inode;
 }
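The ocfs2 fix adds the missing release of the just-obtained inode when dquot_initialize() fails, so the reference no longer leaks on the error path. A generic sketch of this release-on-error shape with stand-in types and helpers (not the ocfs2/VFS API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in object and helpers, purely to show the shape: if a later
 * init step fails, the already-acquired object must be released
 * before the error is returned. */
struct object { int dummy; };

static struct object *object_get(void)
{
        return calloc(1, sizeof(struct object));
}

static void object_put(struct object *obj)
{
        free(obj);
}

static int init_quota(struct object *obj)
{
        (void)obj;
        return -1;      /* pretend the initialisation failed */
}

static struct object *get_init_object(void)
{
        struct object *obj = object_get();
        int status;

        if (!obj)
                return NULL;

        status = init_quota(obj);
        if (status) {
                object_put(obj);        /* the step the fix adds */
                return NULL;
        }
        return obj;
}

int main(void)
{
        printf("%p\n", (void *)get_init_object());
        return 0;
}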
@@ -414,6 +414,34 @@ static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
         return __read_vmcore(iter, &iocb->ki_pos);
 }

+/**
+ * vmcore_alloc_buf - allocate buffer in vmalloc memory
+ * @size: size of buffer
+ *
+ * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
+ * the buffer to user-space by means of remap_vmalloc_range().
+ *
+ * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
+ * disabled and there's no need to allow users to mmap the buffer.
+ */
+static inline char *vmcore_alloc_buf(size_t size)
+{
+#ifdef CONFIG_MMU
+        return vmalloc_user(size);
+#else
+        return vzalloc(size);
+#endif
+}
+
+/*
+ * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
+ * essential for mmap_vmcore() in order to map physically
+ * non-contiguous objects (ELF header, ELF note segment and memory
+ * regions in the 1st kernel pointed to by PT_LOAD entries) into
+ * virtually contiguous user-space in ELF layout.
+ */
+#ifdef CONFIG_MMU
+
 /*
  * The vmcore fault handler uses the page cache and fills data using the
  * standard __read_vmcore() function.
@@ -457,34 +485,6 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 #endif
 }

-/**
- * vmcore_alloc_buf - allocate buffer in vmalloc memory
- * @size: size of buffer
- *
- * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
- * the buffer to user-space by means of remap_vmalloc_range().
- *
- * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
- * disabled and there's no need to allow users to mmap the buffer.
- */
-static inline char *vmcore_alloc_buf(size_t size)
-{
-#ifdef CONFIG_MMU
-        return vmalloc_user(size);
-#else
-        return vzalloc(size);
-#endif
-}
-
-/*
- * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
- * essential for mmap_vmcore() in order to map physically
- * non-contiguous objects (ELF header, ELF note segment and memory
- * regions in the 1st kernel pointed to by PT_LOAD entries) into
- * virtually contiguous user-space in ELF layout.
- */
-#ifdef CONFIG_MMU
-
 static const struct vm_operations_struct vmcore_mmap_ops = {
         .fault = mmap_vmcore_fault,
 };
@@ -306,7 +306,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
 {
         const struct page *page = &folio->page;

-        VM_BUG_ON_PGFLAGS(PageTail(page), page);
+        VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
         VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
         return &page[n].flags;
 }
@@ -315,7 +315,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
 {
         struct page *page = &folio->page;

-        VM_BUG_ON_PGFLAGS(PageTail(page), page);
+        VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
         VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
         return &page[n].flags;
 }
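The open-coded check relies on how compound pages are encoded: a tail page stores the address of its head page in compound_head with bit 0 set, so `page->compound_head & 1` is the same tail test that PageTail() performs. A simplified userspace model of that encoding (fake_page is illustrative, not the kernel's struct page):

#include <stdio.h>
#include <stdbool.h>

/* Simplified model of the encoding: for a tail page, compound_head
 * holds (head pointer | 1); for a head or order-0 page, bit 0 is clear. */
struct fake_page {
        unsigned long compound_head;
};

static bool fake_page_is_tail(const struct fake_page *page)
{
        return page->compound_head & 1;
}

static struct fake_page *fake_compound_head(const struct fake_page *page)
{
        if (fake_page_is_tail(page))
                return (struct fake_page *)(page->compound_head - 1);
        return (struct fake_page *)page;
}

int main(void)
{
        struct fake_page head = { .compound_head = 0 };
        struct fake_page tail = { .compound_head = (unsigned long)&head | 1 };

        printf("tail? %d, head at %p\n",
               fake_page_is_tail(&tail), (void *)fake_compound_head(&tail));
        return 0;
}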
@@ -231,7 +231,7 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
 }

 void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
-void pgalloc_tag_copy(struct folio *new, struct folio *old);
+void pgalloc_tag_swap(struct folio *new, struct folio *old);

 void __init alloc_tag_sec_init(void);

@@ -245,7 +245,7 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL
 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
 static inline void alloc_tag_sec_init(void) {}
 static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
-static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {}
+static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}

 #endif /* CONFIG_MEM_ALLOC_PROFILING */
@@ -313,7 +313,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
 }

 /**
- * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
  * @sg: SG entry
  *
  * Description:
@@ -147,7 +147,7 @@ static inline int stack_depot_early_init(void) { return 0; }
  * If the provided stack trace comes from the interrupt context, only the part
  * up to the interrupt entry is saved.
  *
- * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
+ * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if
  *          alloc_pages() cannot be used from the current context. Currently
  *          this is the case for contexts where neither %GFP_ATOMIC nor
  *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
@@ -156,7 +156,7 @@ static inline int stack_depot_early_init(void) { return 0; }
  */
 depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
                                             unsigned int nr_entries,
-                                            gfp_t gfp_flags,
+                                            gfp_t alloc_flags,
                                             depot_flags_t depot_flags);

 /**
@@ -175,7 +175,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
  * Return: Handle of the stack trace stored in depot, 0 on failure
  */
 depot_stack_handle_t stack_depot_save(unsigned long *entries,
-                                      unsigned int nr_entries, gfp_t gfp_flags);
+                                      unsigned int nr_entries, gfp_t alloc_flags);

 /**
  * __stack_depot_get_stack_record - Get a pointer to a stack_record struct
@@ -15,7 +15,7 @@ TRACE_EVENT_CONDITION(damos_before_apply,
                 unsigned int target_idx, struct damon_region *r,
                 unsigned int nr_regions, bool do_trace),

-        TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace),
+        TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),

         TP_CONDITION(do_trace),

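The tracepoint bug above is easy to miss because scheme_idx and target_idx share the same type, so the swapped TP_ARGS order compiles cleanly and only the recorded values end up wrong. A small illustration of how same-typed arguments hide such swaps from the compiler (hypothetical logging function, not DAMON code):

#include <stdio.h>

/* Hypothetical logger with three same-typed parameters. */
static void log_apply(unsigned int context, unsigned int scheme,
                      unsigned int target)
{
        printf("context=%u scheme=%u target=%u\n", context, scheme, target);
}

int main(void)
{
        unsigned int context_idx = 1, scheme_idx = 2, target_idx = 3;

        /* Compiles cleanly, but scheme and target are transposed. */
        log_apply(context_idx, target_idx, scheme_idx);

        /* The intended order. */
        log_apply(context_idx, scheme_idx, target_idx);
        return 0;
}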
@@ -3399,11 +3399,17 @@ static void task_numa_work(struct callback_head *work)

                 /* Initialise new per-VMA NUMAB state. */
                 if (!vma->numab_state) {
-                        vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
-                                GFP_KERNEL);
-                        if (!vma->numab_state)
+                        struct vma_numab_state *ptr;
+
+                        ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+                        if (!ptr)
                                 continue;

+                        if (cmpxchg(&vma->numab_state, NULL, ptr)) {
+                                kfree(ptr);
+                                continue;
+                        }
+
                         vma->numab_state->start_scan_seq = mm->numa_scan_seq;

                         vma->numab_state->next_scan = now +
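The scheduler fix avoids the leak by allocating into a local pointer and publishing it with cmpxchg(), so a racing task that already installed numab_state wins and the loser frees its own copy instead of overwriting the winner's. A userspace sketch of this allocate-then-publish pattern using C11 atomics (stand-in types, not the scheduler code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct state { int scan_seq; };

/* Shared slot that several threads may try to initialise. */
static _Atomic(struct state *) shared_state;

static struct state *get_or_create_state(void)
{
        struct state *expected = NULL;
        struct state *ptr = calloc(1, sizeof(*ptr));

        if (!ptr)
                return NULL;

        /* Publish only if the slot is still NULL; otherwise another
         * thread won the race and our copy must be freed, not leaked. */
        if (!atomic_compare_exchange_strong(&shared_state, &expected, ptr)) {
                free(ptr);
                return expected;        /* the winner's pointer */
        }
        return ptr;
}

int main(void)
{
        printf("%p\n", (void *)get_or_create_state());
        printf("%p\n", (void *)get_or_create_state());  /* same pointer */
        return 0;
}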
@@ -189,26 +189,34 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
         }
 }

-void pgalloc_tag_copy(struct folio *new, struct folio *old)
+void pgalloc_tag_swap(struct folio *new, struct folio *old)
 {
-        union pgtag_ref_handle handle;
-        union codetag_ref ref;
-        struct alloc_tag *tag;
+        union pgtag_ref_handle handle_old, handle_new;
+        union codetag_ref ref_old, ref_new;
+        struct alloc_tag *tag_old, *tag_new;

-        tag = pgalloc_tag_get(&old->page);
-        if (!tag)
+        tag_old = pgalloc_tag_get(&old->page);
+        if (!tag_old)
                 return;
+        tag_new = pgalloc_tag_get(&new->page);
+        if (!tag_new)
+                return;

-        if (!get_page_tag_ref(&new->page, &ref, &handle))
+        if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
                 return;
+        if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
+                put_page_tag_ref(handle_old);
+                return;
+        }

-        /* Clear the old ref to the original allocation tag. */
-        clear_page_tag_ref(&old->page);
-        /* Decrement the counters of the tag on get_new_folio. */
-        alloc_tag_sub(&ref, folio_size(new));
-        __alloc_tag_ref_set(&ref, tag);
-        update_page_tag_ref(handle, &ref);
-        put_page_tag_ref(handle);
+        /* swap tags */
+        __alloc_tag_ref_set(&ref_old, tag_new);
+        update_page_tag_ref(handle_old, &ref_old);
+        __alloc_tag_ref_set(&ref_new, tag_old);
+        update_page_tag_ref(handle_new, &ref_new);
+
+        put_page_tag_ref(handle_old);
+        put_page_tag_ref(handle_new);
 }

 static void shutdown_mem_profiling(bool remove_file)
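pgalloc_tag_swap() has to acquire references for both pages, and on a partial failure the first reference must be dropped before bailing out. A generic sketch of that acquire-two-with-rollback shape (stand-in helpers, not the alloc_tag API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in resource type and acquire/release helpers, purely to show
 * the rollback shape; not the pgalloc_tag API. */
struct handle { int id; };

static bool acquire(struct handle *h, int id)
{
        h->id = id;
        return id != 0;         /* id 0 simulates a failure */
}

static void release(struct handle *h)
{
        printf("released %d\n", h->id);
}

static bool swap_pair(int a, int b)
{
        struct handle ha, hb;

        if (!acquire(&ha, a))
                return false;
        if (!acquire(&hb, b)) {
                release(&ha);   /* roll back the first acquisition */
                return false;
        }

        /* ... swap the two resources here ... */

        release(&ha);
        release(&hb);
        return true;
}

int main(void)
{
        printf("%d\n", swap_pair(1, 2));        /* succeeds */
        printf("%d\n", swap_pair(1, 0));        /* second acquire fails */
        return 0;
}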
@@ -630,7 +630,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
                 prealloc = page_address(page);
         }

-        raw_spin_lock_irqsave(&pool_lock, flags);
+        if (in_nmi()) {
+                /* We can never allocate in NMI context. */
+                WARN_ON_ONCE(can_alloc);
+                /* Best effort; bail if we fail to take the lock. */
+                if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+                        goto exit;
+        } else {
+                raw_spin_lock_irqsave(&pool_lock, flags);
+        }
         printk_deferred_enter();

         /* Try to find again, to avoid concurrently inserting duplicates. */
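In NMI context the depot must not spin on pool_lock, which the interrupted context might already hold, so the fix switches to raw_spin_trylock_irqsave() and simply gives up on contention. A userspace sketch of the same best-effort locking shape built on a C11 atomic_flag (not the kernel's raw spinlock API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag pool_lock = ATOMIC_FLAG_INIT;

static bool pool_trylock(void)
{
        /* Returns true only if the lock was taken without waiting. */
        return !atomic_flag_test_and_set_explicit(&pool_lock,
                                                  memory_order_acquire);
}

static void pool_lock_spin(void)
{
        while (atomic_flag_test_and_set_explicit(&pool_lock,
                                                 memory_order_acquire))
                ;       /* ordinary contexts may spin */
}

static void pool_unlock(void)
{
        atomic_flag_clear_explicit(&pool_lock, memory_order_release);
}

static int save_entry(bool cannot_wait)
{
        if (cannot_wait) {
                /* Best effort, mirroring the NMI path: bail on contention. */
                if (!pool_trylock())
                        return -1;
        } else {
                pool_lock_spin();
        }

        /* ... insert into the pool here ... */

        pool_unlock();
        return 0;
}

int main(void)
{
        printf("%d\n", save_entry(true));
        return 0;
}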
@@ -212,6 +212,7 @@ static noinline void test_ ## name (struct kunit *test) \
 static noinline DO_NOTHING_TYPE_ ## which(var_type) \
 do_nothing_ ## name(var_type *ptr) \
 { \
+        OPTIMIZER_HIDE_VAR(ptr); \
         /* Will always be true, but compiler doesn't know. */ \
         if ((unsigned long)ptr > 0x2) \
                 return DO_NOTHING_RETURN_ ## which(ptr); \
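OPTIMIZER_HIDE_VAR() makes the pointer value opaque to the compiler so it cannot prove the comparison's outcome and fold the branch away at compile time. A hedged userspace sketch of the same idea using an empty inline asm (HIDE_VAR is illustrative; the kernel's exact macro definition is not reproduced here):

#include <stdio.h>

/* Hedged sketch: an empty asm that makes the compiler treat `var` as
 * unknown, similar in spirit to the kernel's OPTIMIZER_HIDE_VAR(). */
#define HIDE_VAR(var) __asm__ volatile("" : "+r" (var))

static int do_nothing(int *ptr)
{
        HIDE_VAR(ptr);
        /* Always true at runtime, but the compiler can no longer prove
         * it and therefore cannot delete the branch. */
        if ((unsigned long)ptr > 0x2)
                return *ptr;
        return 0;
}

int main(void)
{
        int x = 42;

        printf("%d\n", do_nothing(&x));
        return 0;
}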
@@ -124,19 +124,22 @@ static void __dump_page(const struct page *page)
 {
         struct folio *foliop, folio;
         struct page precise;
+        unsigned long head;
         unsigned long pfn = page_to_pfn(page);
         unsigned long idx, nr_pages = 1;
         int loops = 5;

 again:
         memcpy(&precise, page, sizeof(*page));
-        foliop = page_folio(&precise);
-        if (foliop == (struct folio *)&precise) {
+        head = precise.compound_head;
+        if ((head & 1) == 0) {
+                foliop = (struct folio *)&precise;
                 idx = 0;
                 if (!folio_test_large(foliop))
                         goto dump;
                 foliop = (struct folio *)page;
         } else {
+                foliop = (struct folio *)(head - 1);
                 idx = folio_page_idx(foliop, page);
         }

@@ -3501,10 +3501,10 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
                         continue;
                 if (xa_is_value(folio))
                         continue;
-                if (folio_test_locked(folio))
-                        continue;
                 if (!folio_try_get(folio))
                         continue;
+                if (folio_test_locked(folio))
+                        goto skip;
                 /* Has the page moved or been split? */
                 if (unlikely(folio != xas_reload(xas)))
                         goto skip;
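The filemap change reorders the checks so a reference is taken with folio_try_get() before the locked flag is inspected; without the reference the folio can be freed or reused while its flags are being read. A userspace sketch of the take-a-reference-first pattern with a stand-in refcounted object (not the folio API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in refcounted object; not the kernel's folio. */
struct object {
        atomic_int refcount;    /* 0 means the object may be recycled */
        atomic_bool locked;
};

/* Take a reference only if the object is still live (refcount > 0). */
static bool object_try_get(struct object *obj)
{
        int ref = atomic_load(&obj->refcount);

        while (ref > 0) {
                if (atomic_compare_exchange_weak(&obj->refcount, &ref, ref + 1))
                        return true;
        }
        return false;
}

static void object_put(struct object *obj)
{
        atomic_fetch_sub(&obj->refcount, 1);
}

static bool object_usable(struct object *obj)
{
        /* Pin the object first; only then is it safe to look at its state. */
        if (!object_try_get(obj))
                return false;
        if (atomic_load(&obj->locked)) {
                object_put(obj);
                return false;
        }
        return true;    /* caller now holds a reference */
}

int main(void)
{
        struct object obj = { .refcount = 1, .locked = false };

        printf("%d\n", object_usable(&obj));
        object_put(&obj);       /* drop the reference object_usable() took */
        return 0;
}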
mm/gup.c
@@ -52,7 +52,12 @@ static inline void sanity_check_pinned_pages(struct page **pages,
          */
         for (; npages; npages--, pages++) {
                 struct page *page = *pages;
-                struct folio *folio = page_folio(page);
+                struct folio *folio;
+
+                if (!page)
+                        continue;
+
+                folio = page_folio(page);

                 if (is_zero_page(page) ||
                     !folio_test_anon(folio))
@@ -409,6 +414,10 @@ void unpin_user_pages(struct page **pages, unsigned long npages)

         sanity_check_pinned_pages(pages, npages);
         for (i = 0; i < npages; i += nr) {
+                if (!pages[i]) {
+                        nr = 1;
+                        continue;
+                }
                 folio = gup_folio_next(pages, npages, i, &nr);
                 gup_put_folio(folio, nr, FOLL_PIN);
         }
@@ -201,7 +201,7 @@ static inline void fail_non_kasan_kunit_test(void) { }

 #endif /* CONFIG_KUNIT */

-static DEFINE_SPINLOCK(report_lock);
+static DEFINE_RAW_SPINLOCK(report_lock);

 static void start_report(unsigned long *flags, bool sync)
 {
@@ -212,7 +212,7 @@ static void start_report(unsigned long *flags, bool sync)
         lockdep_off();
         /* Make sure we don't end up in loop. */
         report_suppress_start();
-        spin_lock_irqsave(&report_lock, *flags);
+        raw_spin_lock_irqsave(&report_lock, *flags);
         pr_err("==================================================================\n");
 }

@@ -222,7 +222,7 @@ static void end_report(unsigned long *flags, const void *addr, bool is_write)
                 trace_error_report_end(ERROR_DETECTOR_KASAN,
                                        (unsigned long)addr);
         pr_err("==================================================================\n");
-        spin_unlock_irqrestore(&report_lock, *flags);
+        raw_spin_unlock_irqrestore(&report_lock, *flags);
         if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
                 check_panic_on_warn("KASAN");
         switch (kasan_arg_fault) {
@@ -38,7 +38,7 @@ void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
              iter = mem_cgroup_iter(NULL, iter, NULL))

 /* Whether legacy memory+swap accounting is active */
-static bool do_memsw_account(void)
+static inline bool do_memsw_account(void)
 {
         return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
 }
@@ -1080,6 +1080,10 @@ static long migrate_to_node(struct mm_struct *mm, int source, int dest,

         mmap_read_lock(mm);
         vma = find_vma(mm, 0);
+        if (unlikely(!vma)) {
+                mmap_read_unlock(mm);
+                return 0;
+        }

         /*
          * This does not migrate the range, but isolates all pages that
@@ -745,7 +745,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
                 folio_set_readahead(newfolio);

         folio_copy_owner(newfolio, folio);
-        pgalloc_tag_copy(newfolio, folio);
+        pgalloc_tag_swap(newfolio, folio);

         mem_cgroup_migrate(folio, newfolio);
 }
@@ -889,6 +889,7 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
         if (get_area) {
                 addr = get_area(file, addr, len, pgoff, flags);
         } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
+                   && !addr /* no hint */
                    && IS_ALIGNED(len, PMD_SIZE)) {
                 /* Ensures that larger anonymous mappings are THP aligned. */
                 addr = thp_get_unmapped_area_vmflags(file, addr, len,
@@ -458,8 +458,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
                 struct file_ra_state *ra, unsigned int new_order)
 {
         struct address_space *mapping = ractl->mapping;
-        pgoff_t start = readahead_index(ractl);
-        pgoff_t index = start;
+        pgoff_t index = readahead_index(ractl);
         unsigned int min_order = mapping_min_folio_order(mapping);
         pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
         pgoff_t mark = index + ra->size - ra->async_size;
@@ -522,7 +521,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
         if (!err)
                 return;
 fallback:
-        do_page_cache_ra(ractl, ra->size - (index - start), ra->async_size);
+        do_page_cache_ra(ractl, ra->size, ra->async_size);
 }

 static unsigned long ractl_max_pages(struct readahead_control *ractl,
mm/vma.c
@@ -35,7 +35,7 @@ struct mmap_state {
                 .mm = mm_,                                              \
                 .vmi = vmi_,                                            \
                 .addr = addr_,                                          \
-                .end = (addr_) + len,                                   \
+                .end = (addr_) + (len_),                                \
                 .pgoff = pgoff_,                                        \
                 .pglen = PHYS_PFN(len_),                                \
                 .flags = flags_,                                        \
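The MMAP_STATE() typo referenced a caller-scope variable `len` instead of the macro parameter `len_` (the fix also parenthesises it), so the macro silently computed .end from whatever `len` happened to be in scope. A small illustration of the hazard with hypothetical macros (not mm/vma.c):

#include <stdio.h>

/* Hypothetical macros: the buggy version refers to a caller-scope
 * variable `len` instead of its own parameter `len_`. */
#define RANGE_END_BUGGY(addr_, len_)   ((addr_) + len)
#define RANGE_END_FIXED(addr_, len_)   ((addr_) + (len_))

int main(void)
{
        unsigned long len = 4096;       /* unrelated variable in scope */
        unsigned long mapping_len = 8192;

        /* The buggy macro silently picks up `len`, not `mapping_len`. */
        printf("buggy: %lu\n", RANGE_END_BUGGY(0x1000UL, mapping_len));  /* 8192 */
        printf("fixed: %lu\n", RANGE_END_FIXED(0x1000UL, mapping_len));  /* 12288 */
        return 0;
}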
@@ -4093,7 +4093,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
                 /* Zero out spare memory. */
                 if (want_init_on_alloc(flags))
                         memset((void *)p + size, 0, old_size - size);

                 kasan_poison_vmalloc(p + size, old_size - size);
+                kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
                 return (void *)p;
         }

@@ -6,7 +6,7 @@ TEST_GEN_FILES += debugfs_target_ids_read_before_terminate_race
 TEST_GEN_FILES += debugfs_target_ids_pid_leak
 TEST_GEN_FILES += access_memory access_memory_even

-TEST_FILES = _chk_dependency.sh _debugfs_common.sh
+TEST_FILES = _chk_dependency.sh _debugfs_common.sh _damon_sysfs.py

 # functionality tests
 TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
@@ -76,19 +76,15 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
         /* Get the free huge pages after unmap*/
         free_hpage_a = get_free_hugepages();

+        ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
+        ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
+
         /*
          * If the no. of free hugepages before allocation and after unmap does
          * not match - that means there could still be a page which is pinned.
          */
-        if (free_hpage_a != free_hpage_b) {
-                ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
-                ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
-                ksft_test_result_fail(": Huge pages not freed!\n");
-        } else {
-                ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
-                ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
-                ksft_test_result_pass(": Huge pages freed successfully !\n");
-        }
+        ksft_test_result(free_hpage_a == free_hpage_b,
+                         "free huge pages from %u-%u\n", start_off, end_off);
 }

 int main(void)