Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton: "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/fadvise.c: do not discard partial pages with POSIX_FADV_DONTNEED
  mm: introduce dedicated WQ_MEM_RECLAIM workqueue to do lru_add_drain_all
  kernel/relay.c: fix potential memory leak
  mm: thp: broken page count after commit aa88b68c3b1d
  revert "mm: memcontrol: fix possible css ref leak on oom"
  kasan: change memory hot-add error messages to info messages
  mm/hugetlb: fix huge page reserve accounting for private mappings
commit 9557c3cfda
kernel/relay.c

@@ -614,6 +614,7 @@ free_bufs:
 
         kref_put(&chan->kref, relay_destroy_channel);
         mutex_unlock(&relay_channels_mutex);
+        kfree(chan);
         return NULL;
 }
 EXPORT_SYMBOL_GPL(relay_open);
mm/fadvise.c (11 lines changed)
@@ -126,6 +126,17 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
                  */
                 start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
                 end_index = (endbyte >> PAGE_SHIFT);
+                if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+                        /* First page is tricky as 0 - 1 = -1, but pgoff_t
+                         * is unsigned, so the end_index >= start_index
+                         * check below would be true and we'll discard the whole
+                         * file cache which is not what was asked.
+                         */
+                        if (end_index == 0)
+                                break;
+
+                        end_index--;
+                }
 
                 if (end_index >= start_index) {
                         unsigned long count = invalidate_mapping_pages(mapping,
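To illustrate what the new rounding does, here is a minimal userspace sketch (not part of the patch; the file name and the 4 KiB page size are assumptions). It asks the kernel to drop the cache for a range whose end is not page-aligned; with this fix only the fully covered first page is eligible for invalidation, while the partially covered last page stays in the page cache instead of the whole range being discarded.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("testfile", O_RDONLY);    /* hypothetical test file */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /*
         * Range [0, 6000): endbyte = 5999, so end_index = 5999 >> 12 = 1,
         * but 5999 is not the last byte of that page, so end_index is
         * decremented to 0 and only the fully covered first page may be
         * dropped; the partial page covering bytes 4096-5999 is kept.
         */
        int err = posix_fadvise(fd, 0, 6000, POSIX_FADV_DONTNEED);
        if (err)
                fprintf(stderr, "posix_fadvise: %s\n", strerror(err));

        close(fd);
        return 0;
}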
mm/hugetlb.c (42 lines changed)
@@ -832,8 +832,27 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
          * Only the process that called mmap() has reserves for
          * private mappings.
          */
-        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-                return true;
+        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+                /*
+                 * Like the shared case above, a hole punch or truncate
+                 * could have been performed on the private mapping.
+                 * Examine the value of chg to determine if reserves
+                 * actually exist or were previously consumed.
+                 * Very Subtle - The value of chg comes from a previous
+                 * call to vma_needs_reserves(). The reserve map for
+                 * private mappings has different (opposite) semantics
+                 * than that of shared mappings. vma_needs_reserves()
+                 * has already taken this difference in semantics into
+                 * account. Therefore, the meaning of chg is the same
+                 * as in the shared case above. Code could easily be
+                 * combined, but keeping it separate draws attention to
+                 * subtle differences.
+                 */
+                if (chg)
+                        return false;
+                else
+                        return true;
+        }
 
         return false;
 }
@@ -1816,6 +1835,25 @@ static long __vma_reservation_common(struct hstate *h,
 
         if (vma->vm_flags & VM_MAYSHARE)
                 return ret;
+        else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+                /*
+                 * In most cases, reserves always exist for private mappings.
+                 * However, a file associated with mapping could have been
+                 * hole punched or truncated after reserves were consumed.
+                 * As subsequent fault on such a range will not use reserves.
+                 * Subtle - The reserve map for private mappings has the
+                 * opposite meaning than that of shared mappings. If NO
+                 * entry is in the reserve map, it means a reservation exists.
+                 * If an entry exists in the reserve map, it means the
+                 * reservation has already been consumed. As a result, the
+                 * return value of this routine is the opposite of the
+                 * value returned from reserve map manipulation routines above.
+                 */
+                if (ret)
+                        return 0;
+                else
+                        return 1;
+        }
         else
                 return ret < 0 ? ret : 0;
 }
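The situation both new comments refer to can be sketched from userspace. This is only a rough illustration of that scenario, not a reproducer taken from the changelog; the hugetlbfs mount point, file name, and 2 MiB huge page size are assumptions: a private hugetlb mapping consumes its reserve on the first fault, the backing file is then hole punched, and a later fault on the same range has to be accounted without assuming the reserve still exists.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)  /* assumes 2 MiB huge pages */

int main(void)
{
        /* assumes a hugetlbfs mount at /dev/hugepages */
        int fd = open("/dev/hugepages/resv-test", O_CREAT | O_RDWR, 0600);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ftruncate(fd, HPAGE_SIZE)) {
                perror("ftruncate");
                return 1;
        }

        char *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        p[0] = 1;       /* first fault: consumes the private mapping's reserve */

        /*
         * Punch a hole in the backing file; per the comments in the hunks
         * above, the range may no longer have a reservation afterwards.
         */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      0, HPAGE_SIZE))
                perror("fallocate");

        p[0] = 2;       /* later fault: must not be charged to a reserve
                         * that no longer exists */
        return 0;
}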
mm/kasan/kasan.c

@@ -763,8 +763,8 @@ static int kasan_mem_notifier(struct notifier_block *nb,
 
 static int __init kasan_memhotplug_init(void)
 {
-        pr_err("WARNING: KASAN doesn't support memory hot-add\n");
-        pr_err("Memory hot-add will be disabled\n");
+        pr_info("WARNING: KASAN doesn't support memory hot-add\n");
+        pr_info("Memory hot-add will be disabled\n");
 
         hotplug_memory_notifier(kasan_mem_notifier, 0);
 
mm/memcontrol.c

@@ -1608,7 +1608,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-        if (!current->memcg_may_oom || current->memcg_in_oom)
+        if (!current->memcg_may_oom)
                 return;
         /*
          * We are in the middle of the charge context here, so we
mm/swap.c (20 lines changed)
@@ -667,6 +667,24 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+/*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+ * workqueue, aiding in getting memory freed.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+        lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+        if (WARN(!lru_add_drain_wq,
+                "Failed to create workqueue lru_add_drain_wq"))
+                return -ENOMEM;
+
+        return 0;
+}
+early_initcall(lru_init);
+
 void lru_add_drain_all(void)
 {
         static DEFINE_MUTEX(lock);
@@ -686,7 +704,7 @@ void lru_add_drain_all(void)
                     pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
                     need_activate_page_drain(cpu)) {
                         INIT_WORK(work, lru_add_drain_per_cpu);
-                        schedule_work_on(cpu, work);
+                        queue_work_on(cpu, lru_add_drain_wq, work);
                         cpumask_set_cpu(cpu, &has_work);
                 }
         }
mm/swap_state.c

@@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page)
 void free_page_and_swap_cache(struct page *page)
 {
         free_swap_cache(page);
-        put_page(page);
+        if (is_huge_zero_page(page))
+                put_huge_zero_page();
+        else
+                put_page(page);
 }
 
 /*