mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  .mailmap: add Christophe Ricard
  Make CONFIG_FHANDLE default y
  mm/page_isolation.c: fix the function comments
  oom, oom_reaper: do not enqueue task if it is on the oom_reaper_list head
  mm/page_isolation: fix tracepoint to mirror check function behavior
  mm/rmap: batched invalidations should use existing api
  x86/mm: TLB_REMOTE_SEND_IPI should count pages
  mm: fix invalid node in alloc_migrate_target()
  include/linux/huge_mm.h: return NULL instead of false for pmd_trans_huge_lock()
  mm, kasan: fix compilation for CONFIG_SLAB
  MAINTAINERS: orangefs mailing list is subscribers-only
commit 4e19fd9395

.mailmap
@@ -33,6 +33,7 @@ Björn Steinbrink <B.Steinbrink@gmx.de>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
+Christophe Ricard <christophe.ricard@gmail.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
 David Brownell <david-b@pacbell.net>
MAINTAINERS
@@ -8253,7 +8253,7 @@ F: Documentation/filesystems/overlayfs.txt

 ORANGEFS FILESYSTEM
 M:	Mike Marshall <hubcap@omnibond.com>
-L:	pvfs2-developers@beowulf-underground.org
+L:	pvfs2-developers@beowulf-underground.org (subscribers-only)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
 S:	Supported
 F:	fs/orangefs/
arch/x86/include/asm/tlbflush.h
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)

 #endif	/* SMP */

-/* Not inlined due to inc_irq_stat not being defined yet */
-#define flush_tlb_local() {		\
-	inc_irq_stat(irq_tlb_count);	\
-	local_flush_tlb();		\
-}
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
arch/x86/mm/tlb.c
@@ -104,10 +104,8 @@ static void flush_tlb_func(void *info)

 	inc_irq_stat(irq_tlb_count);

-	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
-	if (!f->flush_end)
-		f->flush_end = f->flush_start + PAGE_SIZE;

 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
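A note on the new guard: with the added f->flush_mm check, a NULL flush_mm becomes a convention for "flush regardless of the active mm", which the batched-unmap path in the mm/rmap.c hunks below relies on. A minimal illustration of the two call shapes (hypothetical call sites, not part of this diff):

	flush_tlb_others(mask, NULL, 0, TLB_FLUSH_ALL);	/* full flush on every CPU in mask */
	flush_tlb_others(mask, mm, start, end);		/* ignored on CPUs where mm is not active */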
@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 				unsigned long end)
 {
 	struct flush_tlb_info info;
+
+	if (end == 0)
+		end = start + PAGE_SIZE;
 	info.flush_mm = mm;
 	info.flush_start = start;
 	info.flush_end = end;

 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
+	if (end == TLB_FLUSH_ALL)
+		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
+	else
+		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
+				(end - start) >> PAGE_SHIFT);
+
 	if (is_uv_system()) {
 		unsigned int cpu;
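For the trace change, a quick userspace-style sanity check of the arithmetic (illustrative only; a PAGE_SHIFT of 12 is assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x100000UL;
	unsigned long end = start + PAGE_SIZE;	/* a one-page flush */

	printf("%lu\n", end - start);			/* old trace value: 4096 (bytes) */
	printf("%lu\n", (end - start) >> PAGE_SHIFT);	/* new trace value: 1 (pages) */
	return 0;
}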
include/linux/huge_mm.h
@@ -127,7 +127,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma);
 	else
-		return false;
+		return NULL;
 }
 static inline int hpage_nr_pages(struct page *page)
 {
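A hypothetical caller, to show why NULL is the right failure value (a sketch, not taken from this commit): the return type is spinlock_t *, so callers naturally test the pointer rather than a boolean.

	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/* *pmd is a stable huge/devmap entry while ptl is held */
		spin_unlock(ptl);
	}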
include/trace/events/page_isolation.h
@@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated,

 	TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s",
 		__entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
-		__entry->end_pfn == __entry->fin_pfn ? "success" : "fail")
+		__entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
 );

 #endif /* _TRACE_PAGE_ISOLATION_H */
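Why "<=" mirrors the check function: test_pages_isolated() treats the range as isolated once the helper has advanced past end_pfn. A paraphrase of that caller from this era's mm/page_isolation.c (an assumption about the surrounding code, not part of the diff):

	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;	/* success iff end_pfn <= pfn */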
init/Kconfig
@@ -272,8 +272,9 @@ config CROSS_MEMORY_ATTACH
	  See the man page for more details.

 config FHANDLE
-	bool "open by fhandle syscalls"
+	bool "open by fhandle syscalls" if EXPERT
 	select EXPORTFS
+	default y
 	help
	  If you say Y here, a user level program will be able to map
	  file names to handle and then later use the handle for
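What CONFIG_FHANDLE buys userspace is the name_to_handle_at(2)/open_by_handle_at(2) pair. A minimal sketch (hypothetical paths, error handling trimmed; open_by_handle_at() requires CAP_DAC_READ_SEARCH):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct file_handle *fh;
	int mount_id, mount_fd, fd;
	char buf[128];
	ssize_t n;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* map a path to an opaque handle that stays valid across processes */
	if (name_to_handle_at(AT_FDCWD, "/etc/hostname", fh, &mount_id, 0) < 0)
		return 1;

	/* ...possibly much later: reopen the file via the handle */
	mount_fd = open("/etc", O_RDONLY | O_DIRECTORY);
	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		write(STDOUT_FILENO, buf, n);
	return 0;
}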
mm/kasan/kasan.c
@@ -498,7 +498,7 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_FREE;
-		set_track(&free_info->track);
+		set_track(&free_info->track, GFP_NOWAIT);
 	}
 #endif

mm/oom_kill.c
@@ -547,7 +547,11 @@ static int oom_reaper(void *unused)

 static void wake_oom_reaper(struct task_struct *tsk)
 {
-	if (!oom_reaper_th || tsk->oom_reaper_list)
+	if (!oom_reaper_th)
+		return;
+
+	/* tsk is already queued? */
+	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
 		return;

 	get_task_struct(tsk);
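For context, the enqueue this guards (paraphrased from 4.6-era mm/oom_kill.c; not shown in the diff): the queue is a bare singly linked list threaded through task_struct, so the first task pushed keeps ->oom_reaper_list == NULL even while queued, and when it is the only entry it is also the head. That is the case the old tsk->oom_reaper_list test missed and the new tsk == oom_reaper_list test catches.

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;	/* NULL if the queue was empty */
	oom_reaper_list = tsk;			/* tsk becomes the new head */
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);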
mm/page_isolation.c
@@ -215,7 +215,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range are isolated.
+ * Returns the last tested pfn.
  */
 static unsigned long
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
	 * now as a simple work-around, we use the next node for destination.
	 */
 	if (PageHuge(page)) {
-		nodemask_t src = nodemask_of_node(page_to_nid(page));
-		nodemask_t dst;
-		nodes_complement(dst, src);
+		int node = next_online_node(page_to_nid(page));
+
+		if (node == MAX_NUMNODES)
+			node = first_online_node;
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
-					    next_node(page_to_nid(page), dst));
+					    node);
 	}

 	if (PageHighMem(page))
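The bug in the removed lines, for the record: nodes_complement() fills dst with every node that is not the source, including offline ones, so next_node() could hand alloc_huge_page_node() an invalid node. The replacement stays within node_online_map. A standalone restatement of the new pick (hypothetical helper name):

	/* round-robin to the next online node, wrapping at the end of the map */
	static int next_migration_node(int nid)
	{
		int node = next_online_node(nid);

		return node == MAX_NUMNODES ? first_online_node : node;
	}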
mm/rmap.c
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }

 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)

 	cpu = get_cpu();

-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
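The design point of these last two hunks: rather than open-coding an IPI broadcast through smp_call_function_many(), the batched-unmap path now goes through flush_tlb_others(), which picks up the architecture's optimized remote-flush paths (paravirt, UV) for free; the NULL mm plus TLB_FLUSH_ALL arguments request an unconditional full flush on every CPU in the mask, per the flush_tlb_func() change earlier in this commit.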