19fbad905e
Convert mm_lock_seq to be seqcount_t and change all mmap_write_lock
variants to increment it, in line with the usual seqcount usage pattern.
This lets us check whether the mmap_lock is write-locked by checking the
mm_lock_seq.sequence counter (odd = locked, even = unlocked). This will
be used when implementing mmap_lock speculation functions.

As a result, vm_lock_seq is also changed to be unsigned to match the
type of mm_lock_seq.sequence.

Link: https://lkml.kernel.org/r/20241122174416.1367052-2-surenb@google.com
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Sourav Panda <souravpanda@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
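The odd/even convention follows from how seqcount writers bump the
counter: the sequence is incremented once when a write section begins
(making it odd) and once when it ends (making it even). Below is a
minimal sketch of the resulting write-lock check using the stock
<linux/seqlock.h> API; the helper name is illustrative, not something
this patch adds, and it assumes CONFIG_PER_VMA_LOCK so that
mm->mm_lock_seq exists:

#include <linux/mm_types.h>
#include <linux/seqlock.h>

/* Illustrative helper, not from this patch: returns true if a writer
 * currently holds mm->mmap_lock, inferred from the seqcount alone.
 * raw_read_seqcount() returns the raw sequence value; an odd value
 * means a write section is in progress. */
static inline bool mm_write_locked_hint(struct mm_struct *mm)
{
	return raw_read_seqcount(&mm->mm_lock_seq) & 1;
}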
58 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm_types.h>
#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/mman.h>
#include <linux/pgtable.h>

#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <linux/iommu.h>
#include <asm/mmu.h>

#ifndef INIT_MM_CONTEXT
#define INIT_MM_CONTEXT(name)
#endif

const struct vm_operations_struct vma_dummy_vm_ops;

/*
 * For dynamically allocated mm_structs, there is a dynamically sized cpumask
 * at the end of the structure, the size of which depends on the maximum CPU
 * number the system can see. That way we allocate only as much memory for
 * mm_cpumask() as needed for the hundreds, or thousands of processes that
 * a system typically runs.
 *
 * Since there is only one init_mm in the entire system, keep it simple
 * and size this cpu_bitmask to NR_CPUS.
 */
struct mm_struct init_mm = {
	.mm_mt		= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
	.pgd		= swapper_pg_dir,
	.mm_users	= ATOMIC_INIT(2),
	.mm_count	= ATOMIC_INIT(1),
	.write_protect_seq = SEQCNT_ZERO(init_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(init_mm)
	.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
	.arg_lock	= __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
	.mmlist		= LIST_HEAD_INIT(init_mm.mmlist),
#ifdef CONFIG_PER_VMA_LOCK
	.mm_lock_seq	= SEQCNT_ZERO(init_mm.mm_lock_seq),
#endif
	.user_ns	= &init_user_ns,
	.cpu_bitmap	= CPU_BITS_NONE,
	INIT_MM_CONTEXT(init_mm)
};
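/*
 * Illustrative sketch, modeled on mm_cache_init() in kernel/fork.c and
 * not part of init-mm.c: how the dynamically sized cpumask described in
 * the comment above is provided for ordinary, dynamically allocated
 * mm_structs. The slab object is sized as struct mm_struct plus
 * cpumask_size(), i.e. just enough bits for the CPUs this system can
 * actually have, instead of the full NR_CPUS bitmap init_mm carries.
 * Needs <linux/slab.h>; names prefixed "example_" are hypothetical.
 */
static struct kmem_cache *example_mm_cachep;

static void __init example_mm_cache_init(void)
{
	/* mm_cpumask() storage lives at the very end of struct mm_struct */
	unsigned int mm_size = sizeof(struct mm_struct) + cpumask_size();

	example_mm_cachep = kmem_cache_create("example_mm_struct", mm_size,
					      ARCH_MIN_MMSTRUCT_ALIGN,
					      SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					      NULL);
}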
void setup_initial_init_mm(void *start_code, void *end_code,
			   void *end_data, void *brk)
{
	init_mm.start_code = (unsigned long)start_code;
	init_mm.end_code = (unsigned long)end_code;
	init_mm.end_data = (unsigned long)end_data;
	init_mm.brk = (unsigned long)brk;
}
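Finally, a sketch of the read-side pattern the new seqcount enables for
the mmap_lock speculation functions mentioned in the commit message. It
uses only the stock seqcount API (raw_read_seqcount() and
read_seqcount_retry()); the function names and exact semantics here are
assumptions, not the interface the follow-up patches actually add:

#include <linux/mm_types.h>
#include <linux/seqlock.h>

/* Hypothetical: begin a speculative (lockless) read of mm state.
 * Fails immediately if the sequence is odd, i.e. a writer holds
 * mmap_lock right now. */
static inline bool example_mmap_speculate_begin(struct mm_struct *mm,
						unsigned int *seq)
{
	*seq = raw_read_seqcount(&mm->mm_lock_seq);
	return !(*seq & 1);
}

/* Hypothetical: after the speculative read, returns true if any
 * mmap_write_lock section began or ended in the meantime, so the
 * result is stale and the caller must retry or take the lock. */
static inline bool example_mmap_speculate_retry(struct mm_struct *mm,
						unsigned int seq)
{
	return read_seqcount_retry(&mm->mm_lock_seq, seq);
}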