[PATCH] Swap Migration V5: sys_migrate_pages interface

sys_migrate_pages implementation using swap based page migration

This is the original API proposed by Ray Bryant in his posts during the first
half of 2005 on linux-mm@kvack.org and linux-kernel@vger.kernel.org.

The intent of sys_migrate_pages is to migrate the memory of a process.  A
process may have been moved to another node, so its memory was allocated
optimally for the prior context.  sys_migrate_pages allows that memory to be
shifted to the new node.

sys_migrate_pages is also useful for manually moving a process's memory when
its available memory nodes have changed through cpuset operations.  Paul
Jackson is working on an automated mechanism that will migrate memory
automatically when the cpuset of a process is changed.  However, a user may
still decide to control the migration manually.

This implementation lives in the memory policy layer since it uses concepts
and functions that are also needed for mbind and friends.  The patch also
provides a do_migrate_pages() function that may be useful for cpusets to move
memory automatically.  In contrast to Ray's implementation, sys_migrate_pages
does not modify policies.
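
For illustration, here is a minimal user-space sketch of how the new system
call could be invoked.  This is not part of the patch: the raw syscall(2)
invocation, the fallback syscall number (294, the i386 value added below), and
the node numbers are assumptions, since no libc wrapper exists at this point.

  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  #ifndef __NR_migrate_pages
  #define __NR_migrate_pages 294          /* i386 number added by this patch */
  #endif

  int main(void)
  {
          unsigned long old_nodes = 1UL << 0;     /* drain memory off node 0 */
          unsigned long new_nodes = 1UL << 1;     /* ... and toward node 1   */
          long ret;

          /* pid 0 means the calling process; maxnode is the bit length of the masks */
          ret = syscall(__NR_migrate_pages, 0, sizeof(unsigned long) * 8,
                        &old_nodes, &new_nodes);
          if (ret < 0)
                  perror("migrate_pages");
          else
                  printf("pages that could not be moved: %ld\n", ret);
          return 0;
  }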

The current code is based on the swap-based page migration capability and thus
cannot preserve the physical layout relative to the containing nodeset (which
may be a cpuset).  When direct page migration becomes available, the
implementation will need to be changed to perform an isomorphic move of pages
between different nodesets.  For now, the implementation simply evicts all
pages that are on nodes in the source nodeset but not in the target nodeset.
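
A small worked example of that rule (a sketch only; the kernel side uses
nodemask_t and nodes_andnot(), plain bitmasks stand in for nodemasks here):
with a source nodeset {0,1} and a target nodeset {1,2}, only pages on node 0
are swapped out, because node 1 is also in the target set.

  #include <stdio.h>

  int main(void)
  {
          unsigned long from  = (1UL << 0) | (1UL << 1);  /* source nodeset {0,1} */
          unsigned long to    = (1UL << 1) | (1UL << 2);  /* target nodeset {1,2} */
          unsigned long evict = from & ~to;       /* nodes whose pages get swapped out */

          printf("evicted node mask: %#lx\n", evict);     /* prints 0x1, i.e. node 0 */
          return 0;
  }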

Patch supports ia64, i386 and x86_64.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author:    Christoph Lameter <clameter@sgi.com>
Date:      2006-01-08 01:00:51 -08:00
Committer: Linus Torvalds
Commit:    39743889aa (parent dc9aa5b9d6)

 11 files changed, 111 insertions(+), 5 deletions(-)


@@ -293,3 +293,4 @@ ENTRY(sys_call_table)
 	.long sys_inotify_init
 	.long sys_inotify_add_watch
 	.long sys_inotify_rm_watch
+	.long sys_migrate_pages


@@ -1600,5 +1600,6 @@ sys_call_table:
 	data8 sys_inotify_init
 	data8 sys_inotify_add_watch
 	data8 sys_inotify_rm_watch
+	data8 sys_migrate_pages			// 1280
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls


@@ -643,6 +643,7 @@ ia32_sys_call_table:
 	.quad sys_inotify_init
 	.quad sys_inotify_add_watch
 	.quad sys_inotify_rm_watch
+	.quad sys_migrate_pages
 ia32_syscall_end:
 	.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
 		.quad ni_syscall


@@ -299,8 +299,9 @@
 #define __NR_inotify_init	291
 #define __NR_inotify_add_watch	292
 #define __NR_inotify_rm_watch	293
+#define __NR_migrate_pages	294
-#define NR_syscalls 294
+#define NR_syscalls 295
 /*
  * user-visible error numbers are in the range -1 - -128: see


@@ -269,12 +269,13 @@
 #define __NR_inotify_init		1277
 #define __NR_inotify_add_watch		1278
 #define __NR_inotify_rm_watch		1279
+#define __NR_migrate_pages		1280
 #ifdef __KERNEL__
 #include <linux/config.h>
-#define NR_syscalls			256 /* length of syscall table */
+#define NR_syscalls			270 /* length of syscall table */
 #define __ARCH_WANT_SYS_RT_SIGACTION


@@ -299,7 +299,8 @@
 #define __NR_ia32_inotify_init		291
 #define __NR_ia32_inotify_add_watch	292
 #define __NR_ia32_inotify_rm_watch	293
+#define __NR_ia32_migrate_pages		294
-#define IA32_NR_syscalls 294	/* must be > than biggest syscall! */
+#define IA32_NR_syscalls 295	/* must be > than biggest syscall! */
 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */


@@ -571,8 +571,10 @@ __SYSCALL(__NR_inotify_init, sys_inotify_init)
 __SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
 #define __NR_inotify_rm_watch 255
 __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+#define __NR_migrate_pages 256
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
-#define __NR_syscall_max __NR_inotify_rm_watch
+#define __NR_syscall_max __NR_migrate_pages
 #ifndef __NO_STUBS
 /* user-visible error numbers are in the range -1 - -4095 */


@@ -162,6 +162,9 @@ static inline void check_highest_zone(int k)
 		policy_zone = k;
 }
+
+int do_migrate_pages(struct mm_struct *mm,
+	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
 #else
 struct mempolicy {};


@@ -511,5 +511,7 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
 asmlinkage long sys_ioprio_get(int which, int who);
 asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
 				unsigned long maxnode);
+asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
+				const unsigned long __user *from,
+				const unsigned long __user *to);
 #endif


@@ -82,6 +82,7 @@ cond_syscall(compat_sys_socketcall);
 cond_syscall(sys_inotify_init);
 cond_syscall(sys_inotify_add_watch);
 cond_syscall(sys_inotify_rm_watch);
+cond_syscall(sys_migrate_pages);
 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read);


@@ -614,12 +614,42 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
 	return err;
 }
+
+/*
+ * For now migrate_pages simply swaps out the pages from nodes that are in
+ * the source set but not in the target set. In the future, we would
+ * want a function that moves pages between the two nodesets in such
+ * a way as to preserve the physical layout as much as possible.
+ *
+ * Returns the number of pages that could not be moved.
+ */
+int do_migrate_pages(struct mm_struct *mm,
+	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+{
+	LIST_HEAD(pagelist);
+	int count = 0;
+	nodemask_t nodes;
+
+	nodes_andnot(nodes, *from_nodes, *to_nodes);
+	nodes_complement(nodes, nodes);
+
+	down_read(&mm->mmap_sem);
+	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes,
+			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+	if (!list_empty(&pagelist)) {
+		migrate_pages(&pagelist, NULL);
+		if (!list_empty(&pagelist))
+			count = putback_lru_pages(&pagelist);
+	}
+	up_read(&mm->mmap_sem);
+	return count;
+}
+
 /*
  * User space interface with variable sized bitmaps for nodelists.
  */
 /* Copy a node mask from user space. */
-static int get_nodes(nodemask_t *nodes, unsigned long __user *nmask,
+static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
 		     unsigned long maxnode)
 {
 	unsigned long k;
@@ -708,6 +738,68 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
 	return do_set_mempolicy(mode, &nodes);
 }
+
+/* Macro needed until Paul implements this function in kernel/cpusets.c */
+#define cpuset_mems_allowed(task) node_online_map
+
+asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
+		const unsigned long __user *old_nodes,
+		const unsigned long __user *new_nodes)
+{
+	struct mm_struct *mm;
+	struct task_struct *task;
+	nodemask_t old;
+	nodemask_t new;
+	nodemask_t task_nodes;
+	int err;
+
+	err = get_nodes(&old, old_nodes, maxnode);
+	if (err)
+		return err;
+
+	err = get_nodes(&new, new_nodes, maxnode);
+	if (err)
+		return err;
+
+	/* Find the mm_struct */
+	read_lock(&tasklist_lock);
+	task = pid ? find_task_by_pid(pid) : current;
+	if (!task) {
+		read_unlock(&tasklist_lock);
+		return -ESRCH;
+	}
+	mm = get_task_mm(task);
+	read_unlock(&tasklist_lock);
+
+	if (!mm)
+		return -EINVAL;
+
+	/*
+	 * Check if this process has the right to modify the specified
+	 * process. The right exists if the process has administrative
+	 * capabilities, superuser privileges or the same
+	 * userid as the target process.
+	 */
+	if ((current->euid != task->suid) && (current->euid != task->uid) &&
+	    (current->uid != task->suid) && (current->uid != task->uid) &&
+	    !capable(CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out;
+	}
+
+	task_nodes = cpuset_mems_allowed(task);
+	/* Is the user allowed to access the target nodes? */
+	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out;
+	}
+
+	err = do_migrate_pages(mm, &old, &new, MPOL_MF_MOVE);
+out:
+	mmput(mm);
+	return err;
+}
+
 /* Retrieve NUMA policy */
 asmlinkage long sys_get_mempolicy(int __user *policy,
 				  unsigned long __user *nmask,