author     David Rientjes <rientjes@google.com>            2008-04-28 02:12:32 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 08:58:20 -0700
commit     1d0d2680a01c4f9e292ec6d4714884da939053a1
tree       1377ed40ec15ffecc584b308a671be47b5145db3  /mm/mempolicy.c
parent     65d66fc02ed9433b957588071b60425b12628e25
mempolicy: move rebind functions
Move the mpol_rebind_{policy,task,mm}() functions after mpol_new() to avoid
having to declare function prototypes.
Cc: Paul Jackson <pj@sgi.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mempolicy.c')
 -rw-r--r--  mm/mempolicy.c  185
1 files changed, 91 insertions, 94 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ffd3be66b255..d44c524e5ae4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -110,9 +110,6 @@ struct mempolicy default_policy = {
         .policy = MPOL_DEFAULT,
 };
 
-static void mpol_rebind_policy(struct mempolicy *pol,
-                                        const nodemask_t *newmask);
-
 /* Check that the nodemask contains at least one populated zone */
 static int is_valid_nodemask(nodemask_t *nodemask)
 {
@@ -203,6 +200,97 @@ free:
         return ERR_PTR(-EINVAL);
 }
 
+/* Migrate a policy to a different set of nodes */
+static void mpol_rebind_policy(struct mempolicy *pol,
+                               const nodemask_t *newmask)
+{
+        nodemask_t tmp;
+        int static_nodes;
+        int relative_nodes;
+
+        if (!pol)
+                return;
+        static_nodes = pol->flags & MPOL_F_STATIC_NODES;
+        relative_nodes = pol->flags & MPOL_F_RELATIVE_NODES;
+        if (!mpol_store_user_nodemask(pol) &&
+            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
+                return;
+
+        switch (pol->policy) {
+        case MPOL_DEFAULT:
+                break;
+        case MPOL_BIND:
+                /* Fall through */
+        case MPOL_INTERLEAVE:
+                if (static_nodes)
+                        nodes_and(tmp, pol->w.user_nodemask, *newmask);
+                else if (relative_nodes)
+                        mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
+                                               newmask);
+                else {
+                        nodes_remap(tmp, pol->v.nodes,
+                                    pol->w.cpuset_mems_allowed, *newmask);
+                        pol->w.cpuset_mems_allowed = *newmask;
+                }
+                pol->v.nodes = tmp;
+                if (!node_isset(current->il_next, tmp)) {
+                        current->il_next = next_node(current->il_next, tmp);
+                        if (current->il_next >= MAX_NUMNODES)
+                                current->il_next = first_node(tmp);
+                        if (current->il_next >= MAX_NUMNODES)
+                                current->il_next = numa_node_id();
+                }
+                break;
+        case MPOL_PREFERRED:
+                if (static_nodes) {
+                        int node = first_node(pol->w.user_nodemask);
+
+                        if (node_isset(node, *newmask))
+                                pol->v.preferred_node = node;
+                        else
+                                pol->v.preferred_node = -1;
+                } else if (relative_nodes) {
+                        mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
+                                               newmask);
+                        pol->v.preferred_node = first_node(tmp);
+                } else {
+                        pol->v.preferred_node = node_remap(pol->v.preferred_node,
+                                        pol->w.cpuset_mems_allowed, *newmask);
+                        pol->w.cpuset_mems_allowed = *newmask;
+                }
+                break;
+        default:
+                BUG();
+                break;
+        }
+}
+
+/*
+ * Wrapper for mpol_rebind_policy() that just requires task
+ * pointer, and updates task mempolicy.
+ */
+
+void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
+{
+        mpol_rebind_policy(tsk->mempolicy, new);
+}
+
+/*
+ * Rebind each vma in mm to new nodemask.
+ *
+ * Call holding a reference to mm.  Takes mm->mmap_sem during call.
+ */
+
+void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+        struct vm_area_struct *vma;
+
+        down_write(&mm->mmap_sem);
+        for (vma = mm->mmap; vma; vma = vma->vm_next)
+                mpol_rebind_policy(vma->vm_policy, new);
+        up_write(&mm->mmap_sem);
+}
+
 static void gather_stats(struct page *, void *, int pte_dirty);
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                 unsigned long flags);
@@ -1757,97 +1845,6 @@ void numa_default_policy(void)
         do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
 }
 
-/* Migrate a policy to a different set of nodes */
-static void mpol_rebind_policy(struct mempolicy *pol,
-                               const nodemask_t *newmask)
-{
-        nodemask_t tmp;
-        int static_nodes;
-        int relative_nodes;
-
-        if (!pol)
-                return;
-        static_nodes = pol->flags & MPOL_F_STATIC_NODES;
-        relative_nodes = pol->flags & MPOL_F_RELATIVE_NODES;
-        if (!mpol_store_user_nodemask(pol) &&
-            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
-                return;
-
-        switch (pol->policy) {
-        case MPOL_DEFAULT:
-                break;
-        case MPOL_BIND:
-                /* Fall through */
-        case MPOL_INTERLEAVE:
-                if (static_nodes)
-                        nodes_and(tmp, pol->w.user_nodemask, *newmask);
-                else if (relative_nodes)
-                        mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
-                                               newmask);
-                else {
-                        nodes_remap(tmp, pol->v.nodes,
-                                    pol->w.cpuset_mems_allowed, *newmask);
-                        pol->w.cpuset_mems_allowed = *newmask;
-                }
-                pol->v.nodes = tmp;
-                if (!node_isset(current->il_next, tmp)) {
-                        current->il_next = next_node(current->il_next, tmp);
-                        if (current->il_next >= MAX_NUMNODES)
-                                current->il_next = first_node(tmp);
-                        if (current->il_next >= MAX_NUMNODES)
-                                current->il_next = numa_node_id();
-                }
-                break;
-        case MPOL_PREFERRED:
-                if (static_nodes) {
-                        int node = first_node(pol->w.user_nodemask);
-
-                        if (node_isset(node, *newmask))
-                                pol->v.preferred_node = node;
-                        else
-                                pol->v.preferred_node = -1;
-                } else if (relative_nodes) {
-                        mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
-                                               newmask);
-                        pol->v.preferred_node = first_node(tmp);
-                } else {
-                        pol->v.preferred_node = node_remap(pol->v.preferred_node,
-                                        pol->w.cpuset_mems_allowed, *newmask);
-                        pol->w.cpuset_mems_allowed = *newmask;
-                }
-                break;
-        default:
-                BUG();
-                break;
-        }
-}
-
-/*
- * Wrapper for mpol_rebind_policy() that just requires task
- * pointer, and updates task mempolicy.
- */
-
-void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
-{
-        mpol_rebind_policy(tsk->mempolicy, new);
-}
-
-/*
- * Rebind each vma in mm to new nodemask.
- *
- * Call holding a reference to mm.  Takes mm->mmap_sem during call.
- */
-
-void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
-{
-        struct vm_area_struct *vma;
-
-        down_write(&mm->mmap_sem);
-        for (vma = mm->mmap; vma; vma = vma->vm_next)
-                mpol_rebind_policy(vma->vm_policy, new);
-        up_write(&mm->mmap_sem);
-}
-
 /*
  * Display pages allocated per node and memory policy via /proc.
  */
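
The motivation for the move is the usual C rule that a static function needs no separate prototype when its definition appears before its first use. The stand-alone sketch below (hypothetical names, not kernel code) illustrates the same pattern in miniature: defining the helper above its caller lets the forward declaration be dropped, just as this patch drops the mpol_rebind_policy() prototype.

#include <stdio.h>

/*
 * Before a move like this one, the file would need a forward declaration:
 *
 *     static void rebind_mask(int *mask, int allowed);
 *
 * Because rebind_mask() is defined before its first caller, that
 * prototype is unnecessary.
 */
static void rebind_mask(int *mask, int allowed)
{
        *mask &= allowed;            /* keep only bits that are still allowed */
}

static int apply_policy(int mask, int allowed)
{
        rebind_mask(&mask, allowed); /* caller follows the definition */
        return mask;
}

int main(void)
{
        printf("%#x\n", apply_policy(0xff, 0x0f));  /* prints 0xf */
        return 0;
}

The kernel change is mechanical in the same way: the bodies of mpol_rebind_{policy,task,mm}() are unchanged in the diff above; only their position relative to their callers moves.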