From af8e3354b4bbd1ee5a3a55d11a5e1fe37e77f0ba Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Mon, 14 Dec 2009 17:58:59 -0800
Subject: mm: CONFIG_MMU for PG_mlocked

Remove three degrees of obfuscation, left over from when we had
CONFIG_UNEVICTABLE_LRU.  MLOCK_PAGES is CONFIG_HAVE_MLOCKED_PAGE_BIT is
CONFIG_HAVE_MLOCK is CONFIG_MMU.  rmap.o (and memory-failure.o) are only
built when CONFIG_MMU, so don't need such conditions at all.

Somehow, I feel no compulsion to remove the CONFIG_HAVE_MLOCK* lines from
169 defconfigs: leave those to evolve in due course.

Signed-off-by: Hugh Dickins
Cc: Izik Eidus
Cc: Andrea Arcangeli
Cc: Nick Piggin
Reviewed-by: KOSAKI Motohiro
Cc: Rik van Riel
Cc: Lee Schermerhorn
Cc: Andi Kleen
Cc: KAMEZAWA Hiroyuki
Cc: Wu Fengguang
Cc: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/internal.h | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb..cb7d92d0a46 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,17 +63,6 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
-#ifdef CONFIG_HAVE_MLOCK
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
-			unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
-	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
-#endif
-
 /*
  * unevictable_migrate_page() called only from migrate_page_copy() to
  * migrate unevictable flag to new page.
@@ -86,7 +75,16 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
 		SetPageUnevictable(new);
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
 /*
  * Called only in fault path via page_evictable() for a new page
  * to determine if it's being mapped into a LOCKED vma.
@@ -144,7 +142,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 	}
 }
 
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
@@ -153,7 +151,7 @@ static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
 
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
 
 /*
  * Return the mem_map entry representing the 'offset' subpage within
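
For readers tracing the "three degrees of obfuscation" named in the changelog,
the pre-patch alias chain looked roughly like this. This is an illustrative
reconstruction of the CONFIG_UNEVICTABLE_LRU-era code, not part of this diff,
and the exact spelling in the old mm/rmap.c may have differed slightly:

	/*
	 * Sketch of the aliasing this patch collapses: MLOCK_PAGES was a
	 * C macro keyed off CONFIG_HAVE_MLOCKED_PAGE_BIT, which Kconfig
	 * derived from CONFIG_HAVE_MLOCK, which in turn was enabled
	 * exactly when CONFIG_MMU was. All three reduce to CONFIG_MMU.
	 */
	#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
	#define MLOCK_PAGES	1
	#else
	#define MLOCK_PAGES	0
	#endif

So a test of MLOCK_PAGES was, through two intermediate symbols, just a test
of CONFIG_MMU; and since rmap.o and memory-failure.o are built only when
CONFIG_MMU is set, the conditions were dead weight there.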