From 6aab341e0a28aff100a09831c5300a2994b8b986 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 28 Nov 2005 14:34:23 -0800
Subject: mm: re-architect the VM_UNPAGED logic

This replaces the (in my opinion horrible) VM_UNMAPPED logic with very
explicit support for a "remapped page range" aka VM_PFNMAP.  It allows a
VM area to contain an arbitrary range of page table entries that the VM
never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new
functionality, and doesn't even have to mark the pages reserved or
indeed mark them any other way.  It just works.  As a side effect, doing
mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds
---
 fs/proc/task_mmu.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'fs')

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9ab97cef0da..50bd5a8f044 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -402,12 +402,11 @@ struct numa_maps {
 /*
  * Calculate numa node maps for a vma
  */
-static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
+static struct numa_maps *get_numa_maps(struct vm_area_struct *vma)
 {
+	int i;
 	struct page *page;
 	unsigned long vaddr;
-	struct mm_struct *mm = vma->vm_mm;
-	int i;
 	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
 
 	if (!md)
@@ -420,7 +419,7 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
 	md->node[i] =0;
 
 	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
-		page = follow_page(mm, vaddr, 0);
+		page = follow_page(vma, vaddr, 0);
 		if (page) {
 			int count = page_mapcount(page);
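
For context, here is a minimal sketch of the kind of driver mmap handler
that benefits from this commit.  It is illustrative only and not part of
the patch: the device name (mydev) and the physical base address
(MYDEV_PHYS_BASE) are made-up placeholders.  After this change,
remap_pfn_range() flags the vma VM_PFNMAP on its own, so the driver no
longer needs to mark the backing pages reserved.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical device register window; not a real address. */
#define MYDEV_PHYS_BASE	0xfd000000UL

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * remap_pfn_range() fills the vma with raw pfn mappings that
	 * the VM never considers to be normal pages: no refcounting,
	 * no rmap, and no need to fiddle with PageReserved.
	 */
	if (remap_pfn_range(vma, vma->vm_start,
			    MYDEV_PHYS_BASE >> PAGE_SHIFT,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

The same mechanism is what makes mmap() on /dev/mem work for arbitrary
ranges, as the commit message notes: /dev/mem's mmap handler is just
another remap_pfn_range() caller.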