From f98782ddd31ac6f938386b79d8bd7aa7c8a78c50 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Mon, 29 Apr 2013 15:07:34 -0700
Subject: mm, vmalloc: iterate vmap_area_list in get_vmalloc_info()

This patch is a preparatory step for removing vmlist entirely.  For
that purpose, we change the code that iterates over vmlist so that it
iterates over vmap_area_list instead.  The change is mostly trivial,
but one thing should be noted.

vmlist lacks information about some areas in the vmalloc address
space.  For example, vm_map_ram() allocates an area in the vmalloc
address space, but it doesn't link that area into vmlist.  It is
better to provide full information about the vmalloc address space,
so we don't use va->vm and instead use the vmap_area directly.  This
makes get_vmalloc_info() more precise.

Signed-off-by: Joonsoo Kim
Signed-off-by: Joonsoo Kim
Cc: Thomas Gleixner
Cc: "H. Peter Anvin"
Cc: Atsushi Kumagai
Cc: Chris Metcalf
Cc: Dave Anderson
Cc: Eric Biederman
Cc: Guan Xuetao
Cc: Ingo Molnar
Cc: Vivek Goyal
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmalloc.c | 56 ++++++++++++++++++++++++++++++--------------------------
 1 file changed, 30 insertions(+), 26 deletions(-)

(limited to 'mm')

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 59aa328007b..aee1f61727a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2671,46 +2671,50 @@ module_init(proc_vmalloc_init);
 
 void get_vmalloc_info(struct vmalloc_info *vmi)
 {
-        struct vm_struct *vma;
+        struct vmap_area *va;
         unsigned long free_area_size;
         unsigned long prev_end;
 
         vmi->used = 0;
+        vmi->largest_chunk = 0;
 
-        if (!vmlist) {
-                vmi->largest_chunk = VMALLOC_TOTAL;
-        } else {
-                vmi->largest_chunk = 0;
+        prev_end = VMALLOC_START;
 
-                prev_end = VMALLOC_START;
-
-                read_lock(&vmlist_lock);
+        spin_lock(&vmap_area_lock);
 
-                for (vma = vmlist; vma; vma = vma->next) {
-                        unsigned long addr = (unsigned long) vma->addr;
+        if (list_empty(&vmap_area_list)) {
+                vmi->largest_chunk = VMALLOC_TOTAL;
+                goto out;
+        }
 
-                        /*
-                         * Some archs keep another range for modules in vmlist
-                         */
-                        if (addr < VMALLOC_START)
-                                continue;
-                        if (addr >= VMALLOC_END)
-                                break;
+        list_for_each_entry(va, &vmap_area_list, list) {
+                unsigned long addr = va->va_start;
 
-                        vmi->used += vma->size;
+                /*
+                 * Some archs keep another range for modules in vmalloc space
+                 */
+                if (addr < VMALLOC_START)
+                        continue;
+                if (addr >= VMALLOC_END)
+                        break;
 
-                        free_area_size = addr - prev_end;
-                        if (vmi->largest_chunk < free_area_size)
-                                vmi->largest_chunk = free_area_size;
+                if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+                        continue;
 
-                        prev_end = vma->size + addr;
-                }
+                vmi->used += (va->va_end - va->va_start);
 
-                if (VMALLOC_END - prev_end > vmi->largest_chunk)
-                        vmi->largest_chunk = VMALLOC_END - prev_end;
+                free_area_size = addr - prev_end;
+                if (vmi->largest_chunk < free_area_size)
+                        vmi->largest_chunk = free_area_size;
 
-                read_unlock(&vmlist_lock);
+                prev_end = va->va_end;
         }
+
+        if (VMALLOC_END - prev_end > vmi->largest_chunk)
+                vmi->largest_chunk = VMALLOC_END - prev_end;
+
+out:
+        spin_unlock(&vmap_area_lock);
 }
 #endif
-- 
cgit v1.2.3