commit 0506c31d0a8443a9f55eb69d81db426f2eb3296e
tree 625213fe266ba3572e6e09ed3855c7153e416803
parent f7cc67ae7f6221abe57857bd6efd21e06a05bd45
Author:     Baolin Wang <baolin.wang@linux.alibaba.com>
AuthorDate: 2022-06-20 19:47:15 +0800
Commit:     akpm <akpm@linux-foundation.org>
CommitDate: 2022-07-03 18:08:49 -0700
mm: rmap: simplify the hugetlb handling when unmapping or migration
As noted in a previous discussion [1], the hugetlb handling in the unmap
and migrate paths uses too many levels of indentation.  We can combine
the folio_test_anon() and huge_pmd_unshare() checks to save one level of
indentation, by adding a local variable and moving the VM_BUG_ON()
slightly earlier.

No functional changes are intended in this patch.
[1] https://lore.kernel.org/all/0b986dc4-5843-3e2d-c2df-5a2e9f13e6ab@oracle.com/
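
As a rough sketch of the restructuring (standalone C with invented
names: anon, locked and shared_pmd stand in for folio_test_anon(), the
TTU_RMAP_LOCKED test and a successful huge_pmd_unshare(); this is not
the kernel code itself):

	#include <assert.h>
	#include <stdbool.h>

	/* Before: the assertion and the unshare test sit nested inside
	 * the !anon branch. */
	static bool hugetlb_done_before(bool anon, bool locked, bool shared_pmd)
	{
		if (!anon) {
			assert(locked);		/* VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)) */
			if (shared_pmd)
				return true;	/* shared PMD unshared; stop the walk */
		}
		return false;			/* fall through and unmap the page */
	}

	/* After: the anon test is folded into the condition, removing one
	 * level of indentation; the assertion moves earlier, guarded by
	 * !anon so it still only applies to the file-backed case. */
	static bool hugetlb_done_after(bool anon, bool locked, bool shared_pmd)
	{
		assert(!(!anon && !locked));	/* VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED)) */
		if (!anon && shared_pmd)
			return true;		/* shared PMD unshared; stop the walk */
		return false;
	}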
Link: https://lkml.kernel.org/r/28414b1b96f095e838c1e548074f8e0fc70d78cf.1655724713.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 mm/rmap.c | 90 +++++++++++++++++++++++++++----------------------------
 1 file changed, 44 insertions(+), 46 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 65e0a767b837..56134cdc5ca3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1537,6 +1537,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 					PageAnonExclusive(subpage);
 
 		if (folio_test_hugetlb(folio)) {
+			bool anon = folio_test_anon(folio);
+
 			/*
 			 * The try_to_unmap() is only passed a hugetlb page
 			 * in the case where the hugetlb page is poisoned.
@@ -1551,31 +1553,28 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 */
 			flush_cache_range(vma, range.start, range.end);
 
-			if (!folio_test_anon(folio)) {
+			/*
+			 * To call huge_pmd_unshare, i_mmap_rwsem must be
+			 * held in write mode.  Caller needs to explicitly
+			 * do this outside rmap routines.
+			 */
+			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+				flush_tlb_range(vma, range.start, range.end);
+				mmu_notifier_invalidate_range(mm, range.start,
+							      range.end);
+
 				/*
-				 * To call huge_pmd_unshare, i_mmap_rwsem must be
-				 * held in write mode.  Caller needs to explicitly
-				 * do this outside rmap routines.
+				 * The ref count of the PMD page was dropped
+				 * which is part of the way map counting
+				 * is done for shared PMDs.  Return 'true'
+				 * here.  When there is no other sharing,
+				 * huge_pmd_unshare returns false and we will
+				 * unmap the actual page and drop map count
+				 * to zero.
 				 */
-				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
-				if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-					flush_tlb_range(vma, range.start, range.end);
-					mmu_notifier_invalidate_range(mm, range.start,
-								      range.end);
-
-					/*
-					 * The ref count of the PMD page was dropped
-					 * which is part of the way map counting
-					 * is done for shared PMDs.  Return 'true'
-					 * here.  When there is no other sharing,
-					 * huge_pmd_unshare returns false and we will
-					 * unmap the actual page and drop map count
-					 * to zero.
-					 */
-					page_vma_mapped_walk_done(&pvmw);
-					break;
-				}
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
 		} else {
@@ -1906,6 +1905,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 					PageAnonExclusive(subpage);
 
 		if (folio_test_hugetlb(folio)) {
+			bool anon = folio_test_anon(folio);
+
 			/*
 			 * huge_pmd_unshare may unmap an entire PMD page.
 			 * There is no way of knowing exactly which PMDs may
@@ -1915,31 +1916,28 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 */
 			flush_cache_range(vma, range.start, range.end);
 
-			if (!folio_test_anon(folio)) {
+			/*
+			 * To call huge_pmd_unshare, i_mmap_rwsem must be
+			 * held in write mode.  Caller needs to explicitly
+			 * do this outside rmap routines.
+			 */
+			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+				flush_tlb_range(vma, range.start, range.end);
+				mmu_notifier_invalidate_range(mm, range.start,
+							      range.end);
+
 				/*
-				 * To call huge_pmd_unshare, i_mmap_rwsem must be
-				 * held in write mode.  Caller needs to explicitly
-				 * do this outside rmap routines.
+				 * The ref count of the PMD page was dropped
+				 * which is part of the way map counting
+				 * is done for shared PMDs.  Return 'true'
+				 * here.  When there is no other sharing,
+				 * huge_pmd_unshare returns false and we will
+				 * unmap the actual page and drop map count
+				 * to zero.
 				 */
-				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
-				if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-					flush_tlb_range(vma, range.start, range.end);
-					mmu_notifier_invalidate_range(mm, range.start,
-								      range.end);
-
-					/*
-					 * The ref count of the PMD page was dropped
-					 * which is part of the way map counting
-					 * is done for shared PMDs.  Return 'true'
-					 * here.  When there is no other sharing,
-					 * huge_pmd_unshare returns false and we will
-					 * unmap the actual page and drop map count
-					 * to zero.
-					 */
-					page_vma_mapped_walk_done(&pvmw);
-					break;
-				}
+				page_vma_mapped_walk_done(&pvmw);
+				break;
 			}
 
 			/* Nuke the hugetlb page table entry */
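
Since the patch moves the VM_BUG_ON() out of the !anon branch and folds
!anon into its predicate, it may help to check that the old and new
assertions fire in exactly the same cases.  A small standalone C check
(invented names again: anon and locked stand for folio_test_anon(folio)
and flags & TTU_RMAP_LOCKED):

	#include <assert.h>
	#include <stdbool.h>

	/* Old: only evaluated once the !anon branch is taken. */
	static bool old_fires(bool anon, bool locked)
	{
		if (!anon)
			return !locked;	/* VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)) */
		return false;		/* assertion never reached for anon folios */
	}

	/* New: evaluated unconditionally, with !anon folded in. */
	static bool new_fires(bool anon, bool locked)
	{
		return !anon && !locked;	/* VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED)) */
	}

	int main(void)
	{
		/* Exhaustively compare all four (anon, locked) combinations. */
		for (int anon = 0; anon < 2; anon++)
			for (int locked = 0; locked < 2; locked++)
				assert(old_fires(anon, locked) == new_fires(anon, locked));
		return 0;
	}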