author     Lorenzo Stoakes <lstoakes@gmail.com>             2016-10-13 01:20:12 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-12-17 21:55:16 +0100
commit     ab424c8eb71ee9ea4ba798faaeaf62e84048cb9a
tree       24ed72e49c33b6aade395525b9710f19cf96984d
parent     6a3c9524df93e9597d30c2d32114950772fce58e
mm: remove write/force parameters from __get_user_pages_unlocked()
commit d4944b0ecec0af882483fe44b66729316e575208 upstream.
This removes the redundant 'write' and 'force' parameters from
__get_user_pages_unlocked() to make the use of FOLL_FORCE explicit in
callers as use of this flag can result in surprising behaviour (and
hence bugs) within the mm subsystem.
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 4.4:
- Defer changes in process_vm_rw_single_vec() and async_pf_execute() since
they use get_user_pages_unlocked() here
- Adjust context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
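To make the calling-convention change concrete, here is a minimal, hypothetical caller-side sketch (not part of this commit): pin_user_buffer() is an invented helper, while the new __get_user_pages_unlocked() signature and the FOLL_* flags are taken from the diff below. The point of the change is that write/force are no longer folded into gup_flags inside mm; the caller builds gup_flags itself, so any reliance on FOLL_FORCE is visible at the call site.

/* Hypothetical caller, kernel-style C, assuming a 4.4 tree with this patch applied. */
#include <linux/mm.h>
#include <linux/sched.h>

static long pin_user_buffer(unsigned long start, unsigned long nr_pages,
			    struct page **pages, bool write, bool force)
{
	unsigned int gup_flags = FOLL_TOUCH;

	/* Translate the old write/force booleans into explicit flags, exactly
	 * as the get_user_pages_unlocked() wrappers in this patch now do. */
	if (write)
		gup_flags |= FOLL_WRITE;
	if (force)
		gup_flags |= FOLL_FORCE;

	/* New signature: the write/force parameters are gone; only gup_flags remains. */
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags);
}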
 include/linux/mm.h  |  3
 mm/gup.c            | 19
 mm/nommu.c          | 14
 virt/kvm/kvm_main.c | 11
 4 files changed, 29 insertions(+), 18 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d4e8077fca96..391ae8a3d5f9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1207,8 +1207,7 @@ long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
 		    int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags);
+			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 		    unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages);
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -764,17 +764,11 @@ EXPORT_SYMBOL(get_user_pages_locked);
  */
 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 					       unsigned long start, unsigned long nr_pages,
-					       int write, int force, struct page **pages,
-					       unsigned int gup_flags)
+					       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	int locked = 1;
 
-	if (write)
-		gup_flags |= FOLL_WRITE;
-	if (force)
-		gup_flags |= FOLL_FORCE;
-
 	down_read(&mm->mmap_sem);
 	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
 				      &locked, false, gup_flags);
@@ -805,8 +799,15 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			     unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
-	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
-					 force, pages, FOLL_TOUCH);
+	unsigned int flags = FOLL_TOUCH;
+
+	if (write)
+		flags |= FOLL_WRITE;
+	if (force)
+		flags |= FOLL_FORCE;
+
+	return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+					 pages, flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 92be862c859b..5462ed88a548 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -211,8 +211,7 @@ EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags)
+			       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	down_read(&mm->mmap_sem);
@@ -227,8 +226,15 @@ long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			     unsigned long start, unsigned long nr_pages,
 			     int write, int force, struct page **pages)
 {
-	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
-					 force, pages, 0);
+	unsigned int flags = 0;
+
+	if (write)
+		flags |= FOLL_WRITE;
+	if (force)
+		flags |= FOLL_FORCE;
+
+	return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+					 pages, flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b814ae6822b6..e4be695eb789 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1352,10 +1352,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 		npages = get_user_page_nowait(current, current->mm, addr,
 					      write_fault, page);
 		up_read(&current->mm->mmap_sem);
-	} else
+	} else {
+		unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+		if (write_fault)
+			flags |= FOLL_WRITE;
+
 		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-						   write_fault, 0, page,
-						   FOLL_TOUCH|FOLL_HWPOISON);
+						   page, flags);
+	}
 
 	if (npages != 1)
 		return npages;