author		Hugh Dickins <hughd@google.com>			2014-08-06 16:06:43 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 18:01:19 -0700
commit		eb39d618f9e80f81cfc5788cf1b252d141c2f0c3 (patch)
tree		96ae0e0408c4f27125904fe81c1f21b13df0dc76 /mm
parent		c2ea2181db43ced2e5945b9596bb3bb9935ce92e (diff)
mm: replace init_page_accessed by __SetPageReferenced
Do we really need an exported alias for __SetPageReferenced()? Its
callers had better know what they are doing, in which case the page
would not already be marked referenced. Kill init_page_accessed() and
just use __SetPageReferenced() inline.
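
For background: SetPageReferenced() is an atomic page-flag operation, while
__SetPageReferenced() is the non-atomic variant, safe only while no other CPU
can yet reach the page; the removed init_page_accessed() was just a
PageReferenced() test wrapped around the non-atomic setter (see the mm/swap.c
hunk below). What follows is a minimal userspace sketch of that difference,
not kernel code: demo_set_referenced(), demo___set_referenced(),
demo_init_accessed() and DEMO_PG_REFERENCED are invented names, not kernel
APIs.

/*
 * Userspace model only; build with: cc -std=c11 demo.c
 * The names below stand in for the kernel's SetPageReferenced(),
 * __SetPageReferenced() and the removed init_page_accessed().
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_PG_REFERENCED (1UL << 2)	/* arbitrary bit for the demo */

typedef _Atomic unsigned long flags_t;

/* Like SetPageReferenced(): an atomic read-modify-write, safe even if
 * other CPUs may update the same flags word concurrently. */
static void demo_set_referenced(flags_t *flags)
{
	atomic_fetch_or(flags, DEMO_PG_REFERENCED);
}

/* Like __SetPageReferenced(): a plain (here: relaxed) update, only safe
 * while the page is freshly allocated and not yet visible to anyone else. */
static void demo___set_referenced(flags_t *flags)
{
	unsigned long v = atomic_load_explicit(flags, memory_order_relaxed);

	atomic_store_explicit(flags, v | DEMO_PG_REFERENCED,
			      memory_order_relaxed);
}

/* The removed init_page_accessed() was just this test around the
 * non-atomic setter; on a brand-new page the test can never be true. */
static void demo_init_accessed(flags_t *flags)
{
	if (!(atomic_load_explicit(flags, memory_order_relaxed) &
	      DEMO_PG_REFERENCED))
		demo___set_referenced(flags);
}

int main(void)
{
	flags_t flags = 0;		/* freshly "allocated" page: flags clear */

	demo_init_accessed(&flags);	/* old call: wrapper around __set */
	demo___set_referenced(&flags);	/* new call: same effect, no wrapper */

	printf("referenced bit set: %d\n",
	       (int)!!(atomic_load(&flags) & DEMO_PG_REFERENCED));

	demo_set_referenced(&flags);	/* atomic form, needed once published */
	return 0;
}

On a freshly allocated page the referenced bit is clear, so the wrapper's test
never fires and the call collapses to the plain setter, which is all the
callers wanted.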
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Prabhakar Lad <prabhakar.csengg@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	 4
-rw-r--r--	mm/shmem.c	 2
-rw-r--r--	mm/swap.c	14
3 files changed, 6 insertions, 14 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 65d44fd88c78..7e85c8147e1b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1091,9 +1091,9 @@ no_page:
 		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
 			fgp_flags |= FGP_LOCK;
 
-		/* Init accessed so avoit atomic mark_page_accessed later */
+		/* Init accessed so avoid atomic mark_page_accessed later */
 		if (fgp_flags & FGP_ACCESSED)
-			init_page_accessed(page);
+			__SetPageReferenced(page);
 		err = add_to_page_cache_lru(page, mapping, offset,
 						radix_gfp_mask);
 		if (unlikely(err)) {
diff --git a/mm/shmem.c b/mm/shmem.c
index 57fd82a5af7a..fe15d96c3166 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1166,7 +1166,7 @@ repeat:
 		__SetPageSwapBacked(page);
 		__set_page_locked(page);
 		if (sgp == SGP_WRITE)
-			init_page_accessed(page);
+			__SetPageReferenced(page);
 
 		error = mem_cgroup_charge_file(page, current->mm,
 						gfp & GFP_RECLAIM_MASK);
diff --git a/mm/swap.c b/mm/swap.c
index 9e8e3472248b..d8eb4d09ffa2 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -589,6 +589,9 @@ static void __lru_cache_activate_page(struct page *page)
  * inactive,unreferenced	->	inactive,referenced
  * inactive,referenced		->	active,unreferenced
  * active,unreferenced		->	active,referenced
+ *
+ * When a newly allocated page is not yet visible, so safe for non-atomic ops,
+ * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
  */
 void mark_page_accessed(struct page *page)
 {
@@ -614,17 +617,6 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
-/*
- * Used to mark_page_accessed(page) that is not visible yet and when it is
- * still safe to use non-atomic ops
- */
-void init_page_accessed(struct page *page)
-{
-	if (!PageReferenced(page))
-		__SetPageReferenced(page);
-}
-EXPORT_SYMBOL(init_page_accessed);
-
 static void __lru_cache_add(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
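
The comment added to mark_page_accessed() above states the rule the converted
call sites depend on: the non-atomic set must happen before the page is
published (in the kernel, before add_to_page_cache_lru() or the shmem
equivalent makes it reachable). Below is a hedged userspace sketch of that
ordering using C11 atomics and pthreads; struct demo_page, allocator() and
reader() are invented names, not kernel primitives.

/*
 * Userspace sketch of the publication ordering; build with:
 *	cc -std=c11 -pthread demo_publish.c
 * The release store to 'published' models the kernel step that makes the
 * page reachable by others (e.g. add_to_page_cache_lru()).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_PG_REFERENCED (1UL << 2)

struct demo_page {
	unsigned long flags;		/* plain field: private until published */
};

static struct demo_page page;
static _Atomic(struct demo_page *) published = NULL;

static void *allocator(void *arg)
{
	(void)arg;
	/* Page is still private: a plain, non-atomic set is fine. */
	page.flags |= DEMO_PG_REFERENCED;

	/* Publish; release ordering makes the flag visible to the reader. */
	atomic_store_explicit(&published, &page, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	struct demo_page *p;

	(void)arg;
	/* Spin until the page has been published. */
	while (!(p = atomic_load_explicit(&published, memory_order_acquire)))
		;
	/* Guaranteed to observe the bit that was set before publication. */
	printf("referenced: %d\n", (int)!!(p->flags & DEMO_PG_REFERENCED));
	return NULL;
}

int main(void)
{
	pthread_t a, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&a, NULL, allocator, NULL);
	pthread_join(a, NULL);
	pthread_join(r, NULL);
	return 0;
}

The release store that publishes the pointer pairs with the acquire load in
the reader, so the plain flag update is visible by the time the page can be
found at all, analogous to the ordering the page-cache insertion provides in
the kernel.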