author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 17:29:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 17:29:08 -0700
commit     73154383f02998fdd6a1f26c7ef33bfc3785a101
tree       85a4c10cf32172b99aed01e95ded7269afcc9d7d /include
parent     362ed48dee509abe24cf84b7e137c7a29a8f4d2d
parent     ca0dde97178e75ed1370b8616326f5496a803d65
Merge branch 'akpm' (incoming from Andrew)
Merge first batch of fixes from Andrew Morton:

 - A couple of kthread changes
 - A few minor audit patches
 - A number of fbdev patches.  Florian remains AWOL so I'm picking up
   some of these.
 - A few kbuild things
 - ocfs2 updates
 - Almost all of the MM queue

(And in the meantime, I already have the second big batch from Andrew
pending in my mailbox ;^)

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (149 commits)
  memcg: take reference before releasing rcu_read_lock
  mem hotunplug: fix kfree() of bootmem memory
  mmKconfig: add an option to disable bounce
  mm, nobootmem: do memset() after memblock_reserve()
  mm, nobootmem: clean-up of free_low_memory_core_early()
  fs/buffer.c: remove unnecessary init operation after allocating buffer_head.
  numa, cpu hotplug: change links of CPU and node when changing node number by onlining CPU
  mm: fix memory_hotplug.c printk format warning
  mm: swap: mark swap pages writeback before queueing for direct IO
  swap: redirty page if page write fails on swap file
  mm, memcg: give exiting processes access to memory reserves
  thp: fix huge zero page logic for page with pfn == 0
  memcg: avoid accessing memcg after releasing reference
  fs: fix fsync() error reporting
  memblock: fix missing comment of memblock_insert_region()
  mm: Remove unused parameter of pages_correctly_reserved()
  firmware, memmap: fix firmware_map_entry leak
  mm/vmstat: add note on safety of drain_zonestat
  mm: thp: add split tail pages to shrink page list in page reclaim
  mm: allow for outstanding swap writeback accounting
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/hugetlb.h      40
-rw-r--r--  include/asm-generic/pgtable.h      10
-rw-r--r--  include/linux/blk_types.h           3
-rw-r--r--  include/linux/bootmem.h             1
-rw-r--r--  include/linux/buffer_head.h         1
-rw-r--r--  include/linux/cgroup.h              7
-rw-r--r--  include/linux/debug_locks.h         2
-rw-r--r--  include/linux/huge_mm.h            11
-rw-r--r--  include/linux/hugetlb.h             4
-rw-r--r--  include/linux/hyperv.h             11
-rw-r--r--  include/linux/ioport.h              4
-rw-r--r--  include/linux/memory.h             17
-rw-r--r--  include/linux/memory_hotplug.h      4
-rw-r--r--  include/linux/mm.h                 79
-rw-r--r--  include/linux/notifier.h            5
-rw-r--r--  include/linux/pagemap.h             2
-rw-r--r--  include/linux/ramfs.h               8
-rw-r--r--  include/linux/swap.h               10
-rw-r--r--  include/linux/vmalloc.h            34
-rw-r--r--  include/linux/vmpressure.h         47
-rw-r--r--  include/linux/vmstat.h              7
-rw-r--r--  include/trace/events/filemap.h     58
-rw-r--r--  include/uapi/linux/fs.h             1
23 files changed, 328 insertions, 38 deletions
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
new file mode 100644
index 00000000000..d06079c774a
--- /dev/null
+++ b/include/asm-generic/hugetlb.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_GENERIC_HUGETLB_H
+#define _ASM_GENERIC_HUGETLB_H
+
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+{
+ return mk_pte(page, pgprot);
+}
+
+static inline int huge_pte_write(pte_t pte)
+{
+ return pte_write(pte);
+}
+
+static inline int huge_pte_dirty(pte_t pte)
+{
+ return pte_dirty(pte);
+}
+
+static inline pte_t huge_pte_mkwrite(pte_t pte)
+{
+ return pte_mkwrite(pte);
+}
+
+static inline pte_t huge_pte_mkdirty(pte_t pte)
+{
+ return pte_mkdirty(pte);
+}
+
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return pte_modify(pte, newprot);
+}
+
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_clear(mm, addr, ptep);
+}
+
+#endif /* _ASM_GENERIC_HUGETLB_H */
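Architectures whose huge-PTE helpers are nothing more than wrappers around the ordinary pte_*() operations can now include this header instead of duplicating each wrapper. A minimal sketch, assuming a hypothetical arch/<arch>/include/asm/hugetlb.h (not part of this patch):

	/* Illustrative only: pull in the generic wrappers, then supply the
	 * pieces the architecture still has to define itself.
	 */
	#include <asm-generic/hugetlb.h>

	static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
	{
		set_pte_at(mm, addr, ptep, pte);	/* often arch-specific */
	}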
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index bfd87685fc1..a59ff51b016 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -7,6 +7,16 @@
#include <linux/mm_types.h>
#include <linux/bug.h>
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
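The callers of the new constant are not visible in this hunk; a hedged sketch of the intended use, assuming the companion mm/ changes: teardown paths pass it as the ceiling to free_pgtables(), and an architecture that must keep free_pgtables() away from kernel page tables overrides the 0UL fallback in its own headers.

	/* sketch only: in mm/ teardown code */
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);

	/* sketch only: in a hypothetical asm/pgtable.h needing a tighter limit */
	#define USER_PGTABLES_CEILING	TASK_SIZE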
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cdf11191e64..22990cf4439 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -111,12 +111,13 @@ struct bio {
#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */
#define BIO_QUIET 10 /* Make BIO Quiet */
#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
+#define BIO_SNAP_STABLE 12 /* bio data must be snapshotted during write */
/*
* Flags starting here get preserved by bio_reset() - this includes
* BIO_POOL_IDX()
*/
-#define BIO_RESET_BITS 12
+#define BIO_RESET_BITS 13
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index cdc3bab0183..5f0b0e1f7c0 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -44,7 +44,6 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
-extern unsigned long free_low_memory_core_early(int nodeid);
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
extern unsigned long free_all_bootmem(void);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 5afc4f94d11..4c16c4a88d4 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -181,6 +181,7 @@ void ll_rw_block(int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int rw);
void write_dirty_buffer(struct buffer_head *bh, int rw);
+int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
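_submit_bh() is the flag-carrying variant of submit_bh(); together with the BIO_SNAP_STABLE bit added in blk_types.h above, it lets a filesystem ask the block layer to snapshot a buffer's data before the write is issued. A hedged sketch of such a call (variable names assumed):

	/* sketch only: instead of plain submit_bh(WRITE_SYNC, bh) */
	ret = _submit_bh(WRITE_SYNC, bh, 1 << BIO_SNAP_STABLE);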
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 900af5964f5..470073bf93d 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -687,13 +687,6 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
-/*
- * Get a cgroup whose id is greater than or equal to id under tree of root.
- * Returning a cgroup_subsys_state or NULL.
- */
-struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
- struct cgroup_subsys_state *root, int *foundid);
-
/* Returns true if root is ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
const struct cgroup_subsys_state *root);
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 3bd46f76675..21ca773f77b 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -27,7 +27,7 @@ extern int debug_locks_off(void);
\
if (!oops_in_progress && unlikely(c)) { \
if (debug_locks_off() && !debug_locks_silent) \
- WARN_ON(1); \
+ WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
__ret = 1; \
} \
__ret; \
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ee1c244a62a..528454c2caa 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -99,7 +99,11 @@ extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
extern int handle_pte_fault(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long address,
pte_t *pte, pmd_t *pmd, unsigned int flags);
-extern int split_huge_page(struct page *page);
+extern int split_huge_page_to_list(struct page *page, struct list_head *list);
+static inline int split_huge_page(struct page *page)
+{
+ return split_huge_page_to_list(page, NULL);
+}
extern void __split_huge_page_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd);
#define split_huge_page_pmd(__vma, __address, __pmd) \
@@ -186,6 +190,11 @@ extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vm
#define transparent_hugepage_enabled(__vma) 0
#define transparent_hugepage_flags 0UL
+static inline int
+split_huge_page_to_list(struct page *page, struct list_head *list)
+{
+ return 0;
+}
static inline int split_huge_page(struct page *page)
{
return 0;
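A hedged sketch of the intended caller, page reclaim: splitting a THP while collecting the resulting tail pages on the caller's own list so they can be processed in the same pass; split_huge_page() passes NULL and keeps the old behaviour. Names below are illustrative:

	LIST_HEAD(ret_pages);

	if (PageTransHuge(page) &&
	    split_huge_page_to_list(page, &ret_pages))
		goto keep_locked;	/* split failed; hypothetical label */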
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 16e4e9a643f..3a62df310f2 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -58,6 +58,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
+void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
@@ -114,6 +115,9 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf) 0
+static inline void hugetlb_show_meminfo(void)
+{
+}
#define follow_huge_pmd(mm, addr, pmd, write) NULL
#define follow_huge_pud(mm, addr, pud, write) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 95d0850584d..c2559847d7e 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1318,6 +1318,17 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver);
0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \
}
/*
+ * Synthetic Video GUID
+ * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
+ */
+#define HV_SYNTHVID_GUID \
+ .guid = { \
+ 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \
+ 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \
+ }
+
+
+/*
* Common header for Hyper-V ICs
*/
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 85ac9b9b72a..89b7c24a36e 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -192,6 +192,10 @@ extern struct resource * __request_region(struct resource *,
extern int __check_region(struct resource *, resource_size_t, resource_size_t);
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern int release_mem_region_adjustable(struct resource *, resource_size_t,
+ resource_size_t);
+#endif
static inline int __deprecated check_region(resource_size_t s,
resource_size_t n)
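A hedged sketch of the expected caller in memory hot-remove: giving back the removed physical range to the iomem_resource tree, where the range may sit in the middle of a larger busy resource (hence "adjustable"). start and size are assumed to be the physical address and length of the removed block:

	ret = release_mem_region_adjustable(&iomem_resource, start, size);
	if (ret)
		pr_warn("unable to release hot-removed memory resource\n");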
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 45e93b46887..73817af8b48 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -18,6 +18,7 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
+#include <linux/notifier.h>
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
@@ -114,9 +115,10 @@ extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
extern int register_new_memory(int, struct mem_section *);
+#ifdef CONFIG_MEMORY_HOTREMOVE
extern int unregister_memory_section(struct mem_section *);
+#endif
extern int memory_dev_init(void);
-extern int remove_memory_block(unsigned long, struct mem_section *, int);
extern int memory_notify(unsigned long val, void *v);
extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block_hinted(struct mem_section *,
@@ -127,13 +129,18 @@ enum mem_add_context { BOOT, HOTPLUG };
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
#ifdef CONFIG_MEMORY_HOTPLUG
-#define hotplug_memory_notifier(fn, pri) { \
+#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
- { .notifier_call = fn, .priority = pri }; \
+ { .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
-}
+})
+#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
+#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
#else
-#define hotplug_memory_notifier(fn, pri) do { } while (0)
+#define hotplug_memory_notifier(fn, pri) (0)
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
+#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
#endif
/*
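hotplug_memory_notifier() is now a statement expression so its result can be checked, and register_hotmemory_notifier()/unregister_hotmemory_notifier() give callers a form that compiles away cleanly when CONFIG_MEMORY_HOTPLUG is off. A minimal sketch of a subscriber (names assumed):

	static int example_mem_callback(struct notifier_block *nb,
					unsigned long action, void *arg)
	{
		struct memory_notify *mn = arg;

		if (action == MEM_ONLINE)
			pr_info("onlined %lu pages\n", mn->nr_pages);
		return NOTIFY_OK;
	}

	static struct notifier_block example_mem_nb = {
		.notifier_call	= example_mem_callback,
		.priority	= 0,
	};

	/* in an init path */
	register_hotmemory_notifier(&example_mem_nb);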
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index b6a3be7d47b..3e622c61092 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -97,13 +97,13 @@ extern void __online_page_free(struct page *page);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
extern int arch_remove_memory(u64 start, u64 size);
+extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages);
#endif /* CONFIG_MEMORY_HOTREMOVE */
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
-extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e2091b88d24..c05d7cfbb6b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -44,6 +44,9 @@ extern int sysctl_legacy_va_layout;
#include <asm/pgtable.h>
#include <asm/processor.h>
+extern unsigned long sysctl_user_reserve_kbytes;
+extern unsigned long sysctl_admin_reserve_kbytes;
+
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
/* to align the pointer to the (next) page boundary */
@@ -899,7 +902,8 @@ extern void pagefault_out_of_memory(void);
* Flags passed to show_mem() and show_free_areas() to suppress output in
* various contexts.
*/
-#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */
+#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
+#define SHOW_MEM_FILTER_PAGE_COUNT (0x0002u) /* page type count */
extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);
@@ -1294,6 +1298,61 @@ extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);
+/*
+ * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
+ * into the buddy system. The freed pages will be poisoned with pattern
+ * "poison" if it's non-zero.
+ * Return pages freed into the buddy system.
+ */
+extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
+ int poison, char *s);
+#ifdef CONFIG_HIGHMEM
+/*
+ * Free a highmem page into the buddy system, adjusting totalhigh_pages
+ * and totalram_pages.
+ */
+extern void free_highmem_page(struct page *page);
+#endif
+
+static inline void adjust_managed_page_count(struct page *page, long count)
+{
+ totalram_pages += count;
+}
+
+/* Free the reserved page into the buddy system, so it gets managed. */
+static inline void __free_reserved_page(struct page *page)
+{
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+}
+
+static inline void free_reserved_page(struct page *page)
+{
+ __free_reserved_page(page);
+ adjust_managed_page_count(page, 1);
+}
+
+static inline void mark_page_reserved(struct page *page)
+{
+ SetPageReserved(page);
+ adjust_managed_page_count(page, -1);
+}
+
+/*
+ * Default method to free all the __init memory into the buddy system.
+ * The freed pages will be poisoned with pattern "poison" if it is
+ * non-zero. Return pages freed into the buddy system.
+ */
+static inline unsigned long free_initmem_default(int poison)
+{
+ extern char __init_begin[], __init_end[];
+
+ return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin) ,
+ ((unsigned long)&__init_end) & PAGE_MASK,
+ poison, "unused kernel");
+}
+
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
* With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
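With these helpers, a typical architecture can shrink its late-boot freeing code. A hedged sketch, assuming the usual arch hooks (a poison value of 0 means the freed pages are not poisoned):

	void free_initmem(void)
	{
		free_initmem_default(0);
	}

	#ifdef CONFIG_BLK_DEV_INITRD
	void free_initrd_mem(unsigned long start, unsigned long end)
	{
		free_reserved_area(start, end, 0, "initrd");
	}
	#endif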
@@ -1675,8 +1734,12 @@ int in_gate_area_no_mm(unsigned long addr);
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
#endif /* __HAVE_ARCH_GATE_AREA */
+#ifdef CONFIG_SYSCTL
+extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+#endif
+
unsigned long shrink_slab(struct shrink_control *shrink,
unsigned long nr_pages_scanned,
unsigned long lru_pages);
@@ -1704,12 +1767,12 @@ pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
-int vmemmap_populate_basepages(struct page *start_page,
- unsigned long pages, int node);
-int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+int vmemmap_populate_basepages(unsigned long start, unsigned long end,
+ int node);
+int vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
-void vmemmap_free(struct page *memmap, unsigned long nr_pages);
+void vmemmap_free(unsigned long start, unsigned long end);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
unsigned long size);
@@ -1756,5 +1819,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */
+#if MAX_NUMNODES > 1
+void __init setup_nr_node_ids(void);
+#else
+static inline void setup_nr_node_ids(void) {}
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index d65746efc95..d14a4c36246 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -47,8 +47,11 @@
* runtime initialization.
*/
+typedef int (*notifier_fn_t)(struct notifier_block *nb,
+ unsigned long action, void *data);
+
struct notifier_block {
- int (*notifier_call)(struct notifier_block *, unsigned long, void *);
+ notifier_fn_t notifier_call;
struct notifier_block __rcu *next;
int priority;
};
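The typedef does not change struct notifier_block; it just gives the callback type a name, so code that stores or passes callbacks can say what it means. A small sketch with assumed names:

	static int example_event(struct notifier_block *nb,
				 unsigned long action, void *data)
	{
		return NOTIFY_DONE;
	}

	notifier_fn_t fn = example_event;	/* instead of spelling out the type */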
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0e38e13eb24..e3dea75a078 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -149,7 +149,7 @@ static inline int page_cache_get_speculative(struct page *page)
{
VM_BUG_ON(in_interrupt());
-#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
+#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic());
# endif
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 5bf5500db83..69e37c2d1ea 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -6,7 +6,13 @@ struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
extern struct dentry *ramfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data);
-#ifndef CONFIG_MMU
+#ifdef CONFIG_MMU
+static inline int
+ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
+{
+ return 0;
+}
+#else
extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
unsigned long addr,
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2818a123f3e..1701ce4be74 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -236,7 +236,7 @@ extern unsigned long nr_free_pagecache_pages(void);
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
- struct lruvec *lruvec);
+ struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
@@ -330,6 +330,9 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
+extern void end_swap_bio_write(struct bio *bio, int err);
+extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
+ void (*end_write_func)(struct bio *, int));
extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);
@@ -343,8 +346,9 @@ extern struct address_space swapper_spaces[];
#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *);
+extern int add_to_swap(struct page *, struct list_head *list);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
+extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
@@ -461,7 +465,7 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
return NULL;
}
-static inline int add_to_swap(struct page *page)
+static inline int add_to_swap(struct page *page, struct list_head *list)
{
return 0;
}
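The split of swap_writepage() into __swap_writepage() plus an explicit completion callback is what lets other writeback paths reuse the swap I/O code. A hedged sketch of a caller that wants extra accounting on completion (helper name hypothetical):

	static void my_end_swap_write(struct bio *bio, int err)
	{
		/* custom bookkeeping here, then the stock completion */
		end_swap_bio_write(bio, err);
	}

	ret = __swap_writepage(page, wbc, my_end_swap_write);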
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 6071e911c7f..7d5773a99f2 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -3,7 +3,9 @@
#include <linux/spinlock.h>
#include <linux/init.h>
+#include <linux/list.h>
#include <asm/page.h> /* pgprot_t */
+#include <linux/rbtree.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
@@ -35,6 +37,17 @@ struct vm_struct {
const void *caller;
};
+struct vmap_area {
+ unsigned long va_start;
+ unsigned long va_end;
+ unsigned long flags;
+ struct rb_node rb_node; /* address sorted rbtree */
+ struct list_head list; /* address sorted list */
+ struct list_head purge_list; /* "lazy purge" list */
+ struct vm_struct *vm;
+ struct rcu_head rcu_head;
+};
+
/*
* Highlevel APIs for driver use
*/
@@ -130,8 +143,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
/*
* Internals. Don't use..
*/
-extern rwlock_t vmlist_lock;
-extern struct vm_struct *vmlist;
+extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
@@ -158,4 +170,22 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
# endif
#endif
+struct vmalloc_info {
+ unsigned long used;
+ unsigned long largest_chunk;
+};
+
+#ifdef CONFIG_MMU
+#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+extern void get_vmalloc_info(struct vmalloc_info *vmi);
+#else
+
+#define VMALLOC_TOTAL 0UL
+#define get_vmalloc_info(vmi) \
+do { \
+ (vmi)->used = 0; \
+ (vmi)->largest_chunk = 0; \
+} while (0)
+#endif
+
#endif /* _LINUX_VMALLOC_H */
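get_vmalloc_info() puts the vmalloc-usage summary behind one call (with a trivially empty !MMU fallback). Roughly what a /proc/meminfo-style consumer does, sketched with an assumed seq_file context m:

	struct vmalloc_info vmi;

	get_vmalloc_info(&vmi);
	seq_printf(m, "VmallocTotal:   %8lu kB\n", VMALLOC_TOTAL >> 10);
	seq_printf(m, "VmallocUsed:    %8lu kB\n", vmi.used >> 10);
	seq_printf(m, "VmallocChunk:   %8lu kB\n", vmi.largest_chunk >> 10);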
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
new file mode 100644
index 00000000000..76be077340e
--- /dev/null
+++ b/include/linux/vmpressure.h
@@ -0,0 +1,47 @@
+#ifndef __LINUX_VMPRESSURE_H
+#define __LINUX_VMPRESSURE_H
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/cgroup.h>
+
+struct vmpressure {
+ unsigned long scanned;
+ unsigned long reclaimed;
+ /* The lock is used to keep the scanned/reclaimed above in sync. */
+ struct mutex sr_lock;
+
+ /* The list of vmpressure_event structs. */
+ struct list_head events;
+ /* Have to grab the lock on events traversal or modifications. */
+ struct mutex events_lock;
+
+ struct work_struct work;
+};
+
+struct mem_cgroup;
+
+#ifdef CONFIG_MEMCG
+extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
+ unsigned long scanned, unsigned long reclaimed);
+extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
+
+extern void vmpressure_init(struct vmpressure *vmpr);
+extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
+extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
+extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
+extern int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
+ struct eventfd_ctx *eventfd,
+ const char *args);
+extern void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
+ struct eventfd_ctx *eventfd);
+#else
+static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
+ unsigned long scanned, unsigned long reclaimed) {}
+static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
+ int prio) {}
+#endif /* CONFIG_MEMCG */
+#endif /* __LINUX_VMPRESSURE_H */
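The reporting side lives in reclaim; listeners register an eventfd against a memcg pressure-level control file and receive low/medium/critical events derived from the scanned-vs-reclaimed ratio. A hedged sketch of the in-kernel reporting calls, with variable names assumed from a reclaim context:

	/* after a scan pass */
	vmpressure(gfp_mask, memcg, nr_scanned, nr_reclaimed);

	/* when reclaim reaches its most aggressive priority */
	vmpressure_prio(gfp_mask, memcg, priority);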
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 5fd71a7d0df..c586679b6fe 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -48,13 +48,8 @@ static inline void count_vm_events(enum vm_event_item item, long delta)
}
extern void all_vm_events(unsigned long *);
-#ifdef CONFIG_HOTPLUG
+
extern void vm_events_fold_cpu(int cpu);
-#else
-static inline void vm_events_fold_cpu(int cpu)
-{
-}
-#endif
#else
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
new file mode 100644
index 00000000000..0421f49a20f
--- /dev/null
+++ b/include/trace/events/filemap.h
@@ -0,0 +1,58 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM filemap
+
+#if !defined(_TRACE_FILEMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FILEMAP_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+
+DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
+
+ TP_PROTO(struct page *page),
+
+ TP_ARGS(page),
+
+ TP_STRUCT__entry(
+ __field(struct page *, page)
+ __field(unsigned long, i_ino)
+ __field(unsigned long, index)
+ __field(dev_t, s_dev)
+ ),
+
+ TP_fast_assign(
+ __entry->page = page;
+ __entry->i_ino = page->mapping->host->i_ino;
+ __entry->index = page->index;
+ if (page->mapping->host->i_sb)
+ __entry->s_dev = page->mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = page->mapping->host->i_rdev;
+ ),
+
+ TP_printk("dev %d:%d ino %lx page=%p pfn=%lu ofs=%lu",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
+ __entry->i_ino,
+ __entry->page,
+ page_to_pfn(__entry->page),
+ __entry->index << PAGE_SHIFT)
+);
+
+DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache,
+ TP_PROTO(struct page *page),
+ TP_ARGS(page)
+ );
+
+DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
+ TP_PROTO(struct page *page),
+ TP_ARGS(page)
+ );
+
+#endif /* _TRACE_FILEMAP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
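These tracepoints only fire once a .c file defines and calls them; a hedged sketch of how mm/filemap.c is expected to hook them up:

	/* in mm/filemap.c (sketch) */
	#define CREATE_TRACE_POINTS
	#include <trace/events/filemap.h>

	/* at the page-cache add/delete sites */
	trace_mm_filemap_add_to_page_cache(page);
	trace_mm_filemap_delete_from_page_cache(page);

From userspace they would then be enabled through the usual tracefs path, e.g. writing 1 to /sys/kernel/debug/tracing/events/filemap/enable.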
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index c7fc1e6517c..a4ed56cf0ea 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -88,7 +88,6 @@ struct inodes_stat_t {
#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
/* These sb flags are internal to the kernel */
-#define MS_SNAP_STABLE (1<<27) /* Snapshot pages during writeback, if needed */
#define MS_NOSEC (1<<28)
#define MS_BORN (1<<29)
#define MS_ACTIVE (1<<30)