Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig.debug        6
-rw-r--r--   mm/Makefile             1
-rw-r--r--   mm/dmapool.c            2
-rw-r--r--   mm/khugepaged.c         2
-rw-r--r--   mm/ksm.c                2
-rw-r--r--   mm/mmu_context.c        2
-rw-r--r--   mm/mmu_notifier.c       2
-rw-r--r--   mm/oom_kill.c           4
-rw-r--r--   mm/page-writeback.c     2
-rw-r--r--   mm/page_alloc.c         2
-rw-r--r--   mm/percpu.c             2
-rw-r--r--   mm/rodata_test.c       56
-rw-r--r--   mm/swapfile.c          12
-rw-r--r--   mm/truncate.c           2
-rw-r--r--   mm/zswap.c            109
15 files changed, 161 insertions, 45 deletions
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index afcc550877ff..79d0fd13b5b3 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -90,3 +90,9 @@ config DEBUG_PAGE_REF
careful when enabling this feature because it adds about 30 KB to the
kernel code. However the runtime performance overhead is virtually
nil until the tracepoints are actually enabled.
+
+config DEBUG_RODATA_TEST
+ bool "Testcase for the marking rodata read-only"
+ depends on STRICT_KERNEL_RWX
+ ---help---
+ This option enables a testcase for the setting rodata read-only.
diff --git a/mm/Makefile b/mm/Makefile
index aa0aa17cb413..026f6a828a50 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -85,6 +85,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
+obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o
obj-$(CONFIG_PAGE_OWNER) += page_owner.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
diff --git a/mm/dmapool.c b/mm/dmapool.c
index cef82b8a9291..4d90a64b2fdc 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -93,7 +93,7 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf)
spin_unlock_irq(&pool->lock);
/* per-pool info, no real statistics yet */
- temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
+ temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
pool->name, blocks,
pages * (pool->allocation / pool->size),
pool->size, pages);
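
The dmapool change swaps the old GNU-specific %Zu length modifier for the standard C99 %zu when printing size_t values; both print the same thing, but %Z is deprecated and modern compilers and checkpatch warn about it. A minimal userspace illustration of the specifier (assumption: plain printf here, but the kernel's scnprintf treats the length modifier the same way):

#include <stdio.h>

int main(void)
{
	size_t blocks = 42;

	/* %zu is the C99 length modifier for size_t; %Zu was a GNU extension */
	printf("%-16s %4zu\n", "pool-name", blocks);
	return 0;
}
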
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 77ae3239c3de..34bce5c308e3 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -420,7 +420,7 @@ int __khugepaged_enter(struct mm_struct *mm)
list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
spin_unlock(&khugepaged_mm_lock);
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
if (wakeup)
wake_up_interruptible(&khugepaged_wait);
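
This hunk, together with the ksm.c, mmu_context.c, mmu_notifier.c and oom_kill.c hunks below, is part of the tree-wide conversion from open-coded atomic_inc(&mm->mm_count) to the mmgrab() helper. For reference, the helper is a thin wrapper; a sketch of its definition as introduced in this kernel cycle (exact header location is from memory):

static inline void mmgrab(struct mm_struct *mm)
{
	/* pin the struct mm_struct itself (mm_count), not the address space */
	atomic_inc(&mm->mm_count);
}

It pairs with mmdrop(); the separate mm_users counter, taken with mmget() (see the swapfile.c hunks below), is what keeps the VMAs and page tables alive.
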
diff --git a/mm/ksm.c b/mm/ksm.c
index cf211c01ceac..520e4c37fec7 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1854,7 +1854,7 @@ int __ksm_enter(struct mm_struct *mm)
spin_unlock(&ksm_mmlist_lock);
set_bit(MMF_VM_MERGEABLE, &mm->flags);
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
if (needs_wakeup)
wake_up_interruptible(&ksm_thread_wait);
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
index 6f4d27c5bb32..daf67bb02b4a 100644
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
@@ -25,7 +25,7 @@ void use_mm(struct mm_struct *mm)
task_lock(tsk);
active_mm = tsk->active_mm;
if (active_mm != mm) {
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
tsk->active_mm = mm;
}
tsk->mm = mm;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index f4259e496f83..32bc9f2ff7eb 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -275,7 +275,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
mm->mmu_notifier_mm = mmu_notifier_mm;
mmu_notifier_mm = NULL;
}
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
/*
* Serialize the update against mmu_notifier_unregister. A
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 578321f1c070..51c091849dcb 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -653,7 +653,7 @@ static void mark_oom_victim(struct task_struct *tsk)
/* oom_mm is bound to the signal struct life time. */
if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
- atomic_inc(&tsk->signal->oom_mm->mm_count);
+ mmgrab(tsk->signal->oom_mm);
/*
* Make sure that the task is woken up from uninterruptible sleep
@@ -870,7 +870,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
/* Get a reference to safely compare mm after task_unlock(victim) */
mm = victim->mm;
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
/*
* We should send SIGKILL before setting TIF_MEMDIE in order to prevent
* the OOM victim from depleting the memory reserves from the user
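
In the first oom_kill.c hunk the reference is only taken when the cmpxchg() actually installs mm into signal->oom_mm: cmpxchg() returns the previous value, so a NULL result means this caller won the race and must hold the pin. A hedged sketch of that install-once idiom, using a hypothetical helper name:

/* Hypothetical helper illustrating the install-once idiom above. */
static void oom_mm_pin_once(struct signal_struct *sig, struct mm_struct *mm)
{
	/* cmpxchg() returns the old value; NULL means we installed @mm */
	if (cmpxchg(&sig->oom_mm, NULL, mm) == NULL)
		mmgrab(mm);	/* sig->oom_mm now owns this mm_count reference */
}
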
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ae6e601f0a58..26a60818a8fc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1797,7 +1797,7 @@ pause:
* pages exceeds dirty_thresh, give the other good wb's a pipe
* to go through, so that tasks on them still remain responsive.
*
- * In theory 1 page is enough to keep the comsumer-producer
+ * In theory 1 page is enough to keep the consumer-producer
* pipe going: the flusher cleans 1 page => the task dirties 1
* more page. However wb_dirty has accounting errors. So use
* the larger and more IO friendly wb_stat_error.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9f9623d690d6..a7a6aac95a6d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5925,7 +5925,7 @@ static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
* the zone and SPARSEMEM is in use. If there are holes within the
* zone, each populated memory region may cost us one or two extra
* memmap pages due to alignment because memmap pages for each
- * populated regions may not naturally algined on page boundary.
+ * populated regions may not be naturally aligned on page boundary.
* So the (present_pages >> 4) heuristic is a tradeoff for that.
*/
if (spanned_pages > present_pages + (present_pages >> 4) &&
diff --git a/mm/percpu.c b/mm/percpu.c
index 0686f566d347..5696039b5c07 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -43,7 +43,7 @@
* Chunks can be determined from the address using the index field
* in the page struct. The index field contains a pointer to the chunk.
*
- * To use this allocator, arch code should do the followings.
+ * To use this allocator, arch code should do the following:
*
* - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
* regular address to percpu pointer and back if they need to be
diff --git a/mm/rodata_test.c b/mm/rodata_test.c
new file mode 100644
index 000000000000..0fd21670b513
--- /dev/null
+++ b/mm/rodata_test.c
@@ -0,0 +1,56 @@
+/*
+ * rodata_test.c: functional test for mark_rodata_ro function
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+
+const int rodata_test_data = 0xC3;
+EXPORT_SYMBOL_GPL(rodata_test_data);
+
+void rodata_test(void)
+{
+ unsigned long start, end;
+ int zero = 0;
+
+ /* test 1: read the value */
+ /* If this test fails, some previous testrun has clobbered the state */
+ if (!rodata_test_data) {
+ pr_err("rodata_test: test 1 fails (start data)\n");
+ return;
+ }
+
+ /* test 2: write to the variable; this should fault */
+ if (!probe_kernel_write((void *)&rodata_test_data,
+ (void *)&zero, sizeof(zero))) {
+ pr_err("rodata_test: test data was not read only\n");
+ return;
+ }
+
+ /* test 3: check the value hasn't changed */
+ if (rodata_test_data == zero) {
+ pr_err("rodata_test: test data was changed\n");
+ return;
+ }
+
+ /* test 4: check if the rodata section is PAGE_SIZE aligned */
+ start = (unsigned long)__start_rodata;
+ end = (unsigned long)__end_rodata;
+ if (start & (PAGE_SIZE - 1)) {
+ pr_err("rodata_test: start of .rodata is not page size aligned\n");
+ return;
+ }
+ if (end & (PAGE_SIZE - 1)) {
+ pr_err("rodata_test: end of .rodata is not page size aligned\n");
+ return;
+ }
+
+ pr_info("rodata_test: all tests were successful\n");
+}
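
Test 2 above depends on probe_kernel_write() trapping the fault internally and returning an error instead of oopsing, so a zero (success) return means the read-only mapping did not hold. A minimal sketch of that convention wrapped in a standalone helper (hypothetical name, assuming the probe_kernel_write() API of this era):

/* Hypothetical helper: true if @addr is currently write-protected. */
static bool kernel_addr_is_ro(void *addr)
{
	int zero = 0;

	/* probe_kernel_write() returns 0 on success, -EFAULT if the write faults */
	return probe_kernel_write(addr, &zero, sizeof(zero)) != 0;
}

The companion patches in this series are expected to invoke rodata_test() right after mark_rodata_ro() during boot, so "some previous testrun" in test 1 really means an earlier, unprotected write clobbered the variable.
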
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2cac12cc9abe..fadc6a1c0da0 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1671,7 +1671,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
* that.
*/
start_mm = &init_mm;
- atomic_inc(&init_mm.mm_users);
+ mmget(&init_mm);
/*
* Keep on scanning until all entries have gone. Usually,
@@ -1720,7 +1720,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
if (atomic_read(&start_mm->mm_users) == 1) {
mmput(start_mm);
start_mm = &init_mm;
- atomic_inc(&init_mm.mm_users);
+ mmget(&init_mm);
}
/*
@@ -1757,13 +1757,13 @@ int try_to_unuse(unsigned int type, bool frontswap,
struct mm_struct *prev_mm = start_mm;
struct mm_struct *mm;
- atomic_inc(&new_start_mm->mm_users);
- atomic_inc(&prev_mm->mm_users);
+ mmget(new_start_mm);
+ mmget(prev_mm);
spin_lock(&mmlist_lock);
while (swap_count(*swap_map) && !retval &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
- if (!atomic_inc_not_zero(&mm->mm_users))
+ if (!mmget_not_zero(mm))
continue;
spin_unlock(&mmlist_lock);
mmput(prev_mm);
@@ -1781,7 +1781,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
if (set_start_mm && *swap_map < swcount) {
mmput(new_start_mm);
- atomic_inc(&mm->mm_users);
+ mmget(mm);
new_start_mm = mm;
set_start_mm = 0;
}
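
The swapfile.c hunks are the mm_users counterpart of the mmgrab() conversion above: mmget() and mmget_not_zero() replace open-coded atomic_inc() / atomic_inc_not_zero() on mm->mm_users. Their definitions are essentially one-liners (sketch; header location from memory):

static inline void mmget(struct mm_struct *mm)
{
	/* pin the address space: VMAs, page tables, etc. */
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	/* fails once mm_users has hit zero, i.e. the mm is being torn down */
	return atomic_inc_not_zero(&mm->mm_users);
}

Both pair with mmput().
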
diff --git a/mm/truncate.c b/mm/truncate.c
index f2db67465495..6263affdef88 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -786,7 +786,7 @@ EXPORT_SYMBOL(truncate_setsize);
*/
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
- int bsize = 1 << inode->i_blkbits;
+ int bsize = i_blocksize(inode);
loff_t rounded_from;
struct page *page;
pgoff_t index;
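
i_blocksize() is the recently added helper that wraps the open-coded shift, so this replacement is behaviour-preserving. Its definition in include/linux/fs.h is essentially (sketch):

static inline unsigned int i_blocksize(const struct inode *node)
{
	/* block size in bytes, derived from the log2 field */
	return 1U << node->i_blkbits;
}
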
diff --git a/mm/zswap.c b/mm/zswap.c
index cabf09e0128b..eedc27894b10 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -76,6 +76,8 @@ static u64 zswap_duplicate_entry;
* tunables
**********************************/
+#define ZSWAP_PARAM_UNSET ""
+
/* Enable/disable zswap (disabled by default) */
static bool zswap_enabled;
static int zswap_enabled_param_set(const char *,
@@ -185,6 +187,9 @@ static bool zswap_init_started;
/* fatal error during init */
static bool zswap_init_failed;
+/* init completed, but couldn't create the initial pool */
+static bool zswap_has_pool;
+
/*********************************
* helpers and fwd declarations
**********************************/
@@ -424,7 +429,8 @@ static struct zswap_pool *__zswap_pool_current(void)
struct zswap_pool *pool;
pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
- WARN_ON(!pool);
+ WARN_ONCE(!pool && zswap_has_pool,
+ "%s: no page storage pool!\n", __func__);
return pool;
}
@@ -443,7 +449,7 @@ static struct zswap_pool *zswap_pool_current_get(void)
rcu_read_lock();
pool = __zswap_pool_current();
- if (!pool || !zswap_pool_get(pool))
+ if (!zswap_pool_get(pool))
pool = NULL;
rcu_read_unlock();
@@ -459,7 +465,9 @@ static struct zswap_pool *zswap_pool_last_get(void)
list_for_each_entry_rcu(pool, &zswap_pools, list)
last = pool;
- if (!WARN_ON(!last) && !zswap_pool_get(last))
+ WARN_ONCE(!last && zswap_has_pool,
+ "%s: no page storage pool!\n", __func__);
+ if (!zswap_pool_get(last))
last = NULL;
rcu_read_unlock();
@@ -495,6 +503,17 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
int ret;
+ if (!zswap_has_pool) {
+ /* if either are unset, pool initialization failed, and we
+ * need both params to be set correctly before trying to
+ * create a pool.
+ */
+ if (!strcmp(type, ZSWAP_PARAM_UNSET))
+ return NULL;
+ if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
+ return NULL;
+ }
+
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool) {
pr_err("pool alloc failed\n");
@@ -544,29 +563,41 @@ error:
static __init struct zswap_pool *__zswap_pool_create_fallback(void)
{
- if (!crypto_has_comp(zswap_compressor, 0, 0)) {
- if (!strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
- pr_err("default compressor %s not available\n",
- zswap_compressor);
- return NULL;
- }
+ bool has_comp, has_zpool;
+
+ has_comp = crypto_has_comp(zswap_compressor, 0, 0);
+ if (!has_comp && strcmp(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT)) {
pr_err("compressor %s not available, using default %s\n",
zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT);
param_free_charp(&zswap_compressor);
zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
+ has_comp = crypto_has_comp(zswap_compressor, 0, 0);
}
- if (!zpool_has_pool(zswap_zpool_type)) {
- if (!strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
- pr_err("default zpool %s not available\n",
- zswap_zpool_type);
- return NULL;
- }
+ if (!has_comp) {
+ pr_err("default compressor %s not available\n",
+ zswap_compressor);
+ param_free_charp(&zswap_compressor);
+ zswap_compressor = ZSWAP_PARAM_UNSET;
+ }
+
+ has_zpool = zpool_has_pool(zswap_zpool_type);
+ if (!has_zpool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) {
pr_err("zpool %s not available, using default %s\n",
zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT);
param_free_charp(&zswap_zpool_type);
zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT;
+ has_zpool = zpool_has_pool(zswap_zpool_type);
+ }
+ if (!has_zpool) {
+ pr_err("default zpool %s not available\n",
+ zswap_zpool_type);
+ param_free_charp(&zswap_zpool_type);
+ zswap_zpool_type = ZSWAP_PARAM_UNSET;
}
+ if (!has_comp || !has_zpool)
+ return NULL;
+
return zswap_pool_create(zswap_zpool_type, zswap_compressor);
}
@@ -582,6 +613,9 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
static int __must_check zswap_pool_get(struct zswap_pool *pool)
{
+ if (!pool)
+ return 0;
+
return kref_get_unless_zero(&pool->kref);
}
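
Making zswap_pool_get() tolerate a NULL pool is what lets zswap_pool_current_get() and zswap_pool_last_get() above drop their explicit NULL checks: kref_get_unless_zero() is only reached when a pool exists, and a missing pool simply propagates as NULL to the caller. A condensed sketch of the resulting call pattern (hypothetical caller mirroring zswap_pool_current_get()):

/* Hypothetical caller showing the NULL-tolerant get pattern. */
static struct zswap_pool *current_pool_or_null(void)
{
	struct zswap_pool *pool;

	rcu_read_lock();
	pool = __zswap_pool_current();	/* may be NULL when no pool is configured */
	if (!zswap_pool_get(pool))	/* safe on NULL: returns 0 without dereferencing */
		pool = NULL;
	rcu_read_unlock();

	return pool;
}
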
@@ -639,7 +673,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
}
/* no change required */
- if (!strcmp(s, *(char **)kp->arg))
+ if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
return 0;
/* if this is load-time (pre-init) param setting,
@@ -670,21 +704,26 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
pool = zswap_pool_find_get(type, compressor);
if (pool) {
zswap_pool_debug("using existing", pool);
+ WARN_ON(pool == zswap_pool_current());
list_del_rcu(&pool->list);
- } else {
- spin_unlock(&zswap_pools_lock);
- pool = zswap_pool_create(type, compressor);
- spin_lock(&zswap_pools_lock);
}
+ spin_unlock(&zswap_pools_lock);
+
+ if (!pool)
+ pool = zswap_pool_create(type, compressor);
+
if (pool)
ret = param_set_charp(s, kp);
else
ret = -EINVAL;
+ spin_lock(&zswap_pools_lock);
+
if (!ret) {
put_pool = zswap_pool_current();
list_add_rcu(&pool->list, &zswap_pools);
+ zswap_has_pool = true;
} else if (pool) {
/* add the possibly pre-existing pool to the end of the pools
* list; if it's new (and empty) then it'll be removed and
@@ -696,6 +735,17 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
spin_unlock(&zswap_pools_lock);
+ if (!zswap_has_pool && !pool) {
+ /* if initial pool creation failed, and this pool creation also
+ * failed, maybe both compressor and zpool params were bad.
+ * Allow changing this param, so pool creation will succeed
+ * when the other param is changed. We already verified this
+ * param is ok in the zpool_has_pool() or crypto_has_comp()
+ * checks above.
+ */
+ ret = param_set_charp(s, kp);
+ }
+
/* drop the ref from either the old current pool,
* or the new pool we failed to add
*/
@@ -724,6 +774,10 @@ static int zswap_enabled_param_set(const char *val,
pr_err("can't enable, initialization failed\n");
return -ENODEV;
}
+ if (!zswap_has_pool && zswap_init_started) {
+ pr_err("can't enable, no pool configured\n");
+ return -ENODEV;
+ }
return param_set_bool(val, kp);
}
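
For context, this setter is attached to the module parameter through a custom kernel_param_ops, so writes to /sys/module/zswap/parameters/enabled pass through the checks above before param_set_bool() flips zswap_enabled. Roughly how zswap wires it up (from memory of zswap.c in this era; treat the exact identifiers as an assumption):

static struct kernel_param_ops zswap_enabled_param_ops = {
	.set = zswap_enabled_param_set,	/* rejects enabling if init failed or no pool exists */
	.get = param_get_bool,
};
module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
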
@@ -1205,22 +1259,21 @@ static int __init init_zswap(void)
goto hp_fail;
pool = __zswap_pool_create_fallback();
- if (!pool) {
+ if (pool) {
+ pr_info("loaded using pool %s/%s\n", pool->tfm_name,
+ zpool_get_type(pool->zpool));
+ list_add(&pool->list, &zswap_pools);
+ zswap_has_pool = true;
+ } else {
pr_err("pool creation failed\n");
- goto pool_fail;
+ zswap_enabled = false;
}
- pr_info("loaded using pool %s/%s\n", pool->tfm_name,
- zpool_get_type(pool->zpool));
-
- list_add(&pool->list, &zswap_pools);
frontswap_register_ops(&zswap_frontswap_ops);
if (zswap_debugfs_init())
pr_warn("debugfs initialization failed\n");
return 0;
-pool_fail:
- cpuhp_remove_state_nocalls(CPUHP_MM_ZSWP_POOL_PREPARE);
hp_fail:
cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail: