author | Xishi Qiu <qiuxishi@huawei.com> | 2013-02-22 16:33:59 -0800 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 17:50:15 -0800 |
commit | 0ebff32c3637e0ed551c017eb9599ac108ab36aa (patch) | |
tree | c6c85f235dac0a0f9f00b7f54f271ddb0a666f4b /mm/memory-failure.c | |
parent | 194159fbcc0d6ac1351837d3cd7a27a4af0219a6 (diff) | |
download | linux-3.10-0ebff32c3637e0ed551c017eb9599ac108ab36aa.tar.gz linux-3.10-0ebff32c3637e0ed551c017eb9599ac108ab36aa.tar.bz2 linux-3.10-0ebff32c3637e0ed551c017eb9599ac108ab36aa.zip |
memory-failure: fix an error of mce_bad_pages statistics
When running
$ echo paddr > /sys/devices/system/memory/soft_offline_page
to offline a *free* page, mce_bad_pages is incremented and the page gets its
HWPoison flag set, yet the page is still managed by the buddy allocator.
$ cat /proc/meminfo | grep HardwareCorrupted
shows the accumulated value.
If we offline the same page again, mce_bad_pages is incremented *again*, so
the counter is now wrong. (Assume the page is still free during this short
window.) The path is:
soft_offline_page()
    get_any_page()
        "else if (is_free_buddy_page(p))" branch returns 0
    "goto done;"
    "atomic_long_add(1, &mce_bad_pages);"
This patch:
Move the poisoned-page check to the beginning of the function, so that
offlining an already-poisoned page returns -EBUSY before any accounting is
done and the counter is incremented at most once per page.
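Condensed in the same way, the patched ordering (the diff below applies it to
both soft_offline_page() and soft_offline_huge_page()) tests the flag before
taking a page reference or touching the counter:

        if (PageHWPoison(page)) {
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
                return -EBUSY;                  /* already counted once */
        }

        ret = get_any_page(page, pfn, flags);
        /* ... */
done:
        /* keep elevated page count for bad page */
        atomic_long_add(1, &mce_bad_pages);
        SetPageHWPoison(page);
        return ret;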
Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Tested-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r-- | mm/memory-failure.c | 38 |
1 file changed, 17 insertions, 21 deletions
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6e4dd3e1c0..1a9242c5331 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1419,18 +1419,17 @@ static int soft_offline_huge_page(struct page *page, int flags)
         unsigned long pfn = page_to_pfn(page);
         struct page *hpage = compound_head(page);
 
+        if (PageHWPoison(hpage)) {
+                pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
+                return -EBUSY;
+        }
+
         ret = get_any_page(page, pfn, flags);
         if (ret < 0)
                 return ret;
         if (ret == 0)
                 goto done;
 
-        if (PageHWPoison(hpage)) {
-                put_page(hpage);
-                pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
-                return -EBUSY;
-        }
-
         /* Keep page count to indicate a given hugepage is isolated. */
         ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL, false,
                                 MIGRATE_SYNC);
@@ -1441,12 +1440,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
                 return ret;
         }
 done:
-        if (!PageHWPoison(hpage))
-                atomic_long_add(1 << compound_trans_order(hpage),
-                                &mce_bad_pages);
+        /* keep elevated page count for bad page */
+        atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
         set_page_hwpoison_huge_page(hpage);
         dequeue_hwpoisoned_huge_page(hpage);
-        /* keep elevated page count for bad page */
+
         return ret;
 }
 
@@ -1488,6 +1486,11 @@ int soft_offline_page(struct page *page, int flags)
                 }
         }
 
+        if (PageHWPoison(page)) {
+                pr_info("soft offline: %#lx page already poisoned\n", pfn);
+                return -EBUSY;
+        }
+
         ret = get_any_page(page, pfn, flags);
         if (ret < 0)
                 return ret;
@@ -1519,19 +1522,11 @@ int soft_offline_page(struct page *page, int flags)
                 return -EIO;
         }
 
-        lock_page(page);
-        wait_on_page_writeback(page);
-
         /*
          * Synchronized using the page lock with memory_failure()
          */
-        if (PageHWPoison(page)) {
-                unlock_page(page);
-                put_page(page);
-                pr_info("soft offline: %#lx page already poisoned\n", pfn);
-                return -EBUSY;
-        }
-
+        lock_page(page);
+        wait_on_page_writeback(page);
         /*
          * Try to invalidate first. This should work for
          * non dirty unmapped page cache pages.
@@ -1583,8 +1578,9 @@ int soft_offline_page(struct page *page, int flags)
         return ret;
 
 done:
+        /* keep elevated page count for bad page */
         atomic_long_add(1, &mce_bad_pages);
         SetPageHWPoison(page);
-        /* keep elevated page count for bad page */
+
         return ret;
 }