author     Albert Herranz <albert_herranz@yahoo.es>        2010-06-04 14:14:56 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-06-04 15:21:45 -0700
commit     3f505ca45735c35576dab4ceb3e3736d528b6672 (patch)
tree       447973161e0589a39c2831c7ef3225f49ae4f85b
parent     1da083c9b23dafd6bcb08dcfec443e66e90efff0 (diff)
Revert "fb_defio: fix for non-dirty ptes"
This reverts commit 49bbd815fd8ba26d0354900b783b767c7f47c816 ("fb_defio: fix
for non-dirty ptes").

Although the fix provided is correct, it's been suggested to avoid the
underlying race in the same way as it is currently done in filesystems like
NFS, for maintainability.

A following patch, "fb_defio: redo fix for non-dirty ptes", will provide such
an alternate fix.

Signed-off-by: Albert Herranz <albert_herranz@yahoo.es>
Cc: Jaya Kumar <jayakumar.lkml@gmail.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  drivers/video/fb_defio.c | 40
 1 file changed, 8 insertions(+), 32 deletions(-)
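For reference, this is how fb_deferred_io_work() reads once the revert is applied; a sketch reconstructed from the hunk below, with the rest of drivers/video/fb_defio.c and its includes omitted:

/* Workqueue callback after the revert: write-protect every pending page,
 * hand the whole pagelist to the driver's deferred_io() callback, then
 * empty the list. */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
					    deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

Note that the race described in the comment removed below (the work item running after ->page_mkwrite() but before the PTE has been marked dirty, so page_mkclean() does not "rearm" the page) is reintroduced by this revert; per the commit message, the follow-up patch is expected to close it again, along the lines used by filesystems such as NFS.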
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 073c9b408cf..137100ea8ad 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -155,41 +155,25 @@ static void fb_deferred_io_work(struct work_struct *work)
 {
 	struct fb_info *info = container_of(work, struct fb_info,
 						deferred_work.work);
+	struct list_head *node, *next;
+	struct page *cur;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct page *page, *tmp_page;
-	struct list_head *node, *tmp_node;
-	struct list_head non_dirty;
-
-	INIT_LIST_HEAD(&non_dirty);
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
-		lock_page(page);
-		/*
-		 * The workqueue callback can be triggered after a
-		 * ->page_mkwrite() call but before the PTE has been marked
-		 * dirty. In this case page_mkclean() won't "rearm" the page.
-		 *
-		 * To avoid this, remove those "non-dirty" pages from the
-		 * pagelist before calling the driver's callback, then add
-		 * them back to get processed on the next work iteration.
-		 * At that time, their PTEs will hopefully be dirty for real.
-		 */
-		if (!page_mkclean(page))
-			list_move_tail(&page->lru, &non_dirty);
-		unlock_page(page);
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+		lock_page(cur);
+		page_mkclean(cur);
+		unlock_page(cur);
 	}
 
 	/* driver's callback with pagelist */
 	fbdefio->deferred_io(info, &fbdefio->pagelist);
 
-	/* clear the list... */
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+	/* clear the list */
+	list_for_each_safe(node, next, &fbdefio->pagelist) {
 		list_del(node);
 	}
-	/* ... and add back the "non-dirty" pages to the list */
-	list_splice_tail(&non_dirty, &fbdefio->pagelist);
 	mutex_unlock(&fbdefio->lock);
 }
 
@@ -218,7 +202,6 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct list_head *node, *tmp_node;
 	struct page *page;
 	int i;
 
@@ -226,13 +209,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 	cancel_delayed_work(&info->deferred_work);
 	flush_scheduled_work();
 
-	/* the list may have still some non-dirty pages at this point */
-	mutex_lock(&fbdefio->lock);
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
-		list_del(node);
-	}
-	mutex_unlock(&fbdefio->lock);
-
 	/* clear out the mapping that we setup */
 	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
 		page = fb_deferred_io_page(info, i);