author     Nick Piggin <npiggin@suse.de>	2008-08-20 14:09:20 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2008-08-20 15:40:32 -0700
commit     14bac5acfdb6a40be64acc042c6db73f1a68f6a4
tree       5148a0440674ef618a5af4e1b549794db9f783c1 /mm
parent     538f8ea6c85232d00bfa5edd9ba85f16c01057c9
mm: xip/ext2 fix block allocation race
XIP can call into get_xip_mem concurrently for the same (file, offset) with create=1. This usually maps down to get_block, which expects the page lock to prevent such a situation. This causes ext2 to explode for one reason or another.

Serialise those calls for the moment. For common usages today, I suspect get_xip_mem is rarely called to create new blocks. In future, as XIP technologies evolve, we might need to look at which operations require scalability and rework the locking to suit.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Jared Hulbert <jaredeh@gmail.com>
Acked-by: Carsten Otte <cotte@freenet.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
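For illustration only, not part of the patch below: a minimal sketch of the serialisation pattern this commit applies, assuming the kernel APIs of that era (DEFINE_MUTEX, mutex_lock/mutex_unlock, and the ->get_xip_mem address_space operation). The helper xip_fault_allocate() is hypothetical; in the real patch the lock/unlock pair is placed inline in xip_file_fault() and __xip_file_write(), and xip_sparse_mutex already exists in mm/filemap_xip.c.

/*
 * Sketch only -- not the mainline code.
 */
#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(xip_sparse_mutex);

static int xip_fault_allocate(struct address_space *mapping, pgoff_t pgoff,
			      void **xip_mem, unsigned long *xip_pfn)
{
	int error;

	/*
	 * get_block-style allocators expect the page lock to exclude
	 * concurrent create=1 callers for the same (file, offset).  XIP
	 * has no struct page to lock here, so serialise the allocating
	 * call with a global mutex instead.
	 */
	mutex_lock(&xip_sparse_mutex);
	error = mapping->a_ops->get_xip_mem(mapping, pgoff, 1,
					    xip_mem, xip_pfn);
	mutex_unlock(&xip_sparse_mutex);

	return error;
}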
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap_xip.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 5b9ec47ea25..b5167dfb2f2 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -248,15 +248,16 @@ again:
 		int err;
 
 		/* maybe shared writable, allocate new block */
+		mutex_lock(&xip_sparse_mutex);
 		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
 							&xip_mem, &xip_pfn);
+		mutex_unlock(&xip_sparse_mutex);
 		if (error)
 			return VM_FAULT_SIGBUS;
 		/* unmap sparse mappings at pgoff from all other vmas */
 		__xip_unmap(mapping, vmf->pgoff);
 
 found:
-		printk("%s insert %lx@%lx\n", current->comm, (unsigned long)vmf->virtual_address, xip_pfn);
 		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
 							xip_pfn);
 		if (err == -ENOMEM)
@@ -340,8 +341,10 @@ __xip_file_write(struct file *filp, const char __user *buf,
 						&xip_mem, &xip_pfn);
 		if (status == -ENODATA) {
 			/* we allocate a new page unmap it */
+			mutex_lock(&xip_sparse_mutex);
 			status = a_ops->get_xip_mem(mapping, index, 1,
 							&xip_mem, &xip_pfn);
+			mutex_unlock(&xip_sparse_mutex);
 			if (!status)
 				/* unmap page at pgoff from all other vmas */
 				__xip_unmap(mapping, index);