Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig | 1
-rw-r--r--  fs/Makefile | 5
-rw-r--r--  fs/adfs/Kconfig | 1
-rw-r--r--  fs/adfs/super.c | 8
-rw-r--r--  fs/affs/super.c | 14
-rw-r--r--  fs/afs/flock.c | 5
-rw-r--r--  fs/afs/mntpt.c | 1
-rw-r--r--  fs/afs/super.c | 5
-rw-r--r--  fs/autofs/Kconfig | 1
-rw-r--r--  fs/autofs/root.c | 2
-rw-r--r--  fs/autofs4/dev-ioctl.c | 1
-rw-r--r--  fs/autofs4/root.c | 14
-rw-r--r--  fs/bfs/inode.c | 5
-rw-r--r--  fs/binfmt_misc.c | 3
-rw-r--r--  fs/block_dev.c | 2
-rw-r--r--  fs/btrfs/disk-io.c | 19
-rw-r--r--  fs/btrfs/extent-tree.c | 3
-rw-r--r--  fs/btrfs/super.c | 1
-rw-r--r--  fs/btrfs/volumes.c | 4
-rw-r--r--  fs/btrfs/volumes.h | 1
-rw-r--r--  fs/buffer.c | 7
-rw-r--r--  fs/cachefiles/daemon.c | 1
-rw-r--r--  fs/char_dev.c | 1
-rw-r--r--  fs/cifs/README | 5
-rw-r--r--  fs/cifs/cifs_debug.c | 12
-rw-r--r--  fs/cifs/cifs_debug.h | 2
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 24
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 13
-rw-r--r--  fs/cifs/cifsacl.c | 46
-rw-r--r--  fs/cifs/cifsencrypt.c | 214
-rw-r--r--  fs/cifs/cifsfs.c | 105
-rw-r--r--  fs/cifs/cifsfs.h | 10
-rw-r--r--  fs/cifs/cifsglob.h | 91
-rw-r--r--  fs/cifs/cifspdu.h | 1
-rw-r--r--  fs/cifs/cifsproto.h | 22
-rw-r--r--  fs/cifs/cifssmb.c | 30
-rw-r--r--  fs/cifs/cn_cifs.h | 37
-rw-r--r--  fs/cifs/connect.c | 534
-rw-r--r--  fs/cifs/dir.c | 212
-rw-r--r--  fs/cifs/file.c | 791
-rw-r--r--  fs/cifs/fscache.c | 13
-rw-r--r--  fs/cifs/inode.c | 237
-rw-r--r--  fs/cifs/ioctl.c | 17
-rw-r--r--  fs/cifs/link.c | 372
-rw-r--r--  fs/cifs/misc.c | 32
-rw-r--r--  fs/cifs/ntlmssp.h | 15
-rw-r--r--  fs/cifs/readdir.c | 79
-rw-r--r--  fs/cifs/sess.c | 167
-rw-r--r--  fs/cifs/transport.c | 6
-rw-r--r--  fs/cifs/xattr.c | 60
-rw-r--r--  fs/coda/cache.c | 17
-rw-r--r--  fs/coda/cnode.c | 19
-rw-r--r--  fs/coda/dir.c | 155
-rw-r--r--  fs/coda/file.c | 31
-rw-r--r--  fs/coda/inode.c | 57
-rw-r--r--  fs/coda/pioctl.c | 23
-rw-r--r--  fs/coda/psdev.c | 42
-rw-r--r--  fs/coda/symlink.c | 3
-rw-r--r--  fs/coda/upcall.c | 89
-rw-r--r--  fs/compat.c | 2
-rw-r--r--  fs/compat_ioctl.c | 70
-rw-r--r--  fs/debugfs/file.c | 3
-rw-r--r--  fs/dlm/debug_fs.c | 3
-rw-r--r--  fs/dlm/lock.c | 3
-rw-r--r--  fs/dlm/plock.c | 3
-rw-r--r--  fs/dlm/user.c | 3
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 2
-rw-r--r--  fs/ecryptfs/file.c | 4
-rw-r--r--  fs/ecryptfs/miscdev.c | 1
-rw-r--r--  fs/eventfd.c | 1
-rw-r--r--  fs/eventpoll.c | 3
-rw-r--r--  fs/exofs/dir.c | 4
-rw-r--r--  fs/exofs/inode.c | 78
-rw-r--r--  fs/exofs/ios.c | 10
-rw-r--r--  fs/ext2/inode.c | 4
-rw-r--r--  fs/ext2/super.c | 6
-rw-r--r--  fs/ext3/fsync.c | 3
-rw-r--r--  fs/ext3/super.c | 13
-rw-r--r--  fs/ext4/fsync.c | 5
-rw-r--r--  fs/ext4/mballoc.c | 2
-rw-r--r--  fs/ext4/super.c | 11
-rw-r--r--  fs/fat/fatent.c | 3
-rw-r--r--  fs/fat/inode.c | 5
-rw-r--r--  fs/fat/misc.c | 5
-rw-r--r--  fs/fat/namei_msdos.c | 6
-rw-r--r--  fs/fat/namei_vfat.c | 6
-rw-r--r--  fs/fifo.c | 1
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 14
-rw-r--r--  fs/freevxfs/vxfs_super.c | 7
-rw-r--r--  fs/fuse/control.c | 4
-rw-r--r--  fs/fuse/cuse.c | 1
-rw-r--r--  fs/gfs2/file.c | 4
-rw-r--r--  fs/gfs2/log.c | 19
-rw-r--r--  fs/gfs2/main.c | 2
-rw-r--r--  fs/gfs2/rgrp.c | 6
-rw-r--r--  fs/hfs/super.c | 6
-rw-r--r--  fs/hostfs/hostfs.h | 7
-rw-r--r--  fs/hpfs/Kconfig | 1
-rw-r--r--  fs/hpfs/super.c | 8
-rw-r--r--  fs/hppfs/hppfs.c | 1
-rw-r--r--  fs/hugetlbfs/inode.c | 1
-rw-r--r--  fs/isofs/dir.c | 6
-rw-r--r--  fs/isofs/inode.c | 9
-rw-r--r--  fs/isofs/isofs.h | 1
-rw-r--r--  fs/isofs/namei.c | 8
-rw-r--r--  fs/isofs/rock.c | 10
-rw-r--r--  fs/jbd/commit.c | 32
-rw-r--r--  fs/jbd2/checkpoint.c | 3
-rw-r--r--  fs/jbd2/commit.c | 76
-rw-r--r--  fs/jffs2/fs.c | 4
-rw-r--r--  fs/jffs2/super.c | 9
-rw-r--r--  fs/jfs/jfs_logmgr.c | 6
-rw-r--r--  fs/jfs/jfs_mount.c | 4
-rw-r--r--  fs/jfs/super.c | 23
-rw-r--r--  fs/lockd/clntlock.c | 15
-rw-r--r--  fs/lockd/clntproc.c | 13
-rw-r--r--  fs/lockd/host.c | 1
-rw-r--r--  fs/lockd/mon.c | 1
-rw-r--r--  fs/lockd/svc.c | 2
-rw-r--r--  fs/lockd/svc4proc.c | 2
-rw-r--r--  fs/lockd/svclock.c | 31
-rw-r--r--  fs/lockd/svcproc.c | 2
-rw-r--r--  fs/locks.c | 112
-rw-r--r--  fs/logfs/dir.c | 1
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/ncpfs/dir.c | 221
-rw-r--r--  fs/ncpfs/file.c | 25
-rw-r--r--  fs/ncpfs/inode.c | 55
-rw-r--r--  fs/ncpfs/ioctl.c | 470
-rw-r--r--  fs/ncpfs/ncplib_kernel.c | 101
-rw-r--r--  fs/ncpfs/ncplib_kernel.h | 15
-rw-r--r--  fs/ncpfs/ncpsign_kernel.c | 10
-rw-r--r--  fs/ncpfs/sock.c | 1
-rw-r--r--  fs/nfs/Kconfig | 20
-rw-r--r--  fs/nfs/Makefile | 4
-rw-r--r--  fs/nfs/callback.c | 4
-rw-r--r--  fs/nfs/callback_proc.c | 8
-rw-r--r--  fs/nfs/client.c | 28
-rw-r--r--  fs/nfs/delegation.c | 10
-rw-r--r--  fs/nfs/dir.c | 1013
-rw-r--r--  fs/nfs/dns_resolve.c | 6
-rw-r--r--  fs/nfs/file.c | 86
-rw-r--r--  fs/nfs/idmap.c | 211
-rw-r--r--  fs/nfs/inode.c | 39
-rw-r--r--  fs/nfs/internal.h | 12
-rw-r--r--  fs/nfs/mount_clnt.c | 4
-rw-r--r--  fs/nfs/nfs2xdr.c | 107
-rw-r--r--  fs/nfs/nfs3proc.c | 62
-rw-r--r--  fs/nfs/nfs3xdr.c | 196
-rw-r--r--  fs/nfs/nfs4_fs.h | 4
-rw-r--r--  fs/nfs/nfs4filelayout.c | 280
-rw-r--r--  fs/nfs/nfs4filelayout.h | 94
-rw-r--r--  fs/nfs/nfs4filelayoutdev.c | 448
-rw-r--r--  fs/nfs/nfs4proc.c | 497
-rw-r--r--  fs/nfs/nfs4state.c | 52
-rw-r--r--  fs/nfs/nfs4xdr.c | 700
-rw-r--r--  fs/nfs/nfsroot.c | 566
-rw-r--r--  fs/nfs/pnfs.c | 783
-rw-r--r--  fs/nfs/pnfs.h | 189
-rw-r--r--  fs/nfs/proc.c | 35
-rw-r--r--  fs/nfs/read.c | 4
-rw-r--r--  fs/nfs/super.c | 72
-rw-r--r--  fs/nfs/sysctl.c | 2
-rw-r--r--  fs/nfs/unlink.c | 259
-rw-r--r--  fs/nfs/write.c | 18
-rw-r--r--  fs/nfsd/Kconfig | 13
-rw-r--r--  fs/nfsd/export.c | 73
-rw-r--r--  fs/nfsd/nfs4callback.c | 245
-rw-r--r--  fs/nfsd/nfs4idmap.c | 105
-rw-r--r--  fs/nfsd/nfs4proc.c | 7
-rw-r--r--  fs/nfsd/nfs4state.c | 499
-rw-r--r--  fs/nfsd/nfs4xdr.c | 18
-rw-r--r--  fs/nfsd/nfsctl.c | 27
-rw-r--r--  fs/nfsd/nfsd.h | 2
-rw-r--r--  fs/nfsd/nfssvc.c | 5
-rw-r--r--  fs/nfsd/state.h | 52
-rw-r--r--  fs/nilfs2/Makefile | 2
-rw-r--r--  fs/nilfs2/bmap.c | 22
-rw-r--r--  fs/nilfs2/bmap.h | 10
-rw-r--r--  fs/nilfs2/btnode.c | 17
-rw-r--r--  fs/nilfs2/cpfile.c | 72
-rw-r--r--  fs/nilfs2/cpfile.h | 4
-rw-r--r--  fs/nilfs2/dat.c | 92
-rw-r--r--  fs/nilfs2/dat.h | 4
-rw-r--r--  fs/nilfs2/export.h | 17
-rw-r--r--  fs/nilfs2/gcdat.c | 87
-rw-r--r--  fs/nilfs2/gcinode.c | 134
-rw-r--r--  fs/nilfs2/ifile.c | 51
-rw-r--r--  fs/nilfs2/ifile.h | 4
-rw-r--r--  fs/nilfs2/inode.c | 167
-rw-r--r--  fs/nilfs2/ioctl.c | 24
-rw-r--r--  fs/nilfs2/mdt.c | 313
-rw-r--r--  fs/nilfs2/mdt.h | 32
-rw-r--r--  fs/nilfs2/namei.c | 139
-rw-r--r--  fs/nilfs2/nilfs.h | 38
-rw-r--r--  fs/nilfs2/page.c | 55
-rw-r--r--  fs/nilfs2/page.h | 6
-rw-r--r--  fs/nilfs2/recovery.c | 19
-rw-r--r--  fs/nilfs2/sb.h | 10
-rw-r--r--  fs/nilfs2/segbuf.c | 3
-rw-r--r--  fs/nilfs2/segment.c | 102
-rw-r--r--  fs/nilfs2/segment.h | 10
-rw-r--r--  fs/nilfs2/sufile.c | 77
-rw-r--r--  fs/nilfs2/sufile.h | 6
-rw-r--r--  fs/nilfs2/super.c | 629
-rw-r--r--  fs/nilfs2/the_nilfs.c | 346
-rw-r--r--  fs/nilfs2/the_nilfs.h | 101
-rw-r--r--  fs/no-block.c | 1
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 1
-rw-r--r--  fs/notify/inotify/inotify_user.c | 1
-rw-r--r--  fs/ntfs/super.c | 24
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 2
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c | 1
-rw-r--r--  fs/ocfs2/file.c | 3
-rw-r--r--  fs/ocfs2/stack_user.c | 4
-rw-r--r--  fs/ocfs2/super.c | 7
-rw-r--r--  fs/partitions/check.c | 42
-rw-r--r--  fs/partitions/check.h | 3
-rw-r--r--  fs/partitions/efi.c | 25
-rw-r--r--  fs/partitions/ldm.c | 2
-rw-r--r--  fs/partitions/ldm.h | 2
-rw-r--r--  fs/pipe.c | 2
-rw-r--r--  fs/proc/base.c | 8
-rw-r--r--  fs/proc/proc_sysctl.c | 1
-rw-r--r--  fs/proc/root.c | 1
-rw-r--r--  fs/proc/task_mmu.c | 1
-rw-r--r--  fs/qnx4/dir.c | 4
-rw-r--r--  fs/qnx4/inode.c | 6
-rw-r--r--  fs/qnx4/namei.c | 4
-rw-r--r--  fs/read_write.c | 5
-rw-r--r--  fs/reiserfs/Kconfig | 6
-rw-r--r--  fs/reiserfs/README | 2
-rw-r--r--  fs/reiserfs/file.c | 3
-rw-r--r--  fs/reiserfs/journal.c | 106
-rw-r--r--  fs/romfs/super.c | 1
-rw-r--r--  fs/seq_file.c | 6
-rw-r--r--  fs/signalfd.c | 1
-rw-r--r--  fs/smbfs/Kconfig | 1
-rw-r--r--  fs/smbfs/inode.c | 5
-rw-r--r--  fs/squashfs/dir.c | 3
-rw-r--r--  fs/squashfs/super.c | 5
-rw-r--r--  fs/sysfs/bin.c | 68
-rw-r--r--  fs/timerfd.c | 1
-rw-r--r--  fs/ubifs/commit.c | 4
-rw-r--r--  fs/ubifs/debug.c | 157
-rw-r--r--  fs/ubifs/debug.h | 4
-rw-r--r--  fs/ubifs/file.c | 7
-rw-r--r--  fs/ubifs/gc.c | 82
-rw-r--r--  fs/ubifs/io.c | 20
-rw-r--r--  fs/ubifs/journal.c | 3
-rw-r--r--  fs/ubifs/key.h | 14
-rw-r--r--  fs/ubifs/log.c | 6
-rw-r--r--  fs/ubifs/lpt.c | 7
-rw-r--r--  fs/ubifs/lpt_commit.c | 3
-rw-r--r--  fs/ubifs/master.c | 3
-rw-r--r--  fs/ubifs/misc.h | 9
-rw-r--r--  fs/ubifs/recovery.c | 11
-rw-r--r--  fs/ubifs/replay.c | 20
-rw-r--r--  fs/ubifs/sb.c | 9
-rw-r--r--  fs/ubifs/scan.c | 6
-rw-r--r--  fs/ubifs/shrinker.c | 2
-rw-r--r--  fs/ubifs/super.c | 80
-rw-r--r--  fs/ubifs/tnc.c | 5
-rw-r--r--  fs/ubifs/ubifs.h | 23
-rw-r--r--  fs/udf/Kconfig | 1
-rw-r--r--  fs/udf/super.c | 8
-rw-r--r--  fs/ufs/Kconfig | 1
-rw-r--r--  fs/ufs/super.c | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 237
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 81
-rw-r--r--  fs/xfs/linux-2.6/xfs_cred.h | 28
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.c | 31
-rw-r--r--  fs/xfs/linux-2.6/xfs_globals.c | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_globals.h | 23
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 19
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.h | 6
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 39
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 27
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.h | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 413
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 4
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.h | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_version.h | 29
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 164
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 221
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 16
-rw-r--r--  fs/xfs/xfs_ag.h | 9
-rw-r--r--  fs/xfs/xfs_alloc.c | 4
-rw-r--r--  fs/xfs/xfs_alloc_btree.c | 33
-rw-r--r--  fs/xfs/xfs_attr.c | 37
-rw-r--r--  fs/xfs/xfs_bmap.c | 44
-rw-r--r--  fs/xfs/xfs_bmap.h | 9
-rw-r--r--  fs/xfs/xfs_btree.c | 56
-rw-r--r--  fs/xfs/xfs_btree.h | 14
-rw-r--r--  fs/xfs/xfs_buf_item.c | 7
-rw-r--r--  fs/xfs/xfs_da_btree.c | 2
-rw-r--r--  fs/xfs/xfs_dinode.h | 5
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 2
-rw-r--r--  fs/xfs/xfs_fs.h | 7
-rw-r--r--  fs/xfs/xfs_fsops.c | 14
-rw-r--r--  fs/xfs/xfs_ialloc.c | 2
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c | 33
-rw-r--r--  fs/xfs/xfs_iget.c | 4
-rw-r--r--  fs/xfs/xfs_inode.c | 17
-rw-r--r--  fs/xfs/xfs_inode.h | 30
-rw-r--r--  fs/xfs/xfs_inode_item.c | 9
-rw-r--r--  fs/xfs/xfs_itable.c | 3
-rw-r--r--  fs/xfs/xfs_log.c | 18
-rw-r--r--  fs/xfs/xfs_log_cil.c | 232
-rw-r--r--  fs/xfs/xfs_log_recover.c | 25
-rw-r--r--  fs/xfs/xfs_mount.c | 308
-rw-r--r--  fs/xfs/xfs_mount.h | 9
-rw-r--r--  fs/xfs/xfs_refcache.h | 52
-rw-r--r--  fs/xfs/xfs_rename.c | 14
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 29
-rw-r--r--  fs/xfs/xfs_sb.h | 10
-rw-r--r--  fs/xfs/xfs_trans.c | 91
-rw-r--r--  fs/xfs/xfs_trans.h | 3
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 2
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 30
-rw-r--r--  fs/xfs/xfs_types.h | 2
-rw-r--r--  fs/xfs/xfs_utils.c | 9
-rw-r--r--  fs/xfs/xfs_utils.h | 3
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 65
-rw-r--r--  fs/xfs/xfs_vnodeops.h | 6
328 files changed, 11130 insertions, 7076 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index 3d185308ec8..65781de44fc 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -50,6 +50,7 @@ endif # BLOCK
config FILE_LOCKING
bool "Enable POSIX file locking API" if EMBEDDED
default y
+ select BKL # while lockd still uses it.
help
This option enables standard file locking support, required
for filesystems like NFS and for the flock() system
diff --git a/fs/Makefile b/fs/Makefile
index e6ec1d309b1..26956fcec91 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -29,10 +29,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
-
-nfsd-$(CONFIG_NFSD) := nfsctl.o
-obj-y += $(nfsd-y) $(nfsd-m)
-
+obj-$(CONFIG_NFSD_DEPRECATED) += nfsctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
diff --git a/fs/adfs/Kconfig b/fs/adfs/Kconfig
index e55182a7460..1dd5f34b3cf 100644
--- a/fs/adfs/Kconfig
+++ b/fs/adfs/Kconfig
@@ -1,6 +1,7 @@
config ADFS_FS
tristate "ADFS file system support (EXPERIMENTAL)"
depends on BLOCK && EXPERIMENTAL
+ depends on BKL # need to fix
help
The Acorn Disc Filing System is the standard file system of the
RiscOS operating system which runs on Acorn's ARM-based Risc PC
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 4a3af7075c1..d9803f73236 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -352,11 +352,15 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
struct adfs_sb_info *asb;
struct inode *root;
+ lock_kernel();
+
sb->s_flags |= MS_NODIRATIME;
asb = kzalloc(sizeof(*asb), GFP_KERNEL);
- if (!asb)
+ if (!asb) {
+ unlock_kernel();
return -ENOMEM;
+ }
sb->s_fs_info = asb;
/* set default options */
@@ -474,6 +478,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
goto error;
} else
sb->s_root->d_op = &adfs_dentry_operations;
+ unlock_kernel();
return 0;
error_free_bh:
@@ -481,6 +486,7 @@ error_free_bh:
error:
sb->s_fs_info = NULL;
kfree(asb);
+ unlock_kernel();
return -EINVAL;
}
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 9581ea94d5a..fa4fbe1e238 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -16,7 +16,6 @@
#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/slab.h>
#include "affs.h"
@@ -46,8 +45,6 @@ affs_put_super(struct super_block *sb)
struct affs_sb_info *sbi = AFFS_SB(sb);
pr_debug("AFFS: put_super()\n");
- lock_kernel();
-
if (!(sb->s_flags & MS_RDONLY) && sb->s_dirt)
affs_commit_super(sb, 1, 1);
@@ -56,8 +53,6 @@ affs_put_super(struct super_block *sb)
affs_brelse(sbi->s_root_bh);
kfree(sbi);
sb->s_fs_info = NULL;
-
- unlock_kernel();
}
static void
@@ -302,6 +297,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
+
sb->s_fs_info = sbi;
mutex_init(&sbi->s_bmlock);
spin_lock_init(&sbi->symlink_lock);
@@ -527,7 +523,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
kfree(new_opts);
return -EINVAL;
}
- lock_kernel();
+
replace_mount_options(sb, new_opts);
sbi->s_flags = mount_flags;
@@ -543,17 +539,15 @@ affs_remount(struct super_block *sb, int *flags, char *data)
memcpy(sbi->s_volume, volume, 32);
spin_unlock(&sbi->symlink_lock);
- if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
- unlock_kernel();
+ if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
return 0;
- }
+
if (*flags & MS_RDONLY) {
affs_write_super(sb);
affs_free_bitmap(sb);
} else
res = affs_init_bitmap(sb, flags);
- unlock_kernel();
return res;
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 0931bc1325e..757d664575d 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/smp_lock.h>
#include "internal.h"
#define AFS_LOCK_GRANTED 0
@@ -274,7 +273,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
- lock_kernel();
+ lock_flocks();
/* make sure we've got a callback on this file and that our view of the
* data version is up to date */
@@ -421,7 +420,7 @@ given_lock:
afs_vnode_fetch_status(vnode, NULL, key);
error:
- unlock_kernel();
+ unlock_flocks();
_leave(" = %d", ret);
return ret;
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 6d552686c49..6153417caf5 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -29,6 +29,7 @@ static void afs_mntpt_expiry_timed_out(struct work_struct *work);
const struct file_operations afs_mntpt_file_operations = {
.open = afs_mntpt_open,
+ .llseek = noop_llseek,
};
const struct inode_operations afs_mntpt_inode_operations = {
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 77e1e5a6115..eacf76d98ae 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -19,7 +19,6 @@
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
@@ -453,12 +452,8 @@ static void afs_put_super(struct super_block *sb)
_enter("");
- lock_kernel();
-
afs_put_volume(as->volume);
- unlock_kernel();
-
_leave("");
}
diff --git a/fs/autofs/Kconfig b/fs/autofs/Kconfig
index 5f3bea90911..480e210c83a 100644
--- a/fs/autofs/Kconfig
+++ b/fs/autofs/Kconfig
@@ -1,5 +1,6 @@
config AUTOFS_FS
tristate "Kernel automounter support"
+ depends on BKL # unfixable, just use autofs4
help
The automounter is a tool to automatically mount remote file systems
on demand. This implementation is partially kernel-based to reduce
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 11b1ea786d0..0c4ca81aeae 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -27,7 +27,9 @@ static int autofs_root_unlink(struct inode *,struct dentry *);
static int autofs_root_rmdir(struct inode *,struct dentry *);
static int autofs_root_mkdir(struct inode *,struct dentry *,int);
static long autofs_root_ioctl(struct file *,unsigned int,unsigned long);
+#ifdef CONFIG_COMPAT
static long autofs_root_compat_ioctl(struct file *,unsigned int,unsigned long);
+#endif
const struct file_operations autofs_root_operations = {
.llseek = generic_file_llseek,
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index ba4a38b9c22..eff9a419469 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -724,6 +724,7 @@ static const struct file_operations _dev_ioctl_fops = {
.unlocked_ioctl = autofs_dev_ioctl,
.compat_ioctl = autofs_dev_ioctl_compat,
.owner = THIS_MODULE,
+ .llseek = noop_llseek,
};
static struct miscdevice _autofs_dev_ioctl_misc = {
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index cb1bd38dc08..d5c1401f003 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -19,7 +19,7 @@
#include <linux/param.h>
#include <linux/time.h>
#include <linux/compat.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include "autofs_i.h"
@@ -28,7 +28,9 @@ static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
static int autofs4_dir_mkdir(struct inode *,struct dentry *,int);
static long autofs4_root_ioctl(struct file *,unsigned int,unsigned long);
+#ifdef CONFIG_COMPAT
static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long);
+#endif
static int autofs4_dir_open(struct inode *inode, struct file *file);
static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
static void *autofs4_follow_link(struct dentry *, struct nameidata *);
@@ -978,15 +980,17 @@ static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
}
}
+static DEFINE_MUTEX(autofs4_ioctl_mutex);
+
static long autofs4_root_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
long ret;
struct inode *inode = filp->f_dentry->d_inode;
- lock_kernel();
+ mutex_lock(&autofs4_ioctl_mutex);
ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
- unlock_kernel();
+ mutex_unlock(&autofs4_ioctl_mutex);
return ret;
}
@@ -998,13 +1002,13 @@ static long autofs4_root_compat_ioctl(struct file *filp,
struct inode *inode = filp->f_path.dentry->d_inode;
int ret;
- lock_kernel();
+ mutex_lock(&autofs4_ioctl_mutex);
if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
else
ret = autofs4_root_ioctl_unlocked(inode, filp, cmd,
(unsigned long)compat_ptr(arg));
- unlock_kernel();
+ mutex_unlock(&autofs4_ioctl_mutex);
return ret;
}
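
[Editor's note] The autofs4 hunk above shows the pattern this series applies to several ioctl paths: drop the Big Kernel Lock and serialize the handler with a filesystem-private mutex instead. A minimal sketch of that pattern follows, using a hypothetical handler name that is not part of the patch:

#include <linux/fs.h>
#include <linux/mutex.h>

/* Hypothetical subsystem-private lock replacing lock_kernel()/unlock_kernel(). */
static DEFINE_MUTEX(example_ioctl_mutex);

static long example_ioctl_unlocked(struct inode *inode, struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	/* per-command work runs here, without the BKL */
	return 0;
}

static long example_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	long ret;

	mutex_lock(&example_ioctl_mutex);	/* was lock_kernel() */
	ret = example_ioctl_unlocked(inode, filp, cmd, arg);
	mutex_unlock(&example_ioctl_mutex);	/* was unlock_kernel() */
	return ret;
}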
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index c4daf0f5fc0..883e77acd5a 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
@@ -215,14 +214,10 @@ static void bfs_put_super(struct super_block *s)
if (!info)
return;
- lock_kernel();
-
mutex_destroy(&info->bfs_lock);
kfree(info->si_imap);
kfree(info);
s->s_fs_info = NULL;
-
- unlock_kernel();
}
static int bfs_statfs(struct dentry *dentry, struct kstatfs *buf)
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index fd0cc0bf9a4..139fc8083f5 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -576,6 +576,7 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
static const struct file_operations bm_entry_operations = {
.read = bm_entry_read,
.write = bm_entry_write,
+ .llseek = default_llseek,
};
/* /register */
@@ -643,6 +644,7 @@ out:
static const struct file_operations bm_register_operations = {
.write = bm_register_write,
+ .llseek = noop_llseek,
};
/* /status */
@@ -680,6 +682,7 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer,
static const struct file_operations bm_status_operations = {
.read = bm_status_read,
.write = bm_status_write,
+ .llseek = default_llseek,
};
/* Superblock handling */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 50e8c8582fa..b737451e2e9 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -370,7 +370,7 @@ int blkdev_fsync(struct file *filp, int datasync)
*/
mutex_unlock(&bd_inode->i_mutex);
- error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
+ error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
if (error == -EOPNOTSUPP)
error = 0;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 64f10082f04..5e789f4a3ed 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2063,7 +2063,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+ if (printk_ratelimit()) {
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
bdevname(bh->b_bdev, b));
@@ -2200,21 +2200,10 @@ static int write_dev_supers(struct btrfs_device *device,
bh->b_end_io = btrfs_end_buffer_write_sync;
}
- if (i == last_barrier && do_barriers && device->barriers) {
- ret = submit_bh(WRITE_BARRIER, bh);
- if (ret == -EOPNOTSUPP) {
- printk("btrfs: disabling barriers on dev %s\n",
- device->name);
- set_buffer_uptodate(bh);
- device->barriers = 0;
- /* one reference for submit_bh */
- get_bh(bh);
- lock_buffer(bh);
- ret = submit_bh(WRITE_SYNC, bh);
- }
- } else {
+ if (i == last_barrier && do_barriers)
+ ret = submit_bh(WRITE_FLUSH_FUA, bh);
+ else
ret = submit_bh(WRITE_SYNC, bh);
- }
if (ret)
errors++;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 32d094002a5..0b81ecdb101 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1695,8 +1695,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
static void btrfs_issue_discard(struct block_device *bdev,
u64 start, u64 len)
{
- blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
- BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+ blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
}
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 1776dbd8dc9..144f8a5730f 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -815,6 +815,7 @@ static const struct file_operations btrfs_ctl_fops = {
.unlocked_ioctl = btrfs_control_ioctl,
.compat_ioctl = btrfs_control_ioctl,
.owner = THIS_MODULE,
+ .llseek = noop_llseek,
};
static struct miscdevice btrfs_misc = {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index dd318ff280b..e25e46a8b4e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -398,7 +398,6 @@ static noinline int device_list_add(const char *path,
device->work.func = pending_bios_fn;
memcpy(device->uuid, disk_super->dev_item.uuid,
BTRFS_UUID_SIZE);
- device->barriers = 1;
spin_lock_init(&device->io_lock);
device->name = kstrdup(path, GFP_NOFS);
if (!device->name) {
@@ -462,7 +461,6 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
device->devid = orig_dev->devid;
device->work.func = pending_bios_fn;
memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
- device->barriers = 1;
spin_lock_init(&device->io_lock);
INIT_LIST_HEAD(&device->dev_list);
INIT_LIST_HEAD(&device->dev_alloc_list);
@@ -1489,7 +1487,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
trans = btrfs_start_transaction(root, 0);
lock_chunks(root);
- device->barriers = 1;
device->writeable = 1;
device->work.func = pending_bios_fn;
generate_random_uuid(device->uuid);
@@ -3084,7 +3081,6 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
return NULL;
list_add(&device->dev_list,
&fs_devices->devices);
- device->barriers = 1;
device->dev_root = root->fs_info->dev_root;
device->devid = devid;
device->work.func = pending_bios_fn;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 31b0fabdd2e..2b638b6e4ee 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -42,7 +42,6 @@ struct btrfs_device {
int running_pending;
u64 generation;
- int barriers;
int writeable;
int in_fs_metadata;
diff --git a/fs/buffer.c b/fs/buffer.c
index 3e7dca279d1..7f0b9b083f7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -156,7 +156,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
+ if (!quiet_error(bh)) {
buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
@@ -2891,7 +2891,6 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- set_bit(BH_Eopnotsupp, &bh->b_state);
}
if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
@@ -3031,10 +3030,6 @@ int __sync_dirty_buffer(struct buffer_head *bh, int rw)
bh->b_end_io = end_buffer_write_sync;
ret = submit_bh(rw, bh);
wait_on_buffer(bh);
- if (buffer_eopnotsupp(bh)) {
- clear_buffer_eopnotsupp(bh);
- ret = -EOPNOTSUPP;
- }
if (!ret && !buffer_uptodate(bh))
ret = -EIO;
} else {
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 727caedcdd9..0a1467b1551 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -55,6 +55,7 @@ const struct file_operations cachefiles_daemon_fops = {
.read = cachefiles_daemon_read,
.write = cachefiles_daemon_write,
.poll = cachefiles_daemon_poll,
+ .llseek = noop_llseek,
};
struct cachefiles_daemon_cmd {
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 143d393881c..e5b9df993b9 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -456,6 +456,7 @@ static void cdev_purge(struct cdev *cdev)
*/
const struct file_operations def_chr_fops = {
.open = chrdev_open,
+ .llseek = noop_llseek,
};
static struct kobject *exact_match(dev_t dev, int *part, void *data)
diff --git a/fs/cifs/README b/fs/cifs/README
index 7099a526f77..ee68d103654 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -527,6 +527,11 @@ A partial list of the supported mount options follows:
SFU does). In the future the bottom 9 bits of the
mode also will be emulated using queries of the security
descriptor (ACL).
+ mfsymlinks Enable support for Minshall+French symlinks
+ (see http://wiki.samba.org/index.php/UNIX_Extensions#Minshall.2BFrench_symlinks)
+ This option is ignored when specified together with the
+ 'sfu' option. Minshall+French symlinks are used even if
+ the server supports the CIFS Unix Extensions.
sign Must use packet signing (helps avoid unwanted data modification
by intermediate systems in the route). Note that signing
does not work with lanman or plaintext authentication.
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index eb1ba493489..103ab8b605b 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -148,7 +148,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Servers:");
i = 0;
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
@@ -230,7 +230,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
spin_unlock(&GlobalMid_Lock);
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
seq_putc(m, '\n');
/* BB add code to dump additional info such as TCP session info now */
@@ -270,7 +270,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
@@ -303,7 +303,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
}
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
}
return count;
@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
GlobalCurrentXid, GlobalMaxActiveXid);
i = 0;
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
@@ -397,7 +397,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
}
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
seq_putc(m, '\n');
return 0;
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index aa316891ac0..8942b28cf80 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -34,7 +34,7 @@ void cifs_dump_mids(struct TCP_Server_Info *);
extern int traceSMB; /* flag which enables the function below */
void dump_smb(struct smb_hdr *, int);
#define CIFS_INFO 0x01
-#define CIFS_RC 0x02
+#define CIFS_RC 0x02
#define CIFS_TIMER 0x04
/*
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index d6ced7aa23c..c68a056f27f 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -44,8 +44,7 @@ static void cifs_dfs_expire_automounts(struct work_struct *work)
void cifs_dfs_release_automount_timer(void)
{
BUG_ON(!list_empty(&cifs_dfs_automount_list));
- cancel_delayed_work(&cifs_dfs_automount_task);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&cifs_dfs_automount_task);
}
/**
@@ -306,6 +305,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
int xid, i;
int rc = 0;
struct vfsmount *mnt = ERR_PTR(-ENOENT);
+ struct tcon_link *tlink;
cFYI(1, "in %s", __func__);
BUG_ON(IS_ROOT(dentry));
@@ -315,14 +315,6 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
dput(nd->path.dentry);
nd->path.dentry = dget(dentry);
- cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
- ses = cifs_sb->tcon->ses;
-
- if (!ses) {
- rc = -EINVAL;
- goto out_err;
- }
-
/*
* The MSDFS spec states that paths in DFS referral requests and
* responses must be prefixed by a single '\' character instead of
@@ -335,10 +327,20 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
goto out_err;
}
- rc = get_dfs_path(xid, ses , full_path + 1, cifs_sb->local_nls,
+ cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ goto out_err;
+ }
+ ses = tlink_tcon(tlink)->ses;
+
+ rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
&num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cifs_put_tlink(tlink);
+
for (i = 0; i < num_referrals; i++) {
int len;
dump_referral(referrals+i);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 9e771450c3b..525ba59a410 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -15,6 +15,8 @@
* the GNU Lesser General Public License for more details.
*
*/
+#include <linux/radix-tree.h>
+
#ifndef _CIFS_FS_SB_H
#define _CIFS_FS_SB_H
@@ -36,23 +38,28 @@
#define CIFS_MOUNT_NOPOSIXBRL 0x2000 /* mandatory not posix byte range lock */
#define CIFS_MOUNT_NOSSYNC 0x4000 /* don't do slow SMBflush on every sync*/
#define CIFS_MOUNT_FSCACHE 0x8000 /* local caching enabled */
+#define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */
+#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
struct cifs_sb_info {
- struct cifsTconInfo *tcon; /* primary mount */
- struct list_head nested_tcon_q;
+ struct radix_tree_root tlink_tree;
+#define CIFS_TLINK_MASTER_TAG 0 /* is "master" (mount) tcon */
+ spinlock_t tlink_tree_lock;
struct nls_table *local_nls;
unsigned int rsize;
unsigned int wsize;
+ atomic_t active;
uid_t mnt_uid;
gid_t mnt_gid;
mode_t mnt_file_mode;
mode_t mnt_dir_mode;
- int mnt_cifs_flags;
+ unsigned int mnt_cifs_flags;
int prepathlen;
char *prepath; /* relative path under the share to mount to */
#ifdef CONFIG_CIFS_DFS_UPCALL
char *mountdata; /* mount options received at mount time */
#endif
struct backing_dev_info bdi;
+ struct delayed_work prune_tlinks;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 85d7cf7ff2c..c9b4792ae82 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -557,11 +557,16 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
{
struct cifs_ntsd *pntsd = NULL;
int xid, rc;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+
+ if (IS_ERR(tlink))
+ return NULL;
xid = GetXid();
- rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen);
+ rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
FreeXid(xid);
+ cifs_put_tlink(tlink);
cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
return pntsd;
@@ -574,10 +579,16 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
int oplock = 0;
int xid, rc;
__u16 fid;
+ struct cifsTconInfo *tcon;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+
+ if (IS_ERR(tlink))
+ return NULL;
+ tcon = tlink_tcon(tlink);
xid = GetXid();
- rc = CIFSSMBOpen(xid, cifs_sb->tcon, path, FILE_OPEN, READ_CONTROL, 0,
+ rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
&fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
@@ -585,11 +596,12 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
goto out;
}
- rc = CIFSSMBGetCIFSACL(xid, cifs_sb->tcon, fid, &pntsd, pacllen);
+ rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
- CIFSSMBClose(xid, cifs_sb->tcon, fid);
+ CIFSSMBClose(xid, tcon, fid);
out:
+ cifs_put_tlink(tlink);
FreeXid(xid);
return pntsd;
}
@@ -603,7 +615,7 @@ static struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
struct cifsFileInfo *open_file = NULL;
if (inode)
- open_file = find_readable_file(CIFS_I(inode));
+ open_file = find_readable_file(CIFS_I(inode), true);
if (!open_file)
return get_cifs_acl_by_path(cifs_sb, path, pacllen);
@@ -616,10 +628,15 @@ static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid,
struct cifs_ntsd *pnntsd, u32 acllen)
{
int xid, rc;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
xid = GetXid();
- rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
+ rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen);
FreeXid(xid);
+ cifs_put_tlink(tlink);
cFYI(DBG2, "SetCIFSACL rc = %d", rc);
return rc;
@@ -631,10 +648,16 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
int oplock = 0;
int xid, rc;
__u16 fid;
+ struct cifsTconInfo *tcon;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+
+ tcon = tlink_tcon(tlink);
xid = GetXid();
- rc = CIFSSMBOpen(xid, cifs_sb->tcon, path, FILE_OPEN, WRITE_DAC, 0,
+ rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0,
&fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
@@ -642,12 +665,13 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
goto out;
}
- rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
+ rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen);
cFYI(DBG2, "SetCIFSACL rc = %d", rc);
- CIFSSMBClose(xid, cifs_sb->tcon, fid);
- out:
+ CIFSSMBClose(xid, tcon, fid);
+out:
FreeXid(xid);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -661,7 +685,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);
- open_file = find_readable_file(CIFS_I(inode));
+ open_file = find_readable_file(CIFS_I(inode), true);
if (!open_file)
return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 35042d8f733..7ac0056294c 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -27,6 +27,7 @@
#include "md5.h"
#include "cifs_unicode.h"
#include "cifsproto.h"
+#include "ntlmssp.h"
#include <linux/ctype.h>
#include <linux/random.h>
@@ -42,7 +43,7 @@ extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
unsigned char *p24);
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
- const struct mac_key *key, char *signature)
+ const struct session_key *key, char *signature)
{
struct MD5Context context;
@@ -78,7 +79,7 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
server->sequence_number++;
spin_unlock(&GlobalMid_Lock);
- rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
+ rc = cifs_calculate_signature(cifs_pdu, &server->session_key,
smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
@@ -89,7 +90,7 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
}
static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
- const struct mac_key *key, char *signature)
+ const struct session_key *key, char *signature)
{
struct MD5Context context;
int i;
@@ -145,7 +146,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
server->sequence_number++;
spin_unlock(&GlobalMid_Lock);
- rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
+ rc = cifs_calc_signature2(iov, n_vec, &server->session_key,
smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
@@ -156,14 +157,14 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
}
int cifs_verify_signature(struct smb_hdr *cifs_pdu,
- const struct mac_key *mac_key,
+ const struct session_key *session_key,
__u32 expected_sequence_number)
{
unsigned int rc;
char server_response_sig[8];
char what_we_think_sig_should_be[20];
- if ((cifs_pdu == NULL) || (mac_key == NULL))
+ if (cifs_pdu == NULL || session_key == NULL)
return -EINVAL;
if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
@@ -192,7 +193,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
- rc = cifs_calculate_signature(cifs_pdu, mac_key,
+ rc = cifs_calculate_signature(cifs_pdu, session_key,
what_we_think_sig_should_be);
if (rc)
@@ -209,7 +210,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
}
/* We fill in key by putting in 40 byte array which was allocated by caller */
-int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+int cifs_calculate_session_key(struct session_key *key, const char *rn,
const char *password)
{
char temp_key[16];
@@ -262,6 +263,148 @@ void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
}
#endif /* CIFS_WEAK_PW_HASH */
+/* Build a proper attribute value/target info pairs blob.
+ * Fill in netbios and dns domain name and workstation name
+ * and client time (total five av pairs and + one end of fields indicator.
+ * Allocate domain name which gets freed when session struct is deallocated.
+ */
+static int
+build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+{
+ unsigned int dlen;
+ unsigned int wlen;
+ unsigned int size = 6 * sizeof(struct ntlmssp2_name);
+ __le64 curtime;
+ char *defdmname = "WORKGROUP";
+ unsigned char *blobptr;
+ struct ntlmssp2_name *attrptr;
+
+ if (!ses->domainName) {
+ ses->domainName = kstrdup(defdmname, GFP_KERNEL);
+ if (!ses->domainName)
+ return -ENOMEM;
+ }
+
+ dlen = strlen(ses->domainName);
+ wlen = strlen(ses->server->hostname);
+
+ /* The length of this blob is a size which is
+ * six times the size of a structure which holds name/size +
+ * two times the unicode length of a domain name +
+ * two times the unicode length of a server name +
+ * size of a timestamp (which is 8 bytes).
+ */
+ ses->tilen = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8;
+ ses->tiblob = kzalloc(ses->tilen, GFP_KERNEL);
+ if (!ses->tiblob) {
+ ses->tilen = 0;
+ cERROR(1, "Challenge target info allocation failure");
+ return -ENOMEM;
+ }
+
+ blobptr = ses->tiblob;
+ attrptr = (struct ntlmssp2_name *) blobptr;
+
+ attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
+ attrptr->length = cpu_to_le16(2 * dlen);
+ blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
+ cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
+
+ blobptr += 2 * dlen;
+ attrptr = (struct ntlmssp2_name *) blobptr;
+
+ attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_COMPUTER_NAME);
+ attrptr->length = cpu_to_le16(2 * wlen);
+ blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
+ cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
+
+ blobptr += 2 * wlen;
+ attrptr = (struct ntlmssp2_name *) blobptr;
+
+ attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_DOMAIN_NAME);
+ attrptr->length = cpu_to_le16(2 * dlen);
+ blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
+ cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
+
+ blobptr += 2 * dlen;
+ attrptr = (struct ntlmssp2_name *) blobptr;
+
+ attrptr->type = cpu_to_le16(NTLMSSP_AV_DNS_COMPUTER_NAME);
+ attrptr->length = cpu_to_le16(2 * wlen);
+ blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
+ cifs_strtoUCS((__le16 *)blobptr, ses->server->hostname, wlen, nls_cp);
+
+ blobptr += 2 * wlen;
+ attrptr = (struct ntlmssp2_name *) blobptr;
+
+ attrptr->type = cpu_to_le16(NTLMSSP_AV_TIMESTAMP);
+ attrptr->length = cpu_to_le16(sizeof(__le64));
+ blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
+ curtime = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ memcpy(blobptr, &curtime, sizeof(__le64));
+
+ return 0;
+}
+
+/* Server has provided av pairs/target info in the type 2 challenge
+ * packet and we have plucked it and stored within smb session.
+ * We parse that blob here to find netbios domain name to be used
+ * as part of ntlmv2 authentication (in Target String), if not already
+ * specified on the command line.
+ * If this function returns without any error but without fetching
+ * domain name, authentication may fail against some server but
+ * may not fail against other (those who are not very particular
+ * about target string i.e. for some, just user name might suffice.
+ */
+static int
+find_domain_name(struct cifsSesInfo *ses)
+{
+ unsigned int attrsize;
+ unsigned int type;
+ unsigned int onesize = sizeof(struct ntlmssp2_name);
+ unsigned char *blobptr;
+ unsigned char *blobend;
+ struct ntlmssp2_name *attrptr;
+
+ if (!ses->tilen || !ses->tiblob)
+ return 0;
+
+ blobptr = ses->tiblob;
+ blobend = ses->tiblob + ses->tilen;
+
+ while (blobptr + onesize < blobend) {
+ attrptr = (struct ntlmssp2_name *) blobptr;
+ type = le16_to_cpu(attrptr->type);
+ if (type == NTLMSSP_AV_EOL)
+ break;
+ blobptr += 2; /* advance attr type */
+ attrsize = le16_to_cpu(attrptr->length);
+ blobptr += 2; /* advance attr size */
+ if (blobptr + attrsize > blobend)
+ break;
+ if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
+ if (!attrsize)
+ break;
+ if (!ses->domainName) {
+ struct nls_table *default_nls;
+ ses->domainName =
+ kmalloc(attrsize + 1, GFP_KERNEL);
+ if (!ses->domainName)
+ return -ENOMEM;
+ default_nls = load_nls_default();
+ cifs_from_ucs2(ses->domainName,
+ (__le16 *)blobptr, attrsize, attrsize,
+ default_nls, false);
+ unload_nls(default_nls);
+ break;
+ }
+ }
+ blobptr += attrsize; /* advance attr value */
+ }
+
+ return 0;
+}
+
static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
const struct nls_table *nls_cp)
{
@@ -315,13 +458,14 @@ calc_exit_1:
calc_exit_2:
/* BB FIXME what about bytes 24 through 40 of the signing key?
compare with the NTLM example */
- hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
+ hmac_md5_final(ses->ntlmv2_hash, pctxt);
kfree(pctxt);
return rc;
}
-void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
+int
+setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
const struct nls_table *nls_cp)
{
int rc;
@@ -333,25 +477,48 @@ void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
buf->reserved2 = 0;
- buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
- buf->names[0].length = 0;
- buf->names[1].type = 0;
- buf->names[1].length = 0;
+
+ if (ses->server->secType == RawNTLMSSP) {
+ if (!ses->domainName) {
+ rc = find_domain_name(ses);
+ if (rc) {
+ cERROR(1, "error %d finding domain name", rc);
+ goto setup_ntlmv2_rsp_ret;
+ }
+ }
+ } else {
+ rc = build_avpair_blob(ses, nls_cp);
+ if (rc) {
+ cERROR(1, "error %d building av pair blob", rc);
+ return rc;
+ }
+ }
/* calculate buf->ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, nls_cp);
- if (rc)
+ if (rc) {
cERROR(1, "could not get v2 hash rc %d", rc);
+ goto setup_ntlmv2_rsp_ret;
+ }
CalcNTLMv2_response(ses, resp_buf);
- /* now calculate the MAC key for NTLMv2 */
- hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+ /* now calculate the session key for NTLMv2 */
+ hmac_md5_init_limK_to_64(ses->ntlmv2_hash, 16, &context);
hmac_md5_update(resp_buf, 16, &context);
- hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
+ hmac_md5_final(ses->auth_key.data.ntlmv2.key, &context);
- memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
+ memcpy(&ses->auth_key.data.ntlmv2.resp, resp_buf,
sizeof(struct ntlmv2_resp));
- ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
+ ses->auth_key.len = 16 + sizeof(struct ntlmv2_resp);
+
+ return 0;
+
+setup_ntlmv2_rsp_ret:
+ kfree(ses->tiblob);
+ ses->tiblob = NULL;
+ ses->tilen = 0;
+
+ return rc;
}
void CalcNTLMv2_response(const struct cifsSesInfo *ses,
@@ -359,12 +526,15 @@ void CalcNTLMv2_response(const struct cifsSesInfo *ses,
{
struct HMACMD5Context context;
/* rest of v2 struct already generated */
- memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
- hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
+ memcpy(v2_session_response + 8, ses->cryptKey, 8);
+ hmac_md5_init_limK_to_64(ses->ntlmv2_hash, 16, &context);
hmac_md5_update(v2_session_response+8,
sizeof(struct ntlmv2_resp) - 8, &context);
+ if (ses->tilen)
+ hmac_md5_update(ses->tiblob, ses->tilen, &context);
+
hmac_md5_final(v2_session_response, &context);
/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
}
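
[Editor's note] The comment added in build_avpair_blob() above spells out the target-info length arithmetic: six attribute headers, the domain and workstation names twice each in UCS-2, plus an 8-byte timestamp. A small stand-alone worked example of that arithmetic, with hypothetical name lengths and a simplified stand-in for struct ntlmssp2_name:

#include <stdio.h>

/* simplified stand-in: each av pair header is a 16-bit type + 16-bit length */
struct ntlmssp2_name {
	unsigned short type;
	unsigned short length;
};

int main(void)
{
	unsigned int dlen = 9;		/* strlen("WORKGROUP") */
	unsigned int wlen = 7;		/* e.g. a 7-character server hostname */
	unsigned int size = 6 * sizeof(struct ntlmssp2_name);	/* 24 bytes */
	unsigned int tilen;

	/* same formula as build_avpair_blob(): headers + two copies of each
	 * name in UCS-2 (2 bytes per character) + 8-byte timestamp */
	tilen = size + 2 * (2 * dlen) + 2 * (2 * wlen) + 8;

	printf("target info blob length = %u bytes\n", tilen);	/* prints 96 */
	return 0;
}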
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index b7431afdd76..34371637f21 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -35,7 +35,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
-#include <linux/smp_lock.h>
+#include <net/ipv6.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
@@ -82,6 +82,24 @@ extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
+void
+cifs_sb_active(struct super_block *sb)
+{
+ struct cifs_sb_info *server = CIFS_SB(sb);
+
+ if (atomic_inc_return(&server->active) == 1)
+ atomic_inc(&sb->s_active);
+}
+
+void
+cifs_sb_deactive(struct super_block *sb)
+{
+ struct cifs_sb_info *server = CIFS_SB(sb);
+
+ if (atomic_dec_and_test(&server->active))
+ deactivate_super(sb);
+}
+
static int
cifs_read_super(struct super_block *sb, void *data,
const char *devname, int silent)
@@ -97,6 +115,9 @@ cifs_read_super(struct super_block *sb, void *data,
if (cifs_sb == NULL)
return -ENOMEM;
+ spin_lock_init(&cifs_sb->tlink_tree_lock);
+ INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
+
rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
if (rc) {
kfree(cifs_sb);
@@ -136,9 +157,6 @@ cifs_read_super(struct super_block *sb, void *data,
sb->s_magic = CIFS_MAGIC_NUMBER;
sb->s_op = &cifs_super_ops;
sb->s_bdi = &cifs_sb->bdi;
-/* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
- sb->s_blocksize =
- cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
sb->s_blocksize = CIFS_MAX_MSGSIZE;
sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
inode = cifs_root_iget(sb, ROOT_I);
@@ -200,8 +218,6 @@ cifs_put_super(struct super_block *sb)
return;
}
- lock_kernel();
-
rc = cifs_umount(sb, cifs_sb);
if (rc)
cERROR(1, "cifs_umount failed with return code %d", rc);
@@ -215,8 +231,6 @@ cifs_put_super(struct super_block *sb)
unload_nls(cifs_sb->local_nls);
bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb);
-
- unlock_kernel();
}
static int
@@ -224,7 +238,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *tcon = cifs_sb->tcon;
+ struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
int rc = -EOPNOTSUPP;
int xid;
@@ -366,14 +380,36 @@ static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb);
- struct cifsTconInfo *tcon = cifs_sb->tcon;
+ struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct sockaddr *srcaddr;
+ srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
seq_printf(s, ",unc=%s", tcon->treeName);
- if (tcon->ses->userName)
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
+ seq_printf(s, ",multiuser");
+ else if (tcon->ses->userName)
seq_printf(s, ",username=%s", tcon->ses->userName);
+
if (tcon->ses->domainName)
seq_printf(s, ",domain=%s", tcon->ses->domainName);
+ if (srcaddr->sa_family != AF_UNSPEC) {
+ struct sockaddr_in *saddr4;
+ struct sockaddr_in6 *saddr6;
+ saddr4 = (struct sockaddr_in *)srcaddr;
+ saddr6 = (struct sockaddr_in6 *)srcaddr;
+ if (srcaddr->sa_family == AF_INET6)
+ seq_printf(s, ",srcaddr=%pI6c",
+ &saddr6->sin6_addr);
+ else if (srcaddr->sa_family == AF_INET)
+ seq_printf(s, ",srcaddr=%pI4",
+ &saddr4->sin_addr.s_addr);
+ else
+ seq_printf(s, ",srcaddr=BAD-AF:%i",
+ (int)(srcaddr->sa_family));
+ }
+
seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
seq_printf(s, ",forceuid");
@@ -422,6 +458,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
seq_printf(s, ",dynperm");
if (m->mnt_sb->s_flags & MS_POSIXACL)
seq_printf(s, ",acl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
+ seq_printf(s, ",mfsymlinks");
seq_printf(s, ",rsize=%d", cifs_sb->rsize);
seq_printf(s, ",wsize=%d", cifs_sb->wsize);
@@ -437,20 +475,18 @@ static void cifs_umount_begin(struct super_block *sb)
if (cifs_sb == NULL)
return;
- tcon = cifs_sb->tcon;
- if (tcon == NULL)
- return;
+ tcon = cifs_sb_master_tcon(cifs_sb);
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
/* we have other mounts to same share or we have
already tried to force umount this and woken up
all waiting network requests, nothing to do */
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return;
} else if (tcon->tc_count == 1)
tcon->tidStatus = CifsExiting;
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
/* cancel_notify_requests(tcon); */
@@ -514,7 +550,9 @@ cifs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
int rc;
- struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
+ struct super_block *sb;
+
+ sb = sget(fs_type, NULL, set_anon_super, NULL);
cFYI(1, "Devname: %s flags: %d ", dev_name, flags);
@@ -565,9 +603,10 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
{
- /* note that this is called by vfs setlease with the BKL held
- although I doubt that BKL is needed here in cifs */
+ /* note that this is called by vfs setlease with lock_flocks held
+ to protect *lease from going away */
struct inode *inode = file->f_path.dentry->d_inode;
+ struct cifsFileInfo *cfile = file->private_data;
if (!(S_ISREG(inode->i_mode)))
return -EINVAL;
@@ -578,8 +617,8 @@ static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
((arg == F_WRLCK) &&
(CIFS_I(inode)->clientCanCacheAll)))
return generic_setlease(file, arg, lease);
- else if (CIFS_SB(inode->i_sb)->tcon->local_lease &&
- !CIFS_I(inode)->clientCanCacheRead)
+ else if (tlink_tcon(cfile->tlink)->local_lease &&
+ !CIFS_I(inode)->clientCanCacheRead)
/* If the server claims to support oplock on this
file, then we still need to check oplock even
if the local_lease mount option is set, but there
@@ -898,8 +937,8 @@ init_cifs(void)
GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0;
memset(Local_System_Name, 0, 15);
- rwlock_init(&GlobalSMBSeslock);
- rwlock_init(&cifs_tcp_ses_lock);
+ spin_lock_init(&cifs_tcp_ses_lock);
+ spin_lock_init(&cifs_file_list_lock);
spin_lock_init(&GlobalMid_Lock);
if (cifs_max_pending < 2) {
@@ -912,11 +951,11 @@ init_cifs(void)
rc = cifs_fscache_register();
if (rc)
- goto out;
+ goto out_clean_proc;
rc = cifs_init_inodecache();
if (rc)
- goto out_clean_proc;
+ goto out_unreg_fscache;
rc = cifs_init_mids();
if (rc)
@@ -938,19 +977,19 @@ init_cifs(void)
return 0;
#ifdef CONFIG_CIFS_UPCALL
- out_unregister_filesystem:
+out_unregister_filesystem:
unregister_filesystem(&cifs_fs_type);
#endif
- out_destroy_request_bufs:
+out_destroy_request_bufs:
cifs_destroy_request_bufs();
- out_destroy_mids:
+out_destroy_mids:
cifs_destroy_mids();
- out_destroy_inodecache:
+out_destroy_inodecache:
cifs_destroy_inodecache();
- out_clean_proc:
- cifs_proc_clean();
+out_unreg_fscache:
cifs_fscache_unregister();
- out:
+out_clean_proc:
+ cifs_proc_clean();
return rc;
}
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d82f5fb4761..f35795a16b4 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -42,10 +42,8 @@ extern const struct address_space_operations cifs_addr_ops;
extern const struct address_space_operations cifs_addr_ops_smallbuf;
/* Functions related to super block operations */
-/* extern const struct super_operations cifs_super_ops;*/
-extern void cifs_read_inode(struct inode *);
-/*extern void cifs_delete_inode(struct inode *);*/ /* BB not needed yet */
-/* extern void cifs_write_inode(struct inode *); */ /* BB not needed yet */
+extern void cifs_sb_active(struct super_block *sb);
+extern void cifs_sb_deactive(struct super_block *sb);
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
@@ -104,7 +102,7 @@ extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
const char *symname);
extern int cifs_removexattr(struct dentry *, const char *);
-extern int cifs_setxattr(struct dentry *, const char *, const void *,
+extern int cifs_setxattr(struct dentry *, const char *, const void *,
size_t, int);
extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
@@ -114,5 +112,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.65"
+#define CIFS_VERSION "1.67"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 0cdfb8c32ac..3365e77f6f2 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -97,7 +97,7 @@ enum protocolEnum {
/* Netbios frames protocol not supported at this time */
};
-struct mac_key {
+struct session_key {
unsigned int len;
union {
char ntlm[CIFS_SESS_KEY_SIZE + 16];
@@ -139,6 +139,7 @@ struct TCP_Server_Info {
struct sockaddr_in sockAddr;
struct sockaddr_in6 sockAddr6;
} addr;
+ struct sockaddr_storage srcaddr; /* locally bind to this IP */
wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
struct list_head pending_mid_q;
@@ -178,12 +179,10 @@ struct TCP_Server_Info {
int capabilities; /* allow selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
__u16 CurrentMid; /* multiplex id - rotating counter */
- char cryptKey[CIFS_CRYPTO_KEY_SIZE];
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* needed for CIFS PDU signature */
- struct mac_key mac_signing_key;
- char ntlmv2_hash[16];
+ struct session_key session_key;
unsigned long lstrp; /* when we got last response from this server */
u16 dialect; /* dialect index that server chose */
/* extended security flavors that server supports */
@@ -191,6 +190,7 @@ struct TCP_Server_Info {
bool sec_mskerberos; /* supports legacy MS Kerberos */
bool sec_kerberosu2u; /* supports U2U Kerberos */
bool sec_ntlmssp; /* supports NTLMSSP */
+ bool session_estab; /* mark when very first sess is established */
#ifdef CONFIG_CIFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
@@ -222,6 +222,11 @@ struct cifsSesInfo {
char userName[MAX_USERNAME_SIZE + 1];
char *domainName;
char *password;
+ char cryptKey[CIFS_CRYPTO_KEY_SIZE];
+ struct session_key auth_key;
+ char ntlmv2_hash[16];
+ unsigned int tilen; /* length of the target info blob */
+ unsigned char *tiblob; /* target info blob in challenge response */
bool need_reconnect:1; /* connection reset, uid now invalid */
};
/* no more than one of the following three session flags may be set */
@@ -308,6 +313,44 @@ struct cifsTconInfo {
};
/*
+ * This is a refcounted and timestamped container for a tcon pointer. The
+ * container holds a tcon reference. It is considered safe to free one of
+ * these when the tl_count goes to 0. The tl_time is updated when the last
+ * reference is dropped, and is used to decide when an idle tcon_link can
+ * be pruned.
+ */
+struct tcon_link {
+ unsigned long tl_index;
+ unsigned long tl_flags;
+#define TCON_LINK_MASTER 0
+#define TCON_LINK_PENDING 1
+#define TCON_LINK_IN_TREE 2
+ unsigned long tl_time;
+ atomic_t tl_count;
+ struct cifsTconInfo *tl_tcon;
+};
+
+extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
+
+static inline struct cifsTconInfo *
+tlink_tcon(struct tcon_link *tlink)
+{
+ return tlink->tl_tcon;
+}
+
+extern void cifs_put_tlink(struct tcon_link *tlink);
+
+static inline struct tcon_link *
+cifs_get_tlink(struct tcon_link *tlink)
+{
+ if (tlink && !IS_ERR(tlink))
+ atomic_inc(&tlink->tl_count);
+ return tlink;
+}
+
+/* This function is always expected to succeed */
+extern struct cifsTconInfo *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
+
+/*
* This info hangs off the cifsFileInfo structure, pointed to by llist.
* This is used to track byte stream locks on the file
*/
@@ -345,12 +388,11 @@ struct cifsFileInfo {
__u16 netfid; /* file id from remote */
/* BB add lock scope info here if needed */ ;
/* lock scope id (0 if none) */
- struct file *pfile; /* needed for writepage */
- struct inode *pInode; /* needed for oplock break */
- struct vfsmount *mnt;
+ struct dentry *dentry;
+ unsigned int f_flags;
+ struct tcon_link *tlink;
struct mutex lock_mutex;
struct list_head llist; /* list of byte range locks we have. */
- bool closePend:1; /* file is marked to close */
bool invalidHandle:1; /* file closed via session abend */
bool oplock_break_cancelled:1;
atomic_t count; /* reference count */
@@ -365,14 +407,7 @@ static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file)
atomic_inc(&cifs_file->count);
}
-/* Release a reference on the file private data */
-static inline void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
-{
- if (atomic_dec_and_test(&cifs_file->count)) {
- iput(cifs_file->pInode);
- kfree(cifs_file);
- }
-}
+void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
/*
* One of these for each file inode
@@ -474,16 +509,16 @@ struct oplock_q_entry {
/* for pending dnotify requests */
struct dir_notify_req {
- struct list_head lhead;
- __le16 Pid;
- __le16 PidHigh;
- __u16 Mid;
- __u16 Tid;
- __u16 Uid;
- __u16 netfid;
- __u32 filter; /* CompletionFilter (for multishot) */
- int multishot;
- struct file *pfile;
+ struct list_head lhead;
+ __le16 Pid;
+ __le16 PidHigh;
+ __u16 Mid;
+ __u16 Tid;
+ __u16 Uid;
+ __u16 netfid;
+ __u32 filter; /* CompletionFilter (for multishot) */
+ int multishot;
+ struct file *pfile;
};
struct dfs_info3_param {
@@ -667,7 +702,7 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
* the reference counters for the server, smb session, and tcon. Finally,
* changes to the tcon->tidStatus should be done while holding this lock.
*/
-GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
+GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
/*
* This lock protects the cifs_file->llist and cifs_file->flist
@@ -676,7 +711,7 @@ GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
* If cifs_tcp_ses_lock and the lock below are both needed to be held, then
* the cifs_tcp_ses_lock must be grabbed first and released last.
*/
-GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
+GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
/* Outstanding dir notify requests */
GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
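cifs_sb_tlink(), tlink_tcon() and cifs_put_tlink() declared above define the access pattern that the dir.c and file.c hunks later in this patch follow. A minimal sketch of that pattern, with the per-operation error handling trimmed:

    struct tcon_link *tlink;
    struct cifsTconInfo *tcon;

    tlink = cifs_sb_tlink(cifs_sb);   /* find or build the tcon for current_fsuid() */
    if (IS_ERR(tlink))
            return PTR_ERR(tlink);
    tcon = tlink_tcon(tlink);         /* borrow the tcon for this operation */
    /* ... issue SMB calls against tcon ... */
    cifs_put_tlink(tlink);            /* drop the reference when done */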
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 14d036d8db1..b0f4b5656d4 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -663,7 +663,6 @@ struct ntlmv2_resp {
__le64 time;
__u64 client_chal; /* random */
__u32 reserved2;
- struct ntlmssp2_name names[2];
/* array of name entries could follow ending in minimum 4 byte struct */
} __attribute__((packed));
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1d60c655e3e..e593c40ba7b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -78,9 +78,9 @@ extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
extern bool is_valid_oplock_break(struct smb_hdr *smb,
struct TCP_Server_Info *);
extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
-extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *);
+extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
#ifdef CONFIG_CIFS_EXPERIMENTAL
-extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *);
+extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
#endif
extern unsigned int smbCalcSize(struct smb_hdr *ptr);
extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
@@ -105,12 +105,12 @@ extern u64 cifs_UnixTimeToNT(struct timespec);
extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
int offset);
-extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
- __u16 fileHandle, struct file *file,
- struct vfsmount *mnt, unsigned int oflags);
+extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
+ struct file *file, struct tcon_link *tlink,
+ __u32 oplock);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
struct super_block *sb,
- int mode, int oflags,
+ int mode, unsigned int f_flags,
__u32 *poplock, __u16 *pnetfid, int xid);
void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr);
extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
@@ -362,12 +362,12 @@ extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
__u32 *);
extern int cifs_verify_signature(struct smb_hdr *,
- const struct mac_key *mac_key,
+ const struct session_key *session_key,
__u32 expected_sequence_number);
-extern int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
+extern int cifs_calculate_session_key(struct session_key *key, const char *rn,
const char *pass);
extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *);
-extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+extern int setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
const struct nls_table *);
#ifdef CONFIG_CIFS_WEAK_PW_HASH
extern void calc_lanman_hash(const char *password, const char *cryptkey,
@@ -408,4 +408,8 @@ extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
+extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
+extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+ const unsigned char *path,
+ struct cifs_sb_info *cifs_sb, int xid);
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 7e83b356cc9..e98f1f317b1 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -91,13 +91,13 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
struct list_head *tmp1;
/* list all files open on tree connection and mark them invalid */
- write_lock(&GlobalSMBSeslock);
+ spin_lock(&cifs_file_list_lock);
list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
open_file = list_entry(tmp, struct cifsFileInfo, tlist);
open_file->invalidHandle = true;
open_file->oplock_break_cancelled = true;
}
- write_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
/* BB Add call to invalidate_inodes(sb) for all superblocks mounted
to this tcon */
}
@@ -503,7 +503,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (rsp->EncryptionKeyLength ==
cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
- memcpy(server->cryptKey, rsp->EncryptionKey,
+ memcpy(ses->cryptKey, rsp->EncryptionKey,
CIFS_CRYPTO_KEY_SIZE);
} else if (server->secMode & SECMODE_PW_ENCRYPT) {
rc = -EIO; /* need cryptkey unless plain text */
@@ -574,7 +574,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
server->timeAdj *= 60;
if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
- memcpy(server->cryptKey, pSMBr->u.EncryptionKey,
+ memcpy(ses->cryptKey, pSMBr->u.EncryptionKey,
CIFS_CRYPTO_KEY_SIZE);
} else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
&& (pSMBr->EncryptionKeyLength == 0)) {
@@ -593,9 +593,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
rc = -EIO;
goto neg_err_exit;
}
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
if (server->srv_count > 1) {
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
if (memcmp(server->server_GUID,
pSMBr->u.extended_response.
GUID, 16) != 0) {
@@ -605,7 +605,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
16);
}
} else {
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
memcpy(server->server_GUID,
pSMBr->u.extended_response.GUID, 16);
}
@@ -620,13 +620,15 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
rc = 0;
else
rc = -EINVAL;
-
- if (server->sec_kerberos || server->sec_mskerberos)
- server->secType = Kerberos;
- else if (server->sec_ntlmssp)
- server->secType = RawNTLMSSP;
- else
- rc = -EOPNOTSUPP;
+ if (server->secType == Kerberos) {
+ if (!server->sec_kerberos &&
+ !server->sec_mskerberos)
+ rc = -EOPNOTSUPP;
+ } else if (server->secType == RawNTLMSSP) {
+ if (!server->sec_ntlmssp)
+ rc = -EOPNOTSUPP;
+ } else
+ rc = -EOPNOTSUPP;
}
} else
server->capabilities &= ~CAP_EXTENDED_SECURITY;
diff --git a/fs/cifs/cn_cifs.h b/fs/cifs/cn_cifs.h
deleted file mode 100644
index ea59ccac2eb..00000000000
--- a/fs/cifs/cn_cifs.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * fs/cifs/cn_cifs.h
- *
- * Copyright (c) International Business Machines Corp., 2002
- * Author(s): Steve French (sfrench@us.ibm.com)
- *
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _CN_CIFS_H
-#define _CN_CIFS_H
-#ifdef CONFIG_CIFS_UPCALL
-#include <linux/types.h>
-#include <linux/connector.h>
-
-struct cifs_upcall {
- char signature[4]; /* CIFS */
- enum command {
- CIFS_GET_IP = 0x00000001, /* get ip address for hostname */
- CIFS_GET_SECBLOB = 0x00000002, /* get SPNEGO wrapped blob */
- } command;
- /* union cifs upcall data follows */
-};
-#endif /* CIFS_UPCALL */
-#endif /* _CN_CIFS_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 88c84a38bcc..7e73176acb5 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -47,7 +47,6 @@
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
-#include "cn_cifs.h"
#include "fscache.h"
#define CIFS_PORT 445
@@ -100,16 +99,24 @@ struct smb_vol {
bool noautotune:1;
bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
bool fsc:1; /* enable fscache */
+ bool mfsymlinks:1; /* use Minshall+French Symlinks */
+ bool multiuser:1;
unsigned int rsize;
unsigned int wsize;
bool sockopt_tcp_nodelay:1;
unsigned short int port;
char *prepath;
+ struct sockaddr_storage srcaddr; /* allow binding to a local IP */
struct nls_table *local_nls;
};
+/* FIXME: should these be tunable? */
+#define TLINK_ERROR_EXPIRE (1 * HZ)
+#define TLINK_IDLE_EXPIRE (600 * HZ)
+
static int ipv4_connect(struct TCP_Server_Info *server);
static int ipv6_connect(struct TCP_Server_Info *server);
+static void cifs_prune_tlinks(struct work_struct *work);
/*
* cifs tcp session reconnection
@@ -143,7 +150,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* before reconnecting the tcp session, mark the smb session (uid)
and the tid bad so they are not used until reconnected */
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
ses->need_reconnect = true;
@@ -153,7 +160,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
tcon->need_reconnect = true;
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
/* do not want to be sending data on a socket we are freeing */
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
@@ -166,6 +173,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
sock_release(server->ssocket);
server->ssocket = NULL;
}
+ server->sequence_number = 0;
+ server->session_estab = false;
spin_lock(&GlobalMid_Lock);
list_for_each(tmp, &server->pending_mid_q) {
@@ -198,7 +207,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsGood;
- server->sequence_number = 0;
spin_unlock(&GlobalMid_Lock);
/* atomic_set(&server->inFlight,0);*/
wake_up(&server->response_q);
@@ -629,9 +637,9 @@ multi_t2_fnd:
} /* end while !EXITING */
/* take it off the list, if it's not already */
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_del_init(&server->tcp_ses_list);
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
@@ -669,7 +677,7 @@ multi_t2_fnd:
* BB: we shouldn't have to do any of this. It shouldn't be
* possible to exit from the thread with active SMB sessions
*/
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
if (list_empty(&server->pending_mid_q)) {
/* loop through server session structures attached to this and
mark them dead */
@@ -679,7 +687,7 @@ multi_t2_fnd:
ses->status = CifsExiting;
ses->server = NULL;
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
} else {
/* although we can not zero the server struct pointer yet,
since there are active requests which may depend on them,
@@ -702,7 +710,7 @@ multi_t2_fnd:
}
}
spin_unlock(&GlobalMid_Lock);
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
/* 1/8th of sec is more than enough time for them to exit */
msleep(125);
}
@@ -725,12 +733,12 @@ multi_t2_fnd:
if a crazy root user tried to kill cifsd
kernel thread explicitly this might happen) */
/* BB: This shouldn't be necessary, see above */
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
ses->server = NULL;
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
kfree(server->hostname);
task_to_wake = xchg(&server->tsk, NULL);
@@ -1046,6 +1054,22 @@ cifs_parse_mount_options(char *options, const char *devname,
"long\n");
return 1;
}
+ } else if (strnicmp(data, "srcaddr", 7) == 0) {
+ vol->srcaddr.ss_family = AF_UNSPEC;
+
+ if (!value || !*value) {
+ printk(KERN_WARNING "CIFS: srcaddr value"
+ " not specified.\n");
+ return 1; /* needs_arg; */
+ }
+ i = cifs_convert_address((struct sockaddr *)&vol->srcaddr,
+ value, strlen(value));
+ if (i < 0) {
+ printk(KERN_WARNING "CIFS: Could not parse"
+ " srcaddr: %s\n",
+ value);
+ return 1;
+ }
} else if (strnicmp(data, "prefixpath", 10) == 0) {
if (!value || !*value) {
printk(KERN_WARNING
@@ -1325,6 +1349,10 @@ cifs_parse_mount_options(char *options, const char *devname,
"/proc/fs/cifs/LookupCacheEnabled to 0\n");
} else if (strnicmp(data, "fsc", 3) == 0) {
vol->fsc = true;
+ } else if (strnicmp(data, "mfsymlinks", 10) == 0) {
+ vol->mfsymlinks = true;
+ } else if (strnicmp(data, "multiuser", 8) == 0) {
+ vol->multiuser = true;
} else
printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
data);
@@ -1356,6 +1384,13 @@ cifs_parse_mount_options(char *options, const char *devname,
return 1;
}
}
+
+ if (vol->multiuser && !(vol->secFlg & CIFSSEC_MAY_KRB5)) {
+ cERROR(1, "Multiuser mounts currently require krb5 "
+ "authentication!");
+ return 1;
+ }
+
if (vol->UNCip == NULL)
vol->UNCip = &vol->UNC[2];
@@ -1374,8 +1409,36 @@ cifs_parse_mount_options(char *options, const char *devname,
return 0;
}
+/*
+ * Returns true if both srcaddr and rhs are unspecified, or if srcaddr is
+ * specified and matches the IP address of rhs.
+ */
+static bool
+srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
+{
+ switch (srcaddr->sa_family) {
+ case AF_UNSPEC:
+ return (rhs->sa_family == AF_UNSPEC);
+ case AF_INET: {
+ struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
+ struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
+ return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
+ }
+ case AF_INET6: {
+ struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
+ struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
+ return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
+ }
+ default:
+ WARN_ON(1);
+ return false; /* don't expect to be here */
+ }
+}
+
+
static bool
-match_address(struct TCP_Server_Info *server, struct sockaddr *addr)
+match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
+ struct sockaddr *srcaddr)
{
struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
@@ -1402,6 +1465,9 @@ match_address(struct TCP_Server_Info *server, struct sockaddr *addr)
break;
}
+ if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr))
+ return false;
+
return true;
}
@@ -1458,29 +1524,21 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
{
struct TCP_Server_Info *server;
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
- /*
- * the demux thread can exit on its own while still in CifsNew
- * so don't accept any sockets in that state. Since the
- * tcpStatus never changes back to CifsNew it's safe to check
- * for this without a lock.
- */
- if (server->tcpStatus == CifsNew)
- continue;
-
- if (!match_address(server, addr))
+ if (!match_address(server, addr,
+ (struct sockaddr *)&vol->srcaddr))
continue;
if (!match_security(server, vol))
continue;
++server->srv_count;
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cFYI(1, "Existing tcp session with server found");
return server;
}
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
@@ -1489,14 +1547,14 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
{
struct task_struct *task;
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
if (--server->srv_count > 0) {
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return;
}
list_del_init(&server->tcp_ses_list);
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
@@ -1574,6 +1632,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
memcpy(tcp_ses->server_RFC1001_name,
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
+ tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
@@ -1584,6 +1643,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
* no need to spinlock this init of tcpStatus or srv_count
*/
tcp_ses->tcpStatus = CifsNew;
+ memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
+ sizeof(tcp_ses->srcaddr));
++tcp_ses->srv_count;
if (addr.ss_family == AF_INET6) {
@@ -1618,9 +1679,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
}
/* thread spawned, put it on the list */
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_fscache_get_client_cookie(tcp_ses);
@@ -1642,7 +1703,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
{
struct cifsSesInfo *ses;
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
switch (server->secType) {
case Kerberos:
@@ -1662,10 +1723,10 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
continue;
}
++ses->ses_count;
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return ses;
}
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
@@ -1676,14 +1737,14 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
struct TCP_Server_Info *server = ses->server;
cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count);
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
if (--ses->ses_count > 0) {
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return;
}
list_del_init(&ses->smb_ses_list);
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
if (ses->status == CifsGood) {
xid = GetXid();
@@ -1740,6 +1801,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
if (ses == NULL)
goto get_ses_fail;
+ ses->tilen = 0;
+ ses->tiblob = NULL;
/* new SMB session uses our server ref */
ses->server = server;
if (server->addr.sockAddr6.sin6_family == AF_INET6)
@@ -1778,9 +1841,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
goto get_ses_fail;
/* success, put it on the list */
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_add(&ses->smb_ses_list, &server->smb_ses_list);
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
FreeXid(xid);
return ses;
@@ -1797,7 +1860,7 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
struct list_head *tmp;
struct cifsTconInfo *tcon;
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &ses->tcon_list) {
tcon = list_entry(tmp, struct cifsTconInfo, tcon_list);
if (tcon->tidStatus == CifsExiting)
@@ -1806,10 +1869,10 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
continue;
++tcon->tc_count;
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return tcon;
}
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
@@ -1820,14 +1883,14 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
struct cifsSesInfo *ses = tcon->ses;
cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
if (--tcon->tc_count > 0) {
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return;
}
list_del_init(&tcon->tcon_list);
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
xid = GetXid();
CIFSSMBTDis(xid, tcon);
@@ -1900,9 +1963,9 @@ cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
tcon->nocase = volume_info->nocase;
tcon->local_lease = volume_info->local_lease;
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_add(&tcon->tcon_list, &ses->tcon_list);
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_fscache_get_super_cookie(tcon);
@@ -1913,6 +1976,23 @@ out_fail:
return ERR_PTR(rc);
}
+void
+cifs_put_tlink(struct tcon_link *tlink)
+{
+ if (!tlink || IS_ERR(tlink))
+ return;
+
+ if (!atomic_dec_and_test(&tlink->tl_count) ||
+ test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
+ tlink->tl_time = jiffies;
+ return;
+ }
+
+ if (!IS_ERR(tlink_tcon(tlink)))
+ cifs_put_tcon(tlink_tcon(tlink));
+ kfree(tlink);
+ return;
+}
int
get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
@@ -1997,6 +2077,33 @@ static void rfc1002mangle(char *target, char *source, unsigned int length)
}
+static int
+bind_socket(struct TCP_Server_Info *server)
+{
+ int rc = 0;
+ if (server->srcaddr.ss_family != AF_UNSPEC) {
+ /* Bind to the specified local IP address */
+ struct socket *socket = server->ssocket;
+ rc = socket->ops->bind(socket,
+ (struct sockaddr *) &server->srcaddr,
+ sizeof(server->srcaddr));
+ if (rc < 0) {
+ struct sockaddr_in *saddr4;
+ struct sockaddr_in6 *saddr6;
+ saddr4 = (struct sockaddr_in *)&server->srcaddr;
+ saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
+ if (saddr6->sin6_family == AF_INET6)
+ cERROR(1, "cifs: "
+ "Failed to bind to: %pI6c, error: %d\n",
+ &saddr6->sin6_addr, rc);
+ else
+ cERROR(1, "cifs: "
+ "Failed to bind to: %pI4, error: %d\n",
+ &saddr4->sin_addr.s_addr, rc);
+ }
+ }
+ return rc;
+}
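bind_socket() is where the new srcaddr option takes effect: the socket is bound to the requested local address before ipv4_connect()/ipv6_connect() issue the connect. A user-space analogue of the same bind-before-connect pattern, using an arbitrary example address (the destination sockaddr dst is assumed to be filled in elsewhere):

    int fd = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in src = { .sin_family = AF_INET };

    inet_pton(AF_INET, "192.168.1.50", &src.sin_addr);   /* example srcaddr */
    bind(fd, (struct sockaddr *)&src, sizeof(src));      /* pin the local IP */
    connect(fd, (struct sockaddr *)&dst, sizeof(dst));   /* then dial the server */

This corresponds to mounting with e.g. "-o srcaddr=192.168.1.50" as parsed earlier in this file.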
static int
ipv4_connect(struct TCP_Server_Info *server)
@@ -2022,6 +2129,10 @@ ipv4_connect(struct TCP_Server_Info *server)
cifs_reclassify_socket4(socket);
}
+ rc = bind_socket(server);
+ if (rc < 0)
+ return rc;
+
/* user overrode default port */
if (server->addr.sockAddr.sin_port) {
rc = socket->ops->connect(socket, (struct sockaddr *)
@@ -2184,6 +2295,10 @@ ipv6_connect(struct TCP_Server_Info *server)
cifs_reclassify_socket6(socket);
}
+ rc = bind_socket(server);
+ if (rc < 0)
+ return rc;
+
/* user overrode default port */
if (server->addr.sockAddr6.sin6_port) {
rc = socket->ops->connect(socket,
@@ -2383,6 +2498,8 @@ convert_delimiter(char *path, char delim)
static void setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb)
{
+ INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
+
if (pvolume_info->rsize > CIFSMaxBufSize) {
cERROR(1, "rsize %d too large, using MaxBufSize",
pvolume_info->rsize);
@@ -2462,10 +2579,21 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
if (pvolume_info->fsc)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
+ if (pvolume_info->multiuser)
+ cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
+ CIFS_MOUNT_NO_PERM);
if (pvolume_info->direct_io) {
cFYI(1, "mounting share using direct i/o");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
}
+ if (pvolume_info->mfsymlinks) {
+ if (pvolume_info->sfu_emul) {
+ cERROR(1, "mount option mfsymlinks ignored if sfu "
+ "mount option is used");
+ } else {
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS;
+ }
+ }
if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
cERROR(1, "mount option dynperm ignored if cifsacl "
@@ -2552,6 +2680,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
struct TCP_Server_Info *srvTcp;
char *full_path;
char *mount_data = mount_data_global;
+ struct tcon_link *tlink;
#ifdef CONFIG_CIFS_DFS_UPCALL
struct dfs_info3_param *referrals = NULL;
unsigned int num_referrals = 0;
@@ -2563,6 +2692,7 @@ try_mount_again:
pSesInfo = NULL;
srvTcp = NULL;
full_path = NULL;
+ tlink = NULL;
xid = GetXid();
@@ -2638,8 +2768,6 @@ try_mount_again:
goto remote_path_check;
}
- cifs_sb->tcon = tcon;
-
/* do not care if following two calls succeed - informational */
if (!tcon->ipc) {
CIFSSMBQFSDeviceInfo(xid, tcon);
@@ -2748,6 +2876,38 @@ remote_path_check:
#endif
}
+ if (rc)
+ goto mount_fail_check;
+
+ /* now, hang the tcon off of the superblock */
+ tlink = kzalloc(sizeof *tlink, GFP_KERNEL);
+ if (tlink == NULL) {
+ rc = -ENOMEM;
+ goto mount_fail_check;
+ }
+
+ tlink->tl_index = pSesInfo->linux_uid;
+ tlink->tl_tcon = tcon;
+ tlink->tl_time = jiffies;
+ set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
+ set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+
+ rc = radix_tree_preload(GFP_KERNEL);
+ if (rc == -ENOMEM) {
+ kfree(tlink);
+ goto mount_fail_check;
+ }
+
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
+ radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
+ CIFS_TLINK_MASTER_TAG);
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+ radix_tree_preload_end();
+
+ queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
+ TLINK_IDLE_EXPIRE);
+
mount_fail_check:
/* on error free sesinfo and tcon struct if needed */
if (rc) {
@@ -2825,14 +2985,13 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
#ifdef CONFIG_CIFS_WEAK_PW_HASH
if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
(ses->server->secType == LANMAN))
- calc_lanman_hash(tcon->password, ses->server->cryptKey,
+ calc_lanman_hash(tcon->password, ses->cryptKey,
ses->server->secMode &
SECMODE_PW_ENCRYPT ? true : false,
bcc_ptr);
else
#endif /* CIFS_WEAK_PW_HASH */
- SMBNTencrypt(tcon->password, ses->server->cryptKey,
- bcc_ptr);
+ SMBNTencrypt(tcon->password, ses->cryptKey, bcc_ptr);
bcc_ptr += CIFS_SESS_KEY_SIZE;
if (ses->capabilities & CAP_UNICODE) {
@@ -2934,19 +3093,39 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
int
cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
{
- int rc = 0;
+ int i, ret;
char *tmp;
+ struct tcon_link *tlink[8];
+ unsigned long index = 0;
+
+ cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
+
+ do {
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
+ (void **)tlink, index,
+ ARRAY_SIZE(tlink));
+ /* increment index for next pass */
+ if (ret > 0)
+ index = tlink[ret - 1]->tl_index + 1;
+ for (i = 0; i < ret; i++) {
+ cifs_get_tlink(tlink[i]);
+ clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
+ radix_tree_delete(&cifs_sb->tlink_tree,
+ tlink[i]->tl_index);
+ }
+ spin_unlock(&cifs_sb->tlink_tree_lock);
- if (cifs_sb->tcon)
- cifs_put_tcon(cifs_sb->tcon);
+ for (i = 0; i < ret; i++)
+ cifs_put_tlink(tlink[i]);
+ } while (ret != 0);
- cifs_sb->tcon = NULL;
tmp = cifs_sb->prepath;
cifs_sb->prepathlen = 0;
cifs_sb->prepath = NULL;
kfree(tmp);
- return rc;
+ return 0;
}
int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
@@ -2997,6 +3176,15 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
if (rc) {
cERROR(1, "Send error in SessSetup = %d", rc);
} else {
+ mutex_lock(&ses->server->srv_mutex);
+ if (!server->session_estab) {
+ memcpy(&server->session_key.data,
+ &ses->auth_key.data, ses->auth_key.len);
+ server->session_key.len = ses->auth_key.len;
+ ses->server->session_estab = true;
+ }
+ mutex_unlock(&server->srv_mutex);
+
cFYI(1, "CIFS Session Established successfully");
spin_lock(&GlobalMid_Lock);
ses->status = CifsGood;
@@ -3007,3 +3195,237 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
return rc;
}
+static struct cifsTconInfo *
+cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
+{
+ struct cifsTconInfo *master_tcon = cifs_sb_master_tcon(cifs_sb);
+ struct cifsSesInfo *ses;
+ struct cifsTconInfo *tcon = NULL;
+ struct smb_vol *vol_info;
+ char username[MAX_USERNAME_SIZE + 1];
+
+ vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
+ if (vol_info == NULL) {
+ tcon = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid);
+ vol_info->username = username;
+ vol_info->local_nls = cifs_sb->local_nls;
+ vol_info->linux_uid = fsuid;
+ vol_info->cred_uid = fsuid;
+ vol_info->UNC = master_tcon->treeName;
+ vol_info->retry = master_tcon->retry;
+ vol_info->nocase = master_tcon->nocase;
+ vol_info->local_lease = master_tcon->local_lease;
+ vol_info->no_linux_ext = !master_tcon->unix_ext;
+
+ /* FIXME: allow for other secFlg settings */
+ vol_info->secFlg = CIFSSEC_MUST_KRB5;
+
+ /* get a reference for the same TCP session */
+ spin_lock(&cifs_tcp_ses_lock);
+ ++master_tcon->ses->server->srv_count;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
+ if (IS_ERR(ses)) {
+ tcon = (struct cifsTconInfo *)ses;
+ cifs_put_tcp_session(master_tcon->ses->server);
+ goto out;
+ }
+
+ tcon = cifs_get_tcon(ses, vol_info);
+ if (IS_ERR(tcon)) {
+ cifs_put_smb_ses(ses);
+ goto out;
+ }
+
+ if (ses->capabilities & CAP_UNIX)
+ reset_cifs_unix_caps(0, tcon, NULL, vol_info);
+out:
+ kfree(vol_info);
+
+ return tcon;
+}
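For multiuser mounts, cifs_construct_tcon() builds a per-user session on top of the shared TCP connection; since no per-user password is available it insists on krb5 (CIFSSEC_MUST_KRB5). The generated username is simply the fsuid rendered in hex, e.g. for fsuid 1000:

    char username[MAX_USERNAME_SIZE + 1];

    snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", 1000);
    /* username now holds "krb50x3e8" (0x3e8 == 1000) */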
+
+static struct tcon_link *
+cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
+{
+ struct tcon_link *tlink;
+ unsigned int ret;
+
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
+ 0, 1, CIFS_TLINK_MASTER_TAG);
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+
+ /* the master tcon should always be present */
+ if (ret == 0)
+ BUG();
+
+ return tlink;
+}
+
+struct cifsTconInfo *
+cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
+{
+ return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
+}
+
+static int
+cifs_sb_tcon_pending_wait(void *unused)
+{
+ schedule();
+ return signal_pending(current) ? -ERESTARTSYS : 0;
+}
+
+/*
+ * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
+ * current task.
+ *
+ * If the superblock doesn't refer to a multiuser mount, then just return
+ * the master tcon for the mount.
+ *
+ * First, search the radix tree for an existing tcon for this fsuid. If one
+ * exists, then check to see if it's pending construction. If it is then wait
+ * for construction to complete. Once it's no longer pending, check to see if
+ * it failed and either return an error or retry construction, depending on
+ * the timeout.
+ *
+ * If one doesn't exist then insert a new tcon_link struct into the tree and
+ * try to construct a new one.
+ */
+struct tcon_link *
+cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
+{
+ int ret;
+ unsigned long fsuid = (unsigned long) current_fsuid();
+ struct tcon_link *tlink, *newtlink;
+
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
+
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+ if (tlink)
+ cifs_get_tlink(tlink);
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+
+ if (tlink == NULL) {
+ newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
+ if (newtlink == NULL)
+ return ERR_PTR(-ENOMEM);
+ newtlink->tl_index = fsuid;
+ newtlink->tl_tcon = ERR_PTR(-EACCES);
+ set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
+ set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
+ cifs_get_tlink(newtlink);
+
+ ret = radix_tree_preload(GFP_KERNEL);
+ if (ret != 0) {
+ kfree(newtlink);
+ return ERR_PTR(ret);
+ }
+
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ /* was one inserted after previous search? */
+ tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+ if (tlink) {
+ cifs_get_tlink(tlink);
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+ radix_tree_preload_end();
+ kfree(newtlink);
+ goto wait_for_construction;
+ }
+ ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+ radix_tree_preload_end();
+ if (ret) {
+ kfree(newtlink);
+ return ERR_PTR(ret);
+ }
+ tlink = newtlink;
+ } else {
+wait_for_construction:
+ ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
+ cifs_sb_tcon_pending_wait,
+ TASK_INTERRUPTIBLE);
+ if (ret) {
+ cifs_put_tlink(tlink);
+ return ERR_PTR(ret);
+ }
+
+ /* if it's good, return it */
+ if (!IS_ERR(tlink->tl_tcon))
+ return tlink;
+
+ /* return error if we tried this already recently */
+ if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
+ cifs_put_tlink(tlink);
+ return ERR_PTR(-EACCES);
+ }
+
+ if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
+ goto wait_for_construction;
+ }
+
+ tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
+ clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
+ wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
+
+ if (IS_ERR(tlink->tl_tcon)) {
+ cifs_put_tlink(tlink);
+ return ERR_PTR(-EACCES);
+ }
+
+ return tlink;
+}
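The TCON_LINK_PENDING handling in cifs_sb_tlink() is the usual insert-placeholder/build/wake idiom: exactly one task constructs the tcon while every other task sleeps on the bit. Reduced to its skeleton (obj, OBJ_PENDING, build_payload() and pending_wait_fn() are placeholders, not cifs symbols):

    if (!test_and_set_bit(OBJ_PENDING, &obj->flags)) {
            obj->payload = build_payload();              /* only one task builds */
            clear_bit(OBJ_PENDING, &obj->flags);
            wake_up_bit(&obj->flags, OBJ_PENDING);       /* release the waiters */
    } else {
            wait_on_bit(&obj->flags, OBJ_PENDING,        /* everyone else sleeps */
                        pending_wait_fn, TASK_INTERRUPTIBLE);
    }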
+
+/*
+ * Periodic workqueue job that scans the tlink_tree of a superblock and
+ * closes out tcons whose tcon_link entries have been idle too long.
+ */
+static void
+cifs_prune_tlinks(struct work_struct *work)
+{
+ struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
+ prune_tlinks.work);
+ struct tcon_link *tlink[8];
+ unsigned long now = jiffies;
+ unsigned long index = 0;
+ int i, ret;
+
+ do {
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
+ (void **)tlink, index,
+ ARRAY_SIZE(tlink));
+ /* increment index for next pass */
+ if (ret > 0)
+ index = tlink[ret - 1]->tl_index + 1;
+ for (i = 0; i < ret; i++) {
+ if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
+ atomic_read(&tlink[i]->tl_count) != 0 ||
+ time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
+ now)) {
+ tlink[i] = NULL;
+ continue;
+ }
+ cifs_get_tlink(tlink[i]);
+ clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
+ radix_tree_delete(&cifs_sb->tlink_tree,
+ tlink[i]->tl_index);
+ }
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+
+ for (i = 0; i < ret; i++) {
+ if (tlink[i] != NULL)
+ cifs_put_tlink(tlink[i]);
+ }
+ } while (ret != 0);
+
+ queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
+ TLINK_IDLE_EXPIRE);
+}
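Both cifs_umount() and cifs_prune_tlinks() walk the tlink_tree in batches of eight via radix_tree_gang_lookup(), resuming from one past the last index returned. Stripped of the cifs-specific locking and filtering, the scan loop is roughly (index_of() and visit() are placeholders):

    void *batch[8];
    unsigned long index = 0;
    int i, ret;

    do {
            ret = radix_tree_gang_lookup(&tree, batch, index, ARRAY_SIZE(batch));
            if (ret > 0)
                    index = index_of(batch[ret - 1]) + 1;  /* resume after last hit */
            for (i = 0; i < ret; i++)
                    visit(batch[i]);                       /* per-entry work */
    } while (ret != 0);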
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f9ed0751cc1..3840eddbfb7 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -54,18 +54,18 @@ build_path_from_dentry(struct dentry *direntry)
int dfsplen;
char *full_path;
char dirsep;
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
if (direntry == NULL)
return NULL; /* not much we can do if dentry is freed and
we need to reopen the file after it was closed implicitly
when the server crashed */
- cifs_sb = CIFS_SB(direntry->d_sb);
dirsep = CIFS_DIR_SEP(cifs_sb);
pplen = cifs_sb->prepathlen;
- if (cifs_sb->tcon && (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS))
- dfsplen = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE + 1);
+ if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
+ dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
cifs_bp_rename_retry:
@@ -117,7 +117,7 @@ cifs_bp_rename_retry:
/* BB test paths to Windows with '/' in the midst of prepath */
if (dfsplen) {
- strncpy(full_path, cifs_sb->tcon->treeName, dfsplen);
+ strncpy(full_path, tcon->treeName, dfsplen);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
int i;
for (i = 0; i < dfsplen; i++) {
@@ -130,135 +130,6 @@ cifs_bp_rename_retry:
return full_path;
}
-struct cifsFileInfo *
-cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
- struct file *file, struct vfsmount *mnt, unsigned int oflags)
-{
- int oplock = 0;
- struct cifsFileInfo *pCifsFile;
- struct cifsInodeInfo *pCifsInode;
- struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
-
- pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
- if (pCifsFile == NULL)
- return pCifsFile;
-
- if (oplockEnabled)
- oplock = REQ_OPLOCK;
-
- pCifsFile->netfid = fileHandle;
- pCifsFile->pid = current->tgid;
- pCifsFile->pInode = igrab(newinode);
- pCifsFile->mnt = mnt;
- pCifsFile->pfile = file;
- pCifsFile->invalidHandle = false;
- pCifsFile->closePend = false;
- mutex_init(&pCifsFile->fh_mutex);
- mutex_init(&pCifsFile->lock_mutex);
- INIT_LIST_HEAD(&pCifsFile->llist);
- atomic_set(&pCifsFile->count, 1);
- INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
-
- write_lock(&GlobalSMBSeslock);
- list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
- pCifsInode = CIFS_I(newinode);
- if (pCifsInode) {
- /* if readable file instance put first in list*/
- if (oflags & FMODE_READ)
- list_add(&pCifsFile->flist, &pCifsInode->openFileList);
- else
- list_add_tail(&pCifsFile->flist,
- &pCifsInode->openFileList);
-
- if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
- pCifsInode->clientCanCacheAll = true;
- pCifsInode->clientCanCacheRead = true;
- cFYI(1, "Exclusive Oplock inode %p", newinode);
- } else if ((oplock & 0xF) == OPLOCK_READ)
- pCifsInode->clientCanCacheRead = true;
- }
- write_unlock(&GlobalSMBSeslock);
-
- file->private_data = pCifsFile;
-
- return pCifsFile;
-}
-
-int cifs_posix_open(char *full_path, struct inode **pinode,
- struct super_block *sb, int mode, int oflags,
- __u32 *poplock, __u16 *pnetfid, int xid)
-{
- int rc;
- FILE_UNIX_BASIC_INFO *presp_data;
- __u32 posix_flags = 0;
- struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifs_fattr fattr;
-
- cFYI(1, "posix open %s", full_path);
-
- presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
- if (presp_data == NULL)
- return -ENOMEM;
-
-/* So far cifs posix extensions can only map the following flags.
- There are other valid fmode oflags such as FMODE_LSEEK, FMODE_PREAD, but
- so far we do not seem to need them, and we can treat them as local only */
- if ((oflags & (FMODE_READ | FMODE_WRITE)) ==
- (FMODE_READ | FMODE_WRITE))
- posix_flags = SMB_O_RDWR;
- else if (oflags & FMODE_READ)
- posix_flags = SMB_O_RDONLY;
- else if (oflags & FMODE_WRITE)
- posix_flags = SMB_O_WRONLY;
- if (oflags & O_CREAT)
- posix_flags |= SMB_O_CREAT;
- if (oflags & O_EXCL)
- posix_flags |= SMB_O_EXCL;
- if (oflags & O_TRUNC)
- posix_flags |= SMB_O_TRUNC;
- /* be safe and imply O_SYNC for O_DSYNC */
- if (oflags & O_DSYNC)
- posix_flags |= SMB_O_SYNC;
- if (oflags & O_DIRECTORY)
- posix_flags |= SMB_O_DIRECTORY;
- if (oflags & O_NOFOLLOW)
- posix_flags |= SMB_O_NOFOLLOW;
- if (oflags & O_DIRECT)
- posix_flags |= SMB_O_DIRECT;
-
- mode &= ~current_umask();
- rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode,
- pnetfid, presp_data, poplock, full_path,
- cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc)
- goto posix_open_ret;
-
- if (presp_data->Type == cpu_to_le32(-1))
- goto posix_open_ret; /* open ok, caller does qpathinfo */
-
- if (!pinode)
- goto posix_open_ret; /* caller does not need info */
-
- cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
-
- /* get new inode and set it up */
- if (*pinode == NULL) {
- cifs_fill_uniqueid(sb, &fattr);
- *pinode = cifs_iget(sb, &fattr);
- if (!*pinode) {
- rc = -ENOMEM;
- goto posix_open_ret;
- }
- } else {
- cifs_fattr_to_inode(*pinode, &fattr);
- }
-
-posix_open_ret:
- kfree(presp_data);
- return rc;
-}
-
static void setup_cifs_dentry(struct cifsTconInfo *tcon,
struct dentry *direntry,
struct inode *newinode)
@@ -291,6 +162,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
int desiredAccess = GENERIC_READ | GENERIC_WRITE;
__u16 fileHandle;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *tcon;
char *full_path = NULL;
FILE_ALL_INFO *buf = NULL;
@@ -300,21 +172,26 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
xid = GetXid();
cifs_sb = CIFS_SB(inode->i_sb);
- tcon = cifs_sb->tcon;
-
- full_path = build_path_from_dentry(direntry);
- if (full_path == NULL) {
- rc = -ENOMEM;
- goto cifs_create_out;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ FreeXid(xid);
+ return PTR_ERR(tlink);
}
+ tcon = tlink_tcon(tlink);
if (oplockEnabled)
oplock = REQ_OPLOCK;
if (nd && (nd->flags & LOOKUP_OPEN))
- oflags = nd->intent.open.flags;
+ oflags = nd->intent.open.file->f_flags;
else
- oflags = FMODE_READ | SMB_O_CREAT;
+ oflags = O_RDONLY | O_CREAT;
+
+ full_path = build_path_from_dentry(direntry);
+ if (full_path == NULL) {
+ rc = -ENOMEM;
+ goto cifs_create_out;
+ }
if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
@@ -344,9 +221,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
/* if the file is going to stay open, then we
need to set the desired access properly */
desiredAccess = 0;
- if (oflags & FMODE_READ)
+ if (OPEN_FMODE(oflags) & FMODE_READ)
desiredAccess |= GENERIC_READ; /* is this too little? */
- if (oflags & FMODE_WRITE)
+ if (OPEN_FMODE(oflags) & FMODE_WRITE)
desiredAccess |= GENERIC_WRITE;
if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
@@ -375,7 +252,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
- if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
+ if (tcon->ses->capabilities & CAP_NT_SMBS)
rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
desiredAccess, create_options,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
@@ -467,8 +344,7 @@ cifs_create_set_dentry:
goto cifs_create_out;
}
- pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp,
- nd->path.mnt, oflags);
+ pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock);
if (pfile_info == NULL) {
fput(filp);
CIFSSMBClose(xid, tcon, fileHandle);
@@ -481,6 +357,7 @@ cifs_create_set_dentry:
cifs_create_out:
kfree(buf);
kfree(full_path);
+ cifs_put_tlink(tlink);
FreeXid(xid);
return rc;
}
@@ -491,6 +368,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
int rc = -EPERM;
int xid;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
@@ -503,10 +381,14 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
if (!old_valid_dev(device_number))
return -EINVAL;
- xid = GetXid();
-
cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+
+ pTcon = tlink_tcon(tlink);
+
+ xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -606,6 +488,7 @@ mknod_out:
kfree(full_path);
kfree(buf);
FreeXid(xid);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -619,6 +502,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
__u16 fileHandle = 0;
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
@@ -633,7 +517,12 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
/* check whether path exists */
cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
- pTcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ FreeXid(xid);
+ return (struct dentry *)tlink;
+ }
+ pTcon = tlink_tcon(tlink);
/*
* Don't allow the separator character in a path component.
@@ -644,8 +533,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
for (i = 0; i < direntry->d_name.len; i++)
if (direntry->d_name.name[i] == '\\') {
cFYI(1, "Invalid file name");
- FreeXid(xid);
- return ERR_PTR(-EINVAL);
+ rc = -EINVAL;
+ goto lookup_out;
}
}
@@ -655,7 +544,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
*/
if (nd && (nd->flags & LOOKUP_EXCL)) {
d_instantiate(direntry, NULL);
- return NULL;
+ rc = 0;
+ goto lookup_out;
}
/* can not grab the rename sem here since it would
@@ -663,8 +553,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
in which we already have the sb rename sem */
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
- FreeXid(xid);
- return ERR_PTR(-ENOMEM);
+ rc = -ENOMEM;
+ goto lookup_out;
}
if (direntry->d_inode != NULL) {
@@ -687,11 +577,11 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
if (pTcon->unix_ext) {
if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
- (nd->intent.open.flags & O_CREAT)) {
+ (nd->intent.open.file->f_flags & O_CREAT)) {
rc = cifs_posix_open(full_path, &newInode,
parent_dir_inode->i_sb,
nd->intent.open.create_mode,
- nd->intent.open.flags, &oplock,
+ nd->intent.open.file->f_flags, &oplock,
&fileHandle, xid);
/*
* The check below works around a bug in POSIX
@@ -727,9 +617,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
goto lookup_out;
}
- cfile = cifs_new_fileinfo(newInode, fileHandle, filp,
- nd->path.mnt,
- nd->intent.open.flags);
+ cfile = cifs_new_fileinfo(fileHandle, filp, tlink,
+ oplock);
if (cfile == NULL) {
fput(filp);
CIFSSMBClose(xid, pTcon, fileHandle);
@@ -759,6 +648,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
lookup_out:
kfree(full_path);
+ cifs_put_tlink(tlink);
FreeXid(xid);
return ERR_PTR(rc);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index de748c652d1..8c81e7b14d5 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -60,34 +60,32 @@ static inline int cifs_convert_flags(unsigned int flags)
FILE_READ_DATA);
}
-static inline fmode_t cifs_posix_convert_flags(unsigned int flags)
+static u32 cifs_posix_convert_flags(unsigned int flags)
{
- fmode_t posix_flags = 0;
+ u32 posix_flags = 0;
if ((flags & O_ACCMODE) == O_RDONLY)
- posix_flags = FMODE_READ;
+ posix_flags = SMB_O_RDONLY;
else if ((flags & O_ACCMODE) == O_WRONLY)
- posix_flags = FMODE_WRITE;
- else if ((flags & O_ACCMODE) == O_RDWR) {
- /* GENERIC_ALL is too much permission to request
- can cause unnecessary access denied on create */
- /* return GENERIC_ALL; */
- posix_flags = FMODE_READ | FMODE_WRITE;
- }
- /* can not map O_CREAT or O_EXCL or O_TRUNC flags when
- reopening a file. They had their effect on the original open */
- if (flags & O_APPEND)
- posix_flags |= (fmode_t)O_APPEND;
+ posix_flags = SMB_O_WRONLY;
+ else if ((flags & O_ACCMODE) == O_RDWR)
+ posix_flags = SMB_O_RDWR;
+
+ if (flags & O_CREAT)
+ posix_flags |= SMB_O_CREAT;
+ if (flags & O_EXCL)
+ posix_flags |= SMB_O_EXCL;
+ if (flags & O_TRUNC)
+ posix_flags |= SMB_O_TRUNC;
+ /* be safe and imply O_SYNC for O_DSYNC */
if (flags & O_DSYNC)
- posix_flags |= (fmode_t)O_DSYNC;
- if (flags & __O_SYNC)
- posix_flags |= (fmode_t)__O_SYNC;
+ posix_flags |= SMB_O_SYNC;
if (flags & O_DIRECTORY)
- posix_flags |= (fmode_t)O_DIRECTORY;
+ posix_flags |= SMB_O_DIRECTORY;
if (flags & O_NOFOLLOW)
- posix_flags |= (fmode_t)O_NOFOLLOW;
+ posix_flags |= SMB_O_NOFOLLOW;
if (flags & O_DIRECT)
- posix_flags |= (fmode_t)O_DIRECT;
+ posix_flags |= SMB_O_DIRECT;
return posix_flags;
}
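With this change cifs_posix_convert_flags() hands back SMB_O_* wire flags directly instead of an fmode_t, so callers no longer have to OR in SMB_O_CREAT by hand. For example:

    cifs_posix_convert_flags(O_RDWR | O_CREAT | O_TRUNC);
    /* == SMB_O_RDWR | SMB_O_CREAT | SMB_O_TRUNC */

    cifs_posix_convert_flags(O_WRONLY | O_DSYNC);
    /* == SMB_O_WRONLY | SMB_O_SYNC  (O_DSYNC is widened to SMB_O_SYNC) */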
@@ -106,66 +104,8 @@ static inline int cifs_get_disposition(unsigned int flags)
return FILE_OPEN;
}
-/* all arguments to this function must be checked for validity in caller */
-static inline int
-cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
- struct cifsInodeInfo *pCifsInode, __u32 oplock,
- u16 netfid)
-{
-
- write_lock(&GlobalSMBSeslock);
-
- pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
- if (pCifsInode == NULL) {
- write_unlock(&GlobalSMBSeslock);
- return -EINVAL;
- }
-
- if (pCifsInode->clientCanCacheRead) {
- /* we have the inode open somewhere else
- no need to discard cache data */
- goto psx_client_can_cache;
- }
-
- /* BB FIXME need to fix this check to move it earlier into posix_open
- BB fIX following section BB FIXME */
-
- /* if not oplocked, invalidate inode pages if mtime or file
- size changed */
-/* temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
- if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
- (file->f_path.dentry->d_inode->i_size ==
- (loff_t)le64_to_cpu(buf->EndOfFile))) {
- cFYI(1, "inode unchanged on server");
- } else {
- if (file->f_path.dentry->d_inode->i_mapping) {
- rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
- if (rc != 0)
- CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
- }
- cFYI(1, "invalidating remote inode since open detected it "
- "changed");
- invalidate_remote_inode(file->f_path.dentry->d_inode);
- } */
-
-psx_client_can_cache:
- if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
- pCifsInode->clientCanCacheAll = true;
- pCifsInode->clientCanCacheRead = true;
- cFYI(1, "Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode);
- } else if ((oplock & 0xF) == OPLOCK_READ)
- pCifsInode->clientCanCacheRead = true;
-
- /* will have to change the unlock if we reenable the
- filemap_fdatawrite (which does not seem necessary */
- write_unlock(&GlobalSMBSeslock);
- return 0;
-}
-
-/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode,
- struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
+ struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf,
char *full_path, int xid)
{
struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
@@ -207,16 +147,175 @@ client_can_cache:
rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
xid, NULL);
- if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
+ if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
cFYI(1, "Exclusive Oplock granted on inode %p", inode);
- } else if ((*oplock & 0xF) == OPLOCK_READ)
+ } else if ((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
return rc;
}
+int cifs_posix_open(char *full_path, struct inode **pinode,
+ struct super_block *sb, int mode, unsigned int f_flags,
+ __u32 *poplock, __u16 *pnetfid, int xid)
+{
+ int rc;
+ FILE_UNIX_BASIC_INFO *presp_data;
+ __u32 posix_flags = 0;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_fattr fattr;
+ struct tcon_link *tlink;
+ struct cifsTconInfo *tcon;
+
+ cFYI(1, "posix open %s", full_path);
+
+ presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
+ if (presp_data == NULL)
+ return -ENOMEM;
+
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ goto posix_open_ret;
+ }
+
+ tcon = tlink_tcon(tlink);
+ mode &= ~current_umask();
+
+ posix_flags = cifs_posix_convert_flags(f_flags);
+ rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
+ poplock, full_path, cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cifs_put_tlink(tlink);
+
+ if (rc)
+ goto posix_open_ret;
+
+ if (presp_data->Type == cpu_to_le32(-1))
+ goto posix_open_ret; /* open ok, caller does qpathinfo */
+
+ if (!pinode)
+ goto posix_open_ret; /* caller does not need info */
+
+ cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
+
+ /* get new inode and set it up */
+ if (*pinode == NULL) {
+ cifs_fill_uniqueid(sb, &fattr);
+ *pinode = cifs_iget(sb, &fattr);
+ if (!*pinode) {
+ rc = -ENOMEM;
+ goto posix_open_ret;
+ }
+ } else {
+ cifs_fattr_to_inode(*pinode, &fattr);
+ }
+
+posix_open_ret:
+ kfree(presp_data);
+ return rc;
+}
+
+struct cifsFileInfo *
+cifs_new_fileinfo(__u16 fileHandle, struct file *file,
+ struct tcon_link *tlink, __u32 oplock)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
+ struct cifsFileInfo *pCifsFile;
+
+ pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+ if (pCifsFile == NULL)
+ return pCifsFile;
+
+ pCifsFile->netfid = fileHandle;
+ pCifsFile->pid = current->tgid;
+ pCifsFile->uid = current_fsuid();
+ pCifsFile->dentry = dget(dentry);
+ pCifsFile->f_flags = file->f_flags;
+ pCifsFile->invalidHandle = false;
+ pCifsFile->tlink = cifs_get_tlink(tlink);
+ mutex_init(&pCifsFile->fh_mutex);
+ mutex_init(&pCifsFile->lock_mutex);
+ INIT_LIST_HEAD(&pCifsFile->llist);
+ atomic_set(&pCifsFile->count, 1);
+ INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
+
+ spin_lock(&cifs_file_list_lock);
+ list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
+ /* if readable file instance put first in list*/
+ if (file->f_mode & FMODE_READ)
+ list_add(&pCifsFile->flist, &pCifsInode->openFileList);
+ else
+ list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
+ spin_unlock(&cifs_file_list_lock);
+
+ if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
+ pCifsInode->clientCanCacheAll = true;
+ pCifsInode->clientCanCacheRead = true;
+ cFYI(1, "Exclusive Oplock inode %p", inode);
+ } else if ((oplock & 0xF) == OPLOCK_READ)
+ pCifsInode->clientCanCacheRead = true;
+
+ file->private_data = pCifsFile;
+ return pCifsFile;
+}
+
+/*
+ * Release a reference on the file private data. This may involve closing
+ * the filehandle out on the server.
+ */
+void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+{
+ struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
+ struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
+ struct cifsLockInfo *li, *tmp;
+
+ spin_lock(&cifs_file_list_lock);
+ if (!atomic_dec_and_test(&cifs_file->count)) {
+ spin_unlock(&cifs_file_list_lock);
+ return;
+ }
+
+ /* remove it from the lists */
+ list_del(&cifs_file->flist);
+ list_del(&cifs_file->tlist);
+
+ if (list_empty(&cifsi->openFileList)) {
+ cFYI(1, "closing last open instance for inode %p",
+ cifs_file->dentry->d_inode);
+ cifsi->clientCanCacheRead = false;
+ cifsi->clientCanCacheAll = false;
+ }
+ spin_unlock(&cifs_file_list_lock);
+
+ if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+ int xid, rc;
+
+ xid = GetXid();
+ rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
+ FreeXid(xid);
+ }
+
+ /* Delete any outstanding lock records. We'll lose them when the file
+ * is closed anyway.
+ */
+ mutex_lock(&cifs_file->lock_mutex);
+ list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
+ list_del(&li->llist);
+ kfree(li);
+ }
+ mutex_unlock(&cifs_file->lock_mutex);
+
+ cifs_put_tlink(cifs_file->tlink);
+ dput(cifs_file->dentry);
+ kfree(cifs_file);
+}
+
int cifs_open(struct inode *inode, struct file *file)
{
int rc = -EACCES;
@@ -224,6 +323,7 @@ int cifs_open(struct inode *inode, struct file *file)
__u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
+ struct tcon_link *tlink;
struct cifsFileInfo *pCifsFile = NULL;
struct cifsInodeInfo *pCifsInode;
char *full_path = NULL;
@@ -235,7 +335,12 @@ int cifs_open(struct inode *inode, struct file *file)
xid = GetXid();
cifs_sb = CIFS_SB(inode->i_sb);
- tcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ FreeXid(xid);
+ return PTR_ERR(tlink);
+ }
+ tcon = tlink_tcon(tlink);
pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
@@ -257,27 +362,15 @@ int cifs_open(struct inode *inode, struct file *file)
(tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- int oflags = (int) cifs_posix_convert_flags(file->f_flags);
- oflags |= SMB_O_CREAT;
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, &inode, inode->i_sb,
cifs_sb->mnt_file_mode /* ignored */,
- oflags, &oplock, &netfid, xid);
+ file->f_flags, &oplock, &netfid, xid);
if (rc == 0) {
cFYI(1, "posix open succeeded");
- /* no need for special case handling of setting mode
- on read only files needed here */
- rc = cifs_posix_open_inode_helper(inode, file,
- pCifsInode, oplock, netfid);
- if (rc != 0) {
- CIFSSMBClose(xid, tcon, netfid);
- goto out;
- }
-
- pCifsFile = cifs_new_fileinfo(inode, netfid, file,
- file->f_path.mnt,
- oflags);
+ pCifsFile = cifs_new_fileinfo(netfid, file, tlink,
+ oplock);
if (pCifsFile == NULL) {
CIFSSMBClose(xid, tcon, netfid);
rc = -ENOMEM;
@@ -345,7 +438,7 @@ int cifs_open(struct inode *inode, struct file *file)
goto out;
}
- if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
+ if (tcon->ses->capabilities & CAP_NT_SMBS)
rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
@@ -365,12 +458,11 @@ int cifs_open(struct inode *inode, struct file *file)
goto out;
}
- rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid);
+ rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid);
if (rc != 0)
goto out;
- pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
- file->f_flags);
+ pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
if (pCifsFile == NULL) {
rc = -ENOMEM;
goto out;
@@ -402,6 +494,7 @@ out:
kfree(buf);
kfree(full_path);
FreeXid(xid);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -416,14 +509,13 @@ static int cifs_relock_file(struct cifsFileInfo *cifsFile)
return rc;
}
-static int cifs_reopen_file(struct file *file, bool can_flush)
+static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
int rc = -EACCES;
int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
- struct cifsFileInfo *pCifsFile;
struct cifsInodeInfo *pCifsInode;
struct inode *inode;
char *full_path = NULL;
@@ -431,11 +523,6 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
int disposition = FILE_OPEN;
__u16 netfid;
- if (file->private_data)
- pCifsFile = file->private_data;
- else
- return -EBADF;
-
xid = GetXid();
mutex_lock(&pCifsFile->fh_mutex);
if (!pCifsFile->invalidHandle) {
@@ -445,39 +532,24 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
return rc;
}
- if (file->f_path.dentry == NULL) {
- cERROR(1, "no valid name if dentry freed");
- dump_stack();
- rc = -EBADF;
- goto reopen_error_exit;
- }
-
- inode = file->f_path.dentry->d_inode;
- if (inode == NULL) {
- cERROR(1, "inode not valid");
- dump_stack();
- rc = -EBADF;
- goto reopen_error_exit;
- }
-
+ inode = pCifsFile->dentry->d_inode;
cifs_sb = CIFS_SB(inode->i_sb);
- tcon = cifs_sb->tcon;
+ tcon = tlink_tcon(pCifsFile->tlink);
/* can not grab rename sem here because various ops, including
those that already have the rename sem can end up causing writepage
to get called and if the server was down that means we end up here,
and we can never tell if the caller already has the rename_sem */
- full_path = build_path_from_dentry(file->f_path.dentry);
+ full_path = build_path_from_dentry(pCifsFile->dentry);
if (full_path == NULL) {
rc = -ENOMEM;
-reopen_error_exit:
mutex_unlock(&pCifsFile->fh_mutex);
FreeXid(xid);
return rc;
}
cFYI(1, "inode = 0x%p file flags 0x%x for %s",
- inode, file->f_flags, full_path);
+ inode, pCifsFile->f_flags, full_path);
if (oplockEnabled)
oplock = REQ_OPLOCK;
@@ -487,8 +559,14 @@ reopen_error_exit:
if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
- int oflags = (int) cifs_posix_convert_flags(file->f_flags);
- /* can not refresh inode info since size could be stale */
+
+ /*
+ * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
+ * original open. Must mask them off for a reopen.
+ */
+ unsigned int oflags = pCifsFile->f_flags &
+ ~(O_CREAT | O_EXCL | O_TRUNC);
+
rc = cifs_posix_open(full_path, NULL, inode->i_sb,
cifs_sb->mnt_file_mode /* ignored */,
oflags, &oplock, &netfid, xid);
@@ -500,7 +578,7 @@ reopen_error_exit:
in the reconnect path it is important to retry hard */
}
- desiredAccess = cifs_convert_flags(file->f_flags);
+ desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
/* Can not refresh inode by passing in file_info buf to be returned
by SMBOpen and then calling get_inode_info with returned buf
@@ -516,49 +594,50 @@ reopen_error_exit:
mutex_unlock(&pCifsFile->fh_mutex);
cFYI(1, "cifs_open returned 0x%x", rc);
cFYI(1, "oplock: %d", oplock);
- } else {
+ goto reopen_error_exit;
+ }
+
reopen_success:
- pCifsFile->netfid = netfid;
- pCifsFile->invalidHandle = false;
- mutex_unlock(&pCifsFile->fh_mutex);
- pCifsInode = CIFS_I(inode);
- if (pCifsInode) {
- if (can_flush) {
- rc = filemap_write_and_wait(inode->i_mapping);
- if (rc != 0)
- CIFS_I(inode)->write_behind_rc = rc;
- /* temporarily disable caching while we
- go to server to get inode info */
- pCifsInode->clientCanCacheAll = false;
- pCifsInode->clientCanCacheRead = false;
- if (tcon->unix_ext)
- rc = cifs_get_inode_info_unix(&inode,
- full_path, inode->i_sb, xid);
- else
- rc = cifs_get_inode_info(&inode,
- full_path, NULL, inode->i_sb,
- xid, NULL);
- } /* else we are writing out data to server already
- and could deadlock if we tried to flush data, and
- since we do not know if we have data that would
- invalidate the current end of file on the server
- we can not go to the server to get the new inod
- info */
- if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
- pCifsInode->clientCanCacheAll = true;
- pCifsInode->clientCanCacheRead = true;
- cFYI(1, "Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode);
- } else if ((oplock & 0xF) == OPLOCK_READ) {
- pCifsInode->clientCanCacheRead = true;
- pCifsInode->clientCanCacheAll = false;
- } else {
- pCifsInode->clientCanCacheRead = false;
- pCifsInode->clientCanCacheAll = false;
- }
- cifs_relock_file(pCifsFile);
- }
+ pCifsFile->netfid = netfid;
+ pCifsFile->invalidHandle = false;
+ mutex_unlock(&pCifsFile->fh_mutex);
+ pCifsInode = CIFS_I(inode);
+
+ if (can_flush) {
+ rc = filemap_write_and_wait(inode->i_mapping);
+ if (rc != 0)
+ CIFS_I(inode)->write_behind_rc = rc;
+
+ pCifsInode->clientCanCacheAll = false;
+ pCifsInode->clientCanCacheRead = false;
+ if (tcon->unix_ext)
+ rc = cifs_get_inode_info_unix(&inode,
+ full_path, inode->i_sb, xid);
+ else
+ rc = cifs_get_inode_info(&inode,
+ full_path, NULL, inode->i_sb,
+ xid, NULL);
+ } /* else we are writing out data to the server already and could
+ deadlock if we tried to flush data, and since we do not know
+ whether we have data that would invalidate the current end of
+ file on the server, we can not go to the server to get the new
+ inode info */
+ if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
+ pCifsInode->clientCanCacheAll = true;
+ pCifsInode->clientCanCacheRead = true;
+ cFYI(1, "Exclusive Oplock granted on inode %p",
+ pCifsFile->dentry->d_inode);
+ } else if ((oplock & 0xF) == OPLOCK_READ) {
+ pCifsInode->clientCanCacheRead = true;
+ pCifsInode->clientCanCacheAll = false;
+ } else {
+ pCifsInode->clientCanCacheRead = false;
+ pCifsInode->clientCanCacheAll = false;
}
+ cifs_relock_file(pCifsFile);
+
+reopen_error_exit:
kfree(full_path);
FreeXid(xid);
return rc;
@@ -566,79 +645,11 @@ reopen_success:
int cifs_close(struct inode *inode, struct file *file)
{
- int rc = 0;
- int xid, timeout;
- struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
- struct cifsFileInfo *pSMBFile = file->private_data;
+ cifsFileInfo_put(file->private_data);
+ file->private_data = NULL;
- xid = GetXid();
-
- cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
- if (pSMBFile) {
- struct cifsLockInfo *li, *tmp;
- write_lock(&GlobalSMBSeslock);
- pSMBFile->closePend = true;
- if (pTcon) {
- /* no sense reconnecting to close a file that is
- already closed */
- if (!pTcon->need_reconnect) {
- write_unlock(&GlobalSMBSeslock);
- timeout = 2;
- while ((atomic_read(&pSMBFile->count) != 1)
- && (timeout <= 2048)) {
- /* Give write a better chance to get to
- server ahead of the close. We do not
- want to add a wait_q here as it would
- increase the memory utilization as
- the struct would be in each open file,
- but this should give enough time to
- clear the socket */
- cFYI(DBG2, "close delay, write pending");
- msleep(timeout);
- timeout *= 4;
- }
- if (!pTcon->need_reconnect &&
- !pSMBFile->invalidHandle)
- rc = CIFSSMBClose(xid, pTcon,
- pSMBFile->netfid);
- } else
- write_unlock(&GlobalSMBSeslock);
- } else
- write_unlock(&GlobalSMBSeslock);
-
- /* Delete any outstanding lock records.
- We'll lose them when the file is closed anyway. */
- mutex_lock(&pSMBFile->lock_mutex);
- list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
- list_del(&li->llist);
- kfree(li);
- }
- mutex_unlock(&pSMBFile->lock_mutex);
-
- write_lock(&GlobalSMBSeslock);
- list_del(&pSMBFile->flist);
- list_del(&pSMBFile->tlist);
- write_unlock(&GlobalSMBSeslock);
- cifsFileInfo_put(file->private_data);
- file->private_data = NULL;
- } else
- rc = -EBADF;
-
- read_lock(&GlobalSMBSeslock);
- if (list_empty(&(CIFS_I(inode)->openFileList))) {
- cFYI(1, "closing last open instance for inode %p", inode);
- /* if the file is not open we do not know if we can cache info
- on this inode, much less write behind and read ahead */
- CIFS_I(inode)->clientCanCacheRead = false;
- CIFS_I(inode)->clientCanCacheAll = false;
- }
- read_unlock(&GlobalSMBSeslock);
- if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
- rc = CIFS_I(inode)->write_behind_rc;
- FreeXid(xid);
- return rc;
+ /* return code from the ->release op is always ignored */
+ return 0;
}
int cifs_closedir(struct inode *inode, struct file *file)
@@ -653,25 +664,21 @@ int cifs_closedir(struct inode *inode, struct file *file)
xid = GetXid();
if (pCFileStruct) {
- struct cifsTconInfo *pTcon;
- struct cifs_sb_info *cifs_sb =
- CIFS_SB(file->f_path.dentry->d_sb);
-
- pTcon = cifs_sb->tcon;
+ struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
cFYI(1, "Freeing private data in close dir");
- write_lock(&GlobalSMBSeslock);
+ spin_lock(&cifs_file_list_lock);
if (!pCFileStruct->srch_inf.endOfSearch &&
!pCFileStruct->invalidHandle) {
pCFileStruct->invalidHandle = true;
- write_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
cFYI(1, "Closing uncompleted readdir with rc %d",
rc);
/* not much we can do if it fails anyway, ignore rc */
rc = 0;
} else
- write_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
if (ptmp) {
cFYI(1, "closedir free smb buf in srch struct");
@@ -681,6 +688,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
else
cifs_buf_release(ptmp);
}
+ cifs_put_tlink(pCFileStruct->tlink);
kfree(file->private_data);
file->private_data = NULL;
}
@@ -767,7 +775,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
cFYI(1, "Unknown type of lock");
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- tcon = cifs_sb->tcon;
+ tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
if (file->private_data == NULL) {
rc = -EBADF;
@@ -960,14 +968,14 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- pTcon = cifs_sb->tcon;
-
/* cFYI(1, " write %d bytes to offset %lld of %s", write_size,
*poffset, file->f_path.dentry->d_name.name); */
if (file->private_data == NULL)
return -EBADF;
+
open_file = file->private_data;
+ pTcon = tlink_tcon(open_file->tlink);
rc = generic_write_checks(file, poffset, &write_size, 0);
if (rc)
@@ -988,19 +996,12 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
we blocked so return what we managed to write */
return total_written;
}
- if (open_file->closePend) {
- FreeXid(xid);
- if (total_written)
- return total_written;
- else
- return -EBADF;
- }
if (open_file->invalidHandle) {
/* we could deadlock if we called
filemap_fdatawait from here so tell
reopen_file not to flush data to server
now */
- rc = cifs_reopen_file(file, false);
+ rc = cifs_reopen_file(open_file, false);
if (rc != 0)
break;
}
@@ -1048,8 +1049,9 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
return total_written;
}
-static ssize_t cifs_write(struct file *file, const char *write_data,
- size_t write_size, loff_t *poffset)
+static ssize_t cifs_write(struct cifsFileInfo *open_file,
+ const char *write_data, size_t write_size,
+ loff_t *poffset)
{
int rc = 0;
unsigned int bytes_written = 0;
@@ -1057,19 +1059,15 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
int xid, long_op;
- struct cifsFileInfo *open_file;
- struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
+ struct dentry *dentry = open_file->dentry;
+ struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
- cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
- pTcon = cifs_sb->tcon;
+ cifs_sb = CIFS_SB(dentry->d_sb);
cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
- *poffset, file->f_path.dentry->d_name.name);
+ *poffset, dentry->d_name.name);
- if (file->private_data == NULL)
- return -EBADF;
- open_file = file->private_data;
+ pTcon = tlink_tcon(open_file->tlink);
xid = GetXid();
@@ -1078,28 +1076,12 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
total_written += bytes_written) {
rc = -EAGAIN;
while (rc == -EAGAIN) {
- if (file->private_data == NULL) {
- /* file has been closed on us */
- FreeXid(xid);
- /* if we have gotten here we have written some data
- and blocked, and the file has been freed on us
- while we blocked so return what we managed to
- write */
- return total_written;
- }
- if (open_file->closePend) {
- FreeXid(xid);
- if (total_written)
- return total_written;
- else
- return -EBADF;
- }
if (open_file->invalidHandle) {
/* we could deadlock if we called
filemap_fdatawait from here so tell
reopen_file not to flush data to
server now */
- rc = cifs_reopen_file(file, false);
+ rc = cifs_reopen_file(open_file, false);
if (rc != 0)
break;
}
@@ -1146,43 +1128,41 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
cifs_stats_bytes_written(pTcon, total_written);
- /* since the write may have blocked check these pointers again */
- if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
-/*BB We could make this contingent on superblock ATIME flag too */
-/* file->f_path.dentry->d_inode->i_ctime =
- file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
- if (total_written > 0) {
- spin_lock(&file->f_path.dentry->d_inode->i_lock);
- if (*poffset > file->f_path.dentry->d_inode->i_size)
- i_size_write(file->f_path.dentry->d_inode,
- *poffset);
- spin_unlock(&file->f_path.dentry->d_inode->i_lock);
- }
- mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+ if (total_written > 0) {
+ spin_lock(&dentry->d_inode->i_lock);
+ if (*poffset > dentry->d_inode->i_size)
+ i_size_write(dentry->d_inode, *poffset);
+ spin_unlock(&dentry->d_inode->i_lock);
}
+ mark_inode_dirty_sync(dentry->d_inode);
FreeXid(xid);
return total_written;
}
#ifdef CONFIG_CIFS_EXPERIMENTAL
-struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
+struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ bool fsuid_only)
{
struct cifsFileInfo *open_file = NULL;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+
+ /* only filter by fsuid on multiuser mounts */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ fsuid_only = false;
- read_lock(&GlobalSMBSeslock);
+ spin_lock(&cifs_file_list_lock);
/* we could simply get the first_list_entry since write-only entries
are always at the end of the list but since the first entry might
have a close pending, we go through the whole list */
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
- if (open_file->closePend)
+ if (fsuid_only && open_file->uid != current_fsuid())
continue;
- if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
- (open_file->pfile->f_flags & O_RDONLY))) {
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
if (!open_file->invalidHandle) {
/* found a good file */
/* lock it so it will not be closed on us */
cifsFileInfo_get(open_file);
- read_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
return open_file;
} /* else might as well continue, and look for
another, or simply have the caller reopen it
@@ -1190,14 +1170,16 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
} else /* write only file */
break; /* write only files are last so must be done */
}
- read_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
return NULL;
}
#endif
-struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
+struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
+ bool fsuid_only)
{
struct cifsFileInfo *open_file;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
bool any_available = false;
int rc;
@@ -1211,53 +1193,39 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
return NULL;
}
- read_lock(&GlobalSMBSeslock);
+ /* only filter by fsuid on multiuser mounts */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
+ fsuid_only = false;
+
+ spin_lock(&cifs_file_list_lock);
refind_writable:
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
- if (open_file->closePend ||
- (!any_available && open_file->pid != current->tgid))
+ if (!any_available && open_file->pid != current->tgid)
continue;
-
- if (open_file->pfile &&
- ((open_file->pfile->f_flags & O_RDWR) ||
- (open_file->pfile->f_flags & O_WRONLY))) {
+ if (fsuid_only && open_file->uid != current_fsuid())
+ continue;
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
cifsFileInfo_get(open_file);
if (!open_file->invalidHandle) {
/* found a good writable file */
- read_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
return open_file;
}
- read_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
+
/* Had to unlock since following call can block */
- rc = cifs_reopen_file(open_file->pfile, false);
- if (!rc) {
- if (!open_file->closePend)
- return open_file;
- else { /* start over in case this was deleted */
- /* since the list could be modified */
- read_lock(&GlobalSMBSeslock);
- cifsFileInfo_put(open_file);
- goto refind_writable;
- }
- }
+ rc = cifs_reopen_file(open_file, false);
+ if (!rc)
+ return open_file;
- /* if it fails, try another handle if possible -
- (we can not do this if closePending since
- loop could be modified - in which case we
- have to start at the beginning of the list
- again. Note that it would be bad
- to hold up writepages here (rather than
- in caller) with continuous retries */
+ /* if it fails, try another handle if possible */
cFYI(1, "wp failed on reopen file");
- read_lock(&GlobalSMBSeslock);
- /* can not use this handle, no write
- pending on this one after all */
cifsFileInfo_put(open_file);
- if (open_file->closePend) /* list could have changed */
- goto refind_writable;
+ spin_lock(&cifs_file_list_lock);
+
/* else we simply continue to the next entry. Thus
we do not loop on reopen errors. If we
can not reopen the file, for example if we
@@ -1272,7 +1240,7 @@ refind_writable:
any_available = true;
goto refind_writable;
}
- read_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
return NULL;
}
@@ -1284,7 +1252,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
int rc = -EFAULT;
int bytes_written = 0;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
struct inode *inode;
struct cifsFileInfo *open_file;
@@ -1293,7 +1260,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
inode = page->mapping->host;
cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
offset += (loff_t)from;
write_data = kmap(page);
@@ -1314,10 +1280,10 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
if (mapping->host->i_size - offset < (loff_t)to)
to = (unsigned)(mapping->host->i_size - offset);
- open_file = find_writable_file(CIFS_I(mapping->host));
+ open_file = find_writable_file(CIFS_I(mapping->host), false);
if (open_file) {
- bytes_written = cifs_write(open_file->pfile, write_data,
- to-from, &offset);
+ bytes_written = cifs_write(open_file, write_data,
+ to - from, &offset);
cifsFileInfo_put(open_file);
/* Does mm or vfs already set times? */
inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
@@ -1352,6 +1318,7 @@ static int cifs_writepages(struct address_space *mapping,
int nr_pages;
__u64 offset = 0;
struct cifsFileInfo *open_file;
+ struct cifsTconInfo *tcon;
struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
struct page *page;
struct pagevec pvec;
@@ -1359,6 +1326,15 @@ static int cifs_writepages(struct address_space *mapping,
int scanned = 0;
int xid, long_op;
+ /*
+ * BB: Is this meaningful for a non-block-device file system?
+ * If it is, we should test it again after we do I/O
+ */
+ if (wbc->nonblocking && bdi_write_congested(bdi)) {
+ wbc->encountered_congestion = 1;
+ return 0;
+ }
+
cifs_sb = CIFS_SB(mapping->host->i_sb);
/*
@@ -1368,27 +1344,29 @@ static int cifs_writepages(struct address_space *mapping,
if (cifs_sb->wsize < PAGE_CACHE_SIZE)
return generic_writepages(mapping, wbc);
- if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
- if (cifs_sb->tcon->ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- if (!experimEnabled)
- return generic_writepages(mapping, wbc);
-
iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
if (iov == NULL)
return generic_writepages(mapping, wbc);
-
/*
- * BB: Is this meaningful for a non-block-device file system?
- * If it is, we should test it again after we do I/O
+ * if there's no open file, then this is likely to fail too,
+ * but it'll at least handle the return. Maybe it should be
+ * a BUG() instead?
*/
- if (wbc->nonblocking && bdi_write_congested(bdi)) {
- wbc->encountered_congestion = 1;
+ open_file = find_writable_file(CIFS_I(mapping->host), false);
+ if (!open_file) {
kfree(iov);
- return 0;
+ return generic_writepages(mapping, wbc);
}
+ tcon = tlink_tcon(open_file->tlink);
+ if (!experimEnabled && tcon->ses->server->secMode &
+ (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
+ cifsFileInfo_put(open_file);
+ return generic_writepages(mapping, wbc);
+ }
+ cifsFileInfo_put(open_file);
+
xid = GetXid();
pagevec_init(&pvec, 0);
@@ -1492,38 +1470,34 @@ retry:
break;
}
if (n_iov) {
- /* Search for a writable handle every time we call
- * CIFSSMBWrite2. We can't rely on the last handle
- * we used to still be valid
- */
- open_file = find_writable_file(CIFS_I(mapping->host));
+ open_file = find_writable_file(CIFS_I(mapping->host),
+ false);
if (!open_file) {
cERROR(1, "No writable handles for inode");
rc = -EBADF;
} else {
long_op = cifs_write_timeout(cifsi, offset);
- rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
- open_file->netfid,
+ rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
bytes_to_write, offset,
&bytes_written, iov, n_iov,
long_op);
cifsFileInfo_put(open_file);
cifs_update_eof(cifsi, offset, bytes_written);
+ }
- if (rc || bytes_written < bytes_to_write) {
- cERROR(1, "Write2 ret %d, wrote %d",
- rc, bytes_written);
- /* BB what if continued retry is
- requested via mount flags? */
- if (rc == -ENOSPC)
- set_bit(AS_ENOSPC, &mapping->flags);
- else
- set_bit(AS_EIO, &mapping->flags);
- } else {
- cifs_stats_bytes_written(cifs_sb->tcon,
- bytes_written);
- }
+ if (rc || bytes_written < bytes_to_write) {
+ cERROR(1, "Write2 ret %d, wrote %d",
+ rc, bytes_written);
+ /* BB what if continued retry is
+ requested via mount flags? */
+ if (rc == -ENOSPC)
+ set_bit(AS_ENOSPC, &mapping->flags);
+ else
+ set_bit(AS_EIO, &mapping->flags);
+ } else {
+ cifs_stats_bytes_written(tcon, bytes_written);
}
+
for (i = 0; i < n_iov; i++) {
page = pvec.pages[first + i];
/* Should we also set page error on
@@ -1624,7 +1598,8 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
/* BB check if anything else missing out of ppw
such as updating last write time */
page_data = kmap(page);
- rc = cifs_write(file, page_data + offset, copied, &pos);
+ rc = cifs_write(file->private_data, page_data + offset,
+ copied, &pos);
/* if (rc < 0) should we set writebehind rc? */
kunmap(page);
@@ -1665,7 +1640,7 @@ int cifs_fsync(struct file *file, int datasync)
if (rc == 0) {
rc = CIFS_I(inode)->write_behind_rc;
CIFS_I(inode)->write_behind_rc = 0;
- tcon = CIFS_SB(inode->i_sb)->tcon;
+ tcon = tlink_tcon(smbfile->tlink);
if (!rc && tcon && smbfile &&
!(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
@@ -1750,7 +1725,6 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- pTcon = cifs_sb->tcon;
if (file->private_data == NULL) {
rc = -EBADF;
@@ -1758,6 +1732,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
return rc;
}
open_file = file->private_data;
+ pTcon = tlink_tcon(open_file->tlink);
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
@@ -1771,9 +1746,8 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
smb_read_data = NULL;
while (rc == -EAGAIN) {
int buf_type = CIFS_NO_BUFFER;
- if ((open_file->invalidHandle) &&
- (!open_file->closePend)) {
- rc = cifs_reopen_file(file, true);
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, true);
if (rc != 0)
break;
}
@@ -1831,7 +1805,6 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- pTcon = cifs_sb->tcon;
if (file->private_data == NULL) {
rc = -EBADF;
@@ -1839,6 +1812,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
return rc;
}
open_file = file->private_data;
+ pTcon = tlink_tcon(open_file->tlink);
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
@@ -1857,9 +1831,8 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
}
rc = -EAGAIN;
while (rc == -EAGAIN) {
- if ((open_file->invalidHandle) &&
- (!open_file->closePend)) {
- rc = cifs_reopen_file(file, true);
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, true);
if (rc != 0)
break;
}
@@ -1974,7 +1947,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
open_file = file->private_data;
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- pTcon = cifs_sb->tcon;
+ pTcon = tlink_tcon(open_file->tlink);
/*
* Reads as many pages as possible from fscache. Returns -ENOBUFS
@@ -2022,9 +1995,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
read_size, contig_pages);
rc = -EAGAIN;
while (rc == -EAGAIN) {
- if ((open_file->invalidHandle) &&
- (!open_file->closePend)) {
- rc = cifs_reopen_file(file, true);
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, true);
if (rc != 0)
break;
}
@@ -2173,18 +2145,14 @@ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *open_file;
- read_lock(&GlobalSMBSeslock);
+ spin_lock(&cifs_file_list_lock);
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
- if (open_file->closePend)
- continue;
- if (open_file->pfile &&
- ((open_file->pfile->f_flags & O_RDWR) ||
- (open_file->pfile->f_flags & O_WRONLY))) {
- read_unlock(&GlobalSMBSeslock);
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+ spin_unlock(&cifs_file_list_lock);
return 1;
}
}
- read_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
return 0;
}
@@ -2310,9 +2278,8 @@ void cifs_oplock_break(struct work_struct *work)
{
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
oplock_break);
- struct inode *inode = cfile->pInode;
+ struct inode *inode = cfile->dentry->d_inode;
struct cifsInodeInfo *cinode = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
int rc, waitrc = 0;
if (inode && S_ISREG(inode->i_mode)) {
@@ -2338,9 +2305,9 @@ void cifs_oplock_break(struct work_struct *work)
* not bother sending an oplock release if session to server still is
* disconnected since oplock already released by the server
*/
- if (!cfile->closePend && !cfile->oplock_break_cancelled) {
- rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
- LOCKING_ANDX_OPLOCK_RELEASE, false);
+ if (!cfile->oplock_break_cancelled) {
+ rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
+ 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
cFYI(1, "Oplock release rc = %d", rc);
}
@@ -2349,22 +2316,22 @@ void cifs_oplock_break(struct work_struct *work)
* finished grabbing reference for us. Make sure it's done by
* waiting for cifs_file_list_lock.
*/
- write_lock(&GlobalSMBSeslock);
- write_unlock(&GlobalSMBSeslock);
+ spin_lock(&cifs_file_list_lock);
+ spin_unlock(&cifs_file_list_lock);
cifs_oplock_break_put(cfile);
}
void cifs_oplock_break_get(struct cifsFileInfo *cfile)
{
- mntget(cfile->mnt);
+ cifs_sb_active(cfile->dentry->d_sb);
cifsFileInfo_get(cfile);
}
void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
- mntput(cfile->mnt);
cifsFileInfo_put(cfile);
+ cifs_sb_deactive(cfile->dentry->d_sb);
}
const struct address_space_operations cifs_addr_ops = {
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 9f3f5c4be16..a2ad94efcfe 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -62,15 +62,15 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
{
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
if (cifsi->fscache)
return;
- cifsi->fscache = fscache_acquire_cookie(cifs_sb->tcon->fscache,
- &cifs_fscache_inode_object_def,
- cifsi);
- cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)",
- cifs_sb->tcon->fscache, cifsi->fscache);
+ cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
+ &cifs_fscache_inode_object_def, cifsi);
+ cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache,
+ cifsi->fscache);
}
void cifs_fscache_release_inode_cookie(struct inode *inode)
@@ -117,7 +117,8 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
/* retire the current fscache cache and get a new one */
fscache_relinquish_cookie(cifsi->fscache, 1);
- cifsi->fscache = fscache_acquire_cookie(cifs_sb->tcon->fscache,
+ cifsi->fscache = fscache_acquire_cookie(
+ cifs_sb_master_tcon(cifs_sb)->fscache,
&cifs_fscache_inode_object_def,
cifsi);
cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p",
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 53cce8cc222..94979309698 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -52,7 +52,7 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
/* check if server can support readpages */
- if (cifs_sb->tcon->ses->server->maxBuf <
+ if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
@@ -288,8 +288,8 @@ int cifs_get_file_info_unix(struct file *filp)
struct cifs_fattr fattr;
struct inode *inode = filp->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *tcon = cifs_sb->tcon;
struct cifsFileInfo *cfile = filp->private_data;
+ struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
xid = GetXid();
rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -313,15 +313,21 @@ int cifs_get_inode_info_unix(struct inode **pinode,
FILE_UNIX_BASIC_INFO find_data;
struct cifs_fattr fattr;
struct cifsTconInfo *tcon;
+ struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- tcon = cifs_sb->tcon;
cFYI(1, "Getting info on %s", full_path);
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
/* could have done a find first instead but this returns more info */
rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cifs_put_tlink(tlink);
if (!rc) {
cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
@@ -332,6 +338,13 @@ int cifs_get_inode_info_unix(struct inode **pinode,
return rc;
}
+ /* check for Minshall+French symlinks */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+ int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+ if (tmprc)
+ cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
+ }
+
if (*pinode == NULL) {
/* get new inode */
cifs_fill_uniqueid(sb, &fattr);
@@ -353,7 +366,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
int rc;
int oplock = 0;
__u16 netfid;
- struct cifsTconInfo *pTcon = cifs_sb->tcon;
+ struct tcon_link *tlink;
+ struct cifsTconInfo *tcon;
char buf[24];
unsigned int bytes_read;
char *pbuf;
@@ -372,7 +386,12 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
return -EINVAL; /* EOPNOTSUPP? */
}
- rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
+ rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, GENERIC_READ,
CREATE_NOT_DIR, &netfid, &oplock, NULL,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
@@ -380,7 +399,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
if (rc == 0) {
int buf_type = CIFS_NO_BUFFER;
/* Read header */
- rc = CIFSSMBRead(xid, pTcon, netfid,
+ rc = CIFSSMBRead(xid, tcon, netfid,
24 /* length */, 0 /* offset */,
&bytes_read, &pbuf, &buf_type);
if ((rc == 0) && (bytes_read >= 8)) {
@@ -422,8 +441,9 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
fattr->cf_dtype = DT_REG;
rc = -EOPNOTSUPP; /* or some unknown SFU type */
}
- CIFSSMBClose(xid, pTcon, netfid);
+ CIFSSMBClose(xid, tcon, netfid);
}
+ cifs_put_tlink(tlink);
return rc;
}
@@ -441,11 +461,19 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
ssize_t rc;
char ea_value[4];
__u32 mode;
+ struct tcon_link *tlink;
+ struct cifsTconInfo *tcon;
- rc = CIFSSMBQAllEAs(xid, cifs_sb->tcon, path, "SETFILEBITS",
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
+ rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS",
ea_value, 4 /* size of buf */, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cifs_put_tlink(tlink);
if (rc < 0)
return (int)rc;
else if (rc > 3) {
@@ -468,6 +496,8 @@ static void
cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
struct cifs_sb_info *cifs_sb, bool adjust_tz)
{
+ struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+
memset(fattr, 0, sizeof(*fattr));
fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
if (info->DeletePending)
@@ -482,8 +512,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);
if (adjust_tz) {
- fattr->cf_ctime.tv_sec += cifs_sb->tcon->ses->server->timeAdj;
- fattr->cf_mtime.tv_sec += cifs_sb->tcon->ses->server->timeAdj;
+ fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj;
+ fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
}
fattr->cf_eof = le64_to_cpu(info->EndOfFile);
@@ -515,8 +545,8 @@ int cifs_get_file_info(struct file *filp)
struct cifs_fattr fattr;
struct inode *inode = filp->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *tcon = cifs_sb->tcon;
struct cifsFileInfo *cfile = filp->private_data;
+ struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
xid = GetXid();
rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -554,26 +584,33 @@ int cifs_get_inode_info(struct inode **pinode,
{
int rc = 0, tmprc;
struct cifsTconInfo *pTcon;
+ struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
char *buf = NULL;
bool adjustTZ = false;
struct cifs_fattr fattr;
- pTcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
cFYI(1, "Getting info on %s", full_path);
if ((pfindData == NULL) && (*pinode != NULL)) {
if (CIFS_I(*pinode)->clientCanCacheRead) {
cFYI(1, "No need to revalidate cached inode sizes");
- return rc;
+ goto cgii_exit;
}
}
/* if file info not passed in then get it from server */
if (pfindData == NULL) {
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
+ if (buf == NULL) {
+ rc = -ENOMEM;
+ goto cgii_exit;
+ }
pfindData = (FILE_ALL_INFO *)buf;
/* could do find first instead but this returns more info */
@@ -661,6 +698,13 @@ int cifs_get_inode_info(struct inode **pinode,
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
cifs_sfu_mode(&fattr, full_path, cifs_sb, xid);
+ /* check for Minshall+French symlinks */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
+ tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+ if (tmprc)
+ cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
+ }
+
if (!*pinode) {
*pinode = cifs_iget(sb, &fattr);
if (!*pinode)
@@ -671,6 +715,7 @@ int cifs_get_inode_info(struct inode **pinode,
cgii_exit:
kfree(buf);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -683,6 +728,7 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
int pplen = cifs_sb->prepathlen;
int dfsplen;
char *full_path = NULL;
+ struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
/* if no prefix path, simply set path to the root of share to "" */
if (pplen == 0) {
@@ -692,8 +738,8 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
return full_path;
}
- if (cifs_sb->tcon && (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS))
- dfsplen = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE + 1);
+ if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
+ dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
@@ -702,7 +748,7 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb)
return full_path;
if (dfsplen) {
- strncpy(full_path, cifs_sb->tcon->treeName, dfsplen);
+ strncpy(full_path, tcon->treeName, dfsplen);
/* switch slash direction in prepath depending on whether
* windows or posix style path names
*/
@@ -818,18 +864,18 @@ retry_iget5_locked:
struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
{
int xid;
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct inode *inode = NULL;
long rc;
char *full_path;
+ struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
- cifs_sb = CIFS_SB(sb);
full_path = cifs_build_path_to_root(cifs_sb);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
xid = GetXid();
- if (cifs_sb->tcon->unix_ext)
+ if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
else
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
@@ -840,10 +886,10 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
#ifdef CONFIG_CIFS_FSCACHE
/* populate tcon->resource_id */
- cifs_sb->tcon->resource_id = CIFS_I(inode)->uniqueid;
+ tcon->resource_id = CIFS_I(inode)->uniqueid;
#endif
- if (rc && cifs_sb->tcon->ipc) {
+ if (rc && tcon->ipc) {
cFYI(1, "ipc connection - fake read inode");
inode->i_mode |= S_IFDIR;
inode->i_nlink = 2;
@@ -879,7 +925,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
struct cifsFileInfo *open_file;
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *pTcon = cifs_sb->tcon;
+ struct tcon_link *tlink = NULL;
+ struct cifsTconInfo *pTcon;
FILE_BASIC_INFO info_buf;
if (attrs == NULL)
@@ -918,13 +965,22 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
/*
* If the file is already open for write, just use that fileid
*/
- open_file = find_writable_file(cifsInode);
+ open_file = find_writable_file(cifsInode, true);
if (open_file) {
netfid = open_file->netfid;
netpid = open_file->pid;
+ pTcon = tlink_tcon(open_file->tlink);
goto set_via_filehandle;
}
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ tlink = NULL;
+ goto out;
+ }
+ pTcon = tlink_tcon(tlink);
+
/*
* NT4 apparently returns success on this call, but it doesn't
* really work.
@@ -968,6 +1024,8 @@ set_via_filehandle:
else
cifsFileInfo_put(open_file);
out:
+ if (tlink != NULL)
+ cifs_put_tlink(tlink);
return rc;
}
@@ -985,10 +1043,16 @@ cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
struct inode *inode = dentry->d_inode;
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *tcon = cifs_sb->tcon;
+ struct tcon_link *tlink;
+ struct cifsTconInfo *tcon;
__u32 dosattr, origattr;
FILE_BASIC_INFO *info_buf = NULL;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
DELETE|FILE_WRITE_ATTRIBUTES, CREATE_NOT_DIR,
&netfid, &oplock, NULL, cifs_sb->local_nls,
@@ -1057,6 +1121,7 @@ out_close:
CIFSSMBClose(xid, tcon, netfid);
out:
kfree(info_buf);
+ cifs_put_tlink(tlink);
return rc;
/*
@@ -1096,12 +1161,18 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
struct cifsInodeInfo *cifs_inode;
struct super_block *sb = dir->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- struct cifsTconInfo *tcon = cifs_sb->tcon;
+ struct tcon_link *tlink;
+ struct cifsTconInfo *tcon;
struct iattr *attrs = NULL;
__u32 dosattr = 0, origattr = 0;
cFYI(1, "cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry);
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
xid = GetXid();
/* Unlink can be called from rename so we can not take the
@@ -1109,8 +1180,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
full_path = build_path_from_dentry(dentry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto unlink_out;
}
if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -1176,10 +1246,11 @@ out_reval:
dir->i_ctime = dir->i_mtime = current_fs_time(sb);
cifs_inode = CIFS_I(dir);
CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
-
+unlink_out:
kfree(full_path);
kfree(attrs);
FreeXid(xid);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -1188,6 +1259,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
int rc = 0, tmprc;
int xid;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
@@ -1195,16 +1267,18 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
cFYI(1, "In cifs_mkdir, mode = 0x%x inode = 0x%p", mode, inode);
- xid = GetXid();
-
cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
+ xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto mkdir_out;
}
if ((pTcon->ses->capabilities & CAP_UNIX) &&
@@ -1362,6 +1436,7 @@ mkdir_get_info:
mkdir_out:
kfree(full_path);
FreeXid(xid);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -1370,6 +1445,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
int rc = 0;
int xid;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
@@ -1378,18 +1454,23 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
xid = GetXid();
- cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
-
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto rmdir_exit;
}
+ cifs_sb = CIFS_SB(inode->i_sb);
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ goto rmdir_exit;
+ }
+ pTcon = tlink_tcon(tlink);
+
rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cifs_put_tlink(tlink);
if (!rc) {
drop_nlink(inode);
@@ -1410,6 +1491,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
+rmdir_exit:
kfree(full_path);
FreeXid(xid);
return rc;
@@ -1420,10 +1502,16 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
struct dentry *to_dentry, const char *toPath)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
- struct cifsTconInfo *pTcon = cifs_sb->tcon;
+ struct tcon_link *tlink;
+ struct cifsTconInfo *pTcon;
__u16 srcfid;
int oplock, rc;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
/* try path-based rename first */
rc = CIFSSMBRename(xid, pTcon, fromPath, toPath, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
@@ -1435,11 +1523,11 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
* rename by filehandle to various Windows servers.
*/
if (rc == 0 || rc != -ETXTBSY)
- return rc;
+ goto do_rename_exit;
/* open-file renames don't work across directories */
if (to_dentry->d_parent != from_dentry->d_parent)
- return rc;
+ goto do_rename_exit;
/* open the file to be renamed -- we need DELETE perms */
rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE,
@@ -1455,7 +1543,8 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
CIFSSMBClose(xid, pTcon, srcfid);
}
-
+do_rename_exit:
+ cifs_put_tlink(tlink);
return rc;
}
@@ -1465,13 +1554,17 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
char *fromName = NULL;
char *toName = NULL;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *tcon;
FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
FILE_UNIX_BASIC_INFO *info_buf_target;
int xid, rc, tmprc;
cifs_sb = CIFS_SB(source_dir->i_sb);
- tcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
xid = GetXid();
@@ -1547,6 +1640,7 @@ cifs_rename_exit:
kfree(fromName);
kfree(toName);
FreeXid(xid);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -1599,11 +1693,12 @@ int cifs_revalidate_file(struct file *filp)
{
int rc = 0;
struct inode *inode = filp->f_path.dentry->d_inode;
+ struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
if (!cifs_inode_needs_reval(inode))
goto check_inval;
- if (CIFS_SB(inode->i_sb)->tcon->unix_ext)
+ if (tlink_tcon(cfile->tlink)->unix_ext)
rc = cifs_get_file_info_unix(filp);
else
rc = cifs_get_file_info(filp);
@@ -1644,7 +1739,7 @@ int cifs_revalidate_dentry(struct dentry *dentry)
"jiffies %ld", full_path, inode, inode->i_count.counter,
dentry, dentry->d_time, jiffies);
- if (CIFS_SB(sb)->tcon->unix_ext)
+ if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
else
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
@@ -1660,13 +1755,29 @@ check_inval:
}
int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat)
+ struct kstat *stat)
{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
+ struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
int err = cifs_revalidate_dentry(dentry);
+
if (!err) {
generic_fillattr(dentry->d_inode, stat);
stat->blksize = CIFS_MAX_MSGSIZE;
stat->ino = CIFS_I(dentry->d_inode)->uniqueid;
+
+ /*
+ * If on a multiuser mount without unix extensions, and the
+ * admin hasn't overridden them, set the ownership to the
+ * fsuid/fsgid of the current process.
+ */
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) &&
+ !tcon->unix_ext) {
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID))
+ stat->uid = current_fsuid();
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
+ stat->gid = current_fsgid();
+ }
}
return err;
}
@@ -1708,7 +1819,8 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
struct cifsFileInfo *open_file;
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *pTcon = cifs_sb->tcon;
+ struct tcon_link *tlink = NULL;
+ struct cifsTconInfo *pTcon = NULL;
/*
* To avoid spurious oplock breaks from server, in the case of
@@ -1719,10 +1831,11 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
* writebehind data than the SMB timeout for the SetPathInfo
* request would allow
*/
- open_file = find_writable_file(cifsInode);
+ open_file = find_writable_file(cifsInode, true);
if (open_file) {
__u16 nfid = open_file->netfid;
__u32 npid = open_file->pid;
+ pTcon = tlink_tcon(open_file->tlink);
rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid,
npid, false);
cifsFileInfo_put(open_file);
@@ -1737,6 +1850,13 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
rc = -EINVAL;
if (rc != 0) {
+ if (pTcon == NULL) {
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+ }
+
/* Set file size by pathname rather than by handle
either because no valid, writeable file handle for
it was found or because there was an error setting
@@ -1766,6 +1886,8 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
CIFSSMBClose(xid, pTcon, netfid);
}
}
+ if (tlink)
+ cifs_put_tlink(tlink);
}
if (rc == 0) {
@@ -1786,7 +1908,8 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
struct inode *inode = direntry->d_inode;
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *pTcon = cifs_sb->tcon;
+ struct tcon_link *tlink;
+ struct cifsTconInfo *pTcon;
struct cifs_unix_set_info_args *args = NULL;
struct cifsFileInfo *open_file;
@@ -1873,17 +1996,25 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
args->ctime = NO_CHANGE_64;
args->device = 0;
- open_file = find_writable_file(cifsInode);
+ open_file = find_writable_file(cifsInode, true);
if (open_file) {
u16 nfid = open_file->netfid;
u32 npid = open_file->pid;
+ pTcon = tlink_tcon(open_file->tlink);
rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
cifsFileInfo_put(open_file);
} else {
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ goto out;
+ }
+ pTcon = tlink_tcon(tlink);
rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ cifs_put_tlink(tlink);
}
if (rc)
@@ -2064,7 +2195,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
struct inode *inode = direntry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *pTcon = cifs_sb->tcon;
+ struct cifsTconInfo *pTcon = cifs_sb_master_tcon(cifs_sb);
if (pTcon->unix_ext)
return cifs_setattr_unix(direntry, attrs);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 9d38a71c8e1..077bf756f34 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -37,11 +37,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
int xid;
struct cifs_sb_info *cifs_sb;
#ifdef CONFIG_CIFS_POSIX
+ struct cifsFileInfo *pSMBFile = filep->private_data;
+ struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink);
__u64 ExtAttrBits = 0;
__u64 ExtAttrMask = 0;
- __u64 caps;
- struct cifsTconInfo *tcon;
- struct cifsFileInfo *pSMBFile = filep->private_data;
+ __u64 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
#endif /* CONFIG_CIFS_POSIX */
xid = GetXid();
@@ -50,17 +50,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
cifs_sb = CIFS_SB(inode->i_sb);
-#ifdef CONFIG_CIFS_POSIX
- tcon = cifs_sb->tcon;
- if (tcon)
- caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
- else {
- rc = -EIO;
- FreeXid(xid);
- return -EIO;
- }
-#endif /* CONFIG_CIFS_POSIX */
-
switch (command) {
case CIFS_IOC_CHECKUMOUNT:
cFYI(1, "User unmount attempted");
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 473ca803365..85cdbf831e7 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -28,6 +28,296 @@
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
+#include "md5.h"
+
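+/*
+ * A Minshall+French symlink is stored as a regular file of exactly 1067
+ * bytes: the string "XSym\n", a four-digit decimal length of the link
+ * target, a newline, the 32-character hex MD5 of the target, a newline,
+ * and finally the target itself, newline-terminated and space-padded to
+ * fill the remaining 1024 bytes.
+ */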
+#define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
+#define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
+#define CIFS_MF_SYMLINK_LINK_OFFSET (CIFS_MF_SYMLINK_MD5_OFFSET+(32+1))
+#define CIFS_MF_SYMLINK_LINK_MAXLEN (1024)
+#define CIFS_MF_SYMLINK_FILE_SIZE \
+ (CIFS_MF_SYMLINK_LINK_OFFSET + CIFS_MF_SYMLINK_LINK_MAXLEN)
+
+#define CIFS_MF_SYMLINK_LEN_FORMAT "XSym\n%04u\n"
+#define CIFS_MF_SYMLINK_MD5_FORMAT \
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n"
+#define CIFS_MF_SYMLINK_MD5_ARGS(md5_hash) \
+ md5_hash[0], md5_hash[1], md5_hash[2], md5_hash[3], \
+ md5_hash[4], md5_hash[5], md5_hash[6], md5_hash[7], \
+ md5_hash[8], md5_hash[9], md5_hash[10], md5_hash[11],\
+ md5_hash[12], md5_hash[13], md5_hash[14], md5_hash[15]
+
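+/*
+ * Parse the contents of an MF symlink file: recompute the MD5 of the
+ * stored link target, compare it against the hash recorded in the file,
+ * and on a match return the target length and (optionally) a kstrndup'd
+ * copy of the target string.
+ */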
+static int
+CIFSParseMFSymlink(const u8 *buf,
+ unsigned int buf_len,
+ unsigned int *_link_len,
+ char **_link_str)
+{
+ int rc;
+ unsigned int link_len;
+ const char *md5_str1;
+ const char *link_str;
+ struct MD5Context md5_ctx;
+ u8 md5_hash[16];
+ char md5_str2[34];
+
+ if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
+ return -EINVAL;
+
+ md5_str1 = (const char *)&buf[CIFS_MF_SYMLINK_MD5_OFFSET];
+ link_str = (const char *)&buf[CIFS_MF_SYMLINK_LINK_OFFSET];
+
+ rc = sscanf(buf, CIFS_MF_SYMLINK_LEN_FORMAT, &link_len);
+ if (rc != 1)
+ return -EINVAL;
+
+ cifs_MD5_init(&md5_ctx);
+ cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
+ cifs_MD5_final(md5_hash, &md5_ctx);
+
+ snprintf(md5_str2, sizeof(md5_str2),
+ CIFS_MF_SYMLINK_MD5_FORMAT,
+ CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
+
+ if (strncmp(md5_str1, md5_str2, 17) != 0)
+ return -EINVAL;
+
+ if (_link_str) {
+ *_link_str = kstrndup(link_str, link_len, GFP_KERNEL);
+ if (!*_link_str)
+ return -ENOMEM;
+ }
+
+ *_link_len = link_len;
+ return 0;
+}
+
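+/*
+ * Fill a CIFS_MF_SYMLINK_FILE_SIZE buffer with the fixed MF symlink file
+ * format for link_str: length header, MD5 of the target, the target
+ * itself, then newline and space padding out to the fixed file size.
+ */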
+static int
+CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
+{
+ unsigned int link_len;
+ unsigned int ofs;
+ struct MD5Context md5_ctx;
+ u8 md5_hash[16];
+
+ if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
+ return -EINVAL;
+
+ link_len = strlen(link_str);
+
+ if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
+ return -ENAMETOOLONG;
+
+ cifs_MD5_init(&md5_ctx);
+ cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
+ cifs_MD5_final(md5_hash, &md5_ctx);
+
+ snprintf(buf, buf_len,
+ CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
+ link_len,
+ CIFS_MF_SYMLINK_MD5_ARGS(md5_hash));
+
+ ofs = CIFS_MF_SYMLINK_LINK_OFFSET;
+ memcpy(buf + ofs, link_str, link_len);
+
+ ofs += link_len;
+ if (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {
+ buf[ofs] = '\n';
+ ofs++;
+ }
+
+ while (ofs < CIFS_MF_SYMLINK_FILE_SIZE) {
+ buf[ofs] = ' ';
+ ofs++;
+ }
+
+ return 0;
+}
+
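+/*
+ * Create an MF symlink on the server: format the buffer for toName and
+ * write it into a newly created file at fromName. A short write is
+ * treated as -EIO.
+ */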
+static int
+CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
+ const char *fromName, const char *toName,
+ const struct nls_table *nls_codepage, int remap)
+{
+ int rc;
+ int oplock = 0;
+ __u16 netfid = 0;
+ u8 *buf;
+ unsigned int bytes_written = 0;
+
+ buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rc = CIFSFormatMFSymlink(buf, CIFS_MF_SYMLINK_FILE_SIZE, toName);
+ if (rc != 0) {
+ kfree(buf);
+ return rc;
+ }
+
+ rc = CIFSSMBOpen(xid, tcon, fromName, FILE_CREATE, GENERIC_WRITE,
+ CREATE_NOT_DIR, &netfid, &oplock, NULL,
+ nls_codepage, remap);
+ if (rc != 0) {
+ kfree(buf);
+ return rc;
+ }
+
+ rc = CIFSSMBWrite(xid, tcon, netfid,
+ CIFS_MF_SYMLINK_FILE_SIZE /* length */,
+ 0 /* offset */,
+ &bytes_written, buf, NULL, 0);
+ CIFSSMBClose(xid, tcon, netfid);
+ kfree(buf);
+ if (rc != 0)
+ return rc;
+
+ if (bytes_written != CIFS_MF_SYMLINK_FILE_SIZE)
+ return -EIO;
+
+ return 0;
+}
+
+static int
+CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
+ const unsigned char *searchName, char **symlinkinfo,
+ const struct nls_table *nls_codepage, int remap)
+{
+ int rc;
+ int oplock = 0;
+ __u16 netfid = 0;
+ u8 *buf;
+ char *pbuf;
+ unsigned int bytes_read = 0;
+ int buf_type = CIFS_NO_BUFFER;
+ unsigned int link_len = 0;
+ FILE_ALL_INFO file_info;
+
+ rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ,
+ CREATE_NOT_DIR, &netfid, &oplock, &file_info,
+ nls_codepage, remap);
+ if (rc != 0)
+ return rc;
+
+ if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+ CIFSSMBClose(xid, tcon, netfid);
+ /* it's not a symlink */
+ return -EINVAL;
+ }
+
+ buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pbuf = buf;
+
+ rc = CIFSSMBRead(xid, tcon, netfid,
+ CIFS_MF_SYMLINK_FILE_SIZE /* length */,
+ 0 /* offset */,
+ &bytes_read, &pbuf, &buf_type);
+ CIFSSMBClose(xid, tcon, netfid);
+ if (rc != 0) {
+ kfree(buf);
+ return rc;
+ }
+
+ rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, symlinkinfo);
+ kfree(buf);
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+bool
+CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
+{
+ if (!(fattr->cf_mode & S_IFREG))
+ /* it's not a symlink */
+ return false;
+
+ if (fattr->cf_eof != CIFS_MF_SYMLINK_FILE_SIZE)
+ /* it's not a symlink */
+ return false;
+
+ return true;
+}
+
+int
+CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+ const unsigned char *path,
+ struct cifs_sb_info *cifs_sb, int xid)
+{
+ int rc;
+ int oplock = 0;
+ __u16 netfid = 0;
+ struct tcon_link *tlink;
+ struct cifsTconInfo *pTcon;
+ u8 *buf;
+ char *pbuf;
+ unsigned int bytes_read = 0;
+ int buf_type = CIFS_NO_BUFFER;
+ unsigned int link_len = 0;
+ FILE_ALL_INFO file_info;
+
+ if (!CIFSCouldBeMFSymlink(fattr))
+ /* it's not a symlink */
+ return 0;
+
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
+ rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+ CREATE_NOT_DIR, &netfid, &oplock, &file_info,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ if (rc != 0)
+ goto out;
+
+ if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+ CIFSSMBClose(xid, pTcon, netfid);
+ /* it's not a symlink */
+ goto out;
+ }
+
+ buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ pbuf = buf;
+
+ rc = CIFSSMBRead(xid, pTcon, netfid,
+ CIFS_MF_SYMLINK_FILE_SIZE /* length */,
+ 0 /* offset */,
+ &bytes_read, &pbuf, &buf_type);
+ CIFSSMBClose(xid, pTcon, netfid);
+ if (rc != 0) {
+ kfree(buf);
+ goto out;
+ }
+
+ rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
+ kfree(buf);
+ if (rc == -EINVAL) {
+ /* it's not a symlink */
+ rc = 0;
+ goto out;
+ }
+
+ if (rc != 0)
+ goto out;
+
+ /* it is a symlink */
+ fattr->cf_eof = link_len;
+ fattr->cf_mode &= ~S_IFMT;
+ fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
+ fattr->cf_dtype = DT_LNK;
+out:
+ cifs_put_tlink(tlink);
+ return rc;
+}
int
cifs_hardlink(struct dentry *old_file, struct inode *inode,
@@ -37,17 +327,17 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
int xid;
char *fromName = NULL;
char *toName = NULL;
- struct cifs_sb_info *cifs_sb_target;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
struct cifsInodeInfo *cifsInode;
- xid = GetXid();
-
- cifs_sb_target = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb_target->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
-/* No need to check for cross device links since server will do that
- BB note DFS case in future though (when we may have to check) */
+ xid = GetXid();
fromName = build_path_from_dentry(old_file);
toName = build_path_from_dentry(direntry);
@@ -56,16 +346,15 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
goto cifs_hl_exit;
}
-/* if (cifs_sb_target->tcon->ses->capabilities & CAP_UNIX)*/
if (pTcon->unix_ext)
rc = CIFSUnixCreateHardLink(xid, pTcon, fromName, toName,
- cifs_sb_target->local_nls,
- cifs_sb_target->mnt_cifs_flags &
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
else {
rc = CIFSCreateHardLink(xid, pTcon, fromName, toName,
- cifs_sb_target->local_nls,
- cifs_sb_target->mnt_cifs_flags &
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if ((rc == -EIO) || (rc == -EINVAL))
rc = -EOPNOTSUPP;
@@ -101,6 +390,7 @@ cifs_hl_exit:
kfree(fromName);
kfree(toName);
FreeXid(xid);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -113,10 +403,19 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
char *full_path = NULL;
char *target_path = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifsTconInfo *tcon = cifs_sb->tcon;
+ struct tcon_link *tlink = NULL;
+ struct cifsTconInfo *tcon;
xid = GetXid();
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ tlink = NULL;
+ goto out;
+ }
+ tcon = tlink_tcon(tlink);
+
/*
* For now, we just handle symlinks with unix extensions enabled.
* Eventually we should handle NTFS reparse points, and MacOS
@@ -130,7 +429,8 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
* but there doesn't seem to be any harm in allowing the client to
* read them.
*/
- if (!(tcon->ses->capabilities & CAP_UNIX)) {
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
+ && !(tcon->ses->capabilities & CAP_UNIX)) {
rc = -EACCES;
goto out;
}
@@ -141,8 +441,21 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
cFYI(1, "Full path: %s inode = 0x%p", full_path, inode);
- rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
- cifs_sb->local_nls);
+ rc = -EACCES;
+ /*
+ * First try Minshall+French Symlinks, if configured,
+ * and fall back to UNIX Extensions Symlinks.
+ */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
+ rc = CIFSQueryMFSymLink(xid, tcon, full_path, &target_path,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+
+ if ((rc != 0) && (tcon->ses->capabilities & CAP_UNIX))
+ rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
+ cifs_sb->local_nls);
+
kfree(full_path);
out:
if (rc != 0) {
@@ -151,6 +464,8 @@ out:
}
FreeXid(xid);
+ if (tlink)
+ cifs_put_tlink(tlink);
nd_set_link(nd, target_path);
return NULL;
}
@@ -160,29 +475,37 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
{
int rc = -EOPNOTSUPP;
int xid;
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
xid = GetXid();
- cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ goto symlink_exit;
+ }
+ pTcon = tlink_tcon(tlink);
full_path = build_path_from_dentry(direntry);
-
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto symlink_exit;
}
cFYI(1, "Full path: %s", full_path);
cFYI(1, "symname is %s", symname);
/* BB what if DFS and this volume is on different share? BB */
- if (pTcon->unix_ext)
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
+ rc = CIFSCreateMFSymLink(xid, pTcon, full_path, symname,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ else if (pTcon->unix_ext)
rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
cifs_sb->local_nls);
/* else
@@ -208,8 +531,9 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
d_instantiate(direntry, newinode);
}
}
-
+symlink_exit:
kfree(full_path);
+ cifs_put_tlink(tlink);
FreeXid(xid);
return rc;
}
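The hardlink, follow_link and symlink hunks above all make the same conversion: instead of reading cifs_sb->tcon directly, each operation takes a tcon_link reference for its lifetime and drops it on every exit path. A condensed kernel-style sketch of that pattern, using the helpers introduced by the patch (the function name cifs_some_op is hypothetical):

static int cifs_some_op(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink;
	struct cifsTconInfo *tcon;
	int xid, rc = 0;

	tlink = cifs_sb_tlink(cifs_sb);		/* may return ERR_PTR() */
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	xid = GetXid();
	/* ... issue SMB requests against tcon ... */
	FreeXid(xid);

	cifs_put_tlink(tlink);			/* drop the reference on every path */
	return rc;
}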
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 3ccadc1326d..1c681f6a680 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -347,7 +347,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
if (current_fsuid() != treeCon->ses->linux_uid) {
cFYI(1, "Multiuser mode and UID "
"did not match tcon uid");
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
if (ses->linux_uid == current_fsuid()) {
@@ -361,7 +361,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
}
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
}
}
}
@@ -551,7 +551,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
return false;
/* look up tcon based on tid & uid */
- read_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp, &srv->smb_ses_list) {
ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
list_for_each(tmp1, &ses->tcon_list) {
@@ -560,25 +560,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
continue;
cifs_stats_inc(&tcon->num_oplock_brks);
- read_lock(&GlobalSMBSeslock);
+ spin_lock(&cifs_file_list_lock);
list_for_each(tmp2, &tcon->openFileList) {
netfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
if (pSMB->Fid != netfile->netfid)
continue;
- /*
- * don't do anything if file is about to be
- * closed anyway.
- */
- if (netfile->closePend) {
- read_unlock(&GlobalSMBSeslock);
- read_unlock(&cifs_tcp_ses_lock);
- return true;
- }
-
cFYI(1, "file id match, oplock break");
- pCifsInode = CIFS_I(netfile->pInode);
+ pCifsInode = CIFS_I(netfile->dentry->d_inode);
pCifsInode->clientCanCacheAll = false;
if (pSMB->OplockLevel == 0)
pCifsInode->clientCanCacheRead = false;
@@ -594,17 +584,17 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
cifs_oplock_break_get(netfile);
netfile->oplock_break_cancelled = false;
- read_unlock(&GlobalSMBSeslock);
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return true;
}
- read_unlock(&GlobalSMBSeslock);
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cFYI(1, "No matching file for oplock break");
return true;
}
}
- read_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cFYI(1, "Can not process oplock break for non-existent connection");
return true;
}
@@ -729,6 +719,6 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
"properly. Hardlinks will not be recognized on this "
"mount. Consider mounting with the \"noserverino\" "
"option to silence this message.",
- cifs_sb->tcon->treeName);
+ cifs_sb_master_tcon(cifs_sb)->treeName);
}
}
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 49c9a4e7531..5d52e4a3b1e 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -61,6 +61,21 @@
#define NTLMSSP_NEGOTIATE_KEY_XCH 0x40000000
#define NTLMSSP_NEGOTIATE_56 0x80000000
+/* Define AV Pair Field IDs */
+enum av_field_type {
+ NTLMSSP_AV_EOL = 0,
+ NTLMSSP_AV_NB_COMPUTER_NAME,
+ NTLMSSP_AV_NB_DOMAIN_NAME,
+ NTLMSSP_AV_DNS_COMPUTER_NAME,
+ NTLMSSP_AV_DNS_DOMAIN_NAME,
+ NTLMSSP_AV_DNS_TREE_NAME,
+ NTLMSSP_AV_FLAGS,
+ NTLMSSP_AV_TIMESTAMP,
+ NTLMSSP_AV_RESTRICTION,
+ NTLMSSP_AV_TARGET_NAME,
+ NTLMSSP_AV_CHANNEL_BINDINGS
+};
+
/* Although typedefs are not commonly used for structure definitions */
/* in the Linux kernel, in this particular case they are useful */
/* to more closely match the standards document for NTLMSSP from */
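Each entry in the NTLMSSP target info blob that these ids describe is a little-endian TLV: a 16-bit AvId from the enum above, a 16-bit AvLen, then AvLen bytes of value, with the list terminated by an NTLMSSP_AV_EOL entry. A hedged user-space sketch of walking such a blob (not part of the patch; assumes a little-endian host):

#include <stdint.h>
#include <string.h>

static void walk_av_pairs(const unsigned char *blob, unsigned int len)
{
	unsigned int off = 0;

	while (off + 4 <= len) {
		uint16_t id, vlen;

		memcpy(&id, blob + off, 2);		/* AvId, little-endian  */
		memcpy(&vlen, blob + off + 2, 2);	/* AvLen, little-endian */
		if (id == 0)				/* NTLMSSP_AV_EOL       */
			break;
		/* value is vlen bytes starting at blob + off + 4 */
		off += 4 + vlen;
	}
}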
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index d5e591fab47..ef7bb7b50f5 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -102,7 +102,7 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
return NULL;
}
- if (CIFS_SB(sb)->tcon->nocase)
+ if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase)
dentry->d_op = &cifs_ci_dentry_ops;
else
dentry->d_op = &cifs_dentry_ops;
@@ -171,7 +171,7 @@ static void
cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
struct cifs_sb_info *cifs_sb)
{
- int offset = cifs_sb->tcon->ses->server->timeAdj;
+ int offset = cifs_sb_master_tcon(cifs_sb)->ses->server->timeAdj;
memset(fattr, 0, sizeof(*fattr));
fattr->cf_atime = cnvrtDosUnixTm(info->LastAccessDate,
@@ -199,7 +199,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
int len;
int oplock = 0;
int rc;
- struct cifsTconInfo *ptcon = cifs_sb->tcon;
+ struct cifsTconInfo *ptcon = cifs_sb_tcon(cifs_sb);
char *tmpbuffer;
rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
@@ -223,34 +223,35 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
static int initiate_cifs_search(const int xid, struct file *file)
{
int rc = 0;
- char *full_path;
+ char *full_path = NULL;
struct cifsFileInfo *cifsFile;
- struct cifs_sb_info *cifs_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
- if (file->private_data == NULL) {
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
+ if (file->private_data == NULL)
file->private_data =
kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+ if (file->private_data == NULL) {
+ rc = -ENOMEM;
+ goto error_exit;
}
- if (file->private_data == NULL)
- return -ENOMEM;
cifsFile = file->private_data;
cifsFile->invalidHandle = true;
cifsFile->srch_inf.endOfSearch = false;
-
- cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- if (cifs_sb == NULL)
- return -EINVAL;
-
- pTcon = cifs_sb->tcon;
- if (pTcon == NULL)
- return -EINVAL;
+ cifsFile->tlink = cifs_get_tlink(tlink);
full_path = build_path_from_dentry(file->f_path.dentry);
-
- if (full_path == NULL)
- return -ENOMEM;
+ if (full_path == NULL) {
+ rc = -ENOMEM;
+ goto error_exit;
+ }
cFYI(1, "Full path: %s start at: %lld", full_path, file->f_pos);
@@ -283,7 +284,9 @@ ffirst_retry:
cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
goto ffirst_retry;
}
+error_exit:
kfree(full_path);
+ cifs_put_tlink(tlink);
return rc;
}
@@ -525,14 +528,14 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
(index_to_find < first_entry_in_buffer)) {
/* close and restart search */
cFYI(1, "search backing up - close and restart search");
- write_lock(&GlobalSMBSeslock);
+ spin_lock(&cifs_file_list_lock);
if (!cifsFile->srch_inf.endOfSearch &&
!cifsFile->invalidHandle) {
cifsFile->invalidHandle = true;
- write_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
CIFSFindClose(xid, pTcon, cifsFile->netfid);
} else
- write_unlock(&GlobalSMBSeslock);
+ spin_unlock(&cifs_file_list_lock);
if (cifsFile->srch_inf.ntwrk_buf_start) {
cFYI(1, "freeing SMB ff cache buf on search rewind");
if (cifsFile->srch_inf.smallBuf)
@@ -738,6 +741,15 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
cifs_autodisable_serverino(cifs_sb);
}
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
+ CIFSCouldBeMFSymlink(&fattr))
+ /*
+ * trying to get the type and mode can be slow,
+ * so just treat those as regular files for now
+ * and mark them for revalidation
+ */
+ fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
+
ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid);
tmp_dentry = cifs_readdir_lookup(file->f_dentry, &qstring, &fattr);
@@ -777,9 +789,17 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
- pTcon = cifs_sb->tcon;
- if (pTcon == NULL)
- return -EINVAL;
+
+ /*
+ * Ensure FindFirst doesn't fail before doing filldir() for '.' and
+ * '..'. Otherwise we won't be able to notify VFS in case of failure.
+ */
+ if (file->private_data == NULL) {
+ rc = initiate_cifs_search(xid, file);
+ cFYI(1, "initiate cifs search rc %d", rc);
+ if (rc)
+ goto rddir2_exit;
+ }
switch ((int) file->f_pos) {
case 0:
@@ -805,14 +825,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
if after then keep searching till find it */
if (file->private_data == NULL) {
- rc = initiate_cifs_search(xid, file);
- cFYI(1, "initiate cifs search rc %d", rc);
- if (rc) {
- FreeXid(xid);
- return rc;
- }
- }
- if (file->private_data == NULL) {
rc = -EINVAL;
FreeXid(xid);
return rc;
@@ -829,6 +841,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
CIFSFindClose(xid, pTcon, cifsFile->netfid);
} */
+ pTcon = tlink_tcon(cifsFile->tlink);
rc = find_cifs_entry(xid, pTcon, file,
&current_entry, &num_to_fill);
if (rc) {
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 0a57cb7db5d..2a11efd9659 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -80,7 +80,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
if (max_vcs < 2)
max_vcs = 0xFFFF;
- write_lock(&cifs_tcp_ses_lock);
+ spin_lock(&cifs_tcp_ses_lock);
if ((ses->need_reconnect) && is_first_ses_reconnect(ses))
goto get_vc_num_exit; /* vcnum will be zero */
for (i = ses->server->srv_count - 1; i < max_vcs; i++) {
@@ -112,7 +112,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
vcnum = i;
ses->vcnum = vcnum;
get_vc_num_exit:
- write_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return cpu_to_le16(vcnum);
}
@@ -383,6 +383,9 @@ static int decode_ascii_ssetup(char **pbcc_area, int bleft,
static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
struct cifsSesInfo *ses)
{
+ unsigned int tioffset; /* challenge message target info area */
+ unsigned int tilen; /* challenge message target info area length */
+
CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
@@ -399,12 +402,25 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
return -EINVAL;
}
- memcpy(ses->server->cryptKey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
+ memcpy(ses->cryptKey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
/* BB we could decode pblob->NegotiateFlags; some may be useful */
/* In particular we can examine sign flags */
/* BB spec says that if AvId field of MsvAvTimestamp is populated then
we must set the MIC field of the AUTHENTICATE_MESSAGE */
+ tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
+ tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
+ ses->tilen = tilen;
+ if (ses->tilen) {
+ ses->tiblob = kmalloc(tilen, GFP_KERNEL);
+ if (!ses->tiblob) {
+ cERROR(1, "Challenge target info allocation failure");
+ ses->tilen = 0;
+ return -ENOMEM;
+ }
+ memcpy(ses->tiblob, bcc_ptr + tioffset, ses->tilen);
+ }
+
return 0;
}
@@ -425,7 +441,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
/* BB is NTLMV2 session security format easier to use here? */
flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
- NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
+ NTLMSSP_NEGOTIATE_NTLM;
if (ses->server->secMode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -448,13 +464,16 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
maximum possible size is fixed and small, making this approach cleaner.
This function returns the length of the data in the blob */
static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ u16 *buflen,
struct cifsSesInfo *ses,
- const struct nls_table *nls_cp, bool first)
+ const struct nls_table *nls_cp)
{
+ int rc;
+ unsigned int size;
AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
__u32 flags;
unsigned char *tmp;
- char ntlm_session_key[CIFS_SESS_KEY_SIZE];
+ struct ntlmv2_resp ntlmv2_response = {};
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
@@ -462,7 +481,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
flags = NTLMSSP_NEGOTIATE_56 |
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
- NTLMSSP_NEGOTIATE_NT_ONLY | NTLMSSP_NEGOTIATE_NTLM;
+ NTLMSSP_NEGOTIATE_NTLM;
if (ses->server->secMode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
flags |= NTLMSSP_NEGOTIATE_SIGN;
@@ -477,19 +496,26 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
sec_blob->LmChallengeResponse.Length = 0;
sec_blob->LmChallengeResponse.MaximumLength = 0;
- /* calculate session key, BB what about adding similar ntlmv2 path? */
- SMBNTencrypt(ses->password, ses->server->cryptKey, ntlm_session_key);
- if (first)
- cifs_calculate_mac_key(&ses->server->mac_signing_key,
- ntlm_session_key, ses->password);
-
- memcpy(tmp, ntlm_session_key, CIFS_SESS_KEY_SIZE);
sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
- sec_blob->NtChallengeResponse.Length = cpu_to_le16(CIFS_SESS_KEY_SIZE);
- sec_blob->NtChallengeResponse.MaximumLength =
- cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ rc = setup_ntlmv2_rsp(ses, (char *)&ntlmv2_response, nls_cp);
+ if (rc) {
+ cERROR(1, "Error %d during NTLMSSP authentication", rc);
+ goto setup_ntlmv2_ret;
+ }
+ size = sizeof(struct ntlmv2_resp);
+ memcpy(tmp, (char *)&ntlmv2_response, size);
+ tmp += size;
+ if (ses->tilen > 0) {
+ memcpy(tmp, ses->tiblob, ses->tilen);
+ tmp += ses->tilen;
+ }
- tmp += CIFS_SESS_KEY_SIZE;
+ sec_blob->NtChallengeResponse.Length = cpu_to_le16(size + ses->tilen);
+ sec_blob->NtChallengeResponse.MaximumLength =
+ cpu_to_le16(size + ses->tilen);
+ kfree(ses->tiblob);
+ ses->tiblob = NULL;
+ ses->tilen = 0;
if (ses->domainName == NULL) {
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -501,7 +527,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
MAX_USERNAME_SIZE, nls_cp);
len *= 2; /* unicode is 2 bytes each */
- len += 2; /* trailing null */
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->DomainName.Length = cpu_to_le16(len);
sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
@@ -518,7 +543,6 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
MAX_USERNAME_SIZE, nls_cp);
len *= 2; /* unicode is 2 bytes each */
- len += 2; /* trailing null */
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->UserName.Length = cpu_to_le16(len);
sec_blob->UserName.MaximumLength = cpu_to_le16(len);
@@ -533,7 +557,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->SessionKey.Length = 0;
sec_blob->SessionKey.MaximumLength = 0;
- return tmp - pbuffer;
+
+setup_ntlmv2_ret:
+ *buflen = tmp - pbuffer;
+ return rc;
}
@@ -545,19 +572,6 @@ static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB,
return;
}
-
-static int setup_ntlmssp_auth_req(SESSION_SETUP_ANDX *pSMB,
- struct cifsSesInfo *ses,
- const struct nls_table *nls, bool first_time)
-{
- int bloblen;
-
- bloblen = build_ntlmssp_auth_blob(&pSMB->req.SecurityBlob[0], ses, nls,
- first_time);
- pSMB->req.SecurityBlobLength = cpu_to_le16(bloblen);
-
- return bloblen;
-}
#endif
int
@@ -579,15 +593,12 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
int bytes_remaining;
struct key *spnego_key = NULL;
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
- bool first_time;
+ u16 blob_len;
+ char *ntlmsspblob = NULL;
if (ses == NULL)
return -EINVAL;
- read_lock(&cifs_tcp_ses_lock);
- first_time = is_first_ses_reconnect(ses);
- read_unlock(&cifs_tcp_ses_lock);
-
type = ses->server->secType;
cFYI(1, "sess setup type %d", type);
@@ -658,7 +669,7 @@ ssetup_ntlmssp_authenticate:
/* BB calculate hash with password */
/* and copy into bcc */
- calc_lanman_hash(ses->password, ses->server->cryptKey,
+ calc_lanman_hash(ses->password, ses->cryptKey,
ses->server->secMode & SECMODE_PW_ENCRYPT ?
true : false, lnm_session_key);
@@ -685,15 +696,11 @@ ssetup_ntlmssp_authenticate:
cpu_to_le16(CIFS_SESS_KEY_SIZE);
/* calculate session key */
- SMBNTencrypt(ses->password, ses->server->cryptKey,
- ntlm_session_key);
+ SMBNTencrypt(ses->password, ses->cryptKey, ntlm_session_key);
- if (first_time) /* should this be moved into common code
- with similar ntlmv2 path? */
- cifs_calculate_mac_key(&ses->server->mac_signing_key,
- ntlm_session_key, ses->password);
+ cifs_calculate_session_key(&ses->auth_key,
+ ntlm_session_key, ses->password);
/* copy session key */
-
memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE);
bcc_ptr += CIFS_SESS_KEY_SIZE;
memcpy(bcc_ptr, (char *)ntlm_session_key, CIFS_SESS_KEY_SIZE);
@@ -725,16 +732,31 @@ ssetup_ntlmssp_authenticate:
pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
/* cpu_to_le16(LM2_SESS_KEY_SIZE); */
- pSMB->req_no_secext.CaseSensitivePasswordLength =
- cpu_to_le16(sizeof(struct ntlmv2_resp));
-
/* calculate session key */
- setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
- /* FIXME: calculate MAC key */
+ rc = setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
+ if (rc) {
+ cERROR(1, "Error %d during NTLMv2 authentication", rc);
+ kfree(v2_sess_key);
+ goto ssetup_exit;
+ }
memcpy(bcc_ptr, (char *)v2_sess_key,
- sizeof(struct ntlmv2_resp));
+ sizeof(struct ntlmv2_resp));
bcc_ptr += sizeof(struct ntlmv2_resp);
kfree(v2_sess_key);
+ /* set the case-sensitive password length only after tilen
+ * may have been assigned; tilen is 0 otherwise.
+ */
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(sizeof(struct ntlmv2_resp) + ses->tilen);
+ if (ses->tilen > 0) {
+ memcpy(bcc_ptr, ses->tiblob, ses->tilen);
+ bcc_ptr += ses->tilen;
+ /* we never did allocate ses->domainName to free */
+ kfree(ses->tiblob);
+ ses->tiblob = NULL;
+ ses->tilen = 0;
+ }
+
if (ses->capabilities & CAP_UNICODE) {
if (iov[0].iov_len % 2) {
*bcc_ptr = 0;
@@ -765,17 +787,14 @@ ssetup_ntlmssp_authenticate:
}
/* bail out if key is too long */
if (msg->sesskey_len >
- sizeof(ses->server->mac_signing_key.data.krb5)) {
+ sizeof(ses->auth_key.data.krb5)) {
cERROR(1, "Kerberos signing key too long (%u bytes)",
msg->sesskey_len);
rc = -EOVERFLOW;
goto ssetup_exit;
}
- if (first_time) {
- ses->server->mac_signing_key.len = msg->sesskey_len;
- memcpy(ses->server->mac_signing_key.data.krb5,
- msg->data, msg->sesskey_len);
- }
+ ses->auth_key.len = msg->sesskey_len;
+ memcpy(ses->auth_key.data.krb5, msg->data, msg->sesskey_len);
pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
capabilities |= CAP_EXTENDED_SECURITY;
pSMB->req.Capabilities = cpu_to_le32(capabilities);
@@ -815,12 +834,30 @@ ssetup_ntlmssp_authenticate:
if (phase == NtLmNegotiate) {
setup_ntlmssp_neg_req(pSMB, ses);
iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
+ iov[1].iov_base = &pSMB->req.SecurityBlob[0];
} else if (phase == NtLmAuthenticate) {
- int blob_len;
- blob_len = setup_ntlmssp_auth_req(pSMB, ses,
- nls_cp,
- first_time);
+ /* 5 is an empirical value, large enough to
+ * hold the authenticate message: at most 10
+ * AV pairs, domain, user and workstation names,
+ * flags, etc.
+ */
+ ntlmsspblob = kmalloc(
+ 5*sizeof(struct _AUTHENTICATE_MESSAGE),
+ GFP_KERNEL);
+ if (!ntlmsspblob) {
+ cERROR(1, "Can't allocate NTLMSSP");
+ rc = -ENOMEM;
+ goto ssetup_exit;
+ }
+
+ rc = build_ntlmssp_auth_blob(ntlmsspblob,
+ &blob_len, ses, nls_cp);
+ if (rc)
+ goto ssetup_exit;
iov[1].iov_len = blob_len;
+ iov[1].iov_base = ntlmsspblob;
+ pSMB->req.SecurityBlobLength =
+ cpu_to_le16(blob_len);
/* Make sure that we tell the server that we
are using the uid that it just gave us back
on the response (challenge) */
@@ -830,7 +867,6 @@ ssetup_ntlmssp_authenticate:
rc = -ENOSYS;
goto ssetup_exit;
}
- iov[1].iov_base = &pSMB->req.SecurityBlob[0];
/* unicode strings must be word aligned */
if ((iov[0].iov_len + iov[1].iov_len) % 2) {
*bcc_ptr = 0;
@@ -895,7 +931,6 @@ ssetup_ntlmssp_authenticate:
bcc_ptr = pByteArea(smb_buf);
if (smb_buf->WordCount == 4) {
- __u16 blob_len;
blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
if (blob_len > bytes_remaining) {
cERROR(1, "bad security blob length %d", blob_len);
@@ -931,6 +966,8 @@ ssetup_exit:
key_put(spnego_key);
}
kfree(str_area);
+ kfree(ntlmsspblob);
+ ntlmsspblob = NULL;
if (resp_buf_type == CIFS_SMALL_BUFFER) {
cFYI(1, "ssetup freeing small buf %p", iov[0].iov_base);
cifs_small_buf_release(iov[0].iov_base);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 82f78c4d697..a66c91eb6eb 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -543,7 +543,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(midQ->resp_buf,
- &ses->server->mac_signing_key,
+ &ses->server->session_key,
midQ->sequence_number+1);
if (rc) {
cERROR(1, "Unexpected SMB signature");
@@ -731,7 +731,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
- &ses->server->mac_signing_key,
+ &ses->server->session_key,
midQ->sequence_number+1);
if (rc) {
cERROR(1, "Unexpected SMB signature");
@@ -981,7 +981,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
(ses->server->secMode & (SECMODE_SIGN_REQUIRED |
SECMODE_SIGN_ENABLED))) {
rc = cifs_verify_signature(out_buf,
- &ses->server->mac_signing_key,
+ &ses->server->session_key,
midQ->sequence_number+1);
if (rc) {
cERROR(1, "Unexpected SMB signature");
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a1509207bfa..a264b744bb4 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -47,9 +47,10 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
#ifdef CONFIG_CIFS_XATTR
int xid;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
struct super_block *sb;
- char *full_path;
+ char *full_path = NULL;
if (direntry == NULL)
return -EIO;
@@ -58,16 +59,19 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
sb = direntry->d_inode->i_sb;
if (sb == NULL)
return -EIO;
- xid = GetXid();
cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
+ xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto remove_ea_exit;
}
if (ea_name == NULL) {
cFYI(1, "Null xattr names not supported");
@@ -91,6 +95,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
remove_ea_exit:
kfree(full_path);
FreeXid(xid);
+ cifs_put_tlink(tlink);
#endif
return rc;
}
@@ -102,6 +107,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
#ifdef CONFIG_CIFS_XATTR
int xid;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
struct super_block *sb;
char *full_path;
@@ -113,16 +119,19 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
sb = direntry->d_inode->i_sb;
if (sb == NULL)
return -EIO;
- xid = GetXid();
cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
+ xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto set_ea_exit;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -132,9 +141,8 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
returns as xattrs */
if (value_size > MAX_EA_VALUE_SIZE) {
cFYI(1, "size of EA value too large");
- kfree(full_path);
- FreeXid(xid);
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto set_ea_exit;
}
if (ea_name == NULL) {
@@ -198,6 +206,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
set_ea_exit:
kfree(full_path);
FreeXid(xid);
+ cifs_put_tlink(tlink);
#endif
return rc;
}
@@ -209,6 +218,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
#ifdef CONFIG_CIFS_XATTR
int xid;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
struct super_block *sb;
char *full_path;
@@ -221,16 +231,18 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
if (sb == NULL)
return -EIO;
- xid = GetXid();
-
cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
+ xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto get_ea_exit;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -323,6 +335,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
get_ea_exit:
kfree(full_path);
FreeXid(xid);
+ cifs_put_tlink(tlink);
#endif
return rc;
}
@@ -333,6 +346,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
#ifdef CONFIG_CIFS_XATTR
int xid;
struct cifs_sb_info *cifs_sb;
+ struct tcon_link *tlink;
struct cifsTconInfo *pTcon;
struct super_block *sb;
char *full_path;
@@ -346,18 +360,20 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
return -EIO;
cifs_sb = CIFS_SB(sb);
- pTcon = cifs_sb->tcon;
-
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
return -EOPNOTSUPP;
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ pTcon = tlink_tcon(tlink);
+
xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
- return rc;
+ goto list_ea_exit;
}
/* return dos attributes as pseudo xattr */
/* return alt name if available as pseudo attr */
@@ -370,8 +386,10 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+list_ea_exit:
kfree(full_path);
FreeXid(xid);
+ cifs_put_tlink(tlink);
#endif
return rc;
}
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index a5bf5771a22..9060f08e70c 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/list.h>
#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <linux/coda.h>
#include <linux/coda_linux.h>
@@ -31,19 +32,23 @@ void coda_cache_enter(struct inode *inode, int mask)
{
struct coda_inode_info *cii = ITOC(inode);
+ spin_lock(&cii->c_lock);
cii->c_cached_epoch = atomic_read(&permission_epoch);
if (cii->c_uid != current_fsuid()) {
cii->c_uid = current_fsuid();
cii->c_cached_perm = mask;
} else
cii->c_cached_perm |= mask;
+ spin_unlock(&cii->c_lock);
}
/* remove cached acl from an inode */
void coda_cache_clear_inode(struct inode *inode)
{
struct coda_inode_info *cii = ITOC(inode);
+ spin_lock(&cii->c_lock);
cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
+ spin_unlock(&cii->c_lock);
}
/* remove all acl caches */
@@ -57,13 +62,15 @@ void coda_cache_clear_all(struct super_block *sb)
int coda_cache_check(struct inode *inode, int mask)
{
struct coda_inode_info *cii = ITOC(inode);
- int hit;
+ int hit;
- hit = (mask & cii->c_cached_perm) == mask &&
- cii->c_uid == current_fsuid() &&
- cii->c_cached_epoch == atomic_read(&permission_epoch);
+ spin_lock(&cii->c_lock);
+ hit = (mask & cii->c_cached_perm) == mask &&
+ cii->c_uid == current_fsuid() &&
+ cii->c_cached_epoch == atomic_read(&permission_epoch);
+ spin_unlock(&cii->c_lock);
- return hit;
+ return hit;
}
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index a7a780929ee..602240569c8 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -45,13 +45,15 @@ static void coda_fill_inode(struct inode *inode, struct coda_vattr *attr)
static int coda_test_inode(struct inode *inode, void *data)
{
struct CodaFid *fid = (struct CodaFid *)data;
- return coda_fideq(&(ITOC(inode)->c_fid), fid);
+ struct coda_inode_info *cii = ITOC(inode);
+ return coda_fideq(&cii->c_fid, fid);
}
static int coda_set_inode(struct inode *inode, void *data)
{
struct CodaFid *fid = (struct CodaFid *)data;
- ITOC(inode)->c_fid = *fid;
+ struct coda_inode_info *cii = ITOC(inode);
+ cii->c_fid = *fid;
return 0;
}
@@ -71,6 +73,7 @@ struct inode * coda_iget(struct super_block * sb, struct CodaFid * fid,
cii = ITOC(inode);
/* we still need to set i_ino for things like stat(2) */
inode->i_ino = hash;
+ /* inode is locked and unique, no need to grab cii->c_lock */
cii->c_mapcount = 0;
unlock_new_inode(inode);
}
@@ -107,14 +110,20 @@ int coda_cnode_make(struct inode **inode, struct CodaFid *fid, struct super_bloc
}
+/* Although we treat Coda file identifiers as immutable, there is one
+ * special case for files created during a disconnection where they may
+ * not be globally unique. When an identifier collision is detected we
+ * first try to flush the cached inode from the kernel and finally
+ * resort to renaming/rehashing in-place. Userspace remembers both old
+ * and new values of the identifier to handle any in-flight upcalls.
+ * The real solution is to use globally unique UUIDs as identifiers, but
+ * retrofitting the existing userspace code for this is non-trivial. */
void coda_replace_fid(struct inode *inode, struct CodaFid *oldfid,
struct CodaFid *newfid)
{
- struct coda_inode_info *cii;
+ struct coda_inode_info *cii = ITOC(inode);
unsigned long hash = coda_f2i(newfid);
- cii = ITOC(inode);
-
BUG_ON(!coda_fideq(&cii->c_fid, oldfid));
/* replace fid and rehash inode */
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index ccd98b0f2b0..96fbeab77f2 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -17,7 +17,7 @@
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/string.h>
-#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
#include <asm/uaccess.h>
@@ -116,15 +116,11 @@ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struc
goto exit;
}
- lock_kernel();
-
error = venus_lookup(dir->i_sb, coda_i2f(dir), name, length,
&type, &resfid);
if (!error)
error = coda_cnode_make(&inode, &resfid, dir->i_sb);
- unlock_kernel();
-
if (error && error != -ENOENT)
return ERR_PTR(error);
@@ -140,28 +136,24 @@ exit:
int coda_permission(struct inode *inode, int mask)
{
- int error = 0;
+ int error;
mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
if (!mask)
- return 0;
+ return 0;
if ((mask & MAY_EXEC) && !execute_ok(inode))
return -EACCES;
- lock_kernel();
-
if (coda_cache_check(inode, mask))
- goto out;
+ return 0;
- error = venus_access(inode->i_sb, coda_i2f(inode), mask);
+ error = venus_access(inode->i_sb, coda_i2f(inode), mask);
if (!error)
coda_cache_enter(inode, mask);
- out:
- unlock_kernel();
return error;
}
@@ -200,41 +192,34 @@ static inline void coda_dir_drop_nlink(struct inode *dir)
/* creation routines: create, mknod, mkdir, link, symlink */
static int coda_create(struct inode *dir, struct dentry *de, int mode, struct nameidata *nd)
{
- int error=0;
+ int error;
const char *name=de->d_name.name;
int length=de->d_name.len;
struct inode *inode;
struct CodaFid newfid;
struct coda_vattr attrs;
- lock_kernel();
-
- if (coda_isroot(dir) && coda_iscontrol(name, length)) {
- unlock_kernel();
+ if (coda_isroot(dir) && coda_iscontrol(name, length))
return -EPERM;
- }
error = venus_create(dir->i_sb, coda_i2f(dir), name, length,
0, mode, &newfid, &attrs);
-
- if ( error ) {
- unlock_kernel();
- d_drop(de);
- return error;
- }
+ if (error)
+ goto err_out;
inode = coda_iget(dir->i_sb, &newfid, &attrs);
- if ( IS_ERR(inode) ) {
- unlock_kernel();
- d_drop(de);
- return PTR_ERR(inode);
+ if (IS_ERR(inode)) {
+ error = PTR_ERR(inode);
+ goto err_out;
}
/* invalidate the directory cnode's attributes */
coda_dir_update_mtime(dir);
- unlock_kernel();
d_instantiate(de, inode);
return 0;
+err_out:
+ d_drop(de);
+ return error;
}
static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
@@ -246,36 +231,29 @@ static int coda_mkdir(struct inode *dir, struct dentry *de, int mode)
int error;
struct CodaFid newfid;
- lock_kernel();
-
- if (coda_isroot(dir) && coda_iscontrol(name, len)) {
- unlock_kernel();
+ if (coda_isroot(dir) && coda_iscontrol(name, len))
return -EPERM;
- }
attrs.va_mode = mode;
error = venus_mkdir(dir->i_sb, coda_i2f(dir),
name, len, &newfid, &attrs);
-
- if ( error ) {
- unlock_kernel();
- d_drop(de);
- return error;
- }
+ if (error)
+ goto err_out;
inode = coda_iget(dir->i_sb, &newfid, &attrs);
- if ( IS_ERR(inode) ) {
- unlock_kernel();
- d_drop(de);
- return PTR_ERR(inode);
+ if (IS_ERR(inode)) {
+ error = PTR_ERR(inode);
+ goto err_out;
}
/* invalidate the directory cnode's attributes */
coda_dir_inc_nlink(dir);
coda_dir_update_mtime(dir);
- unlock_kernel();
d_instantiate(de, inode);
return 0;
+err_out:
+ d_drop(de);
+ return error;
}
/* try to make de an entry in dir_inode linked to source_de */
@@ -287,52 +265,38 @@ static int coda_link(struct dentry *source_de, struct inode *dir_inode,
int len = de->d_name.len;
int error;
- lock_kernel();
-
- if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
- unlock_kernel();
+ if (coda_isroot(dir_inode) && coda_iscontrol(name, len))
return -EPERM;
- }
error = venus_link(dir_inode->i_sb, coda_i2f(inode),
coda_i2f(dir_inode), (const char *)name, len);
-
if (error) {
d_drop(de);
- goto out;
+ return error;
}
coda_dir_update_mtime(dir_inode);
atomic_inc(&inode->i_count);
d_instantiate(de, inode);
inc_nlink(inode);
-
-out:
- unlock_kernel();
- return(error);
+ return 0;
}
static int coda_symlink(struct inode *dir_inode, struct dentry *de,
const char *symname)
{
- const char *name = de->d_name.name;
+ const char *name = de->d_name.name;
int len = de->d_name.len;
int symlen;
- int error = 0;
-
- lock_kernel();
+ int error;
- if (coda_isroot(dir_inode) && coda_iscontrol(name, len)) {
- unlock_kernel();
+ if (coda_isroot(dir_inode) && coda_iscontrol(name, len))
return -EPERM;
- }
symlen = strlen(symname);
- if ( symlen > CODA_MAXPATHLEN ) {
- unlock_kernel();
+ if (symlen > CODA_MAXPATHLEN)
return -ENAMETOOLONG;
- }
/*
* This entry is now negative. Since we do not create
@@ -343,10 +307,9 @@ static int coda_symlink(struct inode *dir_inode, struct dentry *de,
symname, symlen);
/* mtime is no good anymore */
- if ( !error )
+ if (!error)
coda_dir_update_mtime(dir_inode);
- unlock_kernel();
return error;
}
@@ -357,17 +320,12 @@ static int coda_unlink(struct inode *dir, struct dentry *de)
const char *name = de->d_name.name;
int len = de->d_name.len;
- lock_kernel();
-
error = venus_remove(dir->i_sb, coda_i2f(dir), name, len);
- if ( error ) {
- unlock_kernel();
+ if (error)
return error;
- }
coda_dir_update_mtime(dir);
drop_nlink(de->d_inode);
- unlock_kernel();
return 0;
}
@@ -377,8 +335,6 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
int len = de->d_name.len;
int error;
- lock_kernel();
-
error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
if (!error) {
/* VFS may delete the child */
@@ -389,7 +345,6 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
coda_dir_drop_nlink(dir);
coda_dir_update_mtime(dir);
}
- unlock_kernel();
return error;
}
@@ -403,15 +358,12 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
int new_length = new_dentry->d_name.len;
int error;
- lock_kernel();
-
error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
coda_i2f(new_dir), old_length, new_length,
(const char *) old_name, (const char *)new_name);
-
- if ( !error ) {
- if ( new_dentry->d_inode ) {
- if ( S_ISDIR(new_dentry->d_inode->i_mode) ) {
+ if (!error) {
+ if (new_dentry->d_inode) {
+ if (S_ISDIR(new_dentry->d_inode->i_mode)) {
coda_dir_drop_nlink(old_dir);
coda_dir_inc_nlink(new_dir);
}
@@ -423,8 +375,6 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
coda_flag_inode(new_dir, C_VATTR);
}
}
- unlock_kernel();
-
return error;
}
@@ -594,10 +544,7 @@ static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
struct inode *inode = de->d_inode;
struct coda_inode_info *cii;
- if (!inode)
- return 1;
- lock_kernel();
- if (coda_isroot(inode))
+ if (!inode || coda_isroot(inode))
goto out;
if (is_bad_inode(inode))
goto bad;
@@ -617,13 +564,12 @@ static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
goto out;
/* clear the flags. */
+ spin_lock(&cii->c_lock);
cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH);
-
+ spin_unlock(&cii->c_lock);
bad:
- unlock_kernel();
return 0;
out:
- unlock_kernel();
return 1;
}
@@ -656,20 +602,19 @@ static int coda_dentry_delete(struct dentry * dentry)
int coda_revalidate_inode(struct dentry *dentry)
{
struct coda_vattr attr;
- int error = 0;
+ int error;
int old_mode;
ino_t old_ino;
struct inode *inode = dentry->d_inode;
struct coda_inode_info *cii = ITOC(inode);
- lock_kernel();
- if ( !cii->c_flags )
- goto ok;
+ if (!cii->c_flags)
+ return 0;
if (cii->c_flags & (C_VATTR | C_PURGE | C_FLUSH)) {
error = venus_getattr(inode->i_sb, &(cii->c_fid), &attr);
- if ( error )
- goto return_bad;
+ if (error)
+ return -EIO;
/* this inode may be lost if:
- its ino changed
@@ -688,17 +633,13 @@ int coda_revalidate_inode(struct dentry *dentry)
/* the following can happen when a local fid is replaced
with a global one, here we lose and declare the inode bad */
if (inode->i_ino != old_ino)
- goto return_bad;
+ return -EIO;
coda_flag_inode_children(inode, C_FLUSH);
+
+ spin_lock(&cii->c_lock);
cii->c_flags &= ~(C_VATTR | C_PURGE | C_FLUSH);
+ spin_unlock(&cii->c_lock);
}
-
-ok:
- unlock_kernel();
return 0;
-
-return_bad:
- unlock_kernel();
- return -EIO;
}
diff --git a/fs/coda/file.c b/fs/coda/file.c
index ad3cd2abeeb..c8b50ba4366 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -15,7 +15,7 @@
#include <linux/stat.h>
#include <linux/cred.h>
#include <linux/errno.h>
-#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -109,19 +109,24 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
coda_inode = coda_file->f_path.dentry->d_inode;
host_inode = host_file->f_path.dentry->d_inode;
+
+ cii = ITOC(coda_inode);
+ spin_lock(&cii->c_lock);
coda_file->f_mapping = host_file->f_mapping;
if (coda_inode->i_mapping == &coda_inode->i_data)
coda_inode->i_mapping = host_inode->i_mapping;
/* only allow additional mmaps as long as userspace isn't changing
* the container file on us! */
- else if (coda_inode->i_mapping != host_inode->i_mapping)
+ else if (coda_inode->i_mapping != host_inode->i_mapping) {
+ spin_unlock(&cii->c_lock);
return -EBUSY;
+ }
/* keep track of how often the coda_inode/host_file has been mmapped */
- cii = ITOC(coda_inode);
cii->c_mapcount++;
cfi->cfi_mapcount++;
+ spin_unlock(&cii->c_lock);
return host_file->f_op->mmap(host_file, vma);
}
@@ -138,8 +143,6 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
if (!cfi)
return -ENOMEM;
- lock_kernel();
-
error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
&host_file);
if (!host_file)
@@ -147,7 +150,6 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
if (error) {
kfree(cfi);
- unlock_kernel();
return error;
}
@@ -159,8 +161,6 @@ int coda_open(struct inode *coda_inode, struct file *coda_file)
BUG_ON(coda_file->private_data != NULL);
coda_file->private_data = cfi;
-
- unlock_kernel();
return 0;
}
@@ -171,9 +171,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
struct coda_file_info *cfi;
struct coda_inode_info *cii;
struct inode *host_inode;
- int err = 0;
-
- lock_kernel();
+ int err;
cfi = CODA_FTOC(coda_file);
BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
@@ -185,18 +183,18 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
cii = ITOC(coda_inode);
/* did we mmap this file? */
+ spin_lock(&cii->c_lock);
if (coda_inode->i_mapping == &host_inode->i_data) {
cii->c_mapcount -= cfi->cfi_mapcount;
if (!cii->c_mapcount)
coda_inode->i_mapping = &coda_inode->i_data;
}
+ spin_unlock(&cii->c_lock);
fput(cfi->cfi_container);
kfree(coda_file->private_data);
coda_file->private_data = NULL;
- unlock_kernel();
-
/* VFS fput ignores the return value from file_operations->release, so
* there is no use returning an error here */
return 0;
@@ -207,7 +205,7 @@ int coda_fsync(struct file *coda_file, int datasync)
struct file *host_file;
struct inode *coda_inode = coda_file->f_path.dentry->d_inode;
struct coda_file_info *cfi;
- int err = 0;
+ int err;
if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) ||
S_ISLNK(coda_inode->i_mode)))
@@ -218,11 +216,8 @@ int coda_fsync(struct file *coda_file, int datasync)
host_file = cfi->cfi_container;
err = vfs_fsync(host_file, datasync);
- if ( !err && !datasync ) {
- lock_kernel();
+ if (!err && !datasync)
err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
- unlock_kernel();
- }
return err;
}
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 6526e6f21ec..7993b96ca34 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -15,7 +15,8 @@
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/vfs.h>
#include <linux/slab.h>
@@ -51,6 +52,7 @@ static struct inode *coda_alloc_inode(struct super_block *sb)
ei->c_flags = 0;
ei->c_uid = 0;
ei->c_cached_perm = 0;
+ spin_lock_init(&ei->c_lock);
return &ei->vfs_inode;
}
@@ -143,7 +145,7 @@ static int get_device_index(struct coda_mount_data *data)
static int coda_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *root = NULL;
- struct venus_comm *vc = NULL;
+ struct venus_comm *vc;
struct CodaFid fid;
int error;
int idx;
@@ -157,21 +159,26 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
printk(KERN_INFO "coda_read_super: device index: %i\n", idx);
vc = &coda_comms[idx];
+ mutex_lock(&vc->vc_mutex);
+
if (!vc->vc_inuse) {
printk("coda_read_super: No pseudo device\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto unlock_out;
}
- if ( vc->vc_sb ) {
+ if (vc->vc_sb) {
printk("coda_read_super: Device already mounted\n");
- return -EBUSY;
+ error = -EBUSY;
+ goto unlock_out;
}
error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY);
if (error)
- goto bdi_err;
+ goto unlock_out;
vc->vc_sb = sb;
+ mutex_unlock(&vc->vc_mutex);
sb->s_fs_info = vc;
sb->s_flags |= MS_NOATIME;
@@ -200,26 +207,33 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent)
printk("coda_read_super: rootinode is %ld dev %s\n",
root->i_ino, root->i_sb->s_id);
sb->s_root = d_alloc_root(root);
- if (!sb->s_root)
+ if (!sb->s_root) {
+ error = -EINVAL;
goto error;
- return 0;
+ }
+ return 0;
- error:
- bdi_destroy(&vc->bdi);
- bdi_err:
+error:
if (root)
iput(root);
- if (vc)
- vc->vc_sb = NULL;
- return -EINVAL;
+ mutex_lock(&vc->vc_mutex);
+ bdi_destroy(&vc->bdi);
+ vc->vc_sb = NULL;
+ sb->s_fs_info = NULL;
+unlock_out:
+ mutex_unlock(&vc->vc_mutex);
+ return error;
}
static void coda_put_super(struct super_block *sb)
{
- bdi_destroy(&coda_vcp(sb)->bdi);
- coda_vcp(sb)->vc_sb = NULL;
+ struct venus_comm *vcp = coda_vcp(sb);
+ mutex_lock(&vcp->vc_mutex);
+ bdi_destroy(&vcp->bdi);
+ vcp->vc_sb = NULL;
sb->s_fs_info = NULL;
+ mutex_unlock(&vcp->vc_mutex);
printk("Coda: Bye bye.\n");
}
@@ -245,8 +259,6 @@ int coda_setattr(struct dentry *de, struct iattr *iattr)
struct coda_vattr vattr;
int error;
- lock_kernel();
-
memset(&vattr, 0, sizeof(vattr));
inode->i_ctime = CURRENT_TIME_SEC;
@@ -256,13 +268,10 @@ int coda_setattr(struct dentry *de, struct iattr *iattr)
/* Venus is responsible for truncating the container-file!!! */
error = venus_setattr(inode->i_sb, coda_i2f(inode), &vattr);
- if ( !error ) {
+ if (!error) {
coda_vattr_to_iattr(inode, &vattr);
coda_cache_clear_inode(inode);
}
-
- unlock_kernel();
-
return error;
}
@@ -276,12 +285,8 @@ static int coda_statfs(struct dentry *dentry, struct kstatfs *buf)
{
int error;
- lock_kernel();
-
error = venus_statfs(dentry, buf);
- unlock_kernel();
-
if (error) {
/* fake something like AFS does */
buf->f_blocks = 9000000;
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index ca25d96d45c..2fd89b5c5c7 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -23,8 +23,6 @@
#include <linux/coda_fs_i.h>
#include <linux/coda_psdev.h>
-#include <linux/smp_lock.h>
-
/* pioctl ops */
static int coda_ioctl_permission(struct inode *inode, int mask);
static long coda_pioctl(struct file *filp, unsigned int cmd,
@@ -39,6 +37,7 @@ const struct inode_operations coda_ioctl_inode_operations = {
const struct file_operations coda_ioctl_operations = {
.owner = THIS_MODULE,
.unlocked_ioctl = coda_pioctl,
+ .llseek = noop_llseek,
};
/* the coda pioctl inode ops */
@@ -57,13 +56,9 @@ static long coda_pioctl(struct file *filp, unsigned int cmd,
struct inode *target_inode = NULL;
struct coda_inode_info *cnp;
- lock_kernel();
-
/* get the Pioctl data arguments from user space */
- if (copy_from_user(&data, (void __user *)user_data, sizeof(data))) {
- error = -EINVAL;
- goto out;
- }
+ if (copy_from_user(&data, (void __user *)user_data, sizeof(data)))
+ return -EINVAL;
/*
* Look up the pathname. Note that the pathname is in
@@ -75,13 +70,12 @@ static long coda_pioctl(struct file *filp, unsigned int cmd,
error = user_lpath(data.path, &path);
if (error)
- goto out;
- else
- target_inode = path.dentry->d_inode;
+ return error;
+
+ target_inode = path.dentry->d_inode;
/* return if it is not a Coda inode */
if (target_inode->i_sb != inode->i_sb) {
- path_put(&path);
error = -EINVAL;
goto out;
}
@@ -90,10 +84,7 @@ static long coda_pioctl(struct file *filp, unsigned int cmd,
cnp = ITOC(target_inode);
error = venus_pioctl(inode->i_sb, &(cnp->c_fid), cmd, &data);
-
- path_put(&path);
-
out:
- unlock_kernel();
+ path_put(&path);
return error;
}
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 116af7546cf..62647a8595e 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -35,7 +35,7 @@
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/device.h>
#include <asm/io.h>
#include <asm/system.h>
@@ -67,8 +67,10 @@ static unsigned int coda_psdev_poll(struct file *file, poll_table * wait)
unsigned int mask = POLLOUT | POLLWRNORM;
poll_wait(file, &vcp->vc_waitq, wait);
+ mutex_lock(&vcp->vc_mutex);
if (!list_empty(&vcp->vc_pending))
mask |= POLLIN | POLLRDNORM;
+ mutex_unlock(&vcp->vc_mutex);
return mask;
}
@@ -108,16 +110,9 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
return -EFAULT;
if (DOWNCALL(hdr.opcode)) {
- struct super_block *sb = NULL;
- union outputArgs *dcbuf;
+ union outputArgs *dcbuf;
int size = sizeof(*dcbuf);
- sb = vcp->vc_sb;
- if ( !sb ) {
- count = nbytes;
- goto out;
- }
-
if ( nbytes < sizeof(struct coda_out_hdr) ) {
printk("coda_downcall opc %d uniq %d, not enough!\n",
hdr.opcode, hdr.unique);
@@ -137,9 +132,7 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
}
/* what downcall errors does Venus handle ? */
- lock_kernel();
- error = coda_downcall(hdr.opcode, dcbuf, sb);
- unlock_kernel();
+ error = coda_downcall(vcp, hdr.opcode, dcbuf);
CODA_FREE(dcbuf, nbytes);
if (error) {
@@ -152,7 +145,7 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
}
/* Look for the message on the processing queue. */
- lock_kernel();
+ mutex_lock(&vcp->vc_mutex);
list_for_each(lh, &vcp->vc_processing) {
tmp = list_entry(lh, struct upc_req , uc_chain);
if (tmp->uc_unique == hdr.unique) {
@@ -161,7 +154,7 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
break;
}
}
- unlock_kernel();
+ mutex_unlock(&vcp->vc_mutex);
if (!req) {
printk("psdev_write: msg (%d, %d) not found\n",
@@ -216,7 +209,7 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
if (nbytes == 0)
return 0;
- lock_kernel();
+ mutex_lock(&vcp->vc_mutex);
add_wait_queue(&vcp->vc_waitq, &wait);
set_current_state(TASK_INTERRUPTIBLE);
@@ -230,7 +223,9 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
retval = -ERESTARTSYS;
break;
}
+ mutex_unlock(&vcp->vc_mutex);
schedule();
+ mutex_lock(&vcp->vc_mutex);
}
set_current_state(TASK_RUNNING);
@@ -263,7 +258,7 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
kfree(req);
out:
- unlock_kernel();
+ mutex_unlock(&vcp->vc_mutex);
return (count ? count : retval);
}
@@ -276,10 +271,10 @@ static int coda_psdev_open(struct inode * inode, struct file * file)
if (idx < 0 || idx >= MAX_CODADEVS)
return -ENODEV;
- lock_kernel();
-
err = -EBUSY;
vcp = &coda_comms[idx];
+ mutex_lock(&vcp->vc_mutex);
+
if (!vcp->vc_inuse) {
vcp->vc_inuse++;
@@ -293,7 +288,7 @@ static int coda_psdev_open(struct inode * inode, struct file * file)
err = 0;
}
- unlock_kernel();
+ mutex_unlock(&vcp->vc_mutex);
return err;
}
@@ -308,7 +303,7 @@ static int coda_psdev_release(struct inode * inode, struct file * file)
return -1;
}
- lock_kernel();
+ mutex_lock(&vcp->vc_mutex);
/* Wakeup clients so they can return. */
list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) {
@@ -333,7 +328,7 @@ static int coda_psdev_release(struct inode * inode, struct file * file)
file->private_data = NULL;
vcp->vc_inuse--;
- unlock_kernel();
+ mutex_unlock(&vcp->vc_mutex);
return 0;
}
@@ -346,6 +341,7 @@ static const struct file_operations coda_psdev_fops = {
.unlocked_ioctl = coda_psdev_ioctl,
.open = coda_psdev_open,
.release = coda_psdev_release,
+ .llseek = noop_llseek,
};
static int init_coda_psdev(void)
@@ -361,9 +357,11 @@ static int init_coda_psdev(void)
err = PTR_ERR(coda_psdev_class);
goto out_chrdev;
}
- for (i = 0; i < MAX_CODADEVS; i++)
+ for (i = 0; i < MAX_CODADEVS; i++) {
+ mutex_init(&(&coda_comms[i])->vc_mutex);
device_create(coda_psdev_class, NULL,
MKDEV(CODA_PSDEV_MAJOR, i), NULL, "cfs%d", i);
+ }
coda_sysctl_init();
goto out;
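
The psdev conversion above swaps the big kernel lock for the per-device vc_mutex and, in the read path, drops that mutex around schedule() so Venus can queue requests while a reader sleeps. A condensed sketch of that wait pattern follows (illustrative only; it reuses the venus_comm fields from coda_psdev.h plus the vc_mutex added here, and is not the literal patched function):

	/*
	 * Sketch: sleep on vc_waitq without holding vc_mutex, retake the
	 * mutex before touching vc_pending again.
	 */
	static int wait_for_pending(struct venus_comm *vcp)
	{
		DECLARE_WAITQUEUE(wait, current);
		int ret = 0;

		mutex_lock(&vcp->vc_mutex);
		add_wait_queue(&vcp->vc_waitq, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!list_empty(&vcp->vc_pending))
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			mutex_unlock(&vcp->vc_mutex);	/* sleep unlocked */
			schedule();
			mutex_lock(&vcp->vc_mutex);	/* relock before rechecking */
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&vcp->vc_waitq, &wait);
		mutex_unlock(&vcp->vc_mutex);
		return ret;
	}
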
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index 4513b725845..af78f007a2b 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -14,7 +14,6 @@
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/coda.h>
#include <linux/coda_linux.h>
@@ -29,11 +28,9 @@ static int coda_symlink_filler(struct file *file, struct page *page)
unsigned int len = PAGE_SIZE;
char *p = kmap(page);
- lock_kernel();
cii = ITOC(inode);
error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
- unlock_kernel();
if (error)
goto fail;
SetPageUptodate(page);
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index b8893ab6f9e..c3563cab975 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
@@ -606,7 +607,8 @@ static void coda_unblock_signals(sigset_t *old)
(r)->uc_opcode != CODA_RELEASE) || \
(r)->uc_flags & CODA_REQ_READ))
-static inline void coda_waitfor_upcall(struct upc_req *req)
+static inline void coda_waitfor_upcall(struct venus_comm *vcp,
+ struct upc_req *req)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long timeout = jiffies + coda_timeout * HZ;
@@ -639,10 +641,12 @@ static inline void coda_waitfor_upcall(struct upc_req *req)
break;
}
+ mutex_unlock(&vcp->vc_mutex);
if (blocked)
schedule_timeout(HZ);
else
schedule();
+ mutex_lock(&vcp->vc_mutex);
}
if (blocked)
coda_unblock_signals(&old);
@@ -667,18 +671,23 @@ static int coda_upcall(struct venus_comm *vcp,
{
union outputArgs *out;
union inputArgs *sig_inputArgs;
- struct upc_req *req, *sig_req;
- int error = 0;
+ struct upc_req *req = NULL, *sig_req;
+ int error;
+
+ mutex_lock(&vcp->vc_mutex);
if (!vcp->vc_inuse) {
printk(KERN_NOTICE "coda: Venus dead, not sending upcall\n");
- return -ENXIO;
+ error = -ENXIO;
+ goto exit;
}
/* Format the request message. */
req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ if (!req) {
+ error = -ENOMEM;
+ goto exit;
+ }
req->uc_data = (void *)buffer;
req->uc_flags = 0;
@@ -705,7 +714,7 @@ static int coda_upcall(struct venus_comm *vcp,
* ENODEV. */
/* Go to sleep. Wake up on signals only after the timeout. */
- coda_waitfor_upcall(req);
+ coda_waitfor_upcall(vcp, req);
/* Op went through, interrupt or not... */
if (req->uc_flags & CODA_REQ_WRITE) {
@@ -759,6 +768,7 @@ static int coda_upcall(struct venus_comm *vcp,
exit:
kfree(req);
+ mutex_unlock(&vcp->vc_mutex);
return error;
}
@@ -796,21 +806,24 @@ exit:
*
* CODA_REPLACE -- replace one CodaFid with another throughout the name cache */
-int coda_downcall(int opcode, union outputArgs * out, struct super_block *sb)
+int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out)
{
struct inode *inode = NULL;
- struct CodaFid *fid, *newfid;
+ struct CodaFid *fid = NULL, *newfid;
+ struct super_block *sb;
/* Handle invalidation requests. */
- if ( !sb || !sb->s_root)
- return 0;
+ mutex_lock(&vcp->vc_mutex);
+ sb = vcp->vc_sb;
+ if (!sb || !sb->s_root)
+ goto unlock_out;
switch (opcode) {
case CODA_FLUSH:
coda_cache_clear_all(sb);
shrink_dcache_sb(sb);
if (sb->s_root->d_inode)
- coda_flag_inode(sb->s_root->d_inode, C_FLUSH);
+ coda_flag_inode(sb->s_root->d_inode, C_FLUSH);
break;
case CODA_PURGEUSER:
@@ -819,45 +832,53 @@ int coda_downcall(int opcode, union outputArgs * out, struct super_block *sb)
case CODA_ZAPDIR:
fid = &out->coda_zapdir.CodaFid;
- inode = coda_fid_to_inode(fid, sb);
- if (inode) {
- coda_flag_inode_children(inode, C_PURGE);
- coda_flag_inode(inode, C_VATTR);
- }
break;
case CODA_ZAPFILE:
fid = &out->coda_zapfile.CodaFid;
- inode = coda_fid_to_inode(fid, sb);
- if (inode)
- coda_flag_inode(inode, C_VATTR);
break;
case CODA_PURGEFID:
fid = &out->coda_purgefid.CodaFid;
+ break;
+
+ case CODA_REPLACE:
+ fid = &out->coda_replace.OldFid;
+ break;
+ }
+ if (fid)
inode = coda_fid_to_inode(fid, sb);
- if (inode) {
- coda_flag_inode_children(inode, C_PURGE);
- /* catch the dentries later if some are still busy */
- coda_flag_inode(inode, C_PURGE);
- d_prune_aliases(inode);
+unlock_out:
+ mutex_unlock(&vcp->vc_mutex);
- }
+ if (!inode)
+ return 0;
+
+ switch (opcode) {
+ case CODA_ZAPDIR:
+ coda_flag_inode_children(inode, C_PURGE);
+ coda_flag_inode(inode, C_VATTR);
+ break;
+
+ case CODA_ZAPFILE:
+ coda_flag_inode(inode, C_VATTR);
+ break;
+
+ case CODA_PURGEFID:
+ coda_flag_inode_children(inode, C_PURGE);
+
+ /* catch the dentries later if some are still busy */
+ coda_flag_inode(inode, C_PURGE);
+ d_prune_aliases(inode);
break;
case CODA_REPLACE:
- fid = &out->coda_replace.OldFid;
newfid = &out->coda_replace.NewFid;
- inode = coda_fid_to_inode(fid, sb);
- if (inode)
- coda_replace_fid(inode, fid, newfid);
+ coda_replace_fid(inode, fid, newfid);
break;
}
-
- if (inode)
- iput(inode);
-
+ iput(inode);
return 0;
}
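
The reworked coda_downcall above splits the work into two phases: fid-to-inode translation under vc_mutex, then dcache and attribute flagging plus the final iput() with no lock held. Stripped to the fid-based opcodes, the shape is roughly as below; fid_for_opcode() and flag_inode_for_opcode() are hypothetical helpers standing in for the two switch statements, so this is a sketch rather than the patched function:

	/* Two-phase downcall handling: resolve under the lock, act outside it. */
	int handle_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out)
	{
		struct inode *inode = NULL;
		struct CodaFid *fid;
		struct super_block *sb;

		mutex_lock(&vcp->vc_mutex);
		sb = vcp->vc_sb;
		if (sb && sb->s_root) {
			fid = fid_for_opcode(opcode, out);	/* hypothetical */
			if (fid)
				inode = coda_fid_to_inode(fid, sb);
		}
		mutex_unlock(&vcp->vc_mutex);

		if (!inode)
			return 0;

		flag_inode_for_opcode(inode, opcode);		/* hypothetical */
		iput(inode);
		return 0;
	}
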
diff --git a/fs/compat.c b/fs/compat.c
index 0644a154672..f03abdadc40 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1963,7 +1963,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
}
#endif /* HAVE_SET_RESTORE_SIGMASK */
-#if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
+#if (defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)) && !defined(CONFIG_NFSD_DEPRECATED)
/* Stuff for NFS server syscalls... */
struct compat_nfsctl_svc {
u16 svc32_port;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 03e59aa318e..d0ad09d5778 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -599,69 +599,6 @@ static int do_smb_getmountuid(unsigned int fd, unsigned int cmd,
#define HIDPGETCONNLIST _IOR('H', 210, int)
#define HIDPGETCONNINFO _IOR('H', 211, int)
-#ifdef CONFIG_BLOCK
-struct raw32_config_request
-{
- compat_int_t raw_minor;
- __u64 block_major;
- __u64 block_minor;
-} __attribute__((packed));
-
-static int get_raw32_request(struct raw_config_request *req, struct raw32_config_request __user *user_req)
-{
- int ret;
-
- if (!access_ok(VERIFY_READ, user_req, sizeof(struct raw32_config_request)))
- return -EFAULT;
-
- ret = __get_user(req->raw_minor, &user_req->raw_minor);
- ret |= __get_user(req->block_major, &user_req->block_major);
- ret |= __get_user(req->block_minor, &user_req->block_minor);
-
- return ret ? -EFAULT : 0;
-}
-
-static int set_raw32_request(struct raw_config_request *req, struct raw32_config_request __user *user_req)
-{
- int ret;
-
- if (!access_ok(VERIFY_WRITE, user_req, sizeof(struct raw32_config_request)))
- return -EFAULT;
-
- ret = __put_user(req->raw_minor, &user_req->raw_minor);
- ret |= __put_user(req->block_major, &user_req->block_major);
- ret |= __put_user(req->block_minor, &user_req->block_minor);
-
- return ret ? -EFAULT : 0;
-}
-
-static int raw_ioctl(unsigned fd, unsigned cmd,
- struct raw32_config_request __user *user_req)
-{
- int ret;
-
- switch (cmd) {
- case RAW_SETBIND:
- default: { /* RAW_GETBIND */
- struct raw_config_request req;
- mm_segment_t oldfs = get_fs();
-
- if ((ret = get_raw32_request(&req, user_req)))
- return ret;
-
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd,cmd,(unsigned long)&req);
- set_fs(oldfs);
-
- if ((!ret) && (cmd == RAW_GETBIND)) {
- ret = set_raw32_request(&req, user_req);
- }
- break;
- }
- }
- return ret;
-}
-#endif /* CONFIG_BLOCK */
struct serial_struct32 {
compat_int_t type;
@@ -1262,9 +1199,6 @@ COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5)
COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
COMPATIBLE_IOCTL(OSS_GETVERSION)
-/* Raw devices */
-COMPATIBLE_IOCTL(RAW_SETBIND)
-COMPATIBLE_IOCTL(RAW_GETBIND)
/* SMB ioctls which do not need any translations */
COMPATIBLE_IOCTL(SMB_IOC_NEWCONN)
/* Watchdog */
@@ -1523,10 +1457,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd,
case MTIOCGET32:
case MTIOCPOS32:
return mt_ioctl_trans(fd, cmd, argp);
- /* Raw devices */
- case RAW_SETBIND:
- case RAW_GETBIND:
- return raw_ioctl(fd, cmd, argp);
#endif
/* One SMB ioctl needs translations. */
#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, compat_uid_t)
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 0210898458b..89d394d8fe2 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -43,6 +43,7 @@ const struct file_operations debugfs_file_operations = {
.read = default_read_file,
.write = default_write_file,
.open = default_open,
+ .llseek = noop_llseek,
};
static void *debugfs_follow_link(struct dentry *dentry, struct nameidata *nd)
@@ -454,6 +455,7 @@ static const struct file_operations fops_bool = {
.read = read_file_bool,
.write = write_file_bool,
.open = default_open,
+ .llseek = default_llseek,
};
/**
@@ -498,6 +500,7 @@ static ssize_t read_file_blob(struct file *file, char __user *user_buf,
static const struct file_operations fops_blob = {
.read = read_file_blob,
.open = default_open,
+ .llseek = default_llseek,
};
/**
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index c6cf2515874..6b42ba807df 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -643,7 +643,8 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
static const struct file_operations waiters_fops = {
.owner = THIS_MODULE,
.open = waiters_open,
- .read = waiters_read
+ .read = waiters_read,
+ .llseek = default_llseek,
};
void dlm_delete_debug_file(struct dlm_ls *ls)
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 031dbe3a15c..64e5f3efdd8 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1846,6 +1846,9 @@ static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
struct dlm_lkb *gr;
list_for_each_entry(gr, head, lkb_statequeue) {
+ /* skip self when sending basts to convertqueue */
+ if (gr == lkb)
+ continue;
if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
queue_bast(r, gr, lkb->lkb_rqmode);
gr->lkb_highbast = lkb->lkb_rqmode;
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index d45c02db694..30d8b85febb 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -412,7 +412,8 @@ static const struct file_operations dev_fops = {
.read = dev_read,
.write = dev_write,
.poll = dev_poll,
- .owner = THIS_MODULE
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
};
static struct miscdevice plock_dev_misc = {
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index b6272853130..66d6c16bf44 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -1009,6 +1009,7 @@ static const struct file_operations device_fops = {
.write = device_write,
.poll = device_poll,
.owner = THIS_MODULE,
+ .llseek = noop_llseek,
};
static const struct file_operations ctl_device_fops = {
@@ -1017,6 +1018,7 @@ static const struct file_operations ctl_device_fops = {
.read = device_read,
.write = device_write,
.owner = THIS_MODULE,
+ .llseek = noop_llseek,
};
static struct miscdevice ctl_device = {
@@ -1029,6 +1031,7 @@ static const struct file_operations monitor_device_fops = {
.open = monitor_device_open,
.release = monitor_device_close,
.owner = THIS_MODULE,
+ .llseek = noop_llseek,
};
static struct miscdevice monitor_device = {
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 0032a9f5a3a..40186b95942 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -477,7 +477,7 @@ ecryptfs_lower_header_size(struct ecryptfs_crypt_stat *crypt_stat)
static inline struct ecryptfs_file_info *
ecryptfs_file_to_private(struct file *file)
{
- return (struct ecryptfs_file_info *)file->private_data;
+ return file->private_data;
}
static inline void
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 622c9514080..91da02987bf 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -31,7 +31,6 @@
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/fs_stack.h>
-#include <linux/smp_lock.h>
#include "ecryptfs_kernel.h"
/**
@@ -284,11 +283,9 @@ static int ecryptfs_fasync(int fd, struct file *file, int flag)
int rc = 0;
struct file *lower_file = NULL;
- lock_kernel();
lower_file = ecryptfs_file_to_lower(file);
if (lower_file->f_op && lower_file->f_op->fasync)
rc = lower_file->f_op->fasync(fd, lower_file, flag);
- unlock_kernel();
return rc;
}
@@ -332,6 +329,7 @@ const struct file_operations ecryptfs_dir_fops = {
.fsync = ecryptfs_fsync,
.fasync = ecryptfs_fasync,
.splice_read = generic_file_splice_read,
+ .llseek = default_llseek,
};
const struct file_operations ecryptfs_main_fops = {
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 00208c3d7e9..940a82e63dc 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -482,6 +482,7 @@ static const struct file_operations ecryptfs_miscdev_fops = {
.read = ecryptfs_miscdev_read,
.write = ecryptfs_miscdev_write,
.release = ecryptfs_miscdev_release,
+ .llseek = noop_llseek,
};
static struct miscdevice ecryptfs_miscdev = {
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 6bd3f76fdf8..e0194b3e14d 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -293,6 +293,7 @@ static const struct file_operations eventfd_fops = {
.poll = eventfd_poll,
.read = eventfd_read,
.write = eventfd_write,
+ .llseek = noop_llseek,
};
/**
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3817149919c..256bb7bb102 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -674,7 +674,8 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
.release = ep_eventpoll_release,
- .poll = ep_eventpoll_poll
+ .poll = ep_eventpoll_poll,
+ .llseek = noop_llseek,
};
/* Fast test to see if the file is an eventpoll file */
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index d91e9d829bc..dcc941d82d6 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -420,7 +420,7 @@ int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de,
err = exofs_write_begin(NULL, page->mapping, pos, len,
AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
if (err)
- EXOFS_ERR("exofs_set_link: exofs_write_begin FAILD => %d\n",
+ EXOFS_ERR("exofs_set_link: exofs_write_begin FAILED => %d\n",
err);
de->inode_no = cpu_to_le64(inode->i_ino);
@@ -556,7 +556,7 @@ int exofs_delete_entry(struct exofs_dir_entry *dir, struct page *page)
err = exofs_write_begin(NULL, page->mapping, pos, to - from, 0,
&page, NULL);
if (err)
- EXOFS_ERR("exofs_delete_entry: exofs_write_begin FAILD => %d\n",
+ EXOFS_ERR("exofs_delete_entry: exofs_write_begin FAILED => %d\n",
err);
if (pde)
pde->rec_len = cpu_to_le16(to - from);
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 3eadd97324b..42685424817 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -185,7 +185,7 @@ static void update_write_page(struct page *page, int ret)
/* Called at the end of reads, to optionally unlock pages and update their
* status.
*/
-static int __readpages_done(struct page_collect *pcol, bool do_unlock)
+static int __readpages_done(struct page_collect *pcol)
{
int i;
u64 resid;
@@ -221,7 +221,7 @@ static int __readpages_done(struct page_collect *pcol, bool do_unlock)
page_stat ? "bad_bytes" : "good_bytes");
ret = update_read_page(page, page_stat);
- if (do_unlock)
+ if (!pcol->read_4_write)
unlock_page(page);
length += PAGE_SIZE;
}
@@ -236,7 +236,7 @@ static void readpages_done(struct exofs_io_state *ios, void *p)
{
struct page_collect *pcol = p;
- __readpages_done(pcol, true);
+ __readpages_done(pcol);
atomic_dec(&pcol->sbi->s_curr_pending);
kfree(pcol);
}
@@ -257,7 +257,7 @@ static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
}
}
-static int read_exec(struct page_collect *pcol, bool is_sync)
+static int read_exec(struct page_collect *pcol)
{
struct exofs_i_info *oi = exofs_i(pcol->inode);
struct exofs_io_state *ios = pcol->ios;
@@ -267,17 +267,14 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
if (!pcol->pages)
return 0;
- /* see comment in _readpage() about sync reads */
- WARN_ON(is_sync && (pcol->nr_pages != 1));
-
ios->pages = pcol->pages;
ios->nr_pages = pcol->nr_pages;
ios->length = pcol->length;
ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;
- if (is_sync) {
+ if (pcol->read_4_write) {
exofs_oi_read(oi, pcol->ios);
- return __readpages_done(pcol, false);
+ return __readpages_done(pcol);
}
pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
@@ -303,7 +300,7 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
return 0;
err:
- if (!is_sync)
+ if (!pcol->read_4_write)
_unlock_pcol_pages(pcol, ret, READ);
pcol_free(pcol);
@@ -356,7 +353,7 @@ static int readpage_strip(void *data, struct page *page)
EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
" splitting\n", inode->i_ino, page->index);
- return read_exec(pcol, false);
+ return read_exec(pcol);
}
try_again:
@@ -366,7 +363,7 @@ try_again:
} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
page->index)) {
/* Discontinuity detected, split the request */
- ret = read_exec(pcol, false);
+ ret = read_exec(pcol);
if (unlikely(ret))
goto fail;
goto try_again;
@@ -391,7 +388,7 @@ try_again:
page, len, pcol->nr_pages, pcol->length);
/* split the request, and start again with current page */
- ret = read_exec(pcol, false);
+ ret = read_exec(pcol);
if (unlikely(ret))
goto fail;
@@ -420,27 +417,24 @@ static int exofs_readpages(struct file *file, struct address_space *mapping,
return ret;
}
- return read_exec(&pcol, false);
+ return read_exec(&pcol);
}
-static int _readpage(struct page *page, bool is_sync)
+static int _readpage(struct page *page, bool read_4_write)
{
struct page_collect pcol;
int ret;
_pcol_init(&pcol, 1, page->mapping->host);
- /* readpage_strip might call read_exec(,is_sync==false) at several
- * places but not if we have a single page.
- */
- pcol.read_4_write = is_sync;
+ pcol.read_4_write = read_4_write;
ret = readpage_strip(&pcol, page);
if (ret) {
EXOFS_ERR("_readpage => %d\n", ret);
return ret;
}
- return read_exec(&pcol, is_sync);
+ return read_exec(&pcol);
}
/*
@@ -511,7 +505,7 @@ static int write_exec(struct page_collect *pcol)
pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
if (!pcol_copy) {
- EXOFS_ERR("write_exec: Faild to kmalloc(pcol)\n");
+ EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
ret = -ENOMEM;
goto err;
}
@@ -527,7 +521,7 @@ static int write_exec(struct page_collect *pcol)
ret = exofs_oi_write(oi, ios);
if (unlikely(ret)) {
- EXOFS_ERR("write_exec: exofs_oi_write() Faild\n");
+ EXOFS_ERR("write_exec: exofs_oi_write() Failed\n");
goto err;
}
@@ -628,7 +622,7 @@ try_again:
/* split the request, next loop will start again */
ret = write_exec(pcol);
if (unlikely(ret)) {
- EXOFS_DBGMSG("write_exec faild => %d", ret);
+ EXOFS_DBGMSG("write_exec failed => %d", ret);
goto fail;
}
@@ -719,7 +713,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
fsdata);
if (ret) {
- EXOFS_DBGMSG("simple_write_begin faild\n");
+ EXOFS_DBGMSG("simple_write_begin failed\n");
goto out;
}
@@ -732,7 +726,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
if (ret) {
/*SetPageError was done by _readpage. Is it ok?*/
unlock_page(page);
- EXOFS_DBGMSG("__readpage_filler faild\n");
+ EXOFS_DBGMSG("__readpage_filler failed\n");
}
}
out:
@@ -1036,6 +1030,7 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
}
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
if (S_ISREG(inode->i_mode)) {
inode->i_op = &exofs_file_inode_operations;
inode->i_fop = &exofs_file_operations;
@@ -1072,8 +1067,10 @@ bad_inode:
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
if (!obj_created(oi)) {
+ EXOFS_DBGMSG("!obj_created\n");
BUG_ON(!obj_2bcreated(oi));
wait_event(oi->i_wq, obj_created(oi));
+ EXOFS_DBGMSG("wait_event done\n");
}
return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
@@ -1095,7 +1092,7 @@ static void create_done(struct exofs_io_state *ios, void *p)
atomic_dec(&sbi->s_curr_pending);
if (unlikely(ret)) {
- EXOFS_ERR("object=0x%llx creation faild in pid=0x%llx",
+ EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
_LLU(exofs_oi_objno(oi)), _LLU(sbi->layout.s_pid));
/*TODO: When FS is corrupted creation can fail, object already
* exist. Get rid of this asynchronous creation, if exist
@@ -1107,7 +1104,6 @@ static void create_done(struct exofs_io_state *ios, void *p)
set_obj_created(oi);
- atomic_dec(&inode->i_count);
wake_up(&oi->i_wq);
}
@@ -1135,6 +1131,7 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
sbi = sb->s_fs_info;
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
sb->s_dirt = 1;
inode_init_owner(inode, dir, mode);
inode->i_ino = sbi->s_nextid++;
@@ -1157,17 +1154,11 @@ struct inode *exofs_new_inode(struct inode *dir, int mode)
ios->obj.id = exofs_oi_objno(oi);
exofs_make_credential(oi->i_cred, &ios->obj);
- /* increment the refcount so that the inode will still be around when we
- * reach the callback
- */
- atomic_inc(&inode->i_count);
-
ios->done = create_done;
ios->private = inode;
ios->cred = oi->i_cred;
ret = exofs_sbi_create(ios);
if (ret) {
- atomic_dec(&inode->i_count);
exofs_put_io_state(ios);
return ERR_PTR(ret);
}
@@ -1215,7 +1206,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
args = kzalloc(sizeof(*args), GFP_KERNEL);
if (!args) {
- EXOFS_DBGMSG("Faild kzalloc of args\n");
+ EXOFS_DBGMSG("Failed kzalloc of args\n");
return -ENOMEM;
}
@@ -1257,12 +1248,7 @@ static int exofs_update_inode(struct inode *inode, int do_sync)
ios->out_attr_len = 1;
ios->out_attr = &attr;
- if (!obj_created(oi)) {
- EXOFS_DBGMSG("!obj_created\n");
- BUG_ON(!obj_2bcreated(oi));
- wait_event(oi->i_wq, obj_created(oi));
- EXOFS_DBGMSG("wait_event done\n");
- }
+ wait_obj_created(oi);
if (!do_sync) {
args->sbi = sbi;
@@ -1325,12 +1311,12 @@ void exofs_evict_inode(struct inode *inode)
inode->i_size = 0;
end_writeback(inode);
- /* if we are deleting an obj that hasn't been created yet, wait */
- if (!obj_created(oi)) {
- BUG_ON(!obj_2bcreated(oi));
- wait_event(oi->i_wq, obj_created(oi));
- /* ignore the error attempt a remove anyway */
- }
+ /* if we are deleting an obj that hasn't been created yet, wait.
+ * This also makes sure that create_done cannot be called with an
+ * already evicted inode.
+ */
+ wait_obj_created(oi);
+ /* ignore the error, attempt a remove anyway */
/* Now Remove the OSD objects */
ret = exofs_get_io_state(&sbi->layout, &ios);
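
The call sites in the hunks above (exofs_update_inode and exofs_evict_inode) now go through wait_obj_created() instead of open-coding the wait loop. The helper itself lives in exofs.h and is not part of this diff; judging from __exofs_wait_obj_created() it is presumably a thin wrapper along these lines (an assumption, not the actual header contents):

	/* Presumed definition of the helper used above (assumption). */
	static inline int wait_obj_created(struct exofs_i_info *oi)
	{
		if (likely(obj_created(oi)))
			return 0;
		return __exofs_wait_obj_created(oi);
	}
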
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index 6550bf70e41..f74a2ec027a 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -55,7 +55,7 @@ int exofs_read_kern(struct osd_dev *od, u8 *cred, struct osd_obj_id *obj,
ret = osd_finalize_request(or, 0, cred, NULL);
if (unlikely(ret)) {
- EXOFS_DBGMSG("Faild to osd_finalize_request() => %d\n", ret);
+ EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n", ret);
goto out;
}
@@ -79,7 +79,7 @@ int exofs_get_io_state(struct exofs_layout *layout,
*/
ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
if (unlikely(!ios)) {
- EXOFS_DBGMSG("Faild kzalloc bytes=%d\n",
+ EXOFS_DBGMSG("Failed kzalloc bytes=%d\n",
exofs_io_state_size(layout->s_numdevs));
*pios = NULL;
return -ENOMEM;
@@ -172,7 +172,7 @@ static int exofs_io_execute(struct exofs_io_state *ios)
ret = osd_finalize_request(or, 0, ios->cred, NULL);
if (unlikely(ret)) {
- EXOFS_DBGMSG("Faild to osd_finalize_request() => %d\n",
+ EXOFS_DBGMSG("Failed to osd_finalize_request() => %d\n",
ret);
return ret;
}
@@ -361,7 +361,7 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
if (unlikely(!per_dev->bio)) {
- EXOFS_DBGMSG("Faild to allocate BIO size=%u\n",
+ EXOFS_DBGMSG("Failed to allocate BIO size=%u\n",
bio_size);
return -ENOMEM;
}
@@ -564,7 +564,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
master_dev->bio->bi_max_vecs);
if (unlikely(!bio)) {
EXOFS_DBGMSG(
- "Faild to allocate BIO size=%u\n",
+ "Failed to allocate BIO size=%u\n",
master_dev->bio->bi_max_vecs);
ret = -ENOMEM;
goto out;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 940c9616886..533699c1604 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -458,7 +458,7 @@ failed_out:
* the same format as ext2_get_branch() would do. We are calling it after
* we had read the existing part of chain and partial points to the last
* triple of that (one with zero ->key). Upon the exit we have the same
- * picture as after the successful ext2_get_block(), excpet that in one
+ * picture as after the successful ext2_get_block(), except that in one
* place chain is disconnected - *branch->p is still zero (we did not
* set the last link), but branch->key contains the number that should
* be placed into *branch->p to fill that gap.
@@ -662,7 +662,7 @@ static int ext2_get_blocks(struct inode *inode,
mutex_lock(&ei->truncate_mutex);
/*
* If the indirect block is missing while we are reading
- * the chain(ext3_get_branch() returns -EAGAIN err), or
+ * the chain(ext2_get_branch() returns -EAGAIN err), or
* if the chain has been changed after we grab the semaphore,
* (either because another process truncated this branch, or
* another get_block allocated this branch) re-grab the chain to see if
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 1ec602673ea..85df87d0f7b 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -747,15 +747,16 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
__le32 features;
int err;
+ err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
- return -ENOMEM;
+ goto failed_unlock;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
- return -ENOMEM;
+ goto failed_unlock;
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
@@ -1107,6 +1108,7 @@ failed_sbi:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
+failed_unlock:
return ret;
}
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index d7e9f74dc3a..09b13bb34c9 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -90,7 +90,6 @@ int ext3_sync_file(struct file *file, int datasync)
* storage
*/
if (needs_barrier)
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
- BLKDEV_IFL_WAIT);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
return ret;
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index a367dd04428..37776800910 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -411,9 +411,6 @@ static void ext3_put_super (struct super_block * sb)
int i, err;
dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-
- lock_kernel();
-
ext3_xattr_put_super(sb);
err = journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
@@ -462,8 +459,6 @@ static void ext3_put_super (struct super_block * sb)
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
-
- unlock_kernel();
}
static struct kmem_cache *ext3_inode_cachep;
@@ -1627,8 +1622,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_resgid = EXT3_DEF_RESGID;
sbi->s_sb_block = sb_block;
- unlock_kernel();
-
blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
if (!blocksize) {
ext3_msg(sb, KERN_ERR, "error: unable to set blocksize");
@@ -2025,7 +2018,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
"writeback");
- lock_kernel();
return 0;
cantfind_ext3:
@@ -2055,7 +2047,6 @@ out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
- lock_kernel();
return ret;
}
@@ -2538,8 +2529,6 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
int i;
#endif
- lock_kernel();
-
/* Store the original options */
lock_super(sb);
old_sb_flags = sb->s_flags;
@@ -2648,7 +2637,6 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
kfree(old_opts.s_qf_names[i]);
#endif
unlock_super(sb);
- unlock_kernel();
if (enable_quota)
dquot_resume(sb, -1);
@@ -2669,7 +2657,6 @@ restore_opts:
}
#endif
unlock_super(sb);
- unlock_kernel();
return err;
}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 592adf2e546..3f3ff5ee8f9 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -128,10 +128,9 @@ int ext4_sync_file(struct file *file, int datasync)
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
- NULL, BLKDEV_IFL_WAIT);
+ NULL);
ret = jbd2_log_wait_commit(journal, commit_tid);
} else if (journal->j_flags & JBD2_BARRIER)
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
- BLKDEV_IFL_WAIT);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
return ret;
}
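
As in the ext3 hunk above, blkdev_issue_flush() loses its flags argument: after the barrier rework the flush always waits, so BLKDEV_IFL_WAIT is gone. A minimal before/after sketch of an fsync tail (example_sync_tail and needs_flush are illustrative names, not ext4 code):

	/* Old call: blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
	 * New call: blkdev_issue_flush(bdev, GFP_KERNEL, NULL);  -- waits implicitly
	 */
	static int example_sync_tail(struct inode *inode, bool needs_flush)
	{
		if (needs_flush)
			return blkdev_issue_flush(inode->i_sb->s_bdev,
						  GFP_KERNEL, NULL);
		return 0;
	}
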
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4b4ad4b7ce5..19aa0d44d82 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2566,7 +2566,7 @@ static inline void ext4_issue_discard(struct super_block *sb,
discard_block = block + ext4_group_first_block_no(sb, block_group);
trace_ext4_discard_blocks(sb,
(unsigned long long) discard_block, count);
- ret = sb_issue_discard(sb, discard_block, count);
+ ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
if (ret == EOPNOTSUPP) {
ext4_warning(sb, "discard not supported, disabling");
clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7f47c366bf1..8ecc1e59030 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -26,7 +26,6 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/parser.h>
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/vfs.h>
@@ -708,7 +707,6 @@ static void ext4_put_super(struct super_block *sb)
destroy_workqueue(sbi->dio_unwritten_wq);
lock_super(sb);
- lock_kernel();
if (sb->s_dirt)
ext4_commit_super(sb, 1);
@@ -775,7 +773,6 @@ static void ext4_put_super(struct super_block *sb)
* Now that we are completely done shutting down the
* superblock, we need to actually destroy the kobject.
*/
- unlock_kernel();
unlock_super(sb);
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
@@ -2588,8 +2585,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_sectors_written_start =
part_stat_read(sb->s_bdev->bd_part, sectors[1]);
- unlock_kernel();
-
/* Cleanup superblock name */
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
*cp = '!';
@@ -3164,7 +3159,6 @@ no_journal:
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
- lock_kernel();
kfree(orig_data);
return 0;
@@ -3211,7 +3205,6 @@ out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
- lock_kernel();
out_free_orig:
kfree(orig_data);
return ret;
@@ -3720,8 +3713,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
#endif
char *orig_data = kstrdup(data, GFP_KERNEL);
- lock_kernel();
-
/* Store the original options */
lock_super(sb);
old_sb_flags = sb->s_flags;
@@ -3856,7 +3847,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
kfree(old_opts.s_qf_names[i]);
#endif
unlock_super(sb);
- unlock_kernel();
if (enable_quota)
dquot_resume(sb, -1);
@@ -3882,7 +3872,6 @@ restore_opts:
}
#endif
unlock_super(sb);
- unlock_kernel();
kfree(orig_data);
return err;
}
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 81184d3b75a..b47d2c9f4fa 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -577,7 +577,8 @@ int fat_free_clusters(struct inode *inode, int cluster)
sb_issue_discard(sb,
fat_clus_to_blknr(sbi, first_cl),
- nr_clus * sbi->sec_per_clus);
+ nr_clus * sbi->sec_per_clus,
+ GFP_NOFS, 0);
first_cl = cluster;
}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 830058057d3..ad6998a92c3 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/time.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
@@ -489,8 +488,6 @@ static void fat_put_super(struct super_block *sb)
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
- lock_kernel();
-
if (sb->s_dirt)
fat_write_super(sb);
@@ -504,8 +501,6 @@ static void fat_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
kfree(sbi);
-
- unlock_kernel();
}
static struct kmem_cache *fat_inode_cachep;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 1736f235638..970e682ea75 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -255,10 +255,7 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
for (i = 0; i < nr_bhs; i++) {
wait_on_buffer(bhs[i]);
- if (buffer_eopnotsupp(bhs[i])) {
- clear_buffer_eopnotsupp(bhs[i]);
- err = -EOPNOTSUPP;
- } else if (!err && !buffer_uptodate(bhs[i]))
+ if (!err && !buffer_uptodate(bhs[i]))
err = -EIO;
}
return err;
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index bbc94ae4fd7..bbca5c186ae 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -662,12 +662,16 @@ static int msdos_fill_super(struct super_block *sb, void *data, int silent)
{
int res;
+ lock_super(sb);
res = fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, 0);
- if (res)
+ if (res) {
+ unlock_super(sb);
return res;
+ }
sb->s_flags |= MS_NOATIME;
sb->s_root->d_op = &msdos_dentry_operations;
+ unlock_super(sb);
return 0;
}
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 6fcc7e71fba..6f0f6c9a015 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -1055,15 +1055,19 @@ static int vfat_fill_super(struct super_block *sb, void *data, int silent)
{
int res;
+ lock_super(sb);
res = fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, 1);
- if (res)
+ if (res) {
+ unlock_super(sb);
return res;
+ }
if (MSDOS_SB(sb)->options.name_check != 's')
sb->s_root->d_op = &vfat_ci_dentry_ops;
else
sb->s_root->d_op = &vfat_dentry_ops;
+ unlock_super(sb);
return 0;
}
diff --git a/fs/fifo.c b/fs/fifo.c
index 5d6606ffc2d..4e303c22d5e 100644
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -151,4 +151,5 @@ err_nocleanup:
*/
const struct file_operations def_fifo_fops = {
.open = fifo_open, /* will set read_ or write_pipefifo_fops */
+ .llseek = noop_llseek,
};
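
The .llseek additions scattered through this commit make each file's seek behaviour explicit instead of relying on the implicit default: noop_llseek lets lseek() succeed without moving the position, no_llseek makes it fail with -ESPIPE, and default_llseek keeps ordinary seeking. A minimal sketch for a misc-style device, with my_read/my_write as hypothetical handlers:

	/* Illustrative file_operations with the seek semantics spelled out. */
	static const struct file_operations example_fops = {
		.owner  = THIS_MODULE,
		.read   = my_read,		/* hypothetical */
		.write  = my_write,		/* hypothetical */
		.llseek = noop_llseek,		/* lseek() succeeds, position unchanged */
	};
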
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 0ec7bb2c95c..6c5131d592f 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -36,7 +36,6 @@
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include "vxfs.h"
#include "vxfs_dir.h"
@@ -212,16 +211,12 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, struct nameidata *nd)
if (dp->d_name.len > VXFS_NAMELEN)
return ERR_PTR(-ENAMETOOLONG);
- lock_kernel();
ino = vxfs_inode_by_name(dip, dp);
if (ino) {
ip = vxfs_iget(dip->i_sb, ino);
- if (IS_ERR(ip)) {
- unlock_kernel();
+ if (IS_ERR(ip))
return ERR_CAST(ip);
- }
}
- unlock_kernel();
d_add(dp, ip);
return NULL;
}
@@ -248,8 +243,6 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
u_long page, npages, block, pblocks, nblocks, offset;
loff_t pos;
- lock_kernel();
-
switch ((long)fp->f_pos) {
case 0:
if (filler(retp, ".", 1, fp->f_pos, ip->i_ino, DT_DIR) < 0)
@@ -265,10 +258,8 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
pos = fp->f_pos - 2;
- if (pos > VXFS_DIRROUND(ip->i_size)) {
- unlock_kernel();
+ if (pos > VXFS_DIRROUND(ip->i_size))
return 0;
- }
npages = dir_pages(ip);
nblocks = dir_blocks(ip);
@@ -327,6 +318,5 @@ vxfs_readdir(struct file *fp, void *retp, filldir_t filler)
done:
fp->f_pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2;
out:
- unlock_kernel();
return 0;
}
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index dc0c041e85c..71b0148b878 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -38,7 +38,6 @@
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/stat.h>
#include <linux/vfs.h>
#include <linux/mount.h>
@@ -81,16 +80,12 @@ vxfs_put_super(struct super_block *sbp)
{
struct vxfs_sb_info *infp = VXFS_SBI(sbp);
- lock_kernel();
-
vxfs_put_fake_inode(infp->vsi_fship);
vxfs_put_fake_inode(infp->vsi_ilist);
vxfs_put_fake_inode(infp->vsi_stilist);
brelse(infp->vsi_bp);
kfree(infp);
-
- unlock_kernel();
}
/**
@@ -148,7 +143,7 @@ static int vxfs_remount(struct super_block *sb, int *flags, char *data)
* The superblock on success, else %NULL.
*
* Locking:
- * We are under the bkl and @sbp->s_lock.
+ * We are under @sbp->s_lock.
*/
static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
{
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 3773fd63d2f..7367e177186 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -179,23 +179,27 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
static const struct file_operations fuse_ctl_abort_ops = {
.open = nonseekable_open,
.write = fuse_conn_abort_write,
+ .llseek = no_llseek,
};
static const struct file_operations fuse_ctl_waiting_ops = {
.open = nonseekable_open,
.read = fuse_conn_waiting_read,
+ .llseek = no_llseek,
};
static const struct file_operations fuse_conn_max_background_ops = {
.open = nonseekable_open,
.read = fuse_conn_max_background_read,
.write = fuse_conn_max_background_write,
+ .llseek = no_llseek,
};
static const struct file_operations fuse_conn_congestion_threshold_ops = {
.open = nonseekable_open,
.read = fuse_conn_congestion_threshold_read,
.write = fuse_conn_congestion_threshold_write,
+ .llseek = no_llseek,
};
static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index e1f8171278b..3e87cce5837 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -182,6 +182,7 @@ static const struct file_operations cuse_frontend_fops = {
.unlocked_ioctl = cuse_file_ioctl,
.compat_ioctl = cuse_file_compat_ioctl,
.poll = fuse_file_poll,
+ .llseek = noop_llseek,
};
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 237ee6a940d..aa996471ec5 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -622,6 +622,8 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
* cluster; until we do, disable leases (by just returning -EINVAL),
* unless the administrator has requested purely local locking.
*
+ * Locking: called under lock_flocks
+ *
* Returns: errno
*/
@@ -773,6 +775,7 @@ const struct file_operations gfs2_dir_fops = {
.fsync = gfs2_fsync,
.lock = gfs2_lock,
.flock = gfs2_flock,
+ .llseek = default_llseek,
};
#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
@@ -799,5 +802,6 @@ const struct file_operations gfs2_dir_fops_nolock = {
.open = gfs2_open,
.release = gfs2_close,
.fsync = gfs2_fsync,
+ .llseek = default_llseek,
};
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index ac750bd31a6..eb01f3575e1 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -592,22 +592,13 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
lh->lh_hash = cpu_to_be32(hash);
bh->b_end_io = end_buffer_write_sync;
- if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
- goto skip_barrier;
get_bh(bh);
- submit_bh(WRITE_BARRIER | REQ_META, bh);
- wait_on_buffer(bh);
- if (buffer_eopnotsupp(bh)) {
- clear_buffer_eopnotsupp(bh);
- set_buffer_uptodate(bh);
- fs_info(sdp, "barrier sync failed - disabling barriers\n");
- set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
- lock_buffer(bh);
-skip_barrier:
- get_bh(bh);
+ if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
submit_bh(WRITE_SYNC | REQ_META, bh);
- wait_on_buffer(bh);
- }
+ else
+ submit_bh(WRITE_FLUSH_FUA | REQ_META, bh);
+ wait_on_buffer(bh);
+
if (!buffer_uptodate(bh))
gfs2_io_error_bh(sdp, bh);
brelse(bh);
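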
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index d7eb1e209aa..ebef7ab6e17 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -144,7 +144,7 @@ static int __init init_gfs2_fs(void)
error = -ENOMEM;
gfs_recovery_wq = alloc_workqueue("gfs_recovery",
- WQ_RESCUER | WQ_FREEZEABLE, 0);
+ WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
if (!gfs_recovery_wq)
goto fail_wq;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index fb67f593f40..bef3ab6cf5c 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -866,8 +866,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
if ((start + nr_sects) != blk) {
rv = blkdev_issue_discard(bdev, start,
nr_sects, GFP_NOFS,
- BLKDEV_IFL_WAIT |
- BLKDEV_IFL_BARRIER);
+ 0);
if (rv)
goto fail;
nr_sects = 0;
@@ -881,8 +880,7 @@ start_new_extent:
}
}
if (nr_sects) {
- rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS,
- BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+ rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
if (rv)
goto fail;
}
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 34235d4bf08..33254160f65 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -20,7 +20,6 @@
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include "hfs_fs.h"
@@ -79,15 +78,11 @@ static int hfs_sync_fs(struct super_block *sb, int wait)
*/
static void hfs_put_super(struct super_block *sb)
{
- lock_kernel();
-
if (sb->s_dirt)
hfs_write_super(sb);
hfs_mdb_close(sb);
/* release the MDB's resources */
hfs_mdb_put(sb);
-
- unlock_kernel();
}
/*
@@ -385,6 +380,7 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
+
sb->s_fs_info = sbi;
INIT_HLIST_HEAD(&sbi->rsrc_inodes);
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 6bbd75c5589..7c232c1487e 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -28,12 +28,7 @@
* #define ATTR_KILL_SUID 2048
* #define ATTR_KILL_SGID 4096
*
- * and this is because they were added in 2.5 development in this patch:
- *
- * http://linux.bkbits.net:8080/linux-2.5/
- * cset@3caf4a12k4XgDzK7wyK-TGpSZ9u2Ww?nav=index.html
- * |src/.|src/include|src/include/linux|related/include/linux/fs.h
- *
+ * and this is because they were added in 2.5 development.
* Actually, they are not needed by most ->setattr() methods - they are set by
* callers of notify_change() to notify that the setuid/setgid bits must be
* dropped.
diff --git a/fs/hpfs/Kconfig b/fs/hpfs/Kconfig
index 56bd15c5bf6..63b6f563231 100644
--- a/fs/hpfs/Kconfig
+++ b/fs/hpfs/Kconfig
@@ -1,6 +1,7 @@
config HPFS_FS
tristate "OS/2 HPFS file system support"
depends on BLOCK
+ depends on BKL # nontrivial to fix
help
OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
is the file system used for organizing files on OS/2 hard disk
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 2607010be2f..c969a1aa163 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -477,11 +477,15 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
int o;
+ lock_kernel();
+
save_mount_options(s, options);
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
+ if (!sbi) {
+ unlock_kernel();
return -ENOMEM;
+ }
s->s_fs_info = sbi;
sbi->sb_bmp_dir = NULL;
@@ -666,6 +670,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
root->i_blocks = 5;
hpfs_brelse4(&qbh);
}
+ unlock_kernel();
return 0;
bail4: brelse(bh2);
@@ -677,6 +682,7 @@ bail0:
kfree(sbi->sb_cp_table);
s->s_fs_info = NULL;
kfree(sbi);
+ unlock_kernel();
return -EINVAL;
}
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 7b027720d82..4e2a45ea614 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -598,6 +598,7 @@ static const struct file_operations hppfs_dir_fops = {
.readdir = hppfs_readdir,
.open = hppfs_dir_open,
.fsync = hppfs_fsync,
+ .llseek = default_llseek,
};
static int hppfs_statfs(struct dentry *dentry, struct kstatfs *sf)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1f7ca505d48..a14328d270e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -689,6 +689,7 @@ const struct file_operations hugetlbfs_file_operations = {
.mmap = hugetlbfs_file_mmap,
.fsync = noop_fsync,
.get_unmapped_area = hugetlb_get_unmapped_area,
+ .llseek = default_llseek,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index e0aca9a0ac6..0542b6eedf8 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -10,7 +10,6 @@
*
* isofs directory handling functions
*/
-#include <linux/smp_lock.h>
#include <linux/gfp.h>
#include "isofs.h"
@@ -255,18 +254,19 @@ static int isofs_readdir(struct file *filp,
char *tmpname;
struct iso_directory_record *tmpde;
struct inode *inode = filp->f_path.dentry->d_inode;
+ struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
tmpname = (char *)__get_free_page(GFP_KERNEL);
if (tmpname == NULL)
return -ENOMEM;
- lock_kernel();
+ mutex_lock(&sbi->s_mutex);
tmpde = (struct iso_directory_record *) (tmpname+1024);
result = do_isofs_readdir(inode, filp, dirent, filldir, tmpname, tmpde);
free_page((unsigned long) tmpname);
- unlock_kernel();
+ mutex_unlock(&sbi->s_mutex);
return result;
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 5a44811b502..09ff41a752a 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/nls.h>
#include <linux/ctype.h>
-#include <linux/smp_lock.h>
#include <linux/statfs.h>
#include <linux/cdrom.h>
#include <linux/parser.h>
@@ -44,11 +43,7 @@ static void isofs_put_super(struct super_block *sb)
struct isofs_sb_info *sbi = ISOFS_SB(sb);
#ifdef CONFIG_JOLIET
- lock_kernel();
-
unload_nls(sbi->s_nls_iocharset);
-
- unlock_kernel();
#endif
kfree(sbi);
@@ -823,6 +818,7 @@ root_found:
sbi->s_utf8 = opt.utf8;
sbi->s_nocompress = opt.nocompress;
sbi->s_overriderockperm = opt.overriderockperm;
+ mutex_init(&sbi->s_mutex);
/*
* It would be incredibly stupid to allow people to mark every file
* on the disk as suid, so we merely allow them to set the default
@@ -977,8 +973,6 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
int section, rv, error;
struct iso_inode_info *ei = ISOFS_I(inode);
- lock_kernel();
-
error = -EIO;
rv = 0;
if (iblock < 0 || iblock != iblock_s) {
@@ -1054,7 +1048,6 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
error = 0;
abort:
- unlock_kernel();
return rv != 0 ? rv : error;
}
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 7d33de84f52..2882dc089f8 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -55,6 +55,7 @@ struct isofs_sb_info {
gid_t s_gid;
uid_t s_uid;
struct nls_table *s_nls_iocharset; /* Native language support table */
+ struct mutex s_mutex; /* replaces BKL, please remove if possible */
};
#define ISOFS_INVALID_MODE ((mode_t) -1)
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index ab438beb867..0d23abfd428 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -6,7 +6,6 @@
* (C) 1991 Linus Torvalds - minix filesystem
*/
-#include <linux/smp_lock.h>
#include <linux/gfp.h>
#include "isofs.h"
@@ -168,6 +167,7 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam
int found;
unsigned long uninitialized_var(block);
unsigned long uninitialized_var(offset);
+ struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb);
struct inode *inode;
struct page *page;
@@ -177,7 +177,7 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam
if (!page)
return ERR_PTR(-ENOMEM);
- lock_kernel();
+ mutex_lock(&sbi->s_mutex);
found = isofs_find_entry(dir, dentry,
&block, &offset,
page_address(page),
@@ -188,10 +188,10 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam
if (found) {
inode = isofs_iget(dir->i_sb, block, offset);
if (IS_ERR(inode)) {
- unlock_kernel();
+ mutex_unlock(&sbi->s_mutex);
return ERR_CAST(inode);
}
}
- unlock_kernel();
+ mutex_unlock(&sbi->s_mutex);
return d_splice_alias(inode, dentry);
}
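
The isofs changes follow the recurring pattern of this commit: add a mutex to the per-superblock info (s_mutex above), initialize it in fill_super, and take it wherever the code used to take the BKL. Sketched with the isofs names, where do_the_work() is a hypothetical stand-in for the protected body:

	/* Sketch of the lock_kernel() -> per-superblock mutex conversion. */
	static int example_isofs_op(struct inode *dir)
	{
		struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb);
		int ret;

		mutex_lock(&sbi->s_mutex);	/* was lock_kernel() */
		ret = do_the_work(dir);		/* hypothetical */
		mutex_unlock(&sbi->s_mutex);	/* was unlock_kernel() */
		return ret;
	}
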
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 96a685c550f..f9cd04db6ea 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -8,7 +8,6 @@
#include <linux/slab.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include "isofs.h"
#include "rock.h"
@@ -661,6 +660,7 @@ static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct iso_inode_info *ei = ISOFS_I(inode);
+ struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
char *link = kmap(page);
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
struct buffer_head *bh;
@@ -673,12 +673,12 @@ static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
struct rock_state rs;
int ret;
- if (!ISOFS_SB(inode->i_sb)->s_rock)
+ if (!sbi->s_rock)
goto error;
init_rock_state(&rs, inode);
block = ei->i_iget5_block;
- lock_kernel();
+ mutex_lock(&sbi->s_mutex);
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
@@ -748,7 +748,7 @@ repeat:
goto fail;
brelse(bh);
*rpnt = '\0';
- unlock_kernel();
+ mutex_unlock(&sbi->s_mutex);
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
@@ -765,7 +765,7 @@ out_bad_span:
printk("symlink spans iso9660 blocks\n");
fail:
brelse(bh);
- unlock_kernel();
+ mutex_unlock(&sbi->s_mutex);
error:
SetPageError(page);
kunmap(page);
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 95d8c11c929..85a6883c0ac 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -137,34 +137,10 @@ static int journal_write_commit_record(journal_t *journal,
JBUFFER_TRACE(descriptor, "write commit block");
set_buffer_dirty(bh);
- if (journal->j_flags & JFS_BARRIER) {
- ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
-
- /*
- * Is it possible for another commit to fail at roughly
- * the same time as this one? If so, we don't want to
- * trust the barrier flag in the super, but instead want
- * to remember if we sent a barrier request
- */
- if (ret == -EOPNOTSUPP) {
- char b[BDEVNAME_SIZE];
-
- printk(KERN_WARNING
- "JBD: barrier-based sync failed on %s - "
- "disabling barriers\n",
- bdevname(journal->j_dev, b));
- spin_lock(&journal->j_state_lock);
- journal->j_flags &= ~JFS_BARRIER;
- spin_unlock(&journal->j_state_lock);
-
- /* And try again, without the barrier */
- set_buffer_uptodate(bh);
- set_buffer_dirty(bh);
- ret = sync_dirty_buffer(bh);
- }
- } else {
+ if (journal->j_flags & JFS_BARRIER)
+ ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
+ else
ret = sync_dirty_buffer(bh);
- }
put_bh(bh); /* One for getblk() */
journal_put_journal_head(descriptor);
@@ -318,7 +294,7 @@ void journal_commit_transaction(journal_t *journal)
int first_tag = 0;
int tag_flag;
int i;
- int write_op = WRITE;
+ int write_op = WRITE_SYNC;
/*
* First job: lock down the current transaction and wait for
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 5247e7ffdcb..6571a056e55 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -532,8 +532,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
*/
if ((journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
- BLKDEV_IFL_WAIT);
+ blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
if (!(journal->j_flags & JBD2_ABORT))
jbd2_journal_update_superblock(journal, 1);
return 0;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 7c068c189d8..bc6be8bda1c 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -134,25 +134,11 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER &&
!JBD2_HAS_INCOMPAT_FEATURE(journal,
- JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
- ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
- if (ret == -EOPNOTSUPP) {
- printk(KERN_WARNING
- "JBD2: Disabling barriers on %s, "
- "not supported by device\n", journal->j_devname);
- write_lock(&journal->j_state_lock);
- journal->j_flags &= ~JBD2_BARRIER;
- write_unlock(&journal->j_state_lock);
-
- /* And try again, without the barrier */
- lock_buffer(bh);
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
- ret = submit_bh(WRITE_SYNC_PLUG, bh);
- }
- } else {
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
+ ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
+ else
ret = submit_bh(WRITE_SYNC_PLUG, bh);
- }
+
*cbh = bh;
return ret;
}
@@ -166,29 +152,8 @@ static int journal_wait_on_commit_record(journal_t *journal,
{
int ret = 0;
-retry:
clear_buffer_dirty(bh);
wait_on_buffer(bh);
- if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
- printk(KERN_WARNING
- "JBD2: %s: disabling barries on %s - not supported "
- "by device\n", __func__, journal->j_devname);
- write_lock(&journal->j_state_lock);
- journal->j_flags &= ~JBD2_BARRIER;
- write_unlock(&journal->j_state_lock);
-
- lock_buffer(bh);
- clear_buffer_dirty(bh);
- set_buffer_uptodate(bh);
- bh->b_end_io = journal_end_buffer_io_sync;
-
- ret = submit_bh(WRITE_SYNC_PLUG, bh);
- if (ret) {
- unlock_buffer(bh);
- return ret;
- }
- goto retry;
- }
if (unlikely(!buffer_uptodate(bh)))
ret = -EIO;
@@ -360,7 +325,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
int tag_bytes = journal_tag_bytes(journal);
struct buffer_head *cbh = NULL; /* For transactional checksums */
__u32 crc32_sum = ~0;
- int write_op = WRITE;
+ int write_op = WRITE_SYNC;
/*
* First job: lock down the current transaction and wait for
@@ -701,6 +666,16 @@ start_journal_io:
}
}
+ err = journal_finish_inode_data_buffers(journal, commit_transaction);
+ if (err) {
+ printk(KERN_WARNING
+ "JBD2: Detected IO errors while flushing file data "
+ "on %s\n", journal->j_devname);
+ if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
+ jbd2_journal_abort(journal, err);
+ err = 0;
+ }
+
/*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
@@ -709,8 +684,7 @@ start_journal_io:
if (commit_transaction->t_flushed_data_blocks &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
- BLKDEV_IFL_WAIT);
+ blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
/* Done it all: now write the commit record asynchronously. */
if (JBD2_HAS_INCOMPAT_FEATURE(journal,
@@ -719,19 +693,6 @@ start_journal_io:
&cbh, crc32_sum);
if (err)
__jbd2_journal_abort_hard(journal);
- if (journal->j_flags & JBD2_BARRIER)
- blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL,
- BLKDEV_IFL_WAIT);
- }
-
- err = journal_finish_inode_data_buffers(journal, commit_transaction);
- if (err) {
- printk(KERN_WARNING
- "JBD2: Detected IO errors while flushing file data "
- "on %s\n", journal->j_devname);
- if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
- jbd2_journal_abort(journal, err);
- err = 0;
}
/* Lo and behold: we have just managed to send a transaction to
@@ -845,6 +806,11 @@ wait_for_iobuf:
}
if (!err && !is_journal_aborted(journal))
err = journal_wait_on_commit_record(journal, cbh);
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal,
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
+ journal->j_flags & JBD2_BARRIER) {
+ blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
+ }
if (err)
jbd2_journal_abort(journal, err);
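
Read together, the jbd2 hunks also change the commit ordering: file data is now waited on before the commit record is submitted, and for async-commit journals the flush of the journal device moves to after the commit record has been waited on. A condensed restatement of the resulting order, using the file's own static helpers purely for readability (not compilable on its own; error handling and the non-async path are omitted):

	/* 1. ensure file data is on disk before writing the commit record */
	err = journal_finish_inode_data_buffers(journal, commit_transaction);

	/* 2. if the fs lives on a different device than the journal, flush it */
	if (commit_transaction->t_flushed_data_blocks &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);

	/* 3. submit the commit record (WRITE_FLUSH_FUA when barriers are on) */
	err = journal_submit_commit_record(journal, commit_transaction,
					   &cbh, crc32_sum);

	/* 4. wait for it, then flush the journal device so an async commit
	 *    record is really stable before the transaction is freed */
	err = journal_wait_on_commit_record(journal, cbh);
	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
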
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 6b2964a1985..d9beb06e6fc 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -21,7 +21,6 @@
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
-#include <linux/smp_lock.h>
#include "nodelist.h"
static int jffs2_flash_setup(struct jffs2_sb_info *c);
@@ -391,7 +390,6 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
This also catches the case where it was stopped and this
is just a remount to restart it.
Flush the writebuffer, if necessary, else we lose it */
- lock_kernel();
if (!(sb->s_flags & MS_RDONLY)) {
jffs2_stop_garbage_collect_thread(c);
mutex_lock(&c->alloc_sem);
@@ -403,8 +401,6 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
jffs2_start_garbage_collect_thread(c);
*flags |= MS_NOATIME;
-
- unlock_kernel();
return 0;
}
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 662bba09950..d1ae5dfc22b 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
@@ -146,6 +145,7 @@ static const struct super_operations jffs2_super_operations =
static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
{
struct jffs2_sb_info *c;
+ int ret;
D1(printk(KERN_DEBUG "jffs2_get_sb_mtd():"
" New superblock for device %d (\"%s\")\n",
@@ -175,7 +175,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
sb->s_flags |= MS_POSIXACL;
#endif
- return jffs2_do_fill_super(sb, data, silent);
+ ret = jffs2_do_fill_super(sb, data, silent);
+ return ret;
}
static int jffs2_get_sb(struct file_system_type *fs_type,
@@ -192,8 +193,6 @@ static void jffs2_put_super (struct super_block *sb)
D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
- lock_kernel();
-
if (sb->s_dirt)
jffs2_write_super(sb);
@@ -215,8 +214,6 @@ static void jffs2_put_super (struct super_block *sb)
if (c->mtd->sync)
c->mtd->sync(c->mtd);
- unlock_kernel();
-
D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
}
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index c51af2a1451..e1b8493b9aa 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1010,15 +1010,13 @@ static int lmLogSync(struct jfs_log * log, int hard_sync)
* option 2 - shutdown file systems
* associated with log ?
* option 3 - extend log ?
- */
- /*
* option 4 - second chance
*
* mark log wrapped, and continue.
* when all active transactions are completed,
- * mark log vaild for recovery.
+ * mark log valid for recovery.
* if crashed during invalid state, log state
- * implies invald log, forcing fsck().
+ * implies invalid log, forcing fsck().
*/
/* mark log state log wrap in log superblock */
/* log->state = LOGWRAP; */
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index 7b698f2ec45..9895595fd2f 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -97,7 +97,7 @@ int jfs_mount(struct super_block *sb)
ipaimap = diReadSpecial(sb, AGGREGATE_I, 0);
if (ipaimap == NULL) {
- jfs_err("jfs_mount: Faild to read AGGREGATE_I");
+ jfs_err("jfs_mount: Failed to read AGGREGATE_I");
rc = -EIO;
goto errout20;
}
@@ -148,7 +148,7 @@ int jfs_mount(struct super_block *sb)
if ((sbi->mntflag & JFS_BAD_SAIT) == 0) {
ipaimap2 = diReadSpecial(sb, AGGREGATE_I, 1);
if (!ipaimap2) {
- jfs_err("jfs_mount: Faild to read AGGREGATE_I");
+ jfs_err("jfs_mount: Failed to read AGGREGATE_I");
rc = -EIO;
goto errout35;
}
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index ec8c3e4baca..68eee2bf629 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -33,7 +33,6 @@
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
-#include <linux/smp_lock.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
@@ -176,8 +175,6 @@ static void jfs_put_super(struct super_block *sb)
dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
- lock_kernel();
-
rc = jfs_umount(sb);
if (rc)
jfs_err("jfs_umount failed with return code %d", rc);
@@ -188,8 +185,6 @@ static void jfs_put_super(struct super_block *sb)
iput(sbi->direct_inode);
kfree(sbi);
-
- unlock_kernel();
}
enum {
@@ -369,19 +364,16 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
if (!parse_options(data, sb, &newLVSize, &flag)) {
return -EINVAL;
}
- lock_kernel();
+
if (newLVSize) {
if (sb->s_flags & MS_RDONLY) {
printk(KERN_ERR
"JFS: resize requires volume to be mounted read-write\n");
- unlock_kernel();
return -EROFS;
}
rc = jfs_extendfs(sb, newLVSize, 0);
- if (rc) {
- unlock_kernel();
+ if (rc)
return rc;
- }
}
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
@@ -397,36 +389,30 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
/* mark the fs r/w for quota activity */
sb->s_flags &= ~MS_RDONLY;
- unlock_kernel();
dquot_resume(sb, -1);
return ret;
}
if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
rc = dquot_suspend(sb, -1);
if (rc < 0) {
- unlock_kernel();
return rc;
}
rc = jfs_umount_rw(sb);
JFS_SBI(sb)->flag = flag;
- unlock_kernel();
return rc;
}
if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
if (!(sb->s_flags & MS_RDONLY)) {
rc = jfs_umount_rw(sb);
- if (rc) {
- unlock_kernel();
+ if (rc)
return rc;
- }
+
JFS_SBI(sb)->flag = flag;
ret = jfs_mount_rw(sb, 1);
- unlock_kernel();
return ret;
}
JFS_SBI(sb)->flag = flag;
- unlock_kernel();
return 0;
}
@@ -446,6 +432,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
+
sb->s_fs_info = sbi;
sbi->sb = sb;
sbi->uid = sbi->gid = sbi->umask = -1;
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 64fd427c993..d5bb86866e6 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -42,6 +42,7 @@ struct nlm_wait {
};
static LIST_HEAD(nlm_blocked);
+static DEFINE_SPINLOCK(nlm_blocked_lock);
/**
* nlmclnt_init - Set up per-NFS mount point lockd data structures
@@ -97,7 +98,10 @@ struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *
block->b_lock = fl;
init_waitqueue_head(&block->b_wait);
block->b_status = nlm_lck_blocked;
+
+ spin_lock(&nlm_blocked_lock);
list_add(&block->b_list, &nlm_blocked);
+ spin_unlock(&nlm_blocked_lock);
}
return block;
}
@@ -106,7 +110,9 @@ void nlmclnt_finish_block(struct nlm_wait *block)
{
if (block == NULL)
return;
+ spin_lock(&nlm_blocked_lock);
list_del(&block->b_list);
+ spin_unlock(&nlm_blocked_lock);
kfree(block);
}
@@ -154,6 +160,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
* Look up blocked request based on arguments.
* Warning: must not use cookie to match it!
*/
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
struct file_lock *fl_blocked = block->b_lock;
@@ -178,6 +185,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock)
wake_up(&block->b_wait);
res = nlm_granted;
}
+ spin_unlock(&nlm_blocked_lock);
return res;
}
@@ -216,10 +224,6 @@ reclaimer(void *ptr)
allow_signal(SIGKILL);
down_write(&host->h_rwsem);
-
- /* This one ensures that our parent doesn't terminate while the
- * reclaim is in progress */
- lock_kernel();
lockd_up(); /* note: this cannot fail as lockd is already running */
dprintk("lockd: reclaiming locks for host %s\n", host->h_name);
@@ -260,16 +264,17 @@ restart:
dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);
/* Now, wake up all processes that sleep on a blocked lock */
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (block->b_host == host) {
block->b_status = nlm_lck_denied_grace_period;
wake_up(&block->b_wait);
}
}
+ spin_unlock(&nlm_blocked_lock);
/* Release host handle after use */
nlm_release_host(host);
lockd_down();
- unlock_kernel();
return 0;
}
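The clntlock.c hunks replace the big kernel lock with a dedicated nlm_blocked_lock spinlock that serializes every access to the global nlm_blocked list (insertion, removal, and both traversals). A userspace analogue of the same pattern, with a pthread mutex standing in for the spinlock and a hand-rolled singly linked list standing in for list_head (names are illustrative, not the lockd structures):

#include <pthread.h>
#include <stdlib.h>

struct blocked_req {
        struct blocked_req *next;
        int status;
};

static struct blocked_req *blocked_list;
static pthread_mutex_t blocked_lock = PTHREAD_MUTEX_INITIALIZER;

static void req_block(struct blocked_req *req)
{
        pthread_mutex_lock(&blocked_lock);      /* was: lock_kernel() around the caller */
        req->next = blocked_list;
        blocked_list = req;
        pthread_mutex_unlock(&blocked_lock);
}

static void req_unblock(struct blocked_req *req)
{
        pthread_mutex_lock(&blocked_lock);
        for (struct blocked_req **p = &blocked_list; *p; p = &(*p)->next) {
                if (*p == req) {
                        *p = req->next;
                        break;
                }
        }
        pthread_mutex_unlock(&blocked_lock);
        free(req);
}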
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 7932c399fab..47ea1e1925b 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -166,7 +166,6 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
- lock_kernel();
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
if (fl->fl_type != F_UNLCK) {
call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
@@ -177,10 +176,8 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
status = nlmclnt_test(call, fl);
else
status = -EINVAL;
-
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
- unlock_kernel();
dprintk("lockd: clnt proc returns %d\n", status);
return status;
@@ -226,9 +223,7 @@ void nlm_release_call(struct nlm_rqst *call)
static void nlmclnt_rpc_release(void *data)
{
- lock_kernel();
nlm_release_call(data);
- unlock_kernel();
}
static int nlm_wait_on_grace(wait_queue_head_t *queue)
@@ -448,14 +443,18 @@ out:
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
+ spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
+ spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}
static void nlmclnt_locks_release_private(struct file_lock *fl)
{
+ spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
list_del(&fl->fl_u.nfs_fl.list);
+ spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}
@@ -721,9 +720,7 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
die:
return;
retry_rebind:
- lock_kernel();
nlm_rebind_host(req->a_host);
- unlock_kernel();
retry_unlock:
rpc_restart_call(task);
}
@@ -801,9 +798,7 @@ retry_cancel:
/* Don't ever retry more than 3 times */
if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
goto die;
- lock_kernel();
nlm_rebind_host(req->a_host);
- unlock_kernel();
rpc_restart_call(task);
rpc_delay(task, 30 * HZ);
}
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index bb464d12104..25e21e4023b 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -353,6 +353,7 @@ nlm_bind_host(struct nlm_host *host)
.to_retries = 5U,
};
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = host->h_proto,
.address = nlm_addr(host),
.addrsize = host->h_addrlen,
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e3015464fba..e0c91894964 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -69,6 +69,7 @@ static struct rpc_clnt *nsm_create(void)
.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
};
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = XPRT_TRANSPORT_UDP,
.address = (struct sockaddr *)&sin,
.addrsize = sizeof(sin),
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index f1bacf1a039..b13aabc1229 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -206,7 +206,7 @@ static int create_lockd_listener(struct svc_serv *serv, const char *name,
xprt = svc_find_xprt(serv, name, family, 0);
if (xprt == NULL)
- return svc_create_xprt(serv, name, family, port,
+ return svc_create_xprt(serv, name, &init_net, family, port,
SVC_SOCK_DEFAULTS);
svc_xprt_put(xprt);
return 0;
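nlm_bind_host(), nsm_create() and create_lockd_listener() now pass the network namespace (&init_net) explicitly when creating RPC clients and transports, instead of leaving it implicit. A generic sketch of the same refactor, where the context becomes an explicit argument, using made-up names rather than the sunrpc API:

#include <stdio.h>

struct netns { int id; };
static struct netns init_net = { .id = 0 };

struct listener_args {
        const char *name;
        struct netns *net;      /* new field: which namespace owns the socket */
        int port;
};

static int create_listener(const struct listener_args *args)
{
        printf("listener %s:%d in netns %d\n", args->name, args->port, args->net->id);
        return 0;
}

int main(void)
{
        struct listener_args args = { .name = "tcp", .net = &init_net, .port = 0 };
        return create_listener(&args);
}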
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 031c6569a13..a336e832475 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -230,9 +230,7 @@ static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
static void nlm4svc_callback_release(void *data)
{
- lock_kernel();
nlm_release_call(data);
- unlock_kernel();
}
static const struct rpc_call_ops nlm4svc_callback_ops = {
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 84055d31bfc..6f1ef000975 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -52,12 +52,13 @@ static const struct rpc_call_ops nlmsvc_grant_ops;
* The list of blocked locks to retry
*/
static LIST_HEAD(nlm_blocked);
+static DEFINE_SPINLOCK(nlm_blocked_lock);
/*
* Insert a blocked lock into the global list
*/
static void
-nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
+nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
struct nlm_block *b;
struct list_head *pos;
@@ -87,6 +88,13 @@ nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
block->b_when = when;
}
+static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
+{
+ spin_lock(&nlm_blocked_lock);
+ nlmsvc_insert_block_locked(block, when);
+ spin_unlock(&nlm_blocked_lock);
+}
+
/*
* Remove a block from the global list
*/
@@ -94,7 +102,9 @@ static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
if (!list_empty(&block->b_list)) {
+ spin_lock(&nlm_blocked_lock);
list_del_init(&block->b_list);
+ spin_unlock(&nlm_blocked_lock);
nlmsvc_release_block(block);
}
}
@@ -651,7 +661,7 @@ static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
struct nlm_block *block;
int rc = -ENOENT;
- lock_kernel();
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
@@ -665,13 +675,13 @@ static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
} else if (result == 0)
block->b_granted = 1;
- nlmsvc_insert_block(block, 0);
+ nlmsvc_insert_block_locked(block, 0);
svc_wake_up(block->b_daemon);
rc = 0;
break;
}
}
- unlock_kernel();
+ spin_unlock(&nlm_blocked_lock);
if (rc == -ENOENT)
printk(KERN_WARNING "lockd: grant for unknown block\n");
return rc;
@@ -803,7 +813,7 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
dprintk("lockd: GRANT_MSG RPC callback\n");
- lock_kernel();
+ spin_lock(&nlm_blocked_lock);
/* if the block is not on a list at this point then it has
* been invalidated. Don't try to requeue it.
*
@@ -825,19 +835,20 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
/* Call was successful, now wait for client callback */
timeout = 60 * HZ;
}
- nlmsvc_insert_block(block, timeout);
+ nlmsvc_insert_block_locked(block, timeout);
svc_wake_up(block->b_daemon);
out:
- unlock_kernel();
+ spin_unlock(&nlm_blocked_lock);
}
+/*
+ * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
+ * .rpc_release rpc_call_op
+ */
static void nlmsvc_grant_release(void *data)
{
struct nlm_rqst *call = data;
-
- lock_kernel();
nlmsvc_release_block(call->a_block);
- unlock_kernel();
}
static const struct rpc_call_ops nlmsvc_grant_ops = {
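svclock.c follows a common lock-conversion idiom: the list-manipulating helper becomes nlmsvc_insert_block_locked(), documented as requiring nlm_blocked_lock, while a thin nlmsvc_insert_block() wrapper takes and drops the lock for callers that do not already hold it; nlmsvc_grant_deferred() and nlmsvc_grant_callback(), which now take the spinlock themselves, call the _locked variant directly. A compact sketch of that split, with hypothetical names and a pthread mutex in place of the spinlock:

#include <pthread.h>

static pthread_mutex_t blocked_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nblocked;

/* Caller must hold blocked_lock. */
static void insert_block_locked(unsigned long when)
{
        (void)when;             /* ordering by 'when' is omitted in this sketch */
        nblocked++;
}

/* Convenience wrapper for callers that do not hold the lock. */
static void insert_block(unsigned long when)
{
        pthread_mutex_lock(&blocked_lock);
        insert_block_locked(when);
        pthread_mutex_unlock(&blocked_lock);
}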
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0f2ab741ae7..c3069f38d60 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -260,9 +260,7 @@ static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
static void nlmsvc_callback_release(void *data)
{
- lock_kernel();
nlm_release_call(data);
- unlock_kernel();
}
static const struct rpc_call_ops nlmsvc_callback_ops = {
diff --git a/fs/locks.c b/fs/locks.c
index ab24d49fc04..8b2b6ad56a0 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -143,6 +143,22 @@ int lease_break_time = 45;
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
+/*
+ * Protects the two list heads above, plus the inode->i_flock list
+ * FIXME: should use a spinlock, once lockd and ceph are ready.
+ */
+void lock_flocks(void)
+{
+ lock_kernel();
+}
+EXPORT_SYMBOL_GPL(lock_flocks);
+
+void unlock_flocks(void)
+{
+ unlock_kernel();
+}
+EXPORT_SYMBOL_GPL(unlock_flocks);
+
static struct kmem_cache *filelock_cache __read_mostly;
/* Allocate an empty lock structure. */
@@ -511,9 +527,9 @@ static void __locks_delete_block(struct file_lock *waiter)
*/
static void locks_delete_block(struct file_lock *waiter)
{
- lock_kernel();
+ lock_flocks();
__locks_delete_block(waiter);
- unlock_kernel();
+ unlock_flocks();
}
/* Insert waiter into blocker's block list.
@@ -644,7 +660,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
{
struct file_lock *cfl;
- lock_kernel();
+ lock_flocks();
for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
if (!IS_POSIX(cfl))
continue;
@@ -657,7 +673,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
fl->fl_pid = pid_vnr(cfl->fl_nspid);
} else
fl->fl_type = F_UNLCK;
- unlock_kernel();
+ unlock_flocks();
return;
}
EXPORT_SYMBOL(posix_test_lock);
@@ -730,18 +746,16 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
int error = 0;
int found = 0;
- lock_kernel();
- if (request->fl_flags & FL_ACCESS)
- goto find_conflict;
-
- if (request->fl_type != F_UNLCK) {
- error = -ENOMEM;
+ if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
new_fl = locks_alloc_lock();
- if (new_fl == NULL)
- goto out;
- error = 0;
+ if (!new_fl)
+ return -ENOMEM;
}
+ lock_flocks();
+ if (request->fl_flags & FL_ACCESS)
+ goto find_conflict;
+
for_each_lock(inode, before) {
struct file_lock *fl = *before;
if (IS_POSIX(fl))
@@ -767,8 +781,11 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
* If a higher-priority process was blocked on the old file lock,
* give it the opportunity to lock the file.
*/
- if (found)
+ if (found) {
+ unlock_flocks();
cond_resched();
+ lock_flocks();
+ }
find_conflict:
for_each_lock(inode, before) {
@@ -794,7 +811,7 @@ find_conflict:
error = 0;
out:
- unlock_kernel();
+ unlock_flocks();
if (new_fl)
locks_free_lock(new_fl);
return error;
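flock_lock_file() now allocates the new lock before taking lock_flocks(), because the allocation can sleep while the intended replacement for the BKL, a spinlock, cannot be held across sleeping; for the same reason the lock is dropped and retaken around cond_resched(). A userspace sketch of the allocate-then-lock shape (illustrative names, pthread mutex in place of the file-lock lock):

#include <pthread.h>
#include <stdlib.h>

struct flock_entry { int type; };

static pthread_mutex_t flocks_lock = PTHREAD_MUTEX_INITIALIZER;

static int add_flock_sketch(int type)
{
        struct flock_entry *new_fl;

        new_fl = malloc(sizeof(*new_fl));       /* may block: done before taking the lock */
        if (!new_fl)
                return -1;                      /* -ENOMEM in the real code */

        pthread_mutex_lock(&flocks_lock);
        new_fl->type = type;                    /* ... link into the inode's lock list ... */
        pthread_mutex_unlock(&flocks_lock);
        return 0;
}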
@@ -823,7 +840,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
new_fl2 = locks_alloc_lock();
}
- lock_kernel();
+ lock_flocks();
if (request->fl_type != F_UNLCK) {
for_each_lock(inode, before) {
fl = *before;
@@ -991,7 +1008,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
locks_wake_up_blocks(left);
}
out:
- unlock_kernel();
+ unlock_flocks();
/*
* Free any unused locks.
*/
@@ -1066,14 +1083,14 @@ int locks_mandatory_locked(struct inode *inode)
/*
* Search the lock list for this inode for any POSIX locks.
*/
- lock_kernel();
+ lock_flocks();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!IS_POSIX(fl))
continue;
if (fl->fl_owner != owner)
break;
}
- unlock_kernel();
+ unlock_flocks();
return fl ? -EAGAIN : 0;
}
@@ -1186,7 +1203,7 @@ int __break_lease(struct inode *inode, unsigned int mode)
new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
- lock_kernel();
+ lock_flocks();
time_out_leases(inode);
@@ -1247,8 +1264,10 @@ restart:
break_time++;
}
locks_insert_block(flock, new_fl);
+ unlock_flocks();
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_next, break_time);
+ lock_flocks();
__locks_delete_block(new_fl);
if (error >= 0) {
if (error == 0)
@@ -1263,7 +1282,7 @@ restart:
}
out:
- unlock_kernel();
+ unlock_flocks();
if (!IS_ERR(new_fl))
locks_free_lock(new_fl);
return error;
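In __break_lease() the lock is now dropped explicitly before sleeping in wait_event_interruptible_timeout() and retaken afterwards, again so that lock_flocks() can eventually become a non-sleeping spinlock. A pthread condition variable shows the same unlock-wait-relock shape, since pthread_cond_wait() releases and reacquires the mutex internally (names illustrative):

#include <pthread.h>

static pthread_mutex_t flocks_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t lease_broken = PTHREAD_COND_INITIALIZER;
static int lease_count;

static void wait_for_lease_break(void)
{
        pthread_mutex_lock(&flocks_lock);
        while (lease_count)
                pthread_cond_wait(&lease_broken, &flocks_lock); /* drops and retakes the lock */
        pthread_mutex_unlock(&flocks_lock);
}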
@@ -1319,7 +1338,7 @@ int fcntl_getlease(struct file *filp)
struct file_lock *fl;
int type = F_UNLCK;
- lock_kernel();
+ lock_flocks();
time_out_leases(filp->f_path.dentry->d_inode);
for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
fl = fl->fl_next) {
@@ -1328,7 +1347,7 @@ int fcntl_getlease(struct file *filp)
break;
}
}
- unlock_kernel();
+ unlock_flocks();
return type;
}
@@ -1341,7 +1360,7 @@ int fcntl_getlease(struct file *filp)
* The (input) flp->fl_lmops->fl_break function is required
* by break_lease().
*
- * Called with kernel lock held.
+ * Called with file_lock_lock held.
*/
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
@@ -1436,7 +1455,15 @@ out:
}
EXPORT_SYMBOL(generic_setlease);
- /**
+static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
+{
+ if (filp->f_op && filp->f_op->setlease)
+ return filp->f_op->setlease(filp, arg, lease);
+ else
+ return generic_setlease(filp, arg, lease);
+}
+
+/**
* vfs_setlease - sets a lease on an open file
* @filp: file pointer
* @arg: type of lease to obtain
@@ -1467,12 +1494,9 @@ int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
{
int error;
- lock_kernel();
- if (filp->f_op && filp->f_op->setlease)
- error = filp->f_op->setlease(filp, arg, lease);
- else
- error = generic_setlease(filp, arg, lease);
- unlock_kernel();
+ lock_flocks();
+ error = __vfs_setlease(filp, arg, lease);
+ unlock_flocks();
return error;
}
@@ -1499,9 +1523,9 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
if (error)
return error;
- lock_kernel();
+ lock_flocks();
- error = vfs_setlease(filp, arg, &flp);
+ error = __vfs_setlease(filp, arg, &flp);
if (error || arg == F_UNLCK)
goto out_unlock;
@@ -1516,7 +1540,7 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
out_unlock:
- unlock_kernel();
+ unlock_flocks();
return error;
}
@@ -2020,7 +2044,7 @@ void locks_remove_flock(struct file *filp)
fl.fl_ops->fl_release_private(&fl);
}
- lock_kernel();
+ lock_flocks();
before = &inode->i_flock;
while ((fl = *before) != NULL) {
@@ -2038,7 +2062,7 @@ void locks_remove_flock(struct file *filp)
}
before = &fl->fl_next;
}
- unlock_kernel();
+ unlock_flocks();
}
/**
@@ -2053,12 +2077,12 @@ posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
int status = 0;
- lock_kernel();
+ lock_flocks();
if (waiter->fl_next)
__locks_delete_block(waiter);
else
status = -ENOENT;
- unlock_kernel();
+ unlock_flocks();
return status;
}
@@ -2172,7 +2196,7 @@ static int locks_show(struct seq_file *f, void *v)
static void *locks_start(struct seq_file *f, loff_t *pos)
{
- lock_kernel();
+ lock_flocks();
f->private = (void *)1;
return seq_list_start(&file_lock_list, *pos);
}
@@ -2184,7 +2208,7 @@ static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
static void locks_stop(struct seq_file *f, void *v)
{
- unlock_kernel();
+ unlock_flocks();
}
static const struct seq_operations locks_seq_operations = {
@@ -2231,7 +2255,7 @@ int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
struct file_lock *fl;
int result = 1;
- lock_kernel();
+ lock_flocks();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (IS_POSIX(fl)) {
if (fl->fl_type == F_RDLCK)
@@ -2248,7 +2272,7 @@ int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
result = 0;
break;
}
- unlock_kernel();
+ unlock_flocks();
return result;
}
@@ -2271,7 +2295,7 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
struct file_lock *fl;
int result = 1;
- lock_kernel();
+ lock_flocks();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (IS_POSIX(fl)) {
if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
@@ -2286,7 +2310,7 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
result = 0;
break;
}
- unlock_kernel();
+ unlock_flocks();
return result;
}
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 9777eb5b552..1eb4e89e045 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -827,4 +827,5 @@ const struct file_operations logfs_dir_fops = {
.unlocked_ioctl = logfs_ioctl,
.readdir = logfs_readdir,
.read = generic_read_dir,
+ .llseek = default_llseek,
};
diff --git a/fs/namespace.c b/fs/namespace.c
index a72eaabfe8f..7ca5182c0be 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1744,9 +1744,7 @@ static int do_new_mount(struct path *path, char *type, int flags,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- lock_kernel();
mnt = do_kern_mount(type, flags, name, data);
- unlock_kernel();
if (IS_ERR(mnt))
return PTR_ERR(mnt);
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 9578cbe0cd5..aac8832e919 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -95,6 +95,34 @@ const struct dentry_operations ncp_root_dentry_operations =
};
+#define ncp_namespace(i) (NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber])
+
+static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator)
+{
+#ifdef CONFIG_NCPFS_SMALLDOS
+ int ns = ncp_namespace(i);
+
+ if ((ns == NW_NS_DOS)
+#ifdef CONFIG_NCPFS_OS2_NS
+ || ((ns == NW_NS_OS2) && (nscreator == NW_NS_DOS))
+#endif /* CONFIG_NCPFS_OS2_NS */
+ )
+ return 0;
+#endif /* CONFIG_NCPFS_SMALLDOS */
+ return 1;
+}
+
+#define ncp_preserve_case(i) (ncp_namespace(i) != NW_NS_DOS)
+
+static inline int ncp_case_sensitive(struct dentry *dentry)
+{
+#ifdef CONFIG_NCPFS_NFS_NS
+ return ncp_namespace(dentry->d_inode) == NW_NS_NFS;
+#else
+ return 0;
+#endif /* CONFIG_NCPFS_NFS_NS */
+}
+
/*
* Note: leave the hash unchanged if the directory
* is case-sensitive.
@@ -102,13 +130,12 @@ const struct dentry_operations ncp_root_dentry_operations =
static int
ncp_hash_dentry(struct dentry *dentry, struct qstr *this)
{
- struct nls_table *t;
- unsigned long hash;
- int i;
-
- t = NCP_IO_TABLE(dentry);
+ if (!ncp_case_sensitive(dentry)) {
+ struct nls_table *t;
+ unsigned long hash;
+ int i;
- if (!ncp_case_sensitive(dentry->d_inode)) {
+ t = NCP_IO_TABLE(dentry);
hash = init_name_hash();
for (i=0; i<this->len ; i++)
hash = partial_name_hash(ncp_tolower(t, this->name[i]),
@@ -124,7 +151,7 @@ ncp_compare_dentry(struct dentry *dentry, struct qstr *a, struct qstr *b)
if (a->len != b->len)
return 1;
- if (ncp_case_sensitive(dentry->d_inode))
+ if (ncp_case_sensitive(dentry))
return strncmp(a->name, b->name, a->len);
return ncp_strnicmp(NCP_IO_TABLE(dentry), a->name, b->name, a->len);
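ncp_hash_dentry() and ncp_compare_dentry() now decide case sensitivity from the dentry's volume namespace and keep hashing and comparison consistent: a case-insensitive compare only works if the hash also folds case, otherwise d_lookup() never finds the alias. A small standalone sketch of such a matched hash/compare pair (illustrative names, not the ncpfs NLS helpers):

#include <ctype.h>
#include <stddef.h>
#include <strings.h>

/* Both routines must fold case the same way, or lookups will miss. */
static unsigned long ci_hash(const char *name, size_t len)
{
        unsigned long hash = 0;

        for (size_t i = 0; i < len; i++)
                hash = hash * 31 + (unsigned char)tolower((unsigned char)name[i]);
        return hash;
}

static int ci_compare(const char *a, const char *b, size_t len)
{
        return strncasecmp(a, b, len);  /* 0 on match, like ncp_strnicmp() */
}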
@@ -266,7 +293,7 @@ leave_me:;
static int
-__ncp_lookup_validate(struct dentry *dentry)
+ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
{
struct ncp_server *server;
struct dentry *parent;
@@ -283,9 +310,6 @@ __ncp_lookup_validate(struct dentry *dentry)
server = NCP_SERVER(dir);
- if (!ncp_conn_valid(server))
- goto finished;
-
/*
* Inspired by smbfs:
* The default validation is based on dentry age:
@@ -304,8 +328,11 @@ __ncp_lookup_validate(struct dentry *dentry)
if (ncp_is_server_root(dir)) {
res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, 1);
- if (!res)
+ if (!res) {
res = ncp_lookup_volume(server, __name, &(finfo.i));
+ if (!res)
+ ncp_update_known_namespace(server, finfo.i.volNumber, NULL);
+ }
} else {
res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
@@ -320,13 +347,17 @@ __ncp_lookup_validate(struct dentry *dentry)
* what we remember, it's not valid any more.
*/
if (!res) {
- if (finfo.i.dirEntNum == NCP_FINFO(dentry->d_inode)->dirEntNum) {
+ struct inode *inode = dentry->d_inode;
+
+ mutex_lock(&inode->i_mutex);
+ if (finfo.i.dirEntNum == NCP_FINFO(inode)->dirEntNum) {
ncp_new_dentry(dentry);
val=1;
} else
DDPRINTK("ncp_lookup_validate: found, but dirEntNum changed\n");
- ncp_update_inode2(dentry->d_inode, &finfo);
+ ncp_update_inode2(inode, &finfo);
+ mutex_unlock(&inode->i_mutex);
}
finished:
@@ -335,16 +366,6 @@ finished:
return val;
}
-static int
-ncp_lookup_validate(struct dentry * dentry, struct nameidata *nd)
-{
- int res;
- lock_kernel();
- res = __ncp_lookup_validate(dentry);
- unlock_kernel();
- return res;
-}
-
static struct dentry *
ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
{
@@ -411,8 +432,6 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
int result, mtime_valid = 0;
time_t mtime = 0;
- lock_kernel();
-
ctl.page = NULL;
ctl.cache = NULL;
@@ -421,6 +440,7 @@ static int ncp_readdir(struct file *filp, void *dirent, filldir_t filldir)
(int) filp->f_pos);
result = -EIO;
+ /* Do not generate '.' and '..' when server is dead. */
if (!ncp_conn_valid(server))
goto out;
@@ -532,6 +552,12 @@ read_really:
ctl.head.end = ctl.fpos - 1;
ctl.head.eof = ctl.valid;
finished:
+ if (ctl.page) {
+ kunmap(ctl.page);
+ SetPageUptodate(ctl.page);
+ unlock_page(ctl.page);
+ page_cache_release(ctl.page);
+ }
if (page) {
cache->head = ctl.head;
kunmap(page);
@@ -539,23 +565,17 @@ finished:
unlock_page(page);
page_cache_release(page);
}
- if (ctl.page) {
- kunmap(ctl.page);
- SetPageUptodate(ctl.page);
- unlock_page(ctl.page);
- page_cache_release(ctl.page);
- }
out:
- unlock_kernel();
return result;
}
static int
ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
- struct ncp_cache_control *ctrl, struct ncp_entry_info *entry)
+ struct ncp_cache_control *ctrl, struct ncp_entry_info *entry,
+ int inval_childs)
{
struct dentry *newdent, *dentry = filp->f_path.dentry;
- struct inode *newino, *inode = dentry->d_inode;
+ struct inode *dir = dentry->d_inode;
struct ncp_cache_control ctl = *ctrl;
struct qstr qname;
int valid = 0;
@@ -564,9 +584,9 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
__u8 __name[NCP_MAXPATHLEN + 1];
qname.len = sizeof(__name);
- if (ncp_vol2io(NCP_SERVER(inode), __name, &qname.len,
+ if (ncp_vol2io(NCP_SERVER(dir), __name, &qname.len,
entry->i.entryName, entry->i.nameLen,
- !ncp_preserve_entry_case(inode, entry->i.NSCreator)))
+ !ncp_preserve_entry_case(dir, entry->i.NSCreator)))
return 1; /* I'm not sure */
qname.name = __name;
@@ -584,22 +604,64 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
goto end_advance;
} else {
hashed = 1;
- memcpy((char *) newdent->d_name.name, qname.name,
- newdent->d_name.len);
+
+ /* If case sensitivity changed for this volume, all entries below this one
+ should be thrown away. This entry itself is not affected, as its case
+ sensitivity is controlled by its own parent. */
+ if (inval_childs)
+ shrink_dcache_parent(newdent);
+
+ /*
+ * It is not as dangerous as it looks. NetWare's OS2 namespace is
+ * case preserving yet case insensitive. So we update dentry's name
+ * as received from server. We found dentry via d_lookup with our
+ * hash, so we know that hash does not change, and so replacing name
+ * should be reasonably safe.
+ */
+ if (qname.len == newdent->d_name.len &&
+ memcmp(newdent->d_name.name, qname.name, newdent->d_name.len)) {
+ struct inode *inode = newdent->d_inode;
+
+ /*
+ * Inside ncpfs all uses of d_name are either for debugging,
+ * or on functions which acquire inode mutex (mknod, creat,
+ * lookup). So grab i_mutex here, to be sure. d_path
+ * uses dcache_lock when generating path, so we should too.
+ * And finally d_compare is protected by dentry's d_lock, so
+ * here we go.
+ */
+ if (inode)
+ mutex_lock(&inode->i_mutex);
+ spin_lock(&dcache_lock);
+ spin_lock(&newdent->d_lock);
+ memcpy((char *) newdent->d_name.name, qname.name,
+ newdent->d_name.len);
+ spin_unlock(&newdent->d_lock);
+ spin_unlock(&dcache_lock);
+ if (inode)
+ mutex_unlock(&inode->i_mutex);
+ }
}
if (!newdent->d_inode) {
+ struct inode *inode;
+
entry->opened = 0;
- entry->ino = iunique(inode->i_sb, 2);
- newino = ncp_iget(inode->i_sb, entry);
- if (newino) {
+ entry->ino = iunique(dir->i_sb, 2);
+ inode = ncp_iget(dir->i_sb, entry);
+ if (inode) {
newdent->d_op = &ncp_dentry_operations;
- d_instantiate(newdent, newino);
+ d_instantiate(newdent, inode);
if (!hashed)
d_rehash(newdent);
}
- } else
- ncp_update_inode2(newdent->d_inode, entry);
+ } else {
+ struct inode *inode = newdent->d_inode;
+
+ mutex_lock(&inode->i_mutex);
+ ncp_update_inode2(inode, entry);
+ mutex_unlock(&inode->i_mutex);
+ }
if (newdent->d_inode) {
ino = newdent->d_inode->i_ino;
@@ -617,7 +679,7 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
ctl.cache = NULL;
ctl.idx -= NCP_DIRCACHE_SIZE;
ctl.ofs += 1;
- ctl.page = grab_cache_page(&inode->i_data, ctl.ofs);
+ ctl.page = grab_cache_page(&dir->i_data, ctl.ofs);
if (ctl.page)
ctl.cache = kmap(ctl.page);
}
@@ -633,7 +695,7 @@ end_advance:
if (!ino)
ino = find_inode_number(dentry, &qname);
if (!ino)
- ino = iunique(inode->i_sb, 2);
+ ino = iunique(dir->i_sb, 2);
ctl.filled = filldir(dirent, qname.name, qname.len,
filp->f_pos, ino, DT_UNKNOWN);
if (!ctl.filled)
@@ -660,6 +722,7 @@ ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir,
(unsigned long) filp->f_pos);
for (i = 0; i < NCP_NUMBER_OF_VOLUMES; i++) {
+ int inval_dentry;
if (ncp_get_volume_info_with_number(server, i, &info) != 0)
return;
@@ -675,8 +738,9 @@ ncp_read_volume_list(struct file *filp, void *dirent, filldir_t filldir,
info.volume_name);
continue;
}
+ inval_dentry = ncp_update_known_namespace(server, entry.i.volNumber, NULL);
entry.volume = entry.i.volNumber;
- if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry))
+ if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, inval_dentry))
return;
}
}
@@ -739,7 +803,7 @@ ncp_do_readdir(struct file *filp, void *dirent, filldir_t filldir,
rpl += onerpl;
rpls -= onerpl;
entry.volume = entry.i.volNumber;
- if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry))
+ if (!ncp_fill_cache(filp, dirent, filldir, ctl, &entry, 0))
break;
}
} while (more);
@@ -775,17 +839,19 @@ int ncp_conn_logged_in(struct super_block *sb)
if (dent) {
struct inode* ino = dent->d_inode;
if (ino) {
+ ncp_update_known_namespace(server, volNumber, NULL);
NCP_FINFO(ino)->volNumber = volNumber;
NCP_FINFO(ino)->dirEntNum = dirEntNum;
NCP_FINFO(ino)->DosDirNum = DosDirNum;
+ result = 0;
} else {
DPRINTK("ncpfs: sb->s_root->d_inode == NULL!\n");
}
} else {
DPRINTK("ncpfs: sb->s_root == NULL!\n");
}
- }
- result = 0;
+ } else
+ result = 0;
out:
return result;
@@ -799,7 +865,6 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
int error, res, len;
__u8 __name[NCP_MAXPATHLEN + 1];
- lock_kernel();
error = -EIO;
if (!ncp_conn_valid(server))
goto finished;
@@ -813,6 +878,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
dentry->d_name.len, 1);
if (!res)
res = ncp_lookup_volume(server, __name, &(finfo.i));
+ if (!res)
+ ncp_update_known_namespace(server, finfo.i.volNumber, NULL);
} else {
res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
dentry->d_name.len, !ncp_preserve_case(dir));
@@ -846,7 +913,6 @@ add_entry:
finished:
PPRINTK("ncp_lookup: result=%d\n", error);
- unlock_kernel();
return ERR_PTR(error);
}
@@ -887,11 +953,6 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
dentry->d_parent->d_name.name, dentry->d_name.name, mode);
- error = -EIO;
- lock_kernel();
- if (!ncp_conn_valid(server))
- goto out;
-
ncp_age_dentry(server, dentry);
len = sizeof(__name);
error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
@@ -917,6 +978,8 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
if (result) {
if (result == 0x87)
error = -ENAMETOOLONG;
+ else if (result < 0)
+ error = result;
DPRINTK("ncp_create: %s/%s failed\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
goto out;
@@ -935,7 +998,6 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
error = ncp_instantiate(dir, dentry, &finfo);
out:
- unlock_kernel();
return error;
}
@@ -955,11 +1017,6 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
DPRINTK("ncp_mkdir: making %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
- error = -EIO;
- lock_kernel();
- if (!ncp_conn_valid(server))
- goto out;
-
ncp_age_dentry(server, dentry);
len = sizeof(__name);
error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
@@ -967,12 +1024,11 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
if (error)
goto out;
- error = -EACCES;
- if (ncp_open_create_file_or_subdir(server, dir, __name,
+ error = ncp_open_create_file_or_subdir(server, dir, __name,
OC_MODE_CREATE, aDIR,
cpu_to_le16(0xffff),
- &finfo) == 0)
- {
+ &finfo);
+ if (error == 0) {
if (ncp_is_nfs_extras(server, finfo.volume)) {
mode |= S_IFDIR;
finfo.i.nfs.mode = mode;
@@ -983,9 +1039,10 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
goto out;
}
error = ncp_instantiate(dir, dentry, &finfo);
+ } else if (error > 0) {
+ error = -EACCES;
}
out:
- unlock_kernel();
return error;
}
@@ -998,11 +1055,6 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
DPRINTK("ncp_rmdir: removing %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
- error = -EIO;
- lock_kernel();
- if (!ncp_conn_valid(server))
- goto out;
-
error = -EBUSY;
if (!d_unhashed(dentry))
goto out;
@@ -1036,11 +1088,10 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
error = -ENOENT;
break;
default:
- error = -EACCES;
+ error = result < 0 ? result : -EACCES;
break;
}
out:
- unlock_kernel();
return error;
}
@@ -1050,15 +1101,10 @@ static int ncp_unlink(struct inode *dir, struct dentry *dentry)
struct ncp_server *server;
int error;
- lock_kernel();
server = NCP_SERVER(dir);
DPRINTK("ncp_unlink: unlinking %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
- error = -EIO;
- if (!ncp_conn_valid(server))
- goto out;
-
/*
* Check whether to close the file ...
*/
@@ -1097,12 +1143,9 @@ static int ncp_unlink(struct inode *dir, struct dentry *dentry)
error = -ENOENT;
break;
default:
- error = -EACCES;
+ error = error < 0 ? error : -EACCES;
break;
}
-
-out:
- unlock_kernel();
return error;
}
@@ -1118,11 +1161,6 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
- error = -EIO;
- lock_kernel();
- if (!ncp_conn_valid(server))
- goto out;
-
ncp_age_dentry(server, old_dentry);
ncp_age_dentry(server, new_dentry);
@@ -1161,11 +1199,10 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
error = -ENOENT;
break;
default:
- error = -EACCES;
+ error = error < 0 ? error : -EACCES;
break;
}
out:
- unlock_kernel();
return error;
}
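Several ncpfs error paths above change from a blanket -EACCES to "error < 0 ? error : -EACCES", so that negative errnos coming back from the transport are propagated while positive NCP status codes still map to -EACCES. A hypothetical helper expressing that convention (not part of the patch):

#include <errno.h>

static int ncp_result_to_errno(int result)
{
        if (result <= 0)
                return result;          /* success, or an errno passed straight through */
        return -EACCES;                 /* any positive NCP status code */
}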
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 3639cc5cbda..6c754f70c52 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -113,9 +113,6 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
DPRINTK("ncp_file_read: enter %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
- if (!ncp_conn_valid(NCP_SERVER(inode)))
- return -EIO;
-
pos = *ppos;
if ((ssize_t) count < 0) {
@@ -192,13 +189,11 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
DPRINTK("ncp_file_write: enter %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
- if (!ncp_conn_valid(NCP_SERVER(inode)))
- return -EIO;
if ((ssize_t) count < 0)
return -EINVAL;
pos = *ppos;
if (file->f_flags & O_APPEND) {
- pos = inode->i_size;
+ pos = i_size_read(inode);
}
if (pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
@@ -264,8 +259,11 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
*ppos = pos;
- if (pos > inode->i_size) {
- inode->i_size = pos;
+ if (pos > i_size_read(inode)) {
+ mutex_lock(&inode->i_mutex);
+ if (pos > i_size_read(inode))
+ i_size_write(inode, pos);
+ mutex_unlock(&inode->i_mutex);
}
DPRINTK("ncp_file_write: exit %s/%s\n",
dentry->d_parent->d_name.name, dentry->d_name.name);
@@ -281,18 +279,9 @@ static int ncp_release(struct inode *inode, struct file *file) {
return 0;
}
-static loff_t ncp_remote_llseek(struct file *file, loff_t offset, int origin)
-{
- loff_t ret;
- lock_kernel();
- ret = generic_file_llseek_unlocked(file, offset, origin);
- unlock_kernel();
- return ret;
-}
-
const struct file_operations ncp_file_operations =
{
- .llseek = ncp_remote_llseek,
+ .llseek = generic_file_llseek,
.read = ncp_file_read,
.write = ncp_file_write,
.unlocked_ioctl = ncp_ioctl,
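ncp_file_write() now reads the size with i_size_read() and only takes i_mutex, re-checking under it, when the write may have extended the file; the update itself uses i_size_write(). A userspace sketch of that double-checked update (plain variables stand in for i_size_read()/i_size_write(), which additionally guard against torn 64-bit reads on 32-bit kernels):

#include <pthread.h>

static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;
static long long file_size;

static void maybe_extend(long long pos)
{
        if (pos > file_size) {                  /* unlocked fast-path check */
                pthread_mutex_lock(&i_mutex);
                if (pos > file_size)            /* another writer may have grown it meanwhile */
                        file_size = pos;
                pthread_mutex_unlock(&i_mutex);
        }
}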
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index b4de38cf49f..985fabb26ac 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -139,7 +139,7 @@ static void ncp_update_dates(struct inode *inode, struct nw_info_struct *nwi)
inode->i_mode = nwi->nfs.mode;
}
- inode->i_blocks = (inode->i_size + NCP_BLOCK_SIZE - 1) >> NCP_BLOCK_SHIFT;
+ inode->i_blocks = (i_size_read(inode) + NCP_BLOCK_SIZE - 1) >> NCP_BLOCK_SHIFT;
inode->i_mtime.tv_sec = ncp_date_dos2unix(nwi->modifyTime, nwi->modifyDate);
inode->i_ctime.tv_sec = ncp_date_dos2unix(nwi->creationTime, nwi->creationDate);
@@ -158,18 +158,21 @@ static void ncp_update_attrs(struct inode *inode, struct ncp_entry_info *nwinfo)
inode->i_mode = server->m.dir_mode;
/* for directories dataStreamSize seems to be some
Object ID ??? */
- inode->i_size = NCP_BLOCK_SIZE;
+ i_size_write(inode, NCP_BLOCK_SIZE);
} else {
+ u32 size;
+
inode->i_mode = server->m.file_mode;
- inode->i_size = le32_to_cpu(nwi->dataStreamSize);
+ size = le32_to_cpu(nwi->dataStreamSize);
+ i_size_write(inode, size);
#ifdef CONFIG_NCPFS_EXTRAS
if ((server->m.flags & (NCP_MOUNT_EXTRAS|NCP_MOUNT_SYMLINKS))
&& (nwi->attributes & aSHARED)) {
switch (nwi->attributes & (aHIDDEN|aSYSTEM)) {
case aHIDDEN:
if (server->m.flags & NCP_MOUNT_SYMLINKS) {
- if (/* (inode->i_size >= NCP_MIN_SYMLINK_SIZE)
- && */ (inode->i_size <= NCP_MAX_SYMLINK_SIZE)) {
+ if (/* (size >= NCP_MIN_SYMLINK_SIZE)
+ && */ (size <= NCP_MAX_SYMLINK_SIZE)) {
inode->i_mode = (inode->i_mode & ~S_IFMT) | S_IFLNK;
NCP_FINFO(inode)->flags |= NCPI_KLUDGE_SYMLINK;
break;
@@ -208,7 +211,7 @@ void ncp_update_inode2(struct inode* inode, struct ncp_entry_info *nwinfo)
}
/*
- * Fill in the inode based on the ncp_entry_info structure.
+ * Fill in the inode based on the ncp_entry_info structure. Used only for brand new inodes.
*/
static void ncp_set_attr(struct inode *inode, struct ncp_entry_info *nwinfo)
{
@@ -254,6 +257,7 @@ ncp_iget(struct super_block *sb, struct ncp_entry_info *info)
if (inode) {
atomic_set(&NCP_FINFO(inode)->opened, info->opened);
+ inode->i_mapping->backing_dev_info = sb->s_bdi;
inode->i_ino = info->ino;
ncp_set_attr(inode, info);
if (S_ISREG(inode->i_mode)) {
@@ -299,10 +303,12 @@ ncp_evict_inode(struct inode *inode)
static void ncp_stop_tasks(struct ncp_server *server) {
struct sock* sk = server->ncp_sock->sk;
-
+
+ lock_sock(sk);
sk->sk_error_report = server->error_report;
sk->sk_data_ready = server->data_ready;
sk->sk_write_space = server->write_space;
+ release_sock(sk);
del_timer_sync(&server->timeout_tm);
flush_scheduled_work();
}
@@ -565,10 +571,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
/* server->conn_status = 0; */
/* server->root_dentry = NULL; */
/* server->root_setuped = 0; */
+ mutex_init(&server->root_setup_lock);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
/* server->sign_wanted = 0; */
/* server->sign_active = 0; */
#endif
+ init_rwsem(&server->auth_rwsem);
server->auth.auth_type = NCP_AUTH_NONE;
/* server->auth.object_name_len = 0; */
/* server->auth.object_name = NULL; */
@@ -593,16 +601,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
server->nls_io = load_nls_default();
#endif /* CONFIG_NCPFS_NLS */
- server->dentry_ttl = 0; /* no caching */
+ atomic_set(&server->dentry_ttl, 0); /* no caching */
INIT_LIST_HEAD(&server->tx.requests);
mutex_init(&server->rcv.creq_mutex);
server->tx.creq = NULL;
server->rcv.creq = NULL;
- server->data_ready = sock->sk->sk_data_ready;
- server->write_space = sock->sk->sk_write_space;
- server->error_report = sock->sk->sk_error_report;
- sock->sk->sk_user_data = server;
init_timer(&server->timeout_tm);
#undef NCP_PACKET_SIZE
@@ -619,6 +623,11 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
if (server->rxbuf == NULL)
goto out_txbuf;
+ lock_sock(sock->sk);
+ server->data_ready = sock->sk->sk_data_ready;
+ server->write_space = sock->sk->sk_write_space;
+ server->error_report = sock->sk->sk_error_report;
+ sock->sk->sk_user_data = server;
sock->sk->sk_data_ready = ncp_tcp_data_ready;
sock->sk->sk_error_report = ncp_tcp_error_report;
if (sock->type == SOCK_STREAM) {
@@ -634,6 +643,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
server->timeout_tm.data = (unsigned long)server;
server->timeout_tm.function = ncpdgram_timeout_call;
}
+ release_sock(sock->sk);
ncp_lock_server(server);
error = ncp_connect(server);
@@ -658,8 +668,10 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
goto out_disconnect;
}
}
+ ncp_lock_server(server);
if (options & 2)
server->sign_wanted = 1;
+ ncp_unlock_server(server);
}
else
#endif /* CONFIG_NCPFS_PACKET_SIGNING */
@@ -720,6 +732,9 @@ out_nls:
unload_nls(server->nls_io);
unload_nls(server->nls_vol);
#endif
+ mutex_destroy(&server->rcv.creq_mutex);
+ mutex_destroy(&server->root_setup_lock);
+ mutex_destroy(&server->mutex);
out_fput2:
if (server->info_filp)
fput(server->info_filp);
@@ -743,8 +758,6 @@ static void ncp_put_super(struct super_block *sb)
{
struct ncp_server *server = NCP_SBP(sb);
- lock_kernel();
-
ncp_lock_server(server);
ncp_disconnect(server);
ncp_unlock_server(server);
@@ -756,6 +769,9 @@ static void ncp_put_super(struct super_block *sb)
unload_nls(server->nls_vol);
unload_nls(server->nls_io);
#endif /* CONFIG_NCPFS_NLS */
+ mutex_destroy(&server->rcv.creq_mutex);
+ mutex_destroy(&server->root_setup_lock);
+ mutex_destroy(&server->mutex);
if (server->info_filp)
fput(server->info_filp);
@@ -771,8 +787,6 @@ static void ncp_put_super(struct super_block *sb)
vfree(server->packet);
sb->s_fs_info = NULL;
kfree(server);
-
- unlock_kernel();
}
static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -851,10 +865,8 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
result = -EIO;
- lock_kernel();
-
server = NCP_SERVER(inode);
- if ((!server) || !ncp_conn_valid(server))
+ if (!server) /* How could this happen? */
goto out;
/* ageing the dentry to force validation */
@@ -981,8 +993,6 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
result = ncp_modify_file_or_subdir_dos_info(NCP_SERVER(inode),
inode, info_mask, &info);
if (result != 0) {
- result = -EACCES;
-
if (info_mask == (DM_CREATE_TIME | DM_CREATE_DATE)) {
/* NetWare seems not to allow this. I
do not know why. So, just tell the
@@ -1005,7 +1015,8 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
mark_inode_dirty(inode);
out:
- unlock_kernel();
+ if (result > 0)
+ result = -EACCES;
return result;
}
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 84a8cfc4e38..c2a1f9a155c 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -35,16 +35,11 @@
#define NCP_PACKET_SIZE_INTERNAL 65536
static int
-ncp_get_fs_info(struct ncp_server * server, struct file *file,
+ncp_get_fs_info(struct ncp_server * server, struct inode *inode,
struct ncp_fs_info __user *arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
struct ncp_fs_info info;
- if (file_permission(file, MAY_WRITE) != 0
- && current_uid() != server->m.mounted_uid)
- return -EACCES;
-
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
@@ -65,16 +60,11 @@ ncp_get_fs_info(struct ncp_server * server, struct file *file,
}
static int
-ncp_get_fs_info_v2(struct ncp_server * server, struct file *file,
+ncp_get_fs_info_v2(struct ncp_server * server, struct inode *inode,
struct ncp_fs_info_v2 __user * arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
struct ncp_fs_info_v2 info2;
- if (file_permission(file, MAY_WRITE) != 0
- && current_uid() != server->m.mounted_uid)
- return -EACCES;
-
if (copy_from_user(&info2, arg, sizeof(info2)))
return -EFAULT;
@@ -136,16 +126,11 @@ struct compat_ncp_privatedata_ioctl
#define NCP_IOC_SETPRIVATEDATA_32 _IOR('n', 10, struct compat_ncp_privatedata_ioctl)
static int
-ncp_get_compat_fs_info_v2(struct ncp_server * server, struct file *file,
+ncp_get_compat_fs_info_v2(struct ncp_server * server, struct inode *inode,
struct compat_ncp_fs_info_v2 __user * arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
struct compat_ncp_fs_info_v2 info2;
- if (file_permission(file, MAY_WRITE) != 0
- && current_uid() != server->m.mounted_uid)
- return -EACCES;
-
if (copy_from_user(&info2, arg, sizeof(info2)))
return -EFAULT;
@@ -182,11 +167,8 @@ ncp_set_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
struct nls_table *iocharset;
struct nls_table *oldset_io;
struct nls_table *oldset_cp;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (server->root_setuped)
- return -EBUSY;
+ int utf8;
+ int err;
if (copy_from_user(&user, arg, sizeof(user)))
return -EFAULT;
@@ -206,28 +188,40 @@ ncp_set_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
user.iocharset[NCP_IOCSNAME_LEN] = 0;
if (!user.iocharset[0] || !strcmp(user.iocharset, "default")) {
iocharset = load_nls_default();
- NCP_CLR_FLAG(server, NCP_FLAG_UTF8);
+ utf8 = 0;
} else if (!strcmp(user.iocharset, "utf8")) {
iocharset = load_nls_default();
- NCP_SET_FLAG(server, NCP_FLAG_UTF8);
+ utf8 = 1;
} else {
iocharset = load_nls(user.iocharset);
if (!iocharset) {
unload_nls(codepage);
return -EBADRQC;
}
- NCP_CLR_FLAG(server, NCP_FLAG_UTF8);
+ utf8 = 0;
}
- oldset_cp = server->nls_vol;
- server->nls_vol = codepage;
- oldset_io = server->nls_io;
- server->nls_io = iocharset;
-
+ mutex_lock(&server->root_setup_lock);
+ if (server->root_setuped) {
+ oldset_cp = codepage;
+ oldset_io = iocharset;
+ err = -EBUSY;
+ } else {
+ if (utf8)
+ NCP_SET_FLAG(server, NCP_FLAG_UTF8);
+ else
+ NCP_CLR_FLAG(server, NCP_FLAG_UTF8);
+ oldset_cp = server->nls_vol;
+ server->nls_vol = codepage;
+ oldset_io = server->nls_io;
+ server->nls_io = iocharset;
+ err = 0;
+ }
+ mutex_unlock(&server->root_setup_lock);
unload_nls(oldset_cp);
unload_nls(oldset_io);
- return 0;
+ return err;
}
static int
@@ -237,6 +231,7 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
int len;
memset(&user, 0, sizeof(user));
+ mutex_lock(&server->root_setup_lock);
if (server->nls_vol && server->nls_vol->charset) {
len = strlen(server->nls_vol->charset);
if (len > NCP_IOCSNAME_LEN)
@@ -254,6 +249,7 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
strncpy(user.iocharset, server->nls_io->charset, len);
user.iocharset[len] = 0;
}
+ mutex_unlock(&server->root_setup_lock);
if (copy_to_user(arg, &user, sizeof(user)))
return -EFAULT;
@@ -261,25 +257,19 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
}
#endif /* CONFIG_NCPFS_NLS */
-static long __ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = filp->f_dentry->d_inode;
struct ncp_server *server = NCP_SERVER(inode);
int result;
struct ncp_ioctl_request request;
char* bouncebuffer;
void __user *argp = (void __user *)arg;
- uid_t uid = current_uid();
switch (cmd) {
#ifdef CONFIG_COMPAT
case NCP_IOC_NCPREQUEST_32:
#endif
case NCP_IOC_NCPREQUEST:
- if (file_permission(filp, MAY_WRITE) != 0
- && uid != server->m.mounted_uid)
- return -EACCES;
-
#ifdef CONFIG_COMPAT
if (cmd == NCP_IOC_NCPREQUEST_32) {
struct compat_ncp_ioctl_request request32;
@@ -314,7 +304,7 @@ static long __ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
server->current_size = request.size;
memcpy(server->packet, bouncebuffer, request.size);
- result = ncp_request2(server, request.function,
+ result = ncp_request2(server, request.function,
bouncebuffer, NCP_PACKET_SIZE_INTERNAL);
if (result < 0)
result = -EIO;
@@ -331,69 +321,69 @@ static long __ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case NCP_IOC_CONN_LOGGED_IN:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
if (!(server->m.int_flags & NCP_IMOUNT_LOGGEDIN_POSSIBLE))
return -EINVAL;
+ mutex_lock(&server->root_setup_lock);
if (server->root_setuped)
- return -EBUSY;
- server->root_setuped = 1;
- return ncp_conn_logged_in(inode->i_sb);
+ result = -EBUSY;
+ else {
+ result = ncp_conn_logged_in(inode->i_sb);
+ if (result == 0)
+ server->root_setuped = 1;
+ }
+ mutex_unlock(&server->root_setup_lock);
+ return result;
case NCP_IOC_GET_FS_INFO:
- return ncp_get_fs_info(server, filp, argp);
+ return ncp_get_fs_info(server, inode, argp);
case NCP_IOC_GET_FS_INFO_V2:
- return ncp_get_fs_info_v2(server, filp, argp);
+ return ncp_get_fs_info_v2(server, inode, argp);
#ifdef CONFIG_COMPAT
case NCP_IOC_GET_FS_INFO_V2_32:
- return ncp_get_compat_fs_info_v2(server, filp, argp);
+ return ncp_get_compat_fs_info_v2(server, inode, argp);
#endif
/* we have too many combinations of CONFIG_COMPAT,
* CONFIG_64BIT and CONFIG_UID16, so just handle
* any of the possible ioctls */
case NCP_IOC_GETMOUNTUID16:
- case NCP_IOC_GETMOUNTUID32:
- case NCP_IOC_GETMOUNTUID64:
- if (file_permission(filp, MAY_READ) != 0
- && uid != server->m.mounted_uid)
- return -EACCES;
-
- if (cmd == NCP_IOC_GETMOUNTUID16) {
+ {
u16 uid;
+
SET_UID(uid, server->m.mounted_uid);
if (put_user(uid, (u16 __user *)argp))
return -EFAULT;
- } else if (cmd == NCP_IOC_GETMOUNTUID32) {
- if (put_user(server->m.mounted_uid,
- (u32 __user *)argp))
- return -EFAULT;
- } else {
- if (put_user(server->m.mounted_uid,
- (u64 __user *)argp))
- return -EFAULT;
+ return 0;
}
+ case NCP_IOC_GETMOUNTUID32:
+ if (put_user(server->m.mounted_uid,
+ (u32 __user *)argp))
+ return -EFAULT;
+ return 0;
+ case NCP_IOC_GETMOUNTUID64:
+ if (put_user(server->m.mounted_uid,
+ (u64 __user *)argp))
+ return -EFAULT;
return 0;
case NCP_IOC_GETROOT:
{
struct ncp_setroot_ioctl sr;
- if (file_permission(filp, MAY_READ) != 0
- && uid != server->m.mounted_uid)
- return -EACCES;
-
+ result = -EACCES;
+ mutex_lock(&server->root_setup_lock);
if (server->m.mounted_vol[0]) {
struct dentry* dentry = inode->i_sb->s_root;
if (dentry) {
struct inode* s_inode = dentry->d_inode;
-
+
if (s_inode) {
sr.volNumber = NCP_FINFO(s_inode)->volNumber;
sr.dirEntNum = NCP_FINFO(s_inode)->dirEntNum;
sr.namespace = server->name_space[sr.volNumber];
+ result = 0;
} else
DPRINTK("ncpfs: s_root->d_inode==NULL\n");
} else
@@ -402,10 +392,12 @@ static long __ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
sr.volNumber = -1;
sr.namespace = 0;
sr.dirEntNum = 0;
+ result = 0;
}
- if (copy_to_user(argp, &sr, sizeof(sr)))
- return -EFAULT;
- return 0;
+ mutex_unlock(&server->root_setup_lock);
+ if (!result && copy_to_user(argp, &sr, sizeof(sr)))
+ result = -EFAULT;
+ return result;
}
case NCP_IOC_SETROOT:
@@ -416,103 +408,114 @@ static long __ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
__le32 dosde;
struct dentry* dentry;
- if (!capable(CAP_SYS_ADMIN))
- {
- return -EACCES;
- }
- if (server->root_setuped) return -EBUSY;
if (copy_from_user(&sr, argp, sizeof(sr)))
return -EFAULT;
- if (sr.volNumber < 0) {
- server->m.mounted_vol[0] = 0;
- vnum = NCP_NUMBER_OF_VOLUMES;
- de = 0;
- dosde = 0;
- } else if (sr.volNumber >= NCP_NUMBER_OF_VOLUMES) {
- return -EINVAL;
- } else if (ncp_mount_subdir(server, sr.volNumber,
- sr.namespace, sr.dirEntNum,
- &vnum, &de, &dosde)) {
- return -ENOENT;
- }
-
- dentry = inode->i_sb->s_root;
- server->root_setuped = 1;
- if (dentry) {
- struct inode* s_inode = dentry->d_inode;
-
- if (s_inode) {
- NCP_FINFO(s_inode)->volNumber = vnum;
- NCP_FINFO(s_inode)->dirEntNum = de;
- NCP_FINFO(s_inode)->DosDirNum = dosde;
+ mutex_lock(&server->root_setup_lock);
+ if (server->root_setuped)
+ result = -EBUSY;
+ else {
+ if (sr.volNumber < 0) {
+ server->m.mounted_vol[0] = 0;
+ vnum = NCP_NUMBER_OF_VOLUMES;
+ de = 0;
+ dosde = 0;
+ result = 0;
+ } else if (sr.volNumber >= NCP_NUMBER_OF_VOLUMES) {
+ result = -EINVAL;
+ } else if (ncp_mount_subdir(server, sr.volNumber,
+ sr.namespace, sr.dirEntNum,
+ &vnum, &de, &dosde)) {
+ result = -ENOENT;
} else
- DPRINTK("ncpfs: s_root->d_inode==NULL\n");
- } else
- DPRINTK("ncpfs: s_root==NULL\n");
+ result = 0;
+
+ if (result == 0) {
+ dentry = inode->i_sb->s_root;
+ if (dentry) {
+ struct inode* s_inode = dentry->d_inode;
+
+ if (s_inode) {
+ NCP_FINFO(s_inode)->volNumber = vnum;
+ NCP_FINFO(s_inode)->dirEntNum = de;
+ NCP_FINFO(s_inode)->DosDirNum = dosde;
+ server->root_setuped = 1;
+ } else {
+ DPRINTK("ncpfs: s_root->d_inode==NULL\n");
+ result = -EIO;
+ }
+ } else {
+ DPRINTK("ncpfs: s_root==NULL\n");
+ result = -EIO;
+ }
+ }
+ result = 0;
+ }
+ mutex_unlock(&server->root_setup_lock);
- return 0;
+ return result;
}
-#ifdef CONFIG_NCPFS_PACKET_SIGNING
+#ifdef CONFIG_NCPFS_PACKET_SIGNING
case NCP_IOC_SIGN_INIT:
- if (file_permission(filp, MAY_WRITE) != 0
- && uid != server->m.mounted_uid)
- return -EACCES;
-
- if (argp) {
- if (server->sign_wanted)
- {
- struct ncp_sign_init sign;
+ {
+ struct ncp_sign_init sign;
+ if (argp)
if (copy_from_user(&sign, argp, sizeof(sign)))
return -EFAULT;
- memcpy(server->sign_root,sign.sign_root,8);
- memcpy(server->sign_last,sign.sign_last,16);
- server->sign_active = 1;
+ ncp_lock_server(server);
+ mutex_lock(&server->rcv.creq_mutex);
+ if (argp) {
+ if (server->sign_wanted) {
+ memcpy(server->sign_root,sign.sign_root,8);
+ memcpy(server->sign_last,sign.sign_last,16);
+ server->sign_active = 1;
+ }
+ /* ignore when signatures not wanted */
+ } else {
+ server->sign_active = 0;
}
- /* ignore when signatures not wanted */
- } else {
- server->sign_active = 0;
+ mutex_unlock(&server->rcv.creq_mutex);
+ ncp_unlock_server(server);
+ return 0;
}
- return 0;
-
+
case NCP_IOC_SIGN_WANTED:
- if (file_permission(filp, MAY_READ) != 0
- && uid != server->m.mounted_uid)
- return -EACCES;
-
- if (put_user(server->sign_wanted, (int __user *)argp))
- return -EFAULT;
- return 0;
+ {
+ int state;
+
+ ncp_lock_server(server);
+ state = server->sign_wanted;
+ ncp_unlock_server(server);
+ if (put_user(state, (int __user *)argp))
+ return -EFAULT;
+ return 0;
+ }
case NCP_IOC_SET_SIGN_WANTED:
{
int newstate;
- if (file_permission(filp, MAY_WRITE) != 0
- && uid != server->m.mounted_uid)
- return -EACCES;
-
/* get only low 8 bits... */
if (get_user(newstate, (unsigned char __user *)argp))
return -EFAULT;
+ result = 0;
+ ncp_lock_server(server);
if (server->sign_active) {
/* cannot turn signatures OFF when active */
- if (!newstate) return -EINVAL;
+ if (!newstate)
+ result = -EINVAL;
} else {
server->sign_wanted = newstate != 0;
}
- return 0;
+ ncp_unlock_server(server);
+ return result;
}
#endif /* CONFIG_NCPFS_PACKET_SIGNING */
#ifdef CONFIG_NCPFS_IOCTL_LOCKING
case NCP_IOC_LOCKUNLOCK:
- if (file_permission(filp, MAY_WRITE) != 0
- && uid != server->m.mounted_uid)
- return -EACCES;
-
{
struct ncp_lock_ioctl rqdata;
@@ -541,16 +544,13 @@ static long __ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
return result;
}
- result = -EIO;
- if (!ncp_conn_valid(server))
- goto outrel;
result = -EISDIR;
if (!S_ISREG(inode->i_mode))
goto outrel;
if (rqdata.cmd == NCP_LOCK_CLEAR)
{
result = ncp_ClearPhysicalRecord(NCP_SERVER(inode),
- NCP_FINFO(inode)->file_handle,
+ NCP_FINFO(inode)->file_handle,
rqdata.offset,
rqdata.length);
if (result > 0) result = 0; /* no such lock */
@@ -573,7 +573,7 @@ static long __ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
rqdata.timeout);
if (result > 0) result = -EAGAIN;
}
-outrel:
+outrel:
ncp_inode_close(inode);
return result;
}
@@ -581,60 +581,62 @@ outrel:
#ifdef CONFIG_COMPAT
case NCP_IOC_GETOBJECTNAME_32:
- if (uid != server->m.mounted_uid)
- return -EACCES;
{
struct compat_ncp_objectname_ioctl user;
size_t outl;
if (copy_from_user(&user, argp, sizeof(user)))
return -EFAULT;
+ down_read(&server->auth_rwsem);
user.auth_type = server->auth.auth_type;
outl = user.object_name_len;
user.object_name_len = server->auth.object_name_len;
if (outl > user.object_name_len)
outl = user.object_name_len;
+ result = 0;
if (outl) {
if (copy_to_user(compat_ptr(user.object_name),
server->auth.object_name,
- outl)) return -EFAULT;
+ outl))
+ result = -EFAULT;
}
- if (copy_to_user(argp, &user, sizeof(user)))
- return -EFAULT;
- return 0;
+ up_read(&server->auth_rwsem);
+ if (!result && copy_to_user(argp, &user, sizeof(user)))
+ result = -EFAULT;
+ return result;
}
#endif
case NCP_IOC_GETOBJECTNAME:
- if (uid != server->m.mounted_uid)
- return -EACCES;
{
struct ncp_objectname_ioctl user;
size_t outl;
if (copy_from_user(&user, argp, sizeof(user)))
return -EFAULT;
+ down_read(&server->auth_rwsem);
user.auth_type = server->auth.auth_type;
outl = user.object_name_len;
user.object_name_len = server->auth.object_name_len;
if (outl > user.object_name_len)
outl = user.object_name_len;
+ result = 0;
if (outl) {
if (copy_to_user(user.object_name,
server->auth.object_name,
- outl)) return -EFAULT;
+ outl))
+ result = -EFAULT;
}
- if (copy_to_user(argp, &user, sizeof(user)))
- return -EFAULT;
- return 0;
+ up_read(&server->auth_rwsem);
+ if (!result && copy_to_user(argp, &user, sizeof(user)))
+ result = -EFAULT;
+ return result;
}
#ifdef CONFIG_COMPAT
case NCP_IOC_SETOBJECTNAME_32:
#endif
case NCP_IOC_SETOBJECTNAME:
- if (uid != server->m.mounted_uid)
- return -EACCES;
{
struct ncp_objectname_ioctl user;
void* newname;
@@ -666,9 +668,7 @@ outrel:
} else {
newname = NULL;
}
- /* enter critical section */
- /* maybe that kfree can sleep so do that this way */
- /* it is at least more SMP friendly (in future...) */
+ down_write(&server->auth_rwsem);
oldname = server->auth.object_name;
oldnamelen = server->auth.object_name_len;
oldprivate = server->priv.data;
@@ -678,7 +678,7 @@ outrel:
server->auth.object_name = newname;
server->priv.len = 0;
server->priv.data = NULL;
- /* leave critical section */
+ up_write(&server->auth_rwsem);
kfree(oldprivate);
kfree(oldname);
return 0;
@@ -688,8 +688,6 @@ outrel:
case NCP_IOC_GETPRIVATEDATA_32:
#endif
case NCP_IOC_GETPRIVATEDATA:
- if (uid != server->m.mounted_uid)
- return -EACCES;
{
struct ncp_privatedata_ioctl user;
size_t outl;
@@ -706,14 +704,20 @@ outrel:
if (copy_from_user(&user, argp, sizeof(user)))
return -EFAULT;
+ down_read(&server->auth_rwsem);
outl = user.len;
user.len = server->priv.len;
if (outl > user.len) outl = user.len;
+ result = 0;
if (outl) {
if (copy_to_user(user.data,
server->priv.data,
- outl)) return -EFAULT;
+ outl))
+ result = -EFAULT;
}
+ up_read(&server->auth_rwsem);
+ if (result)
+ return result;
#ifdef CONFIG_COMPAT
if (cmd == NCP_IOC_GETPRIVATEDATA_32) {
struct compat_ncp_privatedata_ioctl user32;
@@ -733,8 +737,6 @@ outrel:
case NCP_IOC_SETPRIVATEDATA_32:
#endif
case NCP_IOC_SETPRIVATEDATA:
- if (uid != server->m.mounted_uid)
- return -EACCES;
{
struct ncp_privatedata_ioctl user;
void* new;
@@ -762,12 +764,12 @@ outrel:
} else {
new = NULL;
}
- /* enter critical section */
+ down_write(&server->auth_rwsem);
old = server->priv.data;
oldlen = server->priv.len;
server->priv.len = user.len;
server->priv.data = new;
- /* leave critical section */
+ up_write(&server->auth_rwsem);
kfree(old);
return 0;
}
@@ -775,17 +777,13 @@ outrel:
#ifdef CONFIG_NCPFS_NLS
case NCP_IOC_SETCHARSETS:
return ncp_set_charsets(server, argp);
-
+
case NCP_IOC_GETCHARSETS:
return ncp_get_charsets(server, argp);
#endif /* CONFIG_NCPFS_NLS */
case NCP_IOC_SETDENTRYTTL:
- if (file_permission(filp, MAY_WRITE) != 0 &&
- uid != server->m.mounted_uid)
- return -EACCES;
-
{
u_int32_t user;
@@ -795,13 +793,13 @@ outrel:
if (user > 20000)
return -EINVAL;
user = (user * HZ) / 1000;
- server->dentry_ttl = user;
+ atomic_set(&server->dentry_ttl, user);
return 0;
}
-
+
case NCP_IOC_GETDENTRYTTL:
{
- u_int32_t user = (server->dentry_ttl * 1000) / HZ;
+ u_int32_t user = (atomic_read(&server->dentry_ttl) * 1000) / HZ;
if (copy_to_user(argp, &user, sizeof(user)))
return -EFAULT;
return 0;
@@ -811,59 +809,103 @@ outrel:
return -EINVAL;
}
-static int ncp_ioctl_need_write(unsigned int cmd)
+long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct ncp_server *server = NCP_SERVER(inode);
+ uid_t uid = current_uid();
+ int need_drop_write = 0;
+ long ret;
+
switch (cmd) {
- case NCP_IOC_GET_FS_INFO:
- case NCP_IOC_GET_FS_INFO_V2:
- case NCP_IOC_NCPREQUEST:
- case NCP_IOC_SETDENTRYTTL:
- case NCP_IOC_SIGN_INIT:
- case NCP_IOC_LOCKUNLOCK:
- case NCP_IOC_SET_SIGN_WANTED:
- return 1;
- case NCP_IOC_GETOBJECTNAME:
- case NCP_IOC_SETOBJECTNAME:
- case NCP_IOC_GETPRIVATEDATA:
- case NCP_IOC_SETPRIVATEDATA:
case NCP_IOC_SETCHARSETS:
- case NCP_IOC_GETCHARSETS:
case NCP_IOC_CONN_LOGGED_IN:
- case NCP_IOC_GETDENTRYTTL:
- case NCP_IOC_GETMOUNTUID2:
- case NCP_IOC_SIGN_WANTED:
- case NCP_IOC_GETROOT:
case NCP_IOC_SETROOT:
- return 0;
- default:
- /* unknown IOCTL command, assume write */
- return 1;
+ if (!capable(CAP_SYS_ADMIN)) {
+ ret = -EACCES;
+ goto out;
+ }
+ break;
}
-}
-
-long ncp_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- lock_kernel();
- if (ncp_ioctl_need_write(cmd)) {
+ if (server->m.mounted_uid != uid) {
+ switch (cmd) {
/*
- * inside the ioctl(), any failures which
- * are because of file_permission() are
- * -EACCESS, so it seems consistent to keep
- * that here.
+		 * Only the mount owner can issue these ioctls. The
+		 * information needed to authenticate to other NDS
+		 * servers is stored here.
*/
- if (mnt_want_write(filp->f_path.mnt)) {
+ case NCP_IOC_GETOBJECTNAME:
+ case NCP_IOC_SETOBJECTNAME:
+ case NCP_IOC_GETPRIVATEDATA:
+ case NCP_IOC_SETPRIVATEDATA:
+#ifdef CONFIG_COMPAT
+ case NCP_IOC_GETOBJECTNAME_32:
+ case NCP_IOC_SETOBJECTNAME_32:
+ case NCP_IOC_GETPRIVATEDATA_32:
+ case NCP_IOC_SETPRIVATEDATA_32:
+#endif
ret = -EACCES;
goto out;
+ /*
+	 * These require write access on the inode if the user id
+	 * does not match. Note that they do not actually write to
+	 * the file, but the old code did mnt_want_write, so that is
+	 * kept as is. The mount owner is exempt, since requiring
+	 * write access would break read-only mounts altogether:
+	 * ncpmount needs working NCP_IOC_NCPREQUEST and
+	 * NCP_IOC_GET_FS_INFO. Some of these codes (setdentryttl,
+	 * signinit, setsignwanted) should probably be restricted to
+	 * the owner only, or even to CAP_SYS_ADMIN.
+ */
+ case NCP_IOC_GET_FS_INFO:
+ case NCP_IOC_GET_FS_INFO_V2:
+ case NCP_IOC_NCPREQUEST:
+ case NCP_IOC_SETDENTRYTTL:
+ case NCP_IOC_SIGN_INIT:
+ case NCP_IOC_LOCKUNLOCK:
+ case NCP_IOC_SET_SIGN_WANTED:
+#ifdef CONFIG_COMPAT
+ case NCP_IOC_GET_FS_INFO_V2_32:
+ case NCP_IOC_NCPREQUEST_32:
+#endif
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ goto out;
+ need_drop_write = 1;
+ ret = inode_permission(inode, MAY_WRITE);
+ if (ret)
+ goto outDropWrite;
+ break;
+ /*
+ * Read access required.
+ */
+ case NCP_IOC_GETMOUNTUID16:
+ case NCP_IOC_GETMOUNTUID32:
+ case NCP_IOC_GETMOUNTUID64:
+ case NCP_IOC_GETROOT:
+ case NCP_IOC_SIGN_WANTED:
+ ret = inode_permission(inode, MAY_READ);
+ if (ret)
+ goto out;
+ break;
+ /*
+ * Anybody can read these.
+ */
+ case NCP_IOC_GETCHARSETS:
+ case NCP_IOC_GETDENTRYTTL:
+ default:
+ /* Three codes below are protected by CAP_SYS_ADMIN above. */
+ case NCP_IOC_SETCHARSETS:
+ case NCP_IOC_CONN_LOGGED_IN:
+ case NCP_IOC_SETROOT:
+ break;
}
}
- ret = __ncp_ioctl(filp, cmd, arg);
- if (ncp_ioctl_need_write(cmd))
+ ret = __ncp_ioctl(inode, cmd, arg);
+outDropWrite:
+ if (need_drop_write)
mnt_drop_write(filp->f_path.mnt);
-
out:
- unlock_kernel();
return ret;
}
@@ -872,10 +914,8 @@ long ncp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
- lock_kernel();
arg = (unsigned long) compat_ptr(arg);
ret = ncp_ioctl(file, cmd, arg);
- unlock_kernel();
return ret;
}
#endif
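
For orientation: the rewritten ncp_ioctl() above replaces the old ncp_ioctl_need_write() heuristic with an explicit classification of every command: a CAP_SYS_ADMIN gate for the ioctls that change global connection state, owner-only access to the NDS authentication data, an mnt_want_write()/inode_permission(MAY_WRITE) pair for the request-issuing ioctls, and MAY_READ for the purely informational ones. A minimal sketch of that dispatch shape, assuming hypothetical example_classify(), example_mount_owner() and example_do_ioctl() helpers in place of the open-coded case lists:

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cred.h>

enum example_class { EX_ADMIN_ONLY, EX_OWNER_ONLY, EX_NEEDS_WRITE, EX_NEEDS_READ, EX_ANY };

/* hypothetical helpers standing in for the open-coded case lists above */
static enum example_class example_classify(unsigned int cmd);
static uid_t example_mount_owner(struct file *filp);
static long example_do_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg);

static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int need_drop_write = 0;
	long ret = 0;

	switch (example_classify(cmd)) {
	case EX_ADMIN_ONLY:			/* global state: charsets, root, login */
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		break;
	case EX_OWNER_ONLY:			/* NDS credentials */
		if (example_mount_owner(filp) != current_uid())
			return -EACCES;
		break;
	case EX_NEEDS_WRITE:			/* request-issuing ioctls */
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
		need_drop_write = 1;
		ret = inode_permission(inode, MAY_WRITE);
		if (ret)
			goto out;
		break;
	case EX_NEEDS_READ:			/* informational ioctls */
		ret = inode_permission(inode, MAY_READ);
		if (ret)
			return ret;
		break;
	default:				/* EX_ANY: no check needed */
		break;
	}
	ret = example_do_ioctl(inode, cmd, arg);
out:
	if (need_drop_write)
		mnt_drop_write(filp->f_path.mnt);
	return ret;
}

The real function additionally skips the per-inode checks when the caller is the mount owner (the server->m.mounted_uid test above); the sketch folds that into the owner-only class for brevity.
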
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 0ec6237a597..a95615a0b6a 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -107,17 +107,17 @@ ncp_reply_data(struct ncp_server *server, int offset)
return &(server->packet[sizeof(struct ncp_reply_header) + offset]);
}
-static inline u8 BVAL(void *data)
+static inline u8 BVAL(const void *data)
{
- return *(u8 *)data;
+ return *(const u8 *)data;
}
static u8 ncp_reply_byte(struct ncp_server *server, int offset)
{
- return *(u8 *)ncp_reply_data(server, offset);
+ return *(const u8 *)ncp_reply_data(server, offset);
}
-static inline u16 WVAL_LH(void *data)
+static inline u16 WVAL_LH(const void *data)
{
return get_unaligned_le16(data);
}
@@ -134,7 +134,7 @@ ncp_reply_be16(struct ncp_server *server, int offset)
return get_unaligned_be16(ncp_reply_data(server, offset));
}
-static inline u32 DVAL_LH(void *data)
+static inline u32 DVAL_LH(const void *data)
{
return get_unaligned_le32(data);
}
@@ -349,9 +349,9 @@ int ncp_dirhandle_free(struct ncp_server* server, __u8 dirhandle) {
return result;
}
-void ncp_extract_file_info(void *structure, struct nw_info_struct *target)
+void ncp_extract_file_info(const void *structure, struct nw_info_struct *target)
{
- __u8 *name_len;
+ const __u8 *name_len;
const int info_struct_size = offsetof(struct nw_info_struct, nameLen);
memcpy(target, structure, info_struct_size);
@@ -364,7 +364,7 @@ void ncp_extract_file_info(void *structure, struct nw_info_struct *target)
}
#ifdef CONFIG_NCPFS_NFS_NS
-static inline void ncp_extract_nfs_info(unsigned char *structure,
+static inline void ncp_extract_nfs_info(const unsigned char *structure,
struct nw_nfs_info *target)
{
target->mode = DVAL_LH(structure);
@@ -417,7 +417,7 @@ int ncp_obtain_nfs_info(struct ncp_server *server,
* Returns information for a (one-component) name relative to
* the specified directory.
*/
-int ncp_obtain_info(struct ncp_server *server, struct inode *dir, char *path,
+int ncp_obtain_info(struct ncp_server *server, struct inode *dir, const char *path,
struct nw_info_struct *target)
{
__u8 volnum = NCP_FINFO(dir)->volNumber;
@@ -452,16 +452,16 @@ out:
#ifdef CONFIG_NCPFS_NFS_NS
static int
ncp_obtain_DOS_dir_base(struct ncp_server *server,
- __u8 volnum, __le32 dirent,
- char *path, /* At most 1 component */
+ __u8 ns, __u8 volnum, __le32 dirent,
+ const char *path, /* At most 1 component */
__le32 *DOS_dir_base)
{
int result;
ncp_init_request(server);
ncp_add_byte(server, 6); /* subfunction */
- ncp_add_byte(server, server->name_space[volnum]);
- ncp_add_byte(server, server->name_space[volnum]);
+ ncp_add_byte(server, ns);
+ ncp_add_byte(server, ns);
ncp_add_word(server, cpu_to_le16(0x8006)); /* get all */
ncp_add_dword(server, RIM_DIRECTORY);
ncp_add_handle_path(server, volnum, dirent, 1, path);
@@ -523,10 +523,27 @@ ncp_get_known_namespace(struct ncp_server *server, __u8 volume)
#endif /* defined(CONFIG_NCPFS_OS2_NS) || defined(CONFIG_NCPFS_NFS_NS) */
}
+int
+ncp_update_known_namespace(struct ncp_server *server, __u8 volume, int *ret_ns)
+{
+ int ns = ncp_get_known_namespace(server, volume);
+
+ if (ret_ns)
+ *ret_ns = ns;
+
+ DPRINTK("lookup_vol: namespace[%d] = %d\n",
+ volume, server->name_space[volume]);
+
+ if (server->name_space[volume] == ns)
+ return 0;
+ server->name_space[volume] = ns;
+ return 1;
+}
+
static int
ncp_ObtainSpecificDirBase(struct ncp_server *server,
__u8 nsSrc, __u8 nsDst, __u8 vol_num, __le32 dir_base,
- char *path, /* At most 1 component */
+ const char *path, /* At most 1 component */
__le32 *dirEntNum, __le32 *DosDirNum)
{
int result;
@@ -560,14 +577,13 @@ ncp_mount_subdir(struct ncp_server *server,
{
int dstNS;
int result;
-
- dstNS = ncp_get_known_namespace(server, volNumber);
+
+ ncp_update_known_namespace(server, volNumber, &dstNS);
if ((result = ncp_ObtainSpecificDirBase(server, srcNS, dstNS, volNumber,
dirEntNum, NULL, newDirEnt, newDosEnt)) != 0)
{
return result;
}
- server->name_space[volNumber] = dstNS;
*volume = volNumber;
server->m.mounted_vol[1] = 0;
server->m.mounted_vol[0] = 'X';
@@ -575,11 +591,10 @@ ncp_mount_subdir(struct ncp_server *server,
}
int
-ncp_get_volume_root(struct ncp_server *server, const char *volname,
- __u32* volume, __le32* dirent, __le32* dosdirent)
+ncp_get_volume_root(struct ncp_server *server,
+ const char *volname, __u32* volume, __le32* dirent, __le32* dosdirent)
{
int result;
- __u8 volnum;
DPRINTK("ncp_get_volume_root: looking up vol %s\n", volname);
@@ -601,21 +616,14 @@ ncp_get_volume_root(struct ncp_server *server, const char *volname,
return result;
}
*dirent = *dosdirent = ncp_reply_dword(server, 4);
- volnum = ncp_reply_byte(server, 8);
+ *volume = ncp_reply_byte(server, 8);
ncp_unlock_server(server);
- *volume = volnum;
-
- server->name_space[volnum] = ncp_get_known_namespace(server, volnum);
-
- DPRINTK("lookup_vol: namespace[%d] = %d\n",
- volnum, server->name_space[volnum]);
-
return 0;
}
int
-ncp_lookup_volume(struct ncp_server *server, const char *volname,
- struct nw_info_struct *target)
+ncp_lookup_volume(struct ncp_server *server,
+ const char *volname, struct nw_info_struct *target)
{
int result;
@@ -625,6 +633,7 @@ ncp_lookup_volume(struct ncp_server *server, const char *volname,
if (result) {
return result;
}
+ ncp_update_known_namespace(server, target->volNumber, NULL);
target->nameLen = strlen(volname);
memcpy(target->entryName, volname, target->nameLen+1);
target->attributes = aDIR;
@@ -676,8 +685,8 @@ int ncp_modify_nfs_info(struct ncp_server *server, __u8 volnum, __le32 dirent,
{
int result = 0;
+ ncp_init_request(server);
if (server->name_space[volnum] == NW_NS_NFS) {
- ncp_init_request(server);
ncp_add_byte(server, 25); /* subfunction */
ncp_add_byte(server, server->name_space[volnum]);
ncp_add_byte(server, NW_NS_NFS);
@@ -690,8 +699,8 @@ int ncp_modify_nfs_info(struct ncp_server *server, __u8 volnum, __le32 dirent,
ncp_add_dword_lh(server, 1); /* nlinks */
ncp_add_dword_lh(server, rdev);
result = ncp_request(server, 87);
- ncp_unlock_server(server);
}
+ ncp_unlock_server(server);
return result;
}
#endif
@@ -700,7 +709,7 @@ int ncp_modify_nfs_info(struct ncp_server *server, __u8 volnum, __le32 dirent,
static int
ncp_DeleteNSEntry(struct ncp_server *server,
__u8 have_dir_base, __u8 volnum, __le32 dirent,
- char* name, __u8 ns, __le16 attr)
+ const char* name, __u8 ns, __le16 attr)
{
int result;
@@ -734,23 +743,25 @@ ncp_del_file_or_subdir2(struct ncp_server *server,
int
ncp_del_file_or_subdir(struct ncp_server *server,
- struct inode *dir, char *name)
+ struct inode *dir, const char *name)
{
__u8 volnum = NCP_FINFO(dir)->volNumber;
__le32 dirent = NCP_FINFO(dir)->dirEntNum;
+ int name_space;
+ name_space = server->name_space[volnum];
#ifdef CONFIG_NCPFS_NFS_NS
- if (server->name_space[volnum]==NW_NS_NFS)
+ if (name_space == NW_NS_NFS)
{
int result;
- result=ncp_obtain_DOS_dir_base(server, volnum, dirent, name, &dirent);
+ result=ncp_obtain_DOS_dir_base(server, name_space, volnum, dirent, name, &dirent);
if (result) return result;
- return ncp_DeleteNSEntry(server, 1, volnum, dirent, NULL, NW_NS_DOS, cpu_to_le16(0x8006));
+ name = NULL;
+ name_space = NW_NS_DOS;
}
- else
#endif /* CONFIG_NCPFS_NFS_NS */
- return ncp_DeleteNSEntry(server, 1, volnum, dirent, name, server->name_space[volnum], cpu_to_le16(0x8006));
+ return ncp_DeleteNSEntry(server, 1, volnum, dirent, name, name_space, cpu_to_le16(0x8006));
}
static inline void ConvertToNWfromDWORD(__u16 v0, __u16 v1, __u8 ret[6])
@@ -765,7 +776,7 @@ static inline void ConvertToNWfromDWORD(__u16 v0, __u16 v1, __u8 ret[6])
/* If both dir and name are NULL, then in target there's already a
looked-up entry that wants to be opened. */
int ncp_open_create_file_or_subdir(struct ncp_server *server,
- struct inode *dir, char *name,
+ struct inode *dir, const char *name,
int open_create_mode,
__le32 create_attributes,
__le16 desired_acc_rights,
@@ -890,8 +901,8 @@ int ncp_search_for_fileset(struct ncp_server *server,
static int
ncp_RenameNSEntry(struct ncp_server *server,
- struct inode *old_dir, char *old_name, __le16 old_type,
- struct inode *new_dir, char *new_name)
+ struct inode *old_dir, const char *old_name, __le16 old_type,
+ struct inode *new_dir, const char *new_name)
{
int result = -EINVAL;
@@ -929,8 +940,8 @@ out:
}
int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server,
- struct inode *old_dir, char *old_name,
- struct inode *new_dir, char *new_name)
+ struct inode *old_dir, const char *old_name,
+ struct inode *new_dir, const char *new_name)
{
int result;
__le16 old_type = cpu_to_le16(0x06);
@@ -958,7 +969,7 @@ int
ncp_read_kernel(struct ncp_server *server, const char *file_id,
__u32 offset, __u16 to_read, char *target, int *bytes_read)
{
- char *source;
+ const char *source;
int result;
ncp_init_request(server);
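
The const-qualification of BVAL()/WVAL_LH()/DVAL_LH() and of the path and name parameters above reflects that NCP reply buffers are only ever read; the helpers are thin wrappers around unaligned little-endian loads. A self-contained sketch of the same access pattern, where the struct and the field offsets are invented purely for illustration:

#include <linux/types.h>
#include <asm/unaligned.h>

struct example_reply_view {
	u8  volume;
	u16 flags;
	u32 dir_base;
};

static void example_parse_reply(const void *reply, struct example_reply_view *out)
{
	const u8 *p = reply;

	out->volume   = *p;				/* like BVAL(reply)        */
	out->flags    = get_unaligned_le16(p + 1);	/* like WVAL_LH(reply + 1) */
	out->dir_base = get_unaligned_le32(p + 3);	/* like DVAL_LH(reply + 3) */
}

Taking const void * means the compiler rejects accidental writes through these helpers, which is the point of the conversion.
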
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 2441d1ab57d..3c57eca634c 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -65,10 +65,11 @@ static inline void ncp_inode_close(struct inode *inode) {
atomic_dec(&NCP_FINFO(inode)->opened);
}
-void ncp_extract_file_info(void* src, struct nw_info_struct* target);
-int ncp_obtain_info(struct ncp_server *server, struct inode *, char *,
+void ncp_extract_file_info(const void* src, struct nw_info_struct* target);
+int ncp_obtain_info(struct ncp_server *server, struct inode *, const char *,
struct nw_info_struct *target);
int ncp_obtain_nfs_info(struct ncp_server *server, struct nw_info_struct *target);
+int ncp_update_known_namespace(struct ncp_server *server, __u8 volume, int *ret_ns);
int ncp_get_volume_root(struct ncp_server *server, const char *volname,
__u32 *volume, __le32 *dirent, __le32 *dosdirent);
int ncp_lookup_volume(struct ncp_server *, const char *, struct nw_info_struct *);
@@ -80,8 +81,8 @@ int ncp_modify_nfs_info(struct ncp_server *, __u8 volnum, __le32 dirent,
__u32 mode, __u32 rdev);
int ncp_del_file_or_subdir2(struct ncp_server *, struct dentry*);
-int ncp_del_file_or_subdir(struct ncp_server *, struct inode *, char *);
-int ncp_open_create_file_or_subdir(struct ncp_server *, struct inode *, char *,
+int ncp_del_file_or_subdir(struct ncp_server *, struct inode *, const char *);
+int ncp_open_create_file_or_subdir(struct ncp_server *, struct inode *, const char *,
int, __le32, __le16, struct ncp_entry_info *);
int ncp_initialize_search(struct ncp_server *, struct inode *,
@@ -93,7 +94,7 @@ int ncp_search_for_fileset(struct ncp_server *server,
char** rbuf, size_t* rsize);
int ncp_ren_or_mov_file_or_subdir(struct ncp_server *server,
- struct inode *, char *, struct inode *, char *);
+ struct inode *, const char *, struct inode *, const char *);
int
@@ -170,13 +171,13 @@ static inline int ncp_strnicmp(struct nls_table *t, const unsigned char *s1,
#endif /* CONFIG_NCPFS_NLS */
#define NCP_GET_AGE(dentry) (jiffies - (dentry)->d_time)
-#define NCP_MAX_AGE(server) ((server)->dentry_ttl)
+#define NCP_MAX_AGE(server) atomic_read(&(server)->dentry_ttl)
#define NCP_TEST_AGE(server,dentry) (NCP_GET_AGE(dentry) < NCP_MAX_AGE(server))
static inline void
ncp_age_dentry(struct ncp_server* server, struct dentry* dentry)
{
- dentry->d_time = jiffies - server->dentry_ttl;
+ dentry->d_time = jiffies - NCP_MAX_AGE(server);
}
static inline void
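
With dentry_ttl held in an atomic_t, NCP_MAX_AGE() above can be evaluated from dentry revalidation without taking any server lock, while the SETDENTRYTTL/GETDENTRYTTL ioctls earlier in the patch update it with atomic_set()/atomic_read(). Those ioctls open-code the millisecond/jiffies conversion with HZ; the standard helpers are equivalent within rounding, as in this minimal sketch (helper names are illustrative):

#include <linux/jiffies.h>
#include <asm/atomic.h>

static void example_set_ttl_ms(atomic_t *ttl, unsigned int ms)
{
	/* same as (ms * HZ) / 1000 in the ioctl handler, within rounding */
	atomic_set(ttl, msecs_to_jiffies(ms));
}

static unsigned int example_get_ttl_ms(atomic_t *ttl)
{
	return jiffies_to_msecs(atomic_read(ttl));
}

A plain atomic is sufficient here because the TTL is a single independent value: readers only need to see some recent value, not a consistent snapshot of several fields.
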
diff --git a/fs/ncpfs/ncpsign_kernel.c b/fs/ncpfs/ncpsign_kernel.c
index 7c0b5c21e6c..d8b2d7e6910 100644
--- a/fs/ncpfs/ncpsign_kernel.c
+++ b/fs/ncpfs/ncpsign_kernel.c
@@ -15,21 +15,21 @@
/* i386: 32-bit, little endian, handles mis-alignment */
#ifdef __i386__
-#define GET_LE32(p) (*(int *)(p))
+#define GET_LE32(p) (*(const int *)(p))
#define PUT_LE32(p,v) { *(int *)(p)=v; }
#else
/* from include/ncplib.h */
-#define BVAL(buf,pos) (((__u8 *)(buf))[pos])
+#define BVAL(buf,pos) (((const __u8 *)(buf))[pos])
#define PVAL(buf,pos) ((unsigned)BVAL(buf,pos))
-#define BSET(buf,pos,val) (BVAL(buf,pos) = (val))
+#define BSET(buf,pos,val) (((__u8 *)(buf))[pos] = (val))
static inline __u16
-WVAL_LH(__u8 * buf, int pos)
+WVAL_LH(const __u8 * buf, int pos)
{
return PVAL(buf, pos) | PVAL(buf, pos + 1) << 8;
}
static inline __u32
-DVAL_LH(__u8 * buf, int pos)
+DVAL_LH(const __u8 * buf, int pos)
{
return WVAL_LH(buf, pos) | WVAL_LH(buf, pos + 2) << 16;
}
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index c7ff6c700a6..668bd267346 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -746,7 +746,6 @@ static int ncp_do_request(struct ncp_server *server, int size,
return -EIO;
}
if (!ncp_conn_valid(server)) {
- printk(KERN_ERR "ncpfs: Connection invalid!\n");
return -EIO;
}
{
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index f7e13db613c..fd667652c50 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -1,6 +1,7 @@
config NFS_FS
tristate "NFS client support"
depends on INET && FILE_LOCKING
+ depends on BKL # fix as soon as lockd is done
select LOCKD
select SUNRPC
select NFS_ACL_SUPPORT if NFS_V3_ACL
@@ -76,13 +77,17 @@ config NFS_V4
config NFS_V4_1
bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
- depends on NFS_V4 && EXPERIMENTAL
+ depends on NFS_FS && NFS_V4 && EXPERIMENTAL
+ select PNFS_FILE_LAYOUT
help
This option enables support for minor version 1 of the NFSv4 protocol
- (draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
+ (RFC 5661) in the kernel's NFS client.
If unsure, say N.
+config PNFS_FILE_LAYOUT
+ tristate
+
config ROOT_NFS
bool "Root file system on NFS"
depends on NFS_FS=y && IP_PNP
@@ -117,3 +122,14 @@ config NFS_USE_KERNEL_DNS
select DNS_RESOLVER
select KEYS
default y
+
+config NFS_USE_NEW_IDMAPPER
+ bool "Use the new idmapper upcall routine"
+ depends on NFS_V4 && KEYS
+ help
+ Say Y here if you want NFS to use the new idmapper upcall functions.
+ You will need /sbin/request-key (usually provided by the keyutils
+ package). For details, read
+ <file:Documentation/filesystems/nfs/idmapper.txt>.
+
+ If you are unsure, say N.
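
As the help text says, the new idmapper routes NFSv4 name/ID mapping through the kernel keyring and /sbin/request-key instead of the rpc.idmapd pipe. In practice that requires a request-key.conf rule pointing id_resolver keys at a userspace callout; the program name and path below are illustrative only (the callout ships with nfs-utils, commonly as nfs.idmap or nfsidmap, and the idmapper.txt document referenced above carries the authoritative example):

create	id_resolver	*	*	/usr/sbin/nfs.idmap %k %d 600

Here %k and %d expand to the key id and key description per request-key.conf(5), and the trailing number is an argument the callout uses as the key timeout in seconds.
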
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index da7fda639ea..4776ff9e381 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -15,5 +15,9 @@ nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
delegation.o idmap.o \
callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o
+nfs-$(CONFIG_NFS_V4_1) += pnfs.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
+
+obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
+nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index e17b49e2eab..aeec017fe81 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -109,7 +109,7 @@ nfs4_callback_up(struct svc_serv *serv)
{
int ret;
- ret = svc_create_xprt(serv, "tcp", PF_INET,
+ ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
if (ret <= 0)
goto out_err;
@@ -117,7 +117,7 @@ nfs4_callback_up(struct svc_serv *serv)
dprintk("NFS: Callback listener port = %u (af %u)\n",
nfs_callback_tcpport, PF_INET);
- ret = svc_create_xprt(serv, "tcp", PF_INET6,
+ ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
if (ret > 0) {
nfs_callback_tcpport6 = ret;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 930d10fecda..2950fca0c61 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -118,11 +118,11 @@ int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const n
if (delegation == NULL)
return 0;
- /* seqid is 4-bytes long */
- if (((u32 *) &stateid->data)[0] != 0)
+ if (stateid->stateid.seqid != 0)
return 0;
- if (memcmp(&delegation->stateid.data[4], &stateid->data[4],
- sizeof(stateid->data)-4))
+ if (memcmp(&delegation->stateid.stateid.other,
+ &stateid->stateid.other,
+ NFS4_STATEID_OTHER_SIZE))
return 0;
return 1;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index e7340729af8..0870d0d4efc 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -48,6 +48,7 @@
#include "iostat.h"
#include "internal.h"
#include "fscache.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
@@ -155,7 +156,9 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
cred = rpc_lookup_machine_cred();
if (!IS_ERR(cred))
clp->cl_machine_cred = cred;
-
+#if defined(CONFIG_NFS_V4_1)
+ INIT_LIST_HEAD(&clp->cl_layouts);
+#endif
nfs_fscache_get_client_cookie(clp);
return clp;
@@ -252,6 +255,7 @@ void nfs_put_client(struct nfs_client *clp)
nfs_free_client(clp);
}
}
+EXPORT_SYMBOL_GPL(nfs_put_client);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
@@ -601,6 +605,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
{
struct rpc_clnt *clnt = NULL;
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = clp->cl_proto,
.address = (struct sockaddr *)&clp->cl_addr,
.addrsize = clp->cl_addrlen,
@@ -635,7 +640,8 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
*/
static void nfs_destroy_server(struct nfs_server *server)
{
- if (!(server->flags & NFS_MOUNT_NONLM))
+ if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) ||
+ !(server->flags & NFS_MOUNT_LOCAL_FCNTL))
nlmclnt_done(server->nlm_host);
}
@@ -657,7 +663,8 @@ static int nfs_start_lockd(struct nfs_server *server)
if (nlm_init.nfs_version > 3)
return 0;
- if (server->flags & NFS_MOUNT_NONLM)
+ if ((server->flags & NFS_MOUNT_LOCAL_FLOCK) &&
+ (server->flags & NFS_MOUNT_LOCAL_FCNTL))
return 0;
switch (clp->cl_proto) {
@@ -898,11 +905,13 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *
if (server->wsize > NFS_MAX_FILE_IO_SIZE)
server->wsize = NFS_MAX_FILE_IO_SIZE;
server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ set_pnfs_layoutdriver(server, fsinfo->layouttype);
+
server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
- if (server->dtsize > PAGE_CACHE_SIZE)
- server->dtsize = PAGE_CACHE_SIZE;
+ if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES)
+ server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES;
if (server->dtsize > server->rsize)
server->dtsize = server->rsize;
@@ -913,6 +922,8 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *
server->maxfilesize = fsinfo->maxfilesize;
+ server->time_delta = fsinfo->time_delta;
+
/* We're airborne Set socket buffersize */
rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100);
}
@@ -935,6 +946,7 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
}
fsinfo.fattr = fattr;
+ fsinfo.layouttype = 0;
error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo);
if (error < 0)
goto out_error;
@@ -1017,6 +1029,7 @@ void nfs_free_server(struct nfs_server *server)
{
dprintk("--> nfs_free_server()\n");
+ unset_pnfs_layoutdriver(server);
spin_lock(&nfs_client_lock);
list_del(&server->client_link);
list_del(&server->master_link);
@@ -1356,8 +1369,9 @@ static int nfs4_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|
- NFS_CAP_POSIX_LOCK;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|NFS_CAP_POSIX_LOCK;
+ if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+ server->caps |= NFS_CAP_READDIRPLUS;
server->options = data->options;
/* Get a client record */
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index b9c3c43cea1..232a7eead33 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -71,20 +71,20 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
if (inode->i_flock == NULL)
goto out;
- /* Protect inode->i_flock using the BKL */
- lock_kernel();
+ /* Protect inode->i_flock using the file locks lock */
+ lock_flocks();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file) != ctx)
continue;
- unlock_kernel();
+ unlock_flocks();
status = nfs4_lock_delegation_recall(state, fl);
if (status < 0)
goto out;
- lock_kernel();
+ lock_flocks();
}
- unlock_kernel();
+ unlock_flocks();
out:
return status;
}
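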
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e257172d438..257e4052492 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -33,11 +33,12 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/sched.h>
+#include <linux/vmalloc.h>
-#include "nfs4_fs.h"
#include "delegation.h"
#include "iostat.h"
#include "internal.h"
+#include "fscache.h"
/* #define NFS_DEBUG_VERBOSE 1 */
@@ -55,6 +56,7 @@ static int nfs_rename(struct inode *, struct dentry *,
struct inode *, struct dentry *);
static int nfs_fsync_dir(struct file *, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
+static int nfs_readdir_clear_array(struct page*, gfp_t);
const struct file_operations nfs_dir_operations = {
.llseek = nfs_llseek_dir,
@@ -80,6 +82,10 @@ const struct inode_operations nfs_dir_inode_operations = {
.setattr = nfs_setattr,
};
+const struct address_space_operations nfs_dir_addr_space_ops = {
+ .releasepage = nfs_readdir_clear_array,
+};
+
#ifdef CONFIG_NFS_V3
const struct inode_operations nfs3_dir_inode_operations = {
.create = nfs_create,
@@ -104,8 +110,9 @@ const struct inode_operations nfs3_dir_inode_operations = {
#ifdef CONFIG_NFS_V4
static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
+static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd);
const struct inode_operations nfs4_dir_inode_operations = {
- .create = nfs_create,
+ .create = nfs_open_create,
.lookup = nfs_atomic_lookup,
.link = nfs_link,
.unlink = nfs_unlink,
@@ -150,51 +157,197 @@ nfs_opendir(struct inode *inode, struct file *filp)
return res;
}
-typedef __be32 * (*decode_dirent_t)(__be32 *, struct nfs_entry *, int);
+struct nfs_cache_array_entry {
+ u64 cookie;
+ u64 ino;
+ struct qstr string;
+};
+
+struct nfs_cache_array {
+ unsigned int size;
+ int eof_index;
+ u64 last_cookie;
+ struct nfs_cache_array_entry array[0];
+};
+
+#define MAX_READDIR_ARRAY ((PAGE_SIZE - sizeof(struct nfs_cache_array)) / sizeof(struct nfs_cache_array_entry))
+
+typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
typedef struct {
struct file *file;
struct page *page;
unsigned long page_index;
- __be32 *ptr;
u64 *dir_cookie;
loff_t current_index;
- struct nfs_entry *entry;
decode_dirent_t decode;
- int plus;
+
unsigned long timestamp;
unsigned long gencount;
- int timestamp_valid;
+ unsigned int cache_entry_index;
+ unsigned int plus:1;
+ unsigned int eof:1;
} nfs_readdir_descriptor_t;
-/* Now we cache directories properly, by stuffing the dirent
- * data directly in the page cache.
- *
- * Inode invalidation due to refresh etc. takes care of
- * _everything_, no sloppy entry flushing logic, no extraneous
- * copying, network direct to page cache, the way it was meant
- * to be.
- *
- * NOTE: Dirent information verification is done always by the
- * page-in of the RPC reply, nowhere else, this simplies
- * things substantially.
+/*
+ * The caller is responsible for calling nfs_readdir_release_array(page)
*/
static
-int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
+struct nfs_cache_array *nfs_readdir_get_array(struct page *page)
+{
+ if (page == NULL)
+ return ERR_PTR(-EIO);
+ return (struct nfs_cache_array *)kmap(page);
+}
+
+static
+void nfs_readdir_release_array(struct page *page)
+{
+ kunmap(page);
+}
+
+/*
+ * We are freeing the strings created by nfs_readdir_add_to_array()
+ */
+static
+int nfs_readdir_clear_array(struct page *page, gfp_t mask)
+{
+ struct nfs_cache_array *array = nfs_readdir_get_array(page);
+ int i;
+ for (i = 0; i < array->size; i++)
+ kfree(array->array[i].string.name);
+ nfs_readdir_release_array(page);
+ return 0;
+}
+
+/*
+ * The caller is responsible for freeing qstr.name. When called from
+ * nfs_readdir_add_to_array(), the strings will later be freed by
+ * nfs_readdir_clear_array().
+ */
+static
+int nfs_readdir_make_qstr(struct qstr *string, const char *name, unsigned int len)
+{
+ string->len = len;
+ string->name = kmemdup(name, len, GFP_KERNEL);
+ if (string->name == NULL)
+ return -ENOMEM;
+ string->hash = full_name_hash(name, len);
+ return 0;
+}
+
+static
+int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
+{
+ struct nfs_cache_array *array = nfs_readdir_get_array(page);
+ struct nfs_cache_array_entry *cache_entry;
+ int ret;
+
+ if (IS_ERR(array))
+ return PTR_ERR(array);
+ ret = -EIO;
+ if (array->size >= MAX_READDIR_ARRAY)
+ goto out;
+
+ cache_entry = &array->array[array->size];
+ cache_entry->cookie = entry->prev_cookie;
+ cache_entry->ino = entry->ino;
+ ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len);
+ if (ret)
+ goto out;
+ array->last_cookie = entry->cookie;
+ if (entry->eof == 1)
+ array->eof_index = array->size;
+ array->size++;
+out:
+ nfs_readdir_release_array(page);
+ return ret;
+}
+
+static
+int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
+{
+ loff_t diff = desc->file->f_pos - desc->current_index;
+ unsigned int index;
+
+ if (diff < 0)
+ goto out_eof;
+ if (diff >= array->size) {
+ if (array->eof_index > 0)
+ goto out_eof;
+ desc->current_index += array->size;
+ return -EAGAIN;
+ }
+
+ index = (unsigned int)diff;
+ *desc->dir_cookie = array->array[index].cookie;
+ desc->cache_entry_index = index;
+ if (index == array->eof_index)
+ desc->eof = 1;
+ return 0;
+out_eof:
+ desc->eof = 1;
+ return -EBADCOOKIE;
+}
+
+static
+int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
+{
+ int i;
+ int status = -EAGAIN;
+
+ for (i = 0; i < array->size; i++) {
+ if (i == array->eof_index) {
+ desc->eof = 1;
+ status = -EBADCOOKIE;
+ }
+ if (array->array[i].cookie == *desc->dir_cookie) {
+ desc->cache_entry_index = i;
+ status = 0;
+ break;
+ }
+ }
+
+ return status;
+}
+
+static
+int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc)
+{
+ struct nfs_cache_array *array;
+ int status = -EBADCOOKIE;
+
+ if (desc->dir_cookie == NULL)
+ goto out;
+
+ array = nfs_readdir_get_array(desc->page);
+ if (IS_ERR(array)) {
+ status = PTR_ERR(array);
+ goto out;
+ }
+
+ if (*desc->dir_cookie == 0)
+ status = nfs_readdir_search_for_pos(array, desc);
+ else
+ status = nfs_readdir_search_for_cookie(array, desc);
+
+ nfs_readdir_release_array(desc->page);
+out:
+ return status;
+}
+
+/* Fill a page with xdr information before transferring to the cache page */
+static
+int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
+ struct nfs_entry *entry, struct file *file, struct inode *inode)
{
- struct file *file = desc->file;
- struct inode *inode = file->f_path.dentry->d_inode;
struct rpc_cred *cred = nfs_file_cred(file);
unsigned long timestamp, gencount;
int error;
- dfprintk(DIRCACHE, "NFS: %s: reading cookie %Lu into page %lu\n",
- __func__, (long long)desc->entry->cookie,
- page->index);
-
again:
timestamp = jiffies;
gencount = nfs_inc_attr_generation_counter();
- error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, desc->entry->cookie, page,
+ error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
NFS_SERVER(inode)->dtsize, desc->plus);
if (error < 0) {
/* We requested READDIRPLUS, but the server doesn't grok it */
@@ -208,190 +361,292 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
}
desc->timestamp = timestamp;
desc->gencount = gencount;
- desc->timestamp_valid = 1;
- SetPageUptodate(page);
- /* Ensure consistent page alignment of the data.
- * Note: assumes we have exclusive access to this mapping either
- * through inode->i_mutex or some other mechanism.
- */
- if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
- /* Should never happen */
- nfs_zap_mapping(inode, inode->i_mapping);
- }
- unlock_page(page);
- return 0;
- error:
- unlock_page(page);
- return -EIO;
+error:
+ return error;
}
-static inline
-int dir_decode(nfs_readdir_descriptor_t *desc)
+/* Fill in an entry based on the xdr code stored in desc->page */
+static
+int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct xdr_stream *stream)
{
- __be32 *p = desc->ptr;
- p = desc->decode(p, desc->entry, desc->plus);
+ __be32 *p = desc->decode(stream, entry, NFS_SERVER(desc->file->f_path.dentry->d_inode), desc->plus);
if (IS_ERR(p))
return PTR_ERR(p);
- desc->ptr = p;
- if (desc->timestamp_valid) {
- desc->entry->fattr->time_start = desc->timestamp;
- desc->entry->fattr->gencount = desc->gencount;
- } else
- desc->entry->fattr->valid &= ~NFS_ATTR_FATTR;
+
+ entry->fattr->time_start = desc->timestamp;
+ entry->fattr->gencount = desc->gencount;
return 0;
}
-static inline
-void dir_page_release(nfs_readdir_descriptor_t *desc)
+static
+int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
{
- kunmap(desc->page);
- page_cache_release(desc->page);
- desc->page = NULL;
- desc->ptr = NULL;
+ struct nfs_inode *node;
+ if (dentry->d_inode == NULL)
+ goto different;
+ node = NFS_I(dentry->d_inode);
+ if (node->fh.size != entry->fh->size)
+ goto different;
+ if (strncmp(node->fh.data, entry->fh->data, node->fh.size) != 0)
+ goto different;
+ return 1;
+different:
+ return 0;
}
-/*
- * Given a pointer to a buffer that has already been filled by a call
- * to readdir, find the next entry with cookie '*desc->dir_cookie'.
- *
- * If the end of the buffer has been reached, return -EAGAIN, if not,
- * return the offset within the buffer of the next entry to be
- * read.
- */
-static inline
-int find_dirent(nfs_readdir_descriptor_t *desc)
+static
+void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{
- struct nfs_entry *entry = desc->entry;
- int loop_count = 0,
- status;
+ struct qstr filename = {
+ .len = entry->len,
+ .name = entry->name,
+ };
+ struct dentry *dentry;
+ struct dentry *alias;
+ struct inode *dir = parent->d_inode;
+ struct inode *inode;
- while((status = dir_decode(desc)) == 0) {
- dfprintk(DIRCACHE, "NFS: %s: examining cookie %Lu\n",
- __func__, (unsigned long long)entry->cookie);
- if (entry->prev_cookie == *desc->dir_cookie)
- break;
- if (loop_count++ > 200) {
- loop_count = 0;
- schedule();
+ if (filename.name[0] == '.') {
+ if (filename.len == 1)
+ return;
+ if (filename.len == 2 && filename.name[1] == '.')
+ return;
+ }
+ filename.hash = full_name_hash(filename.name, filename.len);
+
+ dentry = d_lookup(parent, &filename);
+ if (dentry != NULL) {
+ if (nfs_same_file(dentry, entry)) {
+ nfs_refresh_inode(dentry->d_inode, entry->fattr);
+ goto out;
+ } else {
+ d_drop(dentry);
+ dput(dentry);
}
}
- return status;
+
+ dentry = d_alloc(parent, &filename);
+ if (dentry == NULL)
+ return;
+
+ dentry->d_op = NFS_PROTO(dir)->dentry_ops;
+ inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
+ if (IS_ERR(inode))
+ goto out;
+
+ alias = d_materialise_unique(dentry, inode);
+ if (IS_ERR(alias))
+ goto out;
+ else if (alias) {
+ nfs_set_verifier(alias, nfs_save_change_attribute(dir));
+ dput(alias);
+ } else
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+
+out:
+ dput(dentry);
+}
+
+/* Perform conversion from xdr to cache array */
+static
+void nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
+ void *xdr_page, struct page *page, unsigned int buflen)
+{
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ __be32 *ptr = xdr_page;
+ int status;
+ struct nfs_cache_array *array;
+
+ buf.head->iov_base = xdr_page;
+ buf.head->iov_len = buflen;
+ buf.tail->iov_len = 0;
+ buf.page_base = 0;
+ buf.page_len = 0;
+ buf.buflen = buf.head->iov_len;
+ buf.len = buf.head->iov_len;
+
+ xdr_init_decode(&stream, &buf, ptr);
+
+
+ do {
+ status = xdr_decode(desc, entry, &stream);
+ if (status != 0)
+ break;
+
+ if (nfs_readdir_add_to_array(entry, page) == -1)
+ break;
+ if (desc->plus == 1)
+ nfs_prime_dcache(desc->file->f_path.dentry, entry);
+ } while (!entry->eof);
+
+ if (status == -EBADCOOKIE && entry->eof) {
+ array = nfs_readdir_get_array(page);
+ array->eof_index = array->size - 1;
+ status = 0;
+ nfs_readdir_release_array(page);
+ }
+}
+
+static
+void nfs_readdir_free_pagearray(struct page **pages, unsigned int npages)
+{
+ unsigned int i;
+ for (i = 0; i < npages; i++)
+ put_page(pages[i]);
+}
+
+static
+void nfs_readdir_free_large_page(void *ptr, struct page **pages,
+ unsigned int npages)
+{
+ vm_unmap_ram(ptr, npages);
+ nfs_readdir_free_pagearray(pages, npages);
}
/*
- * Given a pointer to a buffer that has already been filled by a call
- * to readdir, find the entry at offset 'desc->file->f_pos'.
- *
- * If the end of the buffer has been reached, return -EAGAIN, if not,
- * return the offset within the buffer of the next entry to be
- * read.
+ * nfs_readdir_large_page will allocate pages that must be freed with a call
+ * to nfs_readdir_free_large_page
*/
-static inline
-int find_dirent_index(nfs_readdir_descriptor_t *desc)
+static
+void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
{
- struct nfs_entry *entry = desc->entry;
- int loop_count = 0,
- status;
+ void *ptr;
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ struct page *page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ goto out_freepages;
+ pages[i] = page;
+ }
- for(;;) {
- status = dir_decode(desc);
- if (status)
- break;
+ ptr = vm_map_ram(pages, npages, 0, PAGE_KERNEL);
+ if (!IS_ERR_OR_NULL(ptr))
+ return ptr;
+out_freepages:
+ nfs_readdir_free_pagearray(pages, i);
+ return NULL;
+}
+
+static
+int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
+{
+ struct page *pages[NFS_MAX_READDIR_PAGES];
+ void *pages_ptr = NULL;
+ struct nfs_entry entry;
+ struct file *file = desc->file;
+ struct nfs_cache_array *array;
+ int status = 0;
+ unsigned int array_size = ARRAY_SIZE(pages);
+
+ entry.prev_cookie = 0;
+ entry.cookie = *desc->dir_cookie;
+ entry.eof = 0;
+ entry.fh = nfs_alloc_fhandle();
+ entry.fattr = nfs_alloc_fattr();
+ if (entry.fh == NULL || entry.fattr == NULL)
+ goto out;
- dfprintk(DIRCACHE, "NFS: found cookie %Lu at index %Ld\n",
- (unsigned long long)entry->cookie, desc->current_index);
+ array = nfs_readdir_get_array(page);
+ memset(array, 0, sizeof(struct nfs_cache_array));
+ array->eof_index = -1;
- if (desc->file->f_pos == desc->current_index) {
- *desc->dir_cookie = entry->cookie;
+ pages_ptr = nfs_readdir_large_page(pages, array_size);
+ if (!pages_ptr)
+ goto out_release_array;
+ do {
+ status = nfs_readdir_xdr_filler(pages, desc, &entry, file, inode);
+
+ if (status < 0)
break;
- }
- desc->current_index++;
- if (loop_count++ > 200) {
- loop_count = 0;
- schedule();
- }
- }
+ nfs_readdir_page_filler(desc, &entry, pages_ptr, page, array_size * PAGE_SIZE);
+ } while (array->eof_index < 0 && array->size < MAX_READDIR_ARRAY);
+
+ nfs_readdir_free_large_page(pages_ptr, pages, array_size);
+out_release_array:
+ nfs_readdir_release_array(page);
+out:
+ nfs_free_fattr(entry.fattr);
+ nfs_free_fhandle(entry.fh);
return status;
}
/*
- * Find the given page, and call find_dirent() or find_dirent_index in
- * order to try to return the next entry.
+ * Now we cache directories properly, by converting xdr information
+ * to an array that can be used for lookups later. This results in
+ * fewer cache pages, since we can store more information on each page.
+ * We only need to convert from xdr once, so future lookups are much simpler.
*/
-static inline
-int find_dirent_page(nfs_readdir_descriptor_t *desc)
+static
+int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
{
struct inode *inode = desc->file->f_path.dentry->d_inode;
- struct page *page;
- int status;
- dfprintk(DIRCACHE, "NFS: %s: searching page %ld for target %Lu\n",
- __func__, desc->page_index,
- (long long) *desc->dir_cookie);
+ if (nfs_readdir_xdr_to_array(desc, page, inode) < 0)
+ goto error;
+ SetPageUptodate(page);
- /* If we find the page in the page_cache, we cannot be sure
- * how fresh the data is, so we will ignore readdir_plus attributes.
- */
- desc->timestamp_valid = 0;
- page = read_cache_page(inode->i_mapping, desc->page_index,
- (filler_t *)nfs_readdir_filler, desc);
- if (IS_ERR(page)) {
- status = PTR_ERR(page);
- goto out;
+ if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
+ /* Should never happen */
+ nfs_zap_mapping(inode, inode->i_mapping);
}
+ unlock_page(page);
+ return 0;
+ error:
+ unlock_page(page);
+ return -EIO;
+}
- /* NOTE: Someone else may have changed the READDIRPLUS flag */
- desc->page = page;
- desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */
- if (*desc->dir_cookie != 0)
- status = find_dirent(desc);
- else
- status = find_dirent_index(desc);
- if (status < 0)
- dir_page_release(desc);
- out:
- dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status);
- return status;
+static
+void cache_page_release(nfs_readdir_descriptor_t *desc)
+{
+ page_cache_release(desc->page);
+ desc->page = NULL;
+}
+
+static
+struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
+{
+ struct page *page;
+ page = read_cache_page(desc->file->f_path.dentry->d_inode->i_mapping,
+ desc->page_index, (filler_t *)nfs_readdir_filler, desc);
+ if (IS_ERR(page))
+ desc->eof = 1;
+ return page;
}
/*
- * Recurse through the page cache pages, and return a
- * filled nfs_entry structure of the next directory entry if possible.
- *
- * The target for the search is '*desc->dir_cookie' if non-0,
- * 'desc->file->f_pos' otherwise
+ * Returns 0 if desc->dir_cookie was found on page desc->page_index
*/
+static
+int find_cache_page(nfs_readdir_descriptor_t *desc)
+{
+ int res;
+
+ desc->page = get_cache_page(desc);
+ if (IS_ERR(desc->page))
+ return PTR_ERR(desc->page);
+
+ res = nfs_readdir_search_array(desc);
+ if (res == 0)
+ return 0;
+ cache_page_release(desc);
+ return res;
+}
+
+/* Search for desc->dir_cookie from the beginning of the page cache */
static inline
int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
{
- int loop_count = 0;
- int res;
-
- /* Always search-by-index from the beginning of the cache */
- if (*desc->dir_cookie == 0) {
- dfprintk(DIRCACHE, "NFS: readdir_search_pagecache() searching for offset %Ld\n",
- (long long)desc->file->f_pos);
- desc->page_index = 0;
- desc->entry->cookie = desc->entry->prev_cookie = 0;
- desc->entry->eof = 0;
- desc->current_index = 0;
- } else
- dfprintk(DIRCACHE, "NFS: readdir_search_pagecache() searching for cookie %Lu\n",
- (unsigned long long)*desc->dir_cookie);
+ int res = -EAGAIN;
- for (;;) {
- res = find_dirent_page(desc);
+ while (1) {
+ res = find_cache_page(desc);
if (res != -EAGAIN)
break;
- /* Align to beginning of next page */
- desc->page_index ++;
- if (loop_count++ > 200) {
- loop_count = 0;
- schedule();
- }
+ desc->page_index++;
}
-
- dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, res);
return res;
}
@@ -400,8 +655,6 @@ static inline unsigned int dt_type(struct inode *inode)
return (inode->i_mode >> 12) & 15;
}
-static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc);
-
/*
* Once we've found the start of the dirent within a page: fill 'er up...
*/
@@ -410,49 +663,36 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
filldir_t filldir)
{
struct file *file = desc->file;
- struct nfs_entry *entry = desc->entry;
- struct dentry *dentry = NULL;
- u64 fileid;
- int loop_count = 0,
- res;
-
- dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n",
- (unsigned long long)entry->cookie);
-
- for(;;) {
- unsigned d_type = DT_UNKNOWN;
- /* Note: entry->prev_cookie contains the cookie for
- * retrieving the current dirent on the server */
- fileid = entry->ino;
-
- /* Get a dentry if we have one */
- if (dentry != NULL)
- dput(dentry);
- dentry = nfs_readdir_lookup(desc);
+ int i = 0;
+ int res = 0;
+ struct nfs_cache_array *array = NULL;
+ unsigned int d_type = DT_UNKNOWN;
+ struct dentry *dentry = NULL;
- /* Use readdirplus info */
- if (dentry != NULL && dentry->d_inode != NULL) {
- d_type = dt_type(dentry->d_inode);
- fileid = NFS_FILEID(dentry->d_inode);
- }
+ array = nfs_readdir_get_array(desc->page);
- res = filldir(dirent, entry->name, entry->len,
- file->f_pos, nfs_compat_user_ino64(fileid),
- d_type);
+ for (i = desc->cache_entry_index; i < array->size; i++) {
+ d_type = DT_UNKNOWN;
+
+ res = filldir(dirent, array->array[i].string.name,
+ array->array[i].string.len, file->f_pos,
+ nfs_compat_user_ino64(array->array[i].ino), d_type);
if (res < 0)
break;
file->f_pos++;
- *desc->dir_cookie = entry->cookie;
- if (dir_decode(desc) != 0) {
- desc->page_index ++;
+ desc->cache_entry_index = i;
+ if (i < (array->size-1))
+ *desc->dir_cookie = array->array[i+1].cookie;
+ else
+ *desc->dir_cookie = array->last_cookie;
+ if (i == array->eof_index) {
+ desc->eof = 1;
break;
}
- if (loop_count++ > 200) {
- loop_count = 0;
- schedule();
- }
}
- dir_page_release(desc);
+
+ nfs_readdir_release_array(desc->page);
+ cache_page_release(desc);
if (dentry != NULL)
dput(dentry);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
@@ -476,12 +716,9 @@ static inline
int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
filldir_t filldir)
{
- struct file *file = desc->file;
- struct inode *inode = file->f_path.dentry->d_inode;
- struct rpc_cred *cred = nfs_file_cred(file);
struct page *page = NULL;
int status;
- unsigned long timestamp, gencount;
+ struct inode *inode = desc->file->f_path.dentry->d_inode;
dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
(unsigned long long)*desc->dir_cookie);
@@ -491,38 +728,22 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
status = -ENOMEM;
goto out;
}
- timestamp = jiffies;
- gencount = nfs_inc_attr_generation_counter();
- status = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred,
- *desc->dir_cookie, page,
- NFS_SERVER(inode)->dtsize,
- desc->plus);
- desc->page = page;
- desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */
- if (status >= 0) {
- desc->timestamp = timestamp;
- desc->gencount = gencount;
- desc->timestamp_valid = 1;
- if ((status = dir_decode(desc)) == 0)
- desc->entry->prev_cookie = *desc->dir_cookie;
- } else
+
+ if (nfs_readdir_xdr_to_array(desc, page, inode) == -1) {
status = -EIO;
- if (status < 0)
goto out_release;
+ }
+ desc->page_index = 0;
+ desc->page = page;
status = nfs_do_filldir(desc, dirent, filldir);
- /* Reset read descriptor so it searches the page cache from
- * the start upon the next call to readdir_search_pagecache() */
- desc->page_index = 0;
- desc->entry->cookie = desc->entry->prev_cookie = 0;
- desc->entry->eof = 0;
out:
dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
__func__, status);
return status;
out_release:
- dir_page_release(desc);
+ cache_page_release(desc);
goto out;
}
@@ -536,7 +757,6 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct inode *inode = dentry->d_inode;
nfs_readdir_descriptor_t my_desc,
*desc = &my_desc;
- struct nfs_entry my_entry;
int res = -ENOMEM;
dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
@@ -557,26 +777,17 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
desc->decode = NFS_PROTO(inode)->decode_dirent;
desc->plus = NFS_USE_READDIRPLUS(inode);
- my_entry.cookie = my_entry.prev_cookie = 0;
- my_entry.eof = 0;
- my_entry.fh = nfs_alloc_fhandle();
- my_entry.fattr = nfs_alloc_fattr();
- if (my_entry.fh == NULL || my_entry.fattr == NULL)
- goto out_alloc_failed;
-
- desc->entry = &my_entry;
-
nfs_block_sillyrename(dentry);
res = nfs_revalidate_mapping(inode, filp->f_mapping);
if (res < 0)
goto out;
- while(!desc->entry->eof) {
+ while (desc->eof != 1) {
res = readdir_search_pagecache(desc);
if (res == -EBADCOOKIE) {
/* This means either end of directory */
- if (*desc->dir_cookie && desc->entry->cookie != *desc->dir_cookie) {
+ if (*desc->dir_cookie && desc->eof == 0) {
/* Or that the server has 'lost' a cookie */
res = uncached_readdir(desc, dirent, filldir);
if (res >= 0)
@@ -588,8 +799,9 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (res == -ETOOSMALL && desc->plus) {
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
nfs_zap_caches(inode);
+ desc->page_index = 0;
desc->plus = 0;
- desc->entry->eof = 0;
+ desc->eof = 0;
continue;
}
if (res < 0)
@@ -605,9 +817,6 @@ out:
nfs_unblock_sillyrename(dentry);
if (res > 0)
res = 0;
-out_alloc_failed:
- nfs_free_fattr(my_entry.fattr);
- nfs_free_fhandle(my_entry.fh);
dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
res);
@@ -1029,10 +1238,63 @@ static int is_atomic_open(struct nameidata *nd)
return 1;
}
+static struct nfs_open_context *nameidata_to_nfs_open_context(struct dentry *dentry, struct nameidata *nd)
+{
+ struct path path = {
+ .mnt = nd->path.mnt,
+ .dentry = dentry,
+ };
+ struct nfs_open_context *ctx;
+ struct rpc_cred *cred;
+ fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+
+ cred = rpc_lookup_cred();
+ if (IS_ERR(cred))
+ return ERR_CAST(cred);
+ ctx = alloc_nfs_open_context(&path, cred, fmode);
+ put_rpccred(cred);
+ if (ctx == NULL)
+ return ERR_PTR(-ENOMEM);
+ return ctx;
+}
+
+static int do_open(struct inode *inode, struct file *filp)
+{
+ nfs_fscache_set_inode_cookie(inode, filp);
+ return 0;
+}
+
+static int nfs_intent_set_file(struct nameidata *nd, struct nfs_open_context *ctx)
+{
+ struct file *filp;
+ int ret = 0;
+
+ /* If the open_intent is for execute, we have an extra check to make */
+ if (ctx->mode & FMODE_EXEC) {
+ ret = nfs_may_open(ctx->path.dentry->d_inode,
+ ctx->cred,
+ nd->intent.open.flags);
+ if (ret < 0)
+ goto out;
+ }
+ filp = lookup_instantiate_filp(nd, ctx->path.dentry, do_open);
+ if (IS_ERR(filp))
+ ret = PTR_ERR(filp);
+ else
+ nfs_file_set_open_context(filp, ctx);
+out:
+ put_nfs_open_context(ctx);
+ return ret;
+}
+
static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
+ struct nfs_open_context *ctx;
+ struct iattr attr;
struct dentry *res = NULL;
- int error;
+ struct inode *inode;
+ int open_flags;
+ int err;
dfprintk(VFS, "NFS: atomic_lookup(%s/%ld), %s\n",
dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
@@ -1054,13 +1316,32 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
goto out;
}
+ ctx = nameidata_to_nfs_open_context(dentry, nd);
+ res = ERR_CAST(ctx);
+ if (IS_ERR(ctx))
+ goto out;
+
+ open_flags = nd->intent.open.flags;
+ if (nd->flags & LOOKUP_CREATE) {
+ attr.ia_mode = nd->intent.open.create_mode;
+ attr.ia_valid = ATTR_MODE;
+ if (!IS_POSIXACL(dir))
+ attr.ia_mode &= ~current_umask();
+ } else {
+ open_flags &= ~(O_EXCL | O_CREAT);
+ attr.ia_valid = 0;
+ }
+
/* Open the file on the server */
- res = nfs4_atomic_open(dir, dentry, nd);
- if (IS_ERR(res)) {
- error = PTR_ERR(res);
- switch (error) {
+ nfs_block_sillyrename(dentry->d_parent);
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr);
+ if (IS_ERR(inode)) {
+ nfs_unblock_sillyrename(dentry->d_parent);
+ put_nfs_open_context(ctx);
+ switch (PTR_ERR(inode)) {
/* Make a negative dentry */
case -ENOENT:
+ d_add(dentry, NULL);
res = NULL;
goto out;
/* This turned out not to be a regular file */
@@ -1072,11 +1353,25 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
goto no_open;
/* case -EINVAL: */
default:
+ res = ERR_CAST(inode);
goto out;
}
- } else if (res != NULL)
+ }
+ res = d_add_unique(dentry, inode);
+ nfs_unblock_sillyrename(dentry->d_parent);
+ if (res != NULL) {
+ dput(ctx->path.dentry);
+ ctx->path.dentry = dget(res);
dentry = res;
+ }
+ err = nfs_intent_set_file(nd, ctx);
+ if (err < 0) {
+ if (res != NULL)
+ dput(res);
+ return ERR_PTR(err);
+ }
out:
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
return res;
no_open:
return nfs_lookup(dir, dentry, nd);
@@ -1087,12 +1382,15 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
struct dentry *parent = NULL;
struct inode *inode = dentry->d_inode;
struct inode *dir;
+ struct nfs_open_context *ctx;
int openflags, ret = 0;
if (!is_atomic_open(nd) || d_mountpoint(dentry))
goto no_open;
+
parent = dget_parent(dentry);
dir = parent->d_inode;
+
/* We can't create new files in nfs_open_revalidate(), so we
* optimize away revalidation of negative dentries.
*/
@@ -1112,99 +1410,96 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
/* We can't create new files, or truncate existing ones here */
openflags &= ~(O_CREAT|O_EXCL|O_TRUNC);
+ ctx = nameidata_to_nfs_open_context(dentry, nd);
+ ret = PTR_ERR(ctx);
+ if (IS_ERR(ctx))
+ goto out;
/*
* Note: we're not holding inode->i_mutex and so may be racing with
* operations that change the directory. We therefore save the
* change attribute *before* we do the RPC call.
*/
- ret = nfs4_open_revalidate(dir, dentry, openflags, nd);
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, NULL);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ switch (ret) {
+ case -EPERM:
+ case -EACCES:
+ case -EDQUOT:
+ case -ENOSPC:
+ case -EROFS:
+ goto out_put_ctx;
+ default:
+ goto out_drop;
+ }
+ }
+ iput(inode);
+ if (inode != dentry->d_inode)
+ goto out_drop;
+
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ ret = nfs_intent_set_file(nd, ctx);
+ if (ret >= 0)
+ ret = 1;
out:
dput(parent);
- if (!ret)
- d_drop(dentry);
return ret;
+out_drop:
+ d_drop(dentry);
+ ret = 0;
+out_put_ctx:
+ put_nfs_open_context(ctx);
+ goto out;
+
no_open_dput:
dput(parent);
no_open:
return nfs_lookup_revalidate(dentry, nd);
}
-#endif /* CONFIG_NFSV4 */
-static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
+static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
{
- struct dentry *parent = desc->file->f_path.dentry;
- struct inode *dir = parent->d_inode;
- struct nfs_entry *entry = desc->entry;
- struct dentry *dentry, *alias;
- struct qstr name = {
- .name = entry->name,
- .len = entry->len,
- };
- struct inode *inode;
- unsigned long verf = nfs_save_change_attribute(dir);
+ struct nfs_open_context *ctx = NULL;
+ struct iattr attr;
+ int error;
+ int open_flags = 0;
- switch (name.len) {
- case 2:
- if (name.name[0] == '.' && name.name[1] == '.')
- return dget_parent(parent);
- break;
- case 1:
- if (name.name[0] == '.')
- return dget(parent);
- }
+ dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
+ dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
- spin_lock(&dir->i_lock);
- if (NFS_I(dir)->cache_validity & NFS_INO_INVALID_DATA) {
- spin_unlock(&dir->i_lock);
- return NULL;
- }
- spin_unlock(&dir->i_lock);
+ attr.ia_mode = mode;
+ attr.ia_valid = ATTR_MODE;
- name.hash = full_name_hash(name.name, name.len);
- dentry = d_lookup(parent, &name);
- if (dentry != NULL) {
- /* Is this a positive dentry that matches the readdir info? */
- if (dentry->d_inode != NULL &&
- (NFS_FILEID(dentry->d_inode) == entry->ino ||
- d_mountpoint(dentry))) {
- if (!desc->plus || entry->fh->size == 0)
- return dentry;
- if (nfs_compare_fh(NFS_FH(dentry->d_inode),
- entry->fh) == 0)
- goto out_renew;
- }
- /* No, so d_drop to allow one to be created */
- d_drop(dentry);
- dput(dentry);
- }
- if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
- return NULL;
- if (name.len > NFS_SERVER(dir)->namelen)
- return NULL;
- /* Note: caller is already holding the dir->i_mutex! */
- dentry = d_alloc(parent, &name);
- if (dentry == NULL)
- return NULL;
- dentry->d_op = NFS_PROTO(dir)->dentry_ops;
- inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
- if (IS_ERR(inode)) {
- dput(dentry);
- return NULL;
- }
+ if ((nd->flags & LOOKUP_CREATE) != 0) {
+ open_flags = nd->intent.open.flags;
- alias = d_materialise_unique(dentry, inode);
- if (alias != NULL) {
- dput(dentry);
- if (IS_ERR(alias))
- return NULL;
- dentry = alias;
+ ctx = nameidata_to_nfs_open_context(dentry, nd);
+ error = PTR_ERR(ctx);
+ if (IS_ERR(ctx))
+ goto out_err_drop;
}
-out_renew:
- nfs_set_verifier(dentry, verf);
- return dentry;
+ error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, ctx);
+ if (error != 0)
+ goto out_put_ctx;
+ if (ctx != NULL) {
+ error = nfs_intent_set_file(nd, ctx);
+ if (error < 0)
+ goto out_err;
+ }
+ return 0;
+out_put_ctx:
+ if (ctx != NULL)
+ put_nfs_open_context(ctx);
+out_err_drop:
+ d_drop(dentry);
+out_err:
+ return error;
}
+#endif /* CONFIG_NFSV4 */
+
/*
* Code common to create, mkdir, and mknod.
*/
@@ -1258,7 +1553,6 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
{
struct iattr attr;
int error;
- int open_flags = 0;
dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
@@ -1266,10 +1560,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
attr.ia_mode = mode;
attr.ia_valid = ATTR_MODE;
- if ((nd->flags & LOOKUP_CREATE) != 0)
- open_flags = nd->intent.open.flags;
-
- error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, nd);
+ error = NFS_PROTO(dir)->create(dir, dentry, &attr, 0, NULL);
if (error != 0)
goto out_err;
return 0;
@@ -1351,76 +1642,6 @@ static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
return error;
}
-static int nfs_sillyrename(struct inode *dir, struct dentry *dentry)
-{
- static unsigned int sillycounter;
- const int fileidsize = sizeof(NFS_FILEID(dentry->d_inode))*2;
- const int countersize = sizeof(sillycounter)*2;
- const int slen = sizeof(".nfs")+fileidsize+countersize-1;
- char silly[slen+1];
- struct qstr qsilly;
- struct dentry *sdentry;
- int error = -EIO;
-
- dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
- dentry->d_parent->d_name.name, dentry->d_name.name,
- atomic_read(&dentry->d_count));
- nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
-
- /*
- * We don't allow a dentry to be silly-renamed twice.
- */
- error = -EBUSY;
- if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
- goto out;
-
- sprintf(silly, ".nfs%*.*Lx",
- fileidsize, fileidsize,
- (unsigned long long)NFS_FILEID(dentry->d_inode));
-
- /* Return delegation in anticipation of the rename */
- nfs_inode_return_delegation(dentry->d_inode);
-
- sdentry = NULL;
- do {
- char *suffix = silly + slen - countersize;
-
- dput(sdentry);
- sillycounter++;
- sprintf(suffix, "%*.*x", countersize, countersize, sillycounter);
-
- dfprintk(VFS, "NFS: trying to rename %s to %s\n",
- dentry->d_name.name, silly);
-
- sdentry = lookup_one_len(silly, dentry->d_parent, slen);
- /*
- * N.B. Better to return EBUSY here ... it could be
- * dangerous to delete the file while it's in use.
- */
- if (IS_ERR(sdentry))
- goto out;
- } while(sdentry->d_inode != NULL); /* need negative lookup */
-
- qsilly.name = silly;
- qsilly.len = strlen(silly);
- if (dentry->d_inode) {
- error = NFS_PROTO(dir)->rename(dir, &dentry->d_name,
- dir, &qsilly);
- nfs_mark_for_revalidate(dentry->d_inode);
- } else
- error = NFS_PROTO(dir)->rename(dir, &dentry->d_name,
- dir, &qsilly);
- if (!error) {
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- d_move(dentry, sdentry);
- error = nfs_async_unlink(dir, dentry);
- /* If we return 0 we don't unlink */
- }
- dput(sdentry);
-out:
- return error;
-}
-
/*
* Remove a file after making sure there are no pending writes,
* and after checking that the file has only one user.
@@ -1711,14 +1932,14 @@ static void nfs_access_free_list(struct list_head *head)
int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
LIST_HEAD(head);
- struct nfs_inode *nfsi;
+ struct nfs_inode *nfsi, *next;
struct nfs_access_entry *cache;
if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
return (nr_to_scan == 0) ? 0 : -1;
spin_lock(&nfs_access_lru_lock);
- list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
+ list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
struct inode *inode;
if (nr_to_scan-- == 0)
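
The shrinker hunk above switches the access-cache LRU walk to list_for_each_entry_safe(), since entries may be unlinked from the list while it is being scanned. A minimal sketch of that pattern, using made-up structure names rather than anything from this patch:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {				/* hypothetical entry type */
		struct list_head node;
		int keep;
	};

	static void prune(struct list_head *head)
	{
		struct item *it, *next;

		/* the _safe variant caches the next pointer, so the current
		 * entry can be unlinked and freed without breaking the walk */
		list_for_each_entry_safe(it, next, head, node) {
			if (!it->keep) {
				list_del(&it->node);
				kfree(it);
			}
		}
	}
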
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index dba50a5625d..a6e711ad130 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -167,7 +167,7 @@ static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
return 0;
}
item = container_of(h, struct nfs_dns_ent, h);
- ttl = (long)item->h.expiry_time - (long)get_seconds();
+ ttl = item->h.expiry_time - seconds_since_boot();
if (ttl < 0)
ttl = 0;
@@ -239,7 +239,7 @@ static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
ttl = get_expiry(&buf);
if (ttl == 0)
goto out;
- key.h.expiry_time = ttl + get_seconds();
+ key.h.expiry_time = ttl + seconds_since_boot();
ret = -ENOMEM;
item = nfs_dns_lookup(cd, &key);
@@ -301,7 +301,7 @@ static int do_cache_lookup_nowait(struct cache_detail *cd,
goto out_err;
ret = -ETIMEDOUT;
if (!test_bit(CACHE_VALID, &(*item)->h.flags)
- || (*item)->h.expiry_time < get_seconds()
+ || (*item)->h.expiry_time < seconds_since_boot()
|| cd->flush_time > (*item)->h.last_refresh)
goto out_put;
ret = -ENOENT;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 05bf3c0dc75..e756075637b 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -36,6 +36,7 @@
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_FILE
@@ -386,6 +387,10 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
file->f_path.dentry->d_name.name,
mapping->host->i_ino, len, (long long) pos);
+ pnfs_update_layout(mapping->host,
+ nfs_file_open_context(file),
+ IOMODE_RW);
+
start:
/*
* Prevent starvation issues if someone is doing a consistency
@@ -551,7 +556,7 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct file *filp = vma->vm_file;
struct dentry *dentry = filp->f_path.dentry;
unsigned pagelen;
- int ret = -EINVAL;
+ int ret = VM_FAULT_NOPAGE;
struct address_space *mapping;
dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
@@ -567,21 +572,20 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (mapping != dentry->d_inode->i_mapping)
goto out_unlock;
- ret = 0;
pagelen = nfs_page_length(page);
if (pagelen == 0)
goto out_unlock;
- ret = nfs_flush_incompatible(filp, page);
- if (ret != 0)
- goto out_unlock;
+ ret = VM_FAULT_LOCKED;
+ if (nfs_flush_incompatible(filp, page) == 0 &&
+ nfs_updatepage(filp, page, 0, pagelen) == 0)
+ goto out;
- ret = nfs_updatepage(filp, page, 0, pagelen);
+ ret = VM_FAULT_SIGBUS;
out_unlock:
- if (!ret)
- return VM_FAULT_LOCKED;
unlock_page(page);
- return VM_FAULT_SIGBUS;
+out:
+ return ret;
}
static const struct vm_operations_struct nfs_file_vm_ops = {
@@ -684,7 +688,8 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
return ret;
}
-static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
+static int
+do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
struct inode *inode = filp->f_mapping->host;
int status = 0;
@@ -699,7 +704,7 @@ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
if (nfs_have_delegation(inode, FMODE_READ))
goto out_noconflict;
- if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)
+ if (is_local)
goto out_noconflict;
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
@@ -726,7 +731,8 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
return res;
}
-static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
+static int
+do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
struct inode *inode = filp->f_mapping->host;
int status;
@@ -741,15 +747,24 @@ static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
* If we're signalled while cleaning up locks on process exit, we
* still need to complete the unlock.
*/
- /* Use local locking if mounted with "-onolock" */
- if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
+ /*
+ * Use local locking if mounted with "-onolock" or with appropriate
+ * "-olocal_lock="
+ */
+ if (!is_local)
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
status = do_vfs_lock(filp, fl);
return status;
}
-static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
+static int
+is_time_granular(struct timespec *ts) {
+ return ((ts->tv_sec == 0) && (ts->tv_nsec <= 1000));
+}
+
+static int
+do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
struct inode *inode = filp->f_mapping->host;
int status;
@@ -762,20 +777,31 @@ static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
if (status != 0)
goto out;
- /* Use local locking if mounted with "-onolock" */
- if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
+ /*
+ * Use local locking if mounted with "-onolock" or with appropriate
+ * "-olocal_lock="
+ */
+ if (!is_local)
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
status = do_vfs_lock(filp, fl);
if (status < 0)
goto out;
+
/*
- * Make sure we clear the cache whenever we try to get the lock.
+ * Revalidate the cache if the server has time stamps granular
+ * enough to detect subsecond changes. Otherwise, clear the
+ * cache to prevent missing any changes.
+ *
* This makes locking act as a cache coherency point.
*/
nfs_sync_mapping(filp->f_mapping);
- if (!nfs_have_delegation(inode, FMODE_READ))
- nfs_zap_caches(inode);
+ if (!nfs_have_delegation(inode, FMODE_READ)) {
+ if (is_time_granular(&NFS_SERVER(inode)->time_delta))
+ __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ else
+ nfs_zap_caches(inode);
+ }
out:
return status;
}
@@ -787,6 +813,7 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
struct inode *inode = filp->f_mapping->host;
int ret = -ENOLCK;
+ int is_local = 0;
dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
filp->f_path.dentry->d_parent->d_name.name,
@@ -800,6 +827,9 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
goto out_err;
+ if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
+ is_local = 1;
+
if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
ret = NFS_PROTO(inode)->lock_check_bounds(fl);
if (ret < 0)
@@ -807,11 +837,11 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
}
if (IS_GETLK(cmd))
- ret = do_getlk(filp, cmd, fl);
+ ret = do_getlk(filp, cmd, fl, is_local);
else if (fl->fl_type == F_UNLCK)
- ret = do_unlk(filp, cmd, fl);
+ ret = do_unlk(filp, cmd, fl, is_local);
else
- ret = do_setlk(filp, cmd, fl);
+ ret = do_setlk(filp, cmd, fl, is_local);
out_err:
return ret;
}
@@ -821,6 +851,9 @@ out_err:
*/
static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
+ struct inode *inode = filp->f_mapping->host;
+ int is_local = 0;
+
dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
filp->f_path.dentry->d_parent->d_name.name,
filp->f_path.dentry->d_name.name,
@@ -829,14 +862,17 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
+ if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
+ is_local = 1;
+
/* We're simulating flock() locks using posix locks on the server */
fl->fl_owner = (fl_owner_t)filp;
fl->fl_start = 0;
fl->fl_end = OFFSET_MAX;
if (fl->fl_type == F_UNLCK)
- return do_unlk(filp, cmd, fl);
- return do_setlk(filp, cmd, fl);
+ return do_unlk(filp, cmd, fl, is_local);
+ return do_setlk(filp, cmd, fl, is_local);
}
/*
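
The nfs_vm_page_mkwrite() hunk above stops returning errno values and reports VM_FAULT_* codes, which is what the fault path expects from a ->page_mkwrite() handler. A hedged sketch of that shape; example_make_page_writable() is a placeholder helper, not an NFS function:

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct page *page = vmf->page;

		lock_page(page);
		/* the page may have been truncated while we slept on the lock */
		if (page->mapping != vma->vm_file->f_mapping) {
			unlock_page(page);
			return VM_FAULT_NOPAGE;
		}
		/* placeholder for the filesystem-specific dirtying work */
		if (example_make_page_writable(page) < 0) {
			unlock_page(page);
			return VM_FAULT_SIGBUS;
		}
		return VM_FAULT_LOCKED;		/* page stays locked for the caller */
	}
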
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 21a84d45916..dec47ed8b6b 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -34,6 +34,212 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifdef CONFIG_NFS_USE_NEW_IDMAPPER
+
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/nfs_idmap.h>
+#include <linux/keyctl.h>
+#include <linux/key-type.h>
+#include <linux/rcupdate.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include <keys/user-type.h>
+
+#define NFS_UINT_MAXLEN 11
+
+const struct cred *id_resolver_cache;
+
+struct key_type key_type_id_resolver = {
+ .name = "id_resolver",
+ .instantiate = user_instantiate,
+ .match = user_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
+};
+
+int nfs_idmap_init(void)
+{
+ struct cred *cred;
+ struct key *keyring;
+ int ret = 0;
+
+ printk(KERN_NOTICE "Registering the %s key type\n", key_type_id_resolver.name);
+
+ cred = prepare_kernel_cred(NULL);
+ if (!cred)
+ return -ENOMEM;
+
+ keyring = key_alloc(&key_type_keyring, ".id_resolver", 0, 0, cred,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto failed_put_cred;
+ }
+
+ ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
+ if (ret < 0)
+ goto failed_put_key;
+
+ ret = register_key_type(&key_type_id_resolver);
+ if (ret < 0)
+ goto failed_put_key;
+
+ cred->thread_keyring = keyring;
+ cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+ id_resolver_cache = cred;
+ return 0;
+
+failed_put_key:
+ key_put(keyring);
+failed_put_cred:
+ put_cred(cred);
+ return ret;
+}
+
+void nfs_idmap_quit(void)
+{
+ key_revoke(id_resolver_cache->thread_keyring);
+ unregister_key_type(&key_type_id_resolver);
+ put_cred(id_resolver_cache);
+}
+
+/*
+ * Assemble the description to pass to request_key()
+ * This function will allocate a new string and update *desc to point
+ * at it. The caller is responsible for freeing *desc.
+ *
+ * On error a negative errno is returned. Otherwise, the length of the
+ * allocated description is returned.
+ */
+static ssize_t nfs_idmap_get_desc(const char *name, size_t namelen,
+ const char *type, size_t typelen, char **desc)
+{
+ char *cp;
+ size_t desclen = typelen + namelen + 2;
+
+ *desc = kmalloc(desclen, GFP_KERNEL);
+ if (!*desc)
+ return -ENOMEM;
+
+ cp = *desc;
+ memcpy(cp, type, typelen);
+ cp += typelen;
+ *cp++ = ':';
+
+ memcpy(cp, name, namelen);
+ cp += namelen;
+ *cp = '\0';
+ return desclen;
+}
+
+static ssize_t nfs_idmap_request_key(const char *name, size_t namelen,
+ const char *type, void *data, size_t data_size)
+{
+ const struct cred *saved_cred;
+ struct key *rkey;
+ char *desc;
+ struct user_key_payload *payload;
+ ssize_t ret;
+
+ ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc);
+ if (ret <= 0)
+ goto out;
+
+ saved_cred = override_creds(id_resolver_cache);
+ rkey = request_key(&key_type_id_resolver, desc, "");
+ revert_creds(saved_cred);
+ kfree(desc);
+ if (IS_ERR(rkey)) {
+ ret = PTR_ERR(rkey);
+ goto out;
+ }
+
+ rcu_read_lock();
+ rkey->perm |= KEY_USR_VIEW;
+
+ ret = key_validate(rkey);
+ if (ret < 0)
+ goto out_up;
+
+ payload = rcu_dereference(rkey->payload.data);
+ if (IS_ERR_OR_NULL(payload)) {
+ ret = PTR_ERR(payload);
+ goto out_up;
+ }
+
+ ret = payload->datalen;
+ if (ret > 0 && ret <= data_size)
+ memcpy(data, payload->data, ret);
+ else
+ ret = -EINVAL;
+
+out_up:
+ rcu_read_unlock();
+ key_put(rkey);
+out:
+ return ret;
+}
+
+
+/* ID -> Name */
+static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf, size_t buflen)
+{
+ char id_str[NFS_UINT_MAXLEN];
+ int id_len;
+ ssize_t ret;
+
+ id_len = snprintf(id_str, sizeof(id_str), "%u", id);
+ ret = nfs_idmap_request_key(id_str, id_len, type, buf, buflen);
+ if (ret < 0)
+ return -EINVAL;
+ return ret;
+}
+
+/* Name -> ID */
+static int nfs_idmap_lookup_id(const char *name, size_t namelen,
+ const char *type, __u32 *id)
+{
+ char id_str[NFS_UINT_MAXLEN];
+ long id_long;
+ ssize_t data_size;
+ int ret = 0;
+
+ data_size = nfs_idmap_request_key(name, namelen, type, id_str, NFS_UINT_MAXLEN);
+ if (data_size <= 0) {
+ ret = -EINVAL;
+ } else {
+ ret = strict_strtol(id_str, 10, &id_long);
+ *id = (__u32)id_long;
+ }
+ return ret;
+}
+
+int nfs_map_name_to_uid(struct nfs_client *clp, const char *name, size_t namelen, __u32 *uid)
+{
+ return nfs_idmap_lookup_id(name, namelen, "uid", uid);
+}
+
+int nfs_map_group_to_gid(struct nfs_client *clp, const char *name, size_t namelen, __u32 *gid)
+{
+ return nfs_idmap_lookup_id(name, namelen, "gid", gid);
+}
+
+int nfs_map_uid_to_name(struct nfs_client *clp, __u32 uid, char *buf, size_t buflen)
+{
+ return nfs_idmap_lookup_name(uid, "user", buf, buflen);
+}
+
+int nfs_map_gid_to_group(struct nfs_client *clp, __u32 gid, char *buf, size_t buflen)
+{
+ return nfs_idmap_lookup_name(gid, "group", buf, buflen);
+}
+
+#else /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */
+
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
@@ -503,16 +709,17 @@ int nfs_map_group_to_gid(struct nfs_client *clp, const char *name, size_t namele
return nfs_idmap_id(idmap, &idmap->idmap_group_hash, name, namelen, uid);
}
-int nfs_map_uid_to_name(struct nfs_client *clp, __u32 uid, char *buf)
+int nfs_map_uid_to_name(struct nfs_client *clp, __u32 uid, char *buf, size_t buflen)
{
struct idmap *idmap = clp->cl_idmap;
return nfs_idmap_name(idmap, &idmap->idmap_user_hash, uid, buf);
}
-int nfs_map_gid_to_group(struct nfs_client *clp, __u32 uid, char *buf)
+int nfs_map_gid_to_group(struct nfs_client *clp, __u32 uid, char *buf, size_t buflen)
{
struct idmap *idmap = clp->cl_idmap;
return nfs_idmap_name(idmap, &idmap->idmap_group_hash, uid, buf);
}
+#endif /* CONFIG_NFS_USE_NEW_IDMAPPER */
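
With the keyring-based idmapper above, the mapping helpers take an explicit buffer length, so callers size the name buffer themselves. A sketch of an assumed caller (the fallback to a numeric id is an illustration, not something this patch adds):

	static int example_owner_string(struct nfs_server *server, __u32 uid,
					char *buf, size_t buflen)
	{
		int err;

		err = nfs_map_uid_to_name(server->nfs_client, uid, buf, buflen);
		if (err < 0)
			/* no mapping found: fall back to the numeric id */
			err = snprintf(buf, buflen, "%u", uid);
		return err;
	}
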
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 7d2d6c72aa7..314f5716460 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -48,6 +48,7 @@
#include "internal.h"
#include "fscache.h"
#include "dns_resolve.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -234,9 +235,6 @@ nfs_init_locked(struct inode *inode, void *opaque)
return 0;
}
-/* Don't use READDIRPLUS on directories that we believe are too large */
-#define NFS_LIMIT_READDIRPLUS (8*PAGE_SIZE)
-
/*
* This is our front-end to iget that looks up inodes by file handle
* instead of inode number.
@@ -291,8 +289,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
inode->i_fop = &nfs_dir_operations;
- if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)
- && fattr->size <= NFS_LIMIT_READDIRPLUS)
+ if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
/* Deal with crossing mountpoints */
if ((fattr->valid & NFS_ATTR_FATTR_FSID)
@@ -623,7 +620,7 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
nfs_revalidate_inode(server, inode);
}
-static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred)
+struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred, fmode_t f_mode)
{
struct nfs_open_context *ctx;
@@ -633,11 +630,13 @@ static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct
path_get(&ctx->path);
ctx->cred = get_rpccred(cred);
ctx->state = NULL;
+ ctx->mode = f_mode;
ctx->flags = 0;
ctx->error = 0;
ctx->dir_cookie = 0;
nfs_init_lock_context(&ctx->lock_context);
ctx->lock_context.open_context = ctx;
+ INIT_LIST_HEAD(&ctx->list);
}
return ctx;
}
@@ -653,11 +652,15 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
{
struct inode *inode = ctx->path.dentry->d_inode;
- if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
+ if (!list_empty(&ctx->list)) {
+ if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
+ return;
+ list_del(&ctx->list);
+ spin_unlock(&inode->i_lock);
+ } else if (!atomic_dec_and_test(&ctx->lock_context.count))
return;
- list_del(&ctx->list);
- spin_unlock(&inode->i_lock);
- NFS_PROTO(inode)->close_context(ctx, is_sync);
+ if (inode != NULL)
+ NFS_PROTO(inode)->close_context(ctx, is_sync);
if (ctx->cred != NULL)
put_rpccred(ctx->cred);
path_put(&ctx->path);
@@ -673,7 +676,7 @@ void put_nfs_open_context(struct nfs_open_context *ctx)
* Ensure that mmap has a recent RPC credential for use when writing out
* shared pages
*/
-static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
+void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct nfs_inode *nfsi = NFS_I(inode);
@@ -730,11 +733,10 @@ int nfs_open(struct inode *inode, struct file *filp)
cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
- ctx = alloc_nfs_open_context(&filp->f_path, cred);
+ ctx = alloc_nfs_open_context(&filp->f_path, cred, filp->f_mode);
put_rpccred(cred);
if (ctx == NULL)
return -ENOMEM;
- ctx->mode = filp->f_mode;
nfs_file_set_open_context(filp, ctx);
put_nfs_open_context(ctx);
nfs_fscache_set_inode_cookie(inode, filp);
@@ -1409,6 +1411,7 @@ void nfs4_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
+ pnfs_destroy_layout(NFS_I(inode));
/* If we are holding a delegation, return it! */
nfs_inode_return_delegation_noreclaim(inode);
/* First call standard NFS clear_inode() code */
@@ -1446,6 +1449,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
nfsi->delegation = NULL;
nfsi->delegation_state = 0;
init_rwsem(&nfsi->rwsem);
+ nfsi->layout = NULL;
#endif
}
@@ -1493,7 +1497,7 @@ static int nfsiod_start(void)
{
struct workqueue_struct *wq;
dprintk("RPC: creating workqueue nfsiod\n");
- wq = create_singlethread_workqueue("nfsiod");
+ wq = alloc_workqueue("nfsiod", WQ_RESCUER, 0);
if (wq == NULL)
return -ENOMEM;
nfsiod_workqueue = wq;
@@ -1521,6 +1525,10 @@ static int __init init_nfs_fs(void)
{
int err;
+ err = nfs_idmap_init();
+ if (err < 0)
+ goto out9;
+
err = nfs_dns_resolver_init();
if (err < 0)
goto out8;
@@ -1585,6 +1593,8 @@ out6:
out7:
nfs_dns_resolver_destroy();
out8:
+ nfs_idmap_quit();
+out9:
return err;
}
@@ -1597,6 +1607,7 @@ static void __exit exit_nfs_fs(void)
nfs_destroy_nfspagecache();
nfs_fscache_unregister();
nfs_dns_resolver_destroy();
+ nfs_idmap_quit();
#ifdef CONFIG_PROC_FS
rpc_proc_unregister("nfs");
#endif
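
__put_nfs_open_context() above relies on atomic_dec_and_lock(), which only takes the spinlock when the reference count actually drops to zero. A generic sketch of that refcount idiom with a made-up structure:

	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct thing {
		atomic_t count;
		struct list_head node;
	};

	static void thing_put(struct thing *t, spinlock_t *lock)
	{
		/* returns true, with the lock held, only for the last reference */
		if (!atomic_dec_and_lock(&t->count, lock))
			return;
		list_del(&t->node);
		spin_unlock(lock);
		kfree(t);
	}
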
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index c961bc92c10..db08ff3ff45 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -63,6 +63,12 @@ struct nfs_clone_mount {
#define NFS_UNSPEC_PORT (-1)
/*
+ * Maximum number of pages that readdir can use for creating
+ * a vmapped array of pages.
+ */
+#define NFS_MAX_READDIR_PAGES 8
+
+/*
* In-kernel mount arguments
*/
struct nfs_parsed_mount_data {
@@ -181,15 +187,15 @@ extern void nfs_destroy_directcache(void);
/* nfs2xdr.c */
extern int nfs_stat_to_errno(int);
extern struct rpc_procinfo nfs_procedures[];
-extern __be32 * nfs_decode_dirent(__be32 *, struct nfs_entry *, int);
+extern __be32 *nfs_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
/* nfs3xdr.c */
extern struct rpc_procinfo nfs3_procedures[];
-extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
+extern __be32 *nfs3_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
-extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
+extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
#endif
#ifdef CONFIG_NFS_V4_1
extern const u32 nfs41_maxread_overhead;
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 59047f8d7d7..eceafe74f47 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -153,6 +153,7 @@ int nfs_mount(struct nfs_mount_request *info)
.rpc_resp = &result,
};
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = info->protocol,
.address = info->sap,
.addrsize = info->salen,
@@ -224,6 +225,7 @@ void nfs_umount(const struct nfs_mount_request *info)
.to_retries = 2,
};
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = IPPROTO_UDP,
.address = info->sap,
.addrsize = info->salen,
@@ -436,7 +438,7 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
for (i = 0; i < entries; i++) {
flavors[i] = ntohl(*p++);
- dprintk("NFS:\tflavor %u: %d\n", i, flavors[i]);
+ dprintk("NFS: auth flavor[%u]: %d\n", i, flavors[i]);
}
*count = i;
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index db8846a0e82..e6bf45710cc 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -337,10 +337,10 @@ nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args)
static int
nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
{
- p = xdr_encode_fhandle(p, args->fromfh);
- p = xdr_encode_array(p, args->fromname, args->fromlen);
- p = xdr_encode_fhandle(p, args->tofh);
- p = xdr_encode_array(p, args->toname, args->tolen);
+ p = xdr_encode_fhandle(p, args->old_dir);
+ p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
+ p = xdr_encode_fhandle(p, args->new_dir);
+ p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
@@ -423,9 +423,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
struct page **page;
size_t hdrlen;
unsigned int pglen, recvd;
- u32 len;
int status, nr = 0;
- __be32 *end, *entry, *kaddr;
if ((status = ntohl(*p++)))
return nfs_stat_to_errno(status);
@@ -445,80 +443,59 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
if (pglen > recvd)
pglen = recvd;
page = rcvbuf->pages;
- kaddr = p = kmap_atomic(*page, KM_USER0);
- end = (__be32 *)((char *)p + pglen);
- entry = p;
-
- /* Make sure the packet actually has a value_follows and EOF entry */
- if ((entry + 1) > end)
- goto short_pkt;
-
- for (; *p++; nr++) {
- if (p + 2 > end)
- goto short_pkt;
- p++; /* fileid */
- len = ntohl(*p++);
- p += XDR_QUADLEN(len) + 1; /* name plus cookie */
- if (len > NFS2_MAXNAMLEN) {
- dprintk("NFS: giant filename in readdir (len 0x%x)!\n",
- len);
- goto err_unmap;
- }
- if (p + 2 > end)
- goto short_pkt;
- entry = p;
- }
-
- /*
- * Apparently some server sends responses that are a valid size, but
- * contain no entries, and have value_follows==0 and EOF==0. For
- * those, just set the EOF marker.
- */
- if (!nr && entry[1] == 0) {
- dprintk("NFS: readdir reply truncated!\n");
- entry[1] = 1;
- }
- out:
- kunmap_atomic(kaddr, KM_USER0);
return nr;
- short_pkt:
- /*
- * When we get a short packet there are 2 possibilities. We can
- * return an error, or fix up the response to look like a valid
- * response and return what we have so far. If there are no
- * entries and the packet was short, then return -EIO. If there
- * are valid entries in the response, return them and pretend that
- * the call was successful, but incomplete. The caller can retry the
- * readdir starting at the last cookie.
- */
- entry[0] = entry[1] = 0;
- if (!nr)
- nr = -errno_NFSERR_IO;
- goto out;
-err_unmap:
- nr = -errno_NFSERR_IO;
- goto out;
+}
+
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("nfs: %s: prematurely hit end of receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
}
__be32 *
-nfs_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
+nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
{
- if (!*p++) {
- if (!*p)
+ __be32 *p;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!ntohl(*p++)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!ntohl(*p++))
return ERR_PTR(-EAGAIN);
entry->eof = 1;
return ERR_PTR(-EBADCOOKIE);
}
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+
entry->ino = ntohl(*p++);
entry->len = ntohl(*p++);
+
+ p = xdr_inline_decode(xdr, entry->len + 4);
+ if (unlikely(!p))
+ goto out_overflow;
entry->name = (const char *) p;
p += XDR_QUADLEN(entry->len);
entry->prev_cookie = entry->cookie;
entry->cookie = ntohl(*p++);
- entry->eof = !p[0] && p[1];
+
+ p = xdr_inline_peek(xdr, 8);
+ if (p != NULL)
+ entry->eof = !p[0] && p[1];
+ else
+ entry->eof = 0;
return p;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return ERR_PTR(-EIO);
}
/*
@@ -596,7 +573,6 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
struct kvec *iov = rcvbuf->head;
size_t hdrlen;
u32 len, recvd;
- char *kaddr;
int status;
if ((status = ntohl(*p++)))
@@ -623,10 +599,7 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
return -EIO;
}
- /* NULL terminate the string we got */
- kaddr = (char *)kmap_atomic(rcvbuf->pages[0], KM_USER0);
- kaddr[len+rcvbuf->page_base] = '\0';
- kunmap_atomic(kaddr, KM_USER0);
+ xdr_terminate_string(rcvbuf, len);
return 0;
}
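
The dirent and readlink decoders above are rewritten against an xdr_stream, where xdr_inline_decode() bounds-checks each read against the receive buffer instead of trusting raw pointer arithmetic. A minimal sketch of the idiom, decoding a made-up pair of 32-bit words:

	#include <linux/errno.h>
	#include <linux/sunrpc/xdr.h>

	static int decode_two_words(struct xdr_stream *xdr, u32 *a, u32 *b)
	{
		__be32 *p;

		p = xdr_inline_decode(xdr, 8);	/* reserve two 32-bit words */
		if (unlikely(!p))
			return -EIO;		/* receive buffer exhausted */
		*a = be32_to_cpup(p++);
		*b = be32_to_cpup(p);
		return 0;
	}
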
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index fabb4f2849a..ce939c062a5 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -313,7 +313,7 @@ static void nfs3_free_createdata(struct nfs3_createdata *data)
*/
static int
nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags, struct nameidata *nd)
+ int flags, struct nfs_open_context *ctx)
{
struct nfs3_createdata *data;
mode_t mode = sattr->ia_mode;
@@ -438,19 +438,38 @@ nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir)
return 1;
}
+static void
+nfs3_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+{
+ msg->rpc_proc = &nfs3_procedures[NFS3PROC_RENAME];
+}
+
+static int
+nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
+ struct inode *new_dir)
+{
+ struct nfs_renameres *res;
+
+ if (nfs3_async_handle_jukebox(task, old_dir))
+ return 0;
+ res = task->tk_msg.rpc_resp;
+
+ nfs_post_op_update_inode(old_dir, res->old_fattr);
+ nfs_post_op_update_inode(new_dir, res->new_fattr);
+ return 1;
+}
+
static int
nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
- struct nfs3_renameargs arg = {
- .fromfh = NFS_FH(old_dir),
- .fromname = old_name->name,
- .fromlen = old_name->len,
- .tofh = NFS_FH(new_dir),
- .toname = new_name->name,
- .tolen = new_name->len
+ struct nfs_renameargs arg = {
+ .old_dir = NFS_FH(old_dir),
+ .old_name = old_name,
+ .new_dir = NFS_FH(new_dir),
+ .new_name = new_name,
};
- struct nfs3_renameres res;
+ struct nfs_renameres res;
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_RENAME],
.rpc_argp = &arg,
@@ -460,17 +479,17 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
- res.fromattr = nfs_alloc_fattr();
- res.toattr = nfs_alloc_fattr();
- if (res.fromattr == NULL || res.toattr == NULL)
+ res.old_fattr = nfs_alloc_fattr();
+ res.new_fattr = nfs_alloc_fattr();
+ if (res.old_fattr == NULL || res.new_fattr == NULL)
goto out;
status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
- nfs_post_op_update_inode(old_dir, res.fromattr);
- nfs_post_op_update_inode(new_dir, res.toattr);
+ nfs_post_op_update_inode(old_dir, res.old_fattr);
+ nfs_post_op_update_inode(new_dir, res.new_fattr);
out:
- nfs_free_fattr(res.toattr);
- nfs_free_fattr(res.fromattr);
+ nfs_free_fattr(res.old_fattr);
+ nfs_free_fattr(res.new_fattr);
dprintk("NFS reply rename: %d\n", status);
return status;
}
@@ -611,7 +630,7 @@ out:
*/
static int
nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
- u64 cookie, struct page *page, unsigned int count, int plus)
+ u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
__be32 *verf = NFS_COOKIEVERF(dir);
@@ -621,7 +640,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
.verf = {verf[0], verf[1]},
.plus = plus,
.count = count,
- .pages = &page
+ .pages = pages
};
struct nfs3_readdirres res = {
.verf = verf,
@@ -652,7 +671,8 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
nfs_free_fattr(res.dir_attr);
out:
- dprintk("NFS reply readdir: %d\n", status);
+ dprintk("NFS reply readdir%s: %d\n",
+ plus? "plus" : "", status);
return status;
}
@@ -722,7 +742,7 @@ nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
dprintk("NFS call fsstat\n");
nfs_fattr_init(stat->fattr);
status = rpc_call_sync(server->client, &msg, 0);
- dprintk("NFS reply statfs: %d\n", status);
+ dprintk("NFS reply fsstat: %d\n", status);
return status;
}
@@ -844,6 +864,8 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.unlink_setup = nfs3_proc_unlink_setup,
.unlink_done = nfs3_proc_unlink_done,
.rename = nfs3_proc_rename,
+ .rename_setup = nfs3_proc_rename_setup,
+ .rename_done = nfs3_proc_rename_done,
.link = nfs3_proc_link,
.symlink = nfs3_proc_symlink,
.mkdir = nfs3_proc_mkdir,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 9769704f8ce..d9a5e832c25 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -100,6 +100,13 @@ static const umode_t nfs_type2fmt[] = {
[NF3FIFO] = S_IFIFO,
};
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("nfs: %s: prematurely hit end of receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
+
/*
* Common NFS XDR functions as inlines
*/
@@ -119,6 +126,29 @@ xdr_decode_fhandle(__be32 *p, struct nfs_fh *fh)
return NULL;
}
+static inline __be32 *
+xdr_decode_fhandle_stream(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ fh->size = ntohl(*p++);
+
+ if (fh->size <= NFS3_FHSIZE) {
+ p = xdr_inline_decode(xdr, fh->size);
+ if (unlikely(!p))
+ goto out_overflow;
+ memcpy(fh->data, p, fh->size);
+ return p + XDR_QUADLEN(fh->size);
+ }
+ return NULL;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return ERR_PTR(-EIO);
+}
+
/*
* Encode/decode time.
*/
@@ -241,6 +271,26 @@ xdr_decode_post_op_attr(__be32 *p, struct nfs_fattr *fattr)
}
static inline __be32 *
+xdr_decode_post_op_attr_stream(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (ntohl(*p++)) {
+ p = xdr_inline_decode(xdr, 84);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_fattr(p, fattr);
+ }
+ return p;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return ERR_PTR(-EIO);
+}
+
+static inline __be32 *
xdr_decode_pre_op_attr(__be32 *p, struct nfs_fattr *fattr)
{
if (*p++)
@@ -442,12 +492,12 @@ nfs3_xdr_mknodargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mknodargs *args)
* Encode RENAME arguments
*/
static int
-nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs3_renameargs *args)
+nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
{
- p = xdr_encode_fhandle(p, args->fromfh);
- p = xdr_encode_array(p, args->fromname, args->fromlen);
- p = xdr_encode_fhandle(p, args->tofh);
- p = xdr_encode_array(p, args->toname, args->tolen);
+ p = xdr_encode_fhandle(p, args->old_dir);
+ p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
+ p = xdr_encode_fhandle(p, args->new_dir);
+ p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
@@ -504,9 +554,8 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
struct kvec *iov = rcvbuf->head;
struct page **page;
size_t hdrlen;
- u32 len, recvd, pglen;
+ u32 recvd, pglen;
int status, nr = 0;
- __be32 *entry, *end, *kaddr;
status = ntohl(*p++);
/* Decode post_op_attrs */
@@ -536,99 +585,38 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
if (pglen > recvd)
pglen = recvd;
page = rcvbuf->pages;
- kaddr = p = kmap_atomic(*page, KM_USER0);
- end = (__be32 *)((char *)p + pglen);
- entry = p;
-
- /* Make sure the packet actually has a value_follows and EOF entry */
- if ((entry + 1) > end)
- goto short_pkt;
-
- for (; *p++; nr++) {
- if (p + 3 > end)
- goto short_pkt;
- p += 2; /* inode # */
- len = ntohl(*p++); /* string length */
- p += XDR_QUADLEN(len) + 2; /* name + cookie */
- if (len > NFS3_MAXNAMLEN) {
- dprintk("NFS: giant filename in readdir (len 0x%x)!\n",
- len);
- goto err_unmap;
- }
- if (res->plus) {
- /* post_op_attr */
- if (p + 2 > end)
- goto short_pkt;
- if (*p++) {
- p += 21;
- if (p + 1 > end)
- goto short_pkt;
- }
- /* post_op_fh3 */
- if (*p++) {
- if (p + 1 > end)
- goto short_pkt;
- len = ntohl(*p++);
- if (len > NFS3_FHSIZE) {
- dprintk("NFS: giant filehandle in "
- "readdir (len 0x%x)!\n", len);
- goto err_unmap;
- }
- p += XDR_QUADLEN(len);
- }
- }
-
- if (p + 2 > end)
- goto short_pkt;
- entry = p;
- }
-
- /*
- * Apparently some server sends responses that are a valid size, but
- * contain no entries, and have value_follows==0 and EOF==0. For
- * those, just set the EOF marker.
- */
- if (!nr && entry[1] == 0) {
- dprintk("NFS: readdir reply truncated!\n");
- entry[1] = 1;
- }
- out:
- kunmap_atomic(kaddr, KM_USER0);
return nr;
- short_pkt:
- /*
- * When we get a short packet there are 2 possibilities. We can
- * return an error, or fix up the response to look like a valid
- * response and return what we have so far. If there are no
- * entries and the packet was short, then return -EIO. If there
- * are valid entries in the response, return them and pretend that
- * the call was successful, but incomplete. The caller can retry the
- * readdir starting at the last cookie.
- */
- entry[0] = entry[1] = 0;
- if (!nr)
- nr = -errno_NFSERR_IO;
- goto out;
-err_unmap:
- nr = -errno_NFSERR_IO;
- goto out;
}
__be32 *
-nfs3_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
+nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
{
+ __be32 *p;
struct nfs_entry old = *entry;
- if (!*p++) {
- if (!*p)
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!ntohl(*p++)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!ntohl(*p++))
return ERR_PTR(-EAGAIN);
entry->eof = 1;
return ERR_PTR(-EBADCOOKIE);
}
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
p = xdr_decode_hyper(p, &entry->ino);
entry->len = ntohl(*p++);
+
+ p = xdr_inline_decode(xdr, entry->len + 8);
+ if (unlikely(!p))
+ goto out_overflow;
entry->name = (const char *) p;
p += XDR_QUADLEN(entry->len);
entry->prev_cookie = entry->cookie;
@@ -636,10 +624,17 @@ nfs3_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
if (plus) {
entry->fattr->valid = 0;
- p = xdr_decode_post_op_attr(p, entry->fattr);
+ p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
+ if (IS_ERR(p))
+ goto out_overflow_exit;
/* In fact, a post_op_fh3: */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
if (*p++) {
- p = xdr_decode_fhandle(p, entry->fh);
+ p = xdr_decode_fhandle_stream(xdr, entry->fh);
+ if (IS_ERR(p))
+ goto out_overflow_exit;
/* Ugh -- server reply was truncated */
if (p == NULL) {
dprintk("NFS: FH truncated\n");
@@ -650,8 +645,18 @@ nfs3_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
memset((u8*)(entry->fh), 0, sizeof(*entry->fh));
}
- entry->eof = !p[0] && p[1];
+ p = xdr_inline_peek(xdr, 8);
+ if (p != NULL)
+ entry->eof = !p[0] && p[1];
+ else
+ entry->eof = 0;
+
return p;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+out_overflow_exit:
+ return ERR_PTR(-EIO);
}
/*
@@ -824,7 +829,6 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
struct kvec *iov = rcvbuf->head;
size_t hdrlen;
u32 len, recvd;
- char *kaddr;
int status;
status = ntohl(*p++);
@@ -857,10 +861,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
return -EIO;
}
- /* NULL terminate the string we got */
- kaddr = (char*)kmap_atomic(rcvbuf->pages[0], KM_USER0);
- kaddr[len+rcvbuf->page_base] = '\0';
- kunmap_atomic(kaddr, KM_USER0);
+ xdr_terminate_string(rcvbuf, len);
return 0;
}
@@ -970,14 +971,14 @@ nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
* Decode RENAME reply
*/
static int
-nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs3_renameres *res)
+nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs_renameres *res)
{
int status;
if ((status = ntohl(*p++)) != 0)
status = nfs_stat_to_errno(status);
- p = xdr_decode_wcc_data(p, res->fromattr);
- p = xdr_decode_wcc_data(p, res->toattr);
+ p = xdr_decode_wcc_data(p, res->old_fattr);
+ p = xdr_decode_wcc_data(p, res->new_fattr);
return status;
}
@@ -1043,8 +1044,9 @@ nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res)
res->wtmult = ntohl(*p++);
res->dtpref = ntohl(*p++);
p = xdr_decode_hyper(p, &res->maxfilesize);
+ p = xdr_decode_time3(p, &res->time_delta);
- /* ignore time_delta and properties */
+ /* ignore properties */
res->lease_time = 0;
return 0;
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 311e15cc8af..9fa496387fd 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -242,8 +242,6 @@ extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait);
-extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
-extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);
@@ -333,7 +331,7 @@ extern void nfs_free_seqid(struct nfs_seqid *seqid);
extern const nfs4_stateid zero_stateid;
/* nfs4xdr.c */
-extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
+extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
extern struct rpc_procinfo nfs4_procedures[];
struct nfs4_mount_data;
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
new file mode 100644
index 00000000000..2e92f0d8d65
--- /dev/null
+++ b/fs/nfs/nfs4filelayout.c
@@ -0,0 +1,280 @@
+/*
+ * Module for the pnfs nfs4 file layout driver.
+ * Defines all I/O and Policy interface operations, plus code
+ * to register itself with the pNFS client.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+
+#include "internal.h"
+#include "nfs4filelayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
+MODULE_DESCRIPTION("The NFSv4 file layout driver");
+
+static int
+filelayout_set_layoutdriver(struct nfs_server *nfss)
+{
+ int status = pnfs_alloc_init_deviceid_cache(nfss->nfs_client,
+ nfs4_fl_free_deviceid_callback);
+ if (status) {
+ printk(KERN_WARNING "%s: deviceid cache could not be "
+ "initialized\n", __func__);
+ return status;
+ }
+ dprintk("%s: deviceid cache has been initialized successfully\n",
+ __func__);
+ return 0;
+}
+
+/* Clear out the layout by destroying its device list */
+static int
+filelayout_clear_layoutdriver(struct nfs_server *nfss)
+{
+ dprintk("--> %s\n", __func__);
+
+ if (nfss->nfs_client->cl_devid_cache)
+ pnfs_put_deviceid_cache(nfss->nfs_client);
+ return 0;
+}
+
+/*
+ * filelayout_check_layout()
+ *
+ * Make sure layout segment parameters are sane WRT the device.
+ * At this point no generic layer initialization of the lseg has occurred,
+ * and nothing has been added to the layout_hdr cache.
+ *
+ */
+static int
+filelayout_check_layout(struct pnfs_layout_hdr *lo,
+ struct nfs4_filelayout_segment *fl,
+ struct nfs4_layoutget_res *lgr,
+ struct nfs4_deviceid *id)
+{
+ struct nfs4_file_layout_dsaddr *dsaddr;
+ int status = -EINVAL;
+ struct nfs_server *nfss = NFS_SERVER(lo->inode);
+
+ dprintk("--> %s\n", __func__);
+
+ if (fl->pattern_offset > lgr->range.offset) {
+ dprintk("%s pattern_offset %lld to large\n",
+ __func__, fl->pattern_offset);
+ goto out;
+ }
+
+ if (fl->stripe_unit % PAGE_SIZE) {
+ dprintk("%s Stripe unit (%u) not page aligned\n",
+ __func__, fl->stripe_unit);
+ goto out;
+ }
+
+ /* find and reference the deviceid */
+ dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id);
+ if (dsaddr == NULL) {
+ dsaddr = get_device_info(lo->inode, id);
+ if (dsaddr == NULL)
+ goto out;
+ }
+ fl->dsaddr = dsaddr;
+
+ if (fl->first_stripe_index < 0 ||
+ fl->first_stripe_index >= dsaddr->stripe_count) {
+ dprintk("%s Bad first_stripe_index %d\n",
+ __func__, fl->first_stripe_index);
+ goto out_put;
+ }
+
+ if ((fl->stripe_type == STRIPE_SPARSE &&
+ fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
+ (fl->stripe_type == STRIPE_DENSE &&
+ fl->num_fh != dsaddr->stripe_count)) {
+ dprintk("%s num_fh %u not valid for given packing\n",
+ __func__, fl->num_fh);
+ goto out_put;
+ }
+
+ if (fl->stripe_unit % nfss->rsize || fl->stripe_unit % nfss->wsize) {
+ dprintk("%s Stripe unit (%u) not aligned with rsize %u "
+ "wsize %u\n", __func__, fl->stripe_unit, nfss->rsize,
+ nfss->wsize);
+ }
+
+ status = 0;
+out:
+ dprintk("--> %s returns %d\n", __func__, status);
+ return status;
+out_put:
+ pnfs_put_deviceid(nfss->nfs_client->cl_devid_cache, &dsaddr->deviceid);
+ goto out;
+}
+
+static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
+{
+ int i;
+
+ for (i = 0; i < fl->num_fh; i++) {
+ if (!fl->fh_array[i])
+ break;
+ kfree(fl->fh_array[i]);
+ }
+ kfree(fl->fh_array);
+ fl->fh_array = NULL;
+}
+
+static void
+_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
+{
+ filelayout_free_fh_array(fl);
+ kfree(fl);
+}
+
+static int
+filelayout_decode_layout(struct pnfs_layout_hdr *flo,
+ struct nfs4_filelayout_segment *fl,
+ struct nfs4_layoutget_res *lgr,
+ struct nfs4_deviceid *id)
+{
+ uint32_t *p = (uint32_t *)lgr->layout.buf;
+ uint32_t nfl_util;
+ int i;
+
+ dprintk("%s: set_layout_map Begin\n", __func__);
+
+ memcpy(id, p, sizeof(*id));
+ p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
+ print_deviceid(id);
+
+ nfl_util = be32_to_cpup(p++);
+ if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
+ fl->commit_through_mds = 1;
+ if (nfl_util & NFL4_UFLG_DENSE)
+ fl->stripe_type = STRIPE_DENSE;
+ else
+ fl->stripe_type = STRIPE_SPARSE;
+ fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK;
+
+ fl->first_stripe_index = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &fl->pattern_offset);
+ fl->num_fh = be32_to_cpup(p++);
+
+ dprintk("%s: nfl_util 0x%X num_fh %u fsi %u po %llu\n",
+ __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
+ fl->pattern_offset);
+
+ fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
+ GFP_KERNEL);
+ if (!fl->fh_array)
+ return -ENOMEM;
+
+ for (i = 0; i < fl->num_fh; i++) {
+ /* Do we want to use a mempool here? */
+ fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
+ if (!fl->fh_array[i]) {
+ filelayout_free_fh_array(fl);
+ return -ENOMEM;
+ }
+ fl->fh_array[i]->size = be32_to_cpup(p++);
+ if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+ printk(KERN_ERR "Too big fh %d received %d\n",
+ i, fl->fh_array[i]->size);
+ filelayout_free_fh_array(fl);
+ return -EIO;
+ }
+ memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
+ p += XDR_QUADLEN(fl->fh_array[i]->size);
+ dprintk("DEBUG: %s: fh len %d\n", __func__,
+ fl->fh_array[i]->size);
+ }
+
+ return 0;
+}
+
+static struct pnfs_layout_segment *
+filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
+ struct nfs4_layoutget_res *lgr)
+{
+ struct nfs4_filelayout_segment *fl;
+ int rc;
+ struct nfs4_deviceid id;
+
+ dprintk("--> %s\n", __func__);
+ fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return NULL;
+
+ rc = filelayout_decode_layout(layoutid, fl, lgr, &id);
+ if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) {
+ _filelayout_free_lseg(fl);
+ return NULL;
+ }
+ return &fl->generic_hdr;
+}
+
+static void
+filelayout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct nfs_server *nfss = NFS_SERVER(lseg->layout->inode);
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+
+ dprintk("--> %s\n", __func__);
+ pnfs_put_deviceid(nfss->nfs_client->cl_devid_cache,
+ &fl->dsaddr->deviceid);
+ _filelayout_free_lseg(fl);
+}
+
+static struct pnfs_layoutdriver_type filelayout_type = {
+ .id = LAYOUT_NFSV4_1_FILES,
+ .name = "LAYOUT_NFSV4_1_FILES",
+ .owner = THIS_MODULE,
+ .set_layoutdriver = filelayout_set_layoutdriver,
+ .clear_layoutdriver = filelayout_clear_layoutdriver,
+ .alloc_lseg = filelayout_alloc_lseg,
+ .free_lseg = filelayout_free_lseg,
+};
+
+static int __init nfs4filelayout_init(void)
+{
+ printk(KERN_INFO "%s: NFSv4 File Layout Driver Registering...\n",
+ __func__);
+ return pnfs_register_layoutdriver(&filelayout_type);
+}
+
+static void __exit nfs4filelayout_exit(void)
+{
+ printk(KERN_INFO "%s: NFSv4 File Layout Driver Unregistering...\n",
+ __func__);
+ pnfs_unregister_layoutdriver(&filelayout_type);
+}
+
+module_init(nfs4filelayout_init);
+module_exit(nfs4filelayout_exit);
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
new file mode 100644
index 00000000000..bbf60dd2ab9
--- /dev/null
+++ b/fs/nfs/nfs4filelayout.h
@@ -0,0 +1,94 @@
+/*
+ * NFSv4 file layout driver data structures.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#ifndef FS_NFS_NFS4FILELAYOUT_H
+#define FS_NFS_NFS4FILELAYOUT_H
+
+#include "pnfs.h"
+
+/*
+ * Field testing shows we need to support up to 4096 stripe indices.
+ * We store each index as a u8 (u32 on the wire) to keep the memory footprint
+ * reasonable. This in turn means we support a maximum of 256
+ * RFC 5661 multipath_list4 structures.
+ */
+#define NFS4_PNFS_MAX_STRIPE_CNT 4096
+#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */
+
+enum stripetype4 {
+ STRIPE_SPARSE = 1,
+ STRIPE_DENSE = 2
+};
+
+/* Individual ip address */
+struct nfs4_pnfs_ds {
+ struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
+ u32 ds_ip_addr;
+ u32 ds_port;
+ struct nfs_client *ds_clp;
+ atomic_t ds_count;
+};
+
+struct nfs4_file_layout_dsaddr {
+ struct pnfs_deviceid_node deviceid;
+ u32 stripe_count;
+ u8 *stripe_indices;
+ u32 ds_num;
+ struct nfs4_pnfs_ds *ds_list[1];
+};
+
+struct nfs4_filelayout_segment {
+ struct pnfs_layout_segment generic_hdr;
+ u32 stripe_type;
+ u32 commit_through_mds;
+ u32 stripe_unit;
+ u32 first_stripe_index;
+ u64 pattern_offset;
+ struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
+ unsigned int num_fh;
+ struct nfs_fh **fh_array;
+};
+
+static inline struct nfs4_filelayout_segment *
+FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
+{
+ return container_of(lseg,
+ struct nfs4_filelayout_segment,
+ generic_hdr);
+}
+
+extern void nfs4_fl_free_deviceid_callback(struct pnfs_deviceid_node *);
+extern void print_ds(struct nfs4_pnfs_ds *ds);
+extern void print_deviceid(struct nfs4_deviceid *dev_id);
+extern struct nfs4_file_layout_dsaddr *
+nfs4_fl_find_get_deviceid(struct nfs_client *, struct nfs4_deviceid *dev_id);
+struct nfs4_file_layout_dsaddr *
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id);
+
+#endif /* FS_NFS_NFS4FILELAYOUT_H */
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
new file mode 100644
index 00000000000..51fe64ace55
--- /dev/null
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -0,0 +1,448 @@
+/*
+ * Device operations for the pnfs nfs4 file layout driver.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ * Garth Goodson <Garth.Goodson@netapp.com>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/vmalloc.h>
+
+#include "internal.h"
+#include "nfs4filelayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+/*
+ * Data server cache
+ *
+ * Data servers can be mapped to different device ids.
+ * nfs4_pnfs_ds reference counting
+ * - set to 1 on allocation
+ * - incremented when a device id maps a data server already in the cache.
+ * - decremented when deviceid is removed from the cache.
+ */
+DEFINE_SPINLOCK(nfs4_ds_cache_lock);
+static LIST_HEAD(nfs4_data_server_cache);
+
+/* Debug routines */
+void
+print_ds(struct nfs4_pnfs_ds *ds)
+{
+ if (ds == NULL) {
+ printk("%s NULL device\n", __func__);
+ return;
+ }
+ printk(" ip_addr %x port %hu\n"
+ " ref count %d\n"
+ " client %p\n"
+ " cl_exchange_flags %x\n",
+ ntohl(ds->ds_ip_addr), ntohs(ds->ds_port),
+ atomic_read(&ds->ds_count), ds->ds_clp,
+ ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
+}
+
+void
+print_ds_list(struct nfs4_file_layout_dsaddr *dsaddr)
+{
+ int i;
+
+ ifdebug(FACILITY) {
+ printk("%s dsaddr->ds_num %d\n", __func__,
+ dsaddr->ds_num);
+ for (i = 0; i < dsaddr->ds_num; i++)
+ print_ds(dsaddr->ds_list[i]);
+ }
+}
+
+void print_deviceid(struct nfs4_deviceid *id)
+{
+ u32 *p = (u32 *)id;
+
+ dprintk("%s: device id= [%x%x%x%x]\n", __func__,
+ p[0], p[1], p[2], p[3]);
+}
+
+/* nfs4_ds_cache_lock is held */
+static struct nfs4_pnfs_ds *
+_data_server_lookup_locked(u32 ip_addr, u32 port)
+{
+ struct nfs4_pnfs_ds *ds;
+
+ dprintk("_data_server_lookup: ip_addr=%x port=%hu\n",
+ ntohl(ip_addr), ntohs(port));
+
+ list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) {
+ if (ds->ds_ip_addr == ip_addr &&
+ ds->ds_port == port) {
+ return ds;
+ }
+ }
+ return NULL;
+}
+
+static void
+destroy_ds(struct nfs4_pnfs_ds *ds)
+{
+ dprintk("--> %s\n", __func__);
+ ifdebug(FACILITY)
+ print_ds(ds);
+
+ if (ds->ds_clp)
+ nfs_put_client(ds->ds_clp);
+ kfree(ds);
+}
+
+static void
+nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
+{
+ struct nfs4_pnfs_ds *ds;
+ int i;
+
+ print_deviceid(&dsaddr->deviceid.de_id);
+
+ for (i = 0; i < dsaddr->ds_num; i++) {
+ ds = dsaddr->ds_list[i];
+ if (ds != NULL) {
+ if (atomic_dec_and_lock(&ds->ds_count,
+ &nfs4_ds_cache_lock)) {
+ list_del_init(&ds->ds_node);
+ spin_unlock(&nfs4_ds_cache_lock);
+ destroy_ds(ds);
+ }
+ }
+ }
+ kfree(dsaddr->stripe_indices);
+ kfree(dsaddr);
+}
+
+void
+nfs4_fl_free_deviceid_callback(struct pnfs_deviceid_node *device)
+{
+ struct nfs4_file_layout_dsaddr *dsaddr =
+ container_of(device, struct nfs4_file_layout_dsaddr, deviceid);
+
+ nfs4_fl_free_deviceid(dsaddr);
+}
+
+static struct nfs4_pnfs_ds *
+nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port)
+{
+ struct nfs4_pnfs_ds *tmp_ds, *ds;
+
+ ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL);
+ if (!ds)
+ goto out;
+
+ spin_lock(&nfs4_ds_cache_lock);
+ tmp_ds = _data_server_lookup_locked(ip_addr, port);
+ if (tmp_ds == NULL) {
+ ds->ds_ip_addr = ip_addr;
+ ds->ds_port = port;
+ atomic_set(&ds->ds_count, 1);
+ INIT_LIST_HEAD(&ds->ds_node);
+ ds->ds_clp = NULL;
+ list_add(&ds->ds_node, &nfs4_data_server_cache);
+ dprintk("%s add new data server ip 0x%x\n", __func__,
+ ds->ds_ip_addr);
+ } else {
+ kfree(ds);
+ atomic_inc(&tmp_ds->ds_count);
+ dprintk("%s data server found ip 0x%x, inc'ed ds_count to %d\n",
+ __func__, tmp_ds->ds_ip_addr,
+ atomic_read(&tmp_ds->ds_count));
+ ds = tmp_ds;
+ }
+ spin_unlock(&nfs4_ds_cache_lock);
+out:
+ return ds;
+}
+
+/*
+ * Currently we only support IPv4, and only one multipath address.
+ */
+static struct nfs4_pnfs_ds *
+decode_and_add_ds(__be32 **pp, struct inode *inode)
+{
+ struct nfs4_pnfs_ds *ds = NULL;
+ char *buf;
+ const char *ipend, *pstr;
+ u32 ip_addr, port;
+ int nlen, rlen, i;
+ int tmp[2];
+ __be32 *r_netid, *r_addr, *p = *pp;
+
+ /* r_netid */
+ nlen = be32_to_cpup(p++);
+ r_netid = p;
+ p += XDR_QUADLEN(nlen);
+
+ /* r_addr */
+ rlen = be32_to_cpup(p++);
+ r_addr = p;
+ p += XDR_QUADLEN(rlen);
+ *pp = p;
+
+ /* Check that netid is "tcp" */
+ if (nlen != 3 || memcmp((char *)r_netid, "tcp", 3)) {
+ dprintk("%s: ERROR: non ipv4 TCP r_netid\n", __func__);
+ goto out_err;
+ }
+
+ /* ipv6 length plus port is legal */
+ if (rlen > INET6_ADDRSTRLEN + 8) {
+ dprintk("%s Invalid address, length %d\n", __func__,
+ rlen);
+ goto out_err;
+ }
+ buf = kmalloc(rlen + 1, GFP_KERNEL);
+ if (!buf)
+ goto out_err;
+ buf[rlen] = '\0';
+ memcpy(buf, r_addr, rlen);
+
+ /* replace the port dots with dashes for the in4_pton() delimiter */
+ for (i = 0; i < 2; i++) {
+ char *res = strrchr(buf, '.');
+ *res = '-';
+ }
+
+ /* Currently only IPv4 addresses are supported */
+ if (in4_pton(buf, rlen, (u8 *)&ip_addr, '-', &ipend) == 0) {
+ dprintk("%s: Only ipv4 addresses supported\n", __func__);
+ goto out_free;
+ }
+
+ /* port */
+ pstr = ipend;
+ sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
+ port = htons((tmp[0] << 8) | (tmp[1]));
+
+ ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
+ dprintk("%s Decoded address and port %s\n", __func__, buf);
+out_free:
+ kfree(buf);
+out_err:
+ return ds;
+}
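As an illustration of the address split above (not part of the patch), here is a stand-alone sketch that uses plain sscanf() instead of in4_pton(); the RFC 5661 universal address value "a.b.c.d.p1.p2" is made up:

#include <stdio.h>

int main(void)
{
	const char *uaddr = "10.1.2.3.8.1";	/* hypothetical r_addr: a.b.c.d.p1.p2 */
	unsigned a, b, c, d, p1, p2;

	if (sscanf(uaddr, "%u.%u.%u.%u.%u.%u", &a, &b, &c, &d, &p1, &p2) != 6)
		return 1;
	/* port = p1 * 256 + p2; here 8 * 256 + 1 == 2049, the usual NFS port */
	printf("ip %u.%u.%u.%u port %u\n", a, b, c, d, (p1 << 8) | p2);
	return 0;
}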
+
+/* Decode opaque device data and return the result */
+static struct nfs4_file_layout_dsaddr*
+decode_device(struct inode *ino, struct pnfs_device *pdev)
+{
+ int i, dummy;
+ u32 cnt, num;
+ u8 *indexp;
+ __be32 *p = (__be32 *)pdev->area, *indicesp;
+ struct nfs4_file_layout_dsaddr *dsaddr;
+
+ /* Get the stripe count (number of stripe indices) */
+ cnt = be32_to_cpup(p++);
+ dprintk("%s stripe count %d\n", __func__, cnt);
+ if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) {
+ printk(KERN_WARNING "%s: stripe count %d greater than "
+ "supported maximum %d\n", __func__,
+ cnt, NFS4_PNFS_MAX_STRIPE_CNT);
+ goto out_err;
+ }
+
+ /* Check the multipath list count */
+ indicesp = p;
+ p += XDR_QUADLEN(cnt << 2);
+ num = be32_to_cpup(p++);
+ dprintk("%s ds_num %u\n", __func__, num);
+ if (num > NFS4_PNFS_MAX_MULTI_CNT) {
+ printk(KERN_WARNING "%s: multipath count %d greater than "
+ "supported maximum %d\n", __func__,
+ num, NFS4_PNFS_MAX_MULTI_CNT);
+ goto out_err;
+ }
+ dsaddr = kzalloc(sizeof(*dsaddr) +
+ (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
+ GFP_KERNEL);
+ if (!dsaddr)
+ goto out_err;
+
+ dsaddr->stripe_indices = kzalloc(sizeof(u8) * cnt, GFP_KERNEL);
+ if (!dsaddr->stripe_indices)
+ goto out_err_free;
+
+ dsaddr->stripe_count = cnt;
+ dsaddr->ds_num = num;
+
+ memcpy(&dsaddr->deviceid.de_id, &pdev->dev_id, sizeof(pdev->dev_id));
+
+ /* Go back and read stripe indices */
+ p = indicesp;
+ indexp = &dsaddr->stripe_indices[0];
+ for (i = 0; i < dsaddr->stripe_count; i++) {
+ *indexp = be32_to_cpup(p++);
+ if (*indexp >= num)
+ goto out_err_free;
+ indexp++;
+ }
+ /* Skip already read multipath list count */
+ p++;
+
+ for (i = 0; i < dsaddr->ds_num; i++) {
+ int j;
+
+ dummy = be32_to_cpup(p++); /* multipath count */
+ if (dummy > 1) {
+ printk(KERN_WARNING
+ "%s: Multipath count %d not supported, "
+ "skipping all greater than 1\n", __func__,
+ dummy);
+ }
+ for (j = 0; j < dummy; j++) {
+ if (j == 0) {
+ dsaddr->ds_list[i] = decode_and_add_ds(&p, ino);
+ if (dsaddr->ds_list[i] == NULL)
+ goto out_err_free;
+ } else {
+ u32 len;
+ /* skip extra multipath */
+ len = be32_to_cpup(p++);
+ p += XDR_QUADLEN(len);
+ len = be32_to_cpup(p++);
+ p += XDR_QUADLEN(len);
+ continue;
+ }
+ }
+ }
+ return dsaddr;
+
+out_err_free:
+ nfs4_fl_free_deviceid(dsaddr);
+out_err:
+ dprintk("%s ERROR: returning NULL\n", __func__);
+ return NULL;
+}
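As a worked example of the opaque body decode_device() walks (values invented, not from a real trace), a two-server, four-stripe device would look roughly like:

	4				stripe index count (cnt)
	0 1 0 1				stripe indices, u32 on the wire, cached as u8
	2				multipath list count (num)
	1 "tcp" "10.1.2.3.8.1"		first server: multipath count 1, one netaddr4
	1 "tcp" "10.1.2.4.8.1"		second server: multipath count 1, one netaddr4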
+
+/*
+ * Decode the opaque device specified in 'dev'
+ * and add it to the list of available devices.
+ * If the deviceid is already cached, nfs4_add_deviceid will return
+ * a pointer to the cached struct and throw away the new one.
+ */
+static struct nfs4_file_layout_dsaddr*
+decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
+{
+ struct nfs4_file_layout_dsaddr *dsaddr;
+ struct pnfs_deviceid_node *d;
+
+ dsaddr = decode_device(inode, dev);
+ if (!dsaddr) {
+ printk(KERN_WARNING "%s: Could not decode or add device\n",
+ __func__);
+ return NULL;
+ }
+
+ d = pnfs_add_deviceid(NFS_SERVER(inode)->nfs_client->cl_devid_cache,
+ &dsaddr->deviceid);
+
+ return container_of(d, struct nfs4_file_layout_dsaddr, deviceid);
+}
+
+/*
+ * Retrieve the information for dev_id, add it to the list
+ * of available devices, and return it.
+ */
+struct nfs4_file_layout_dsaddr *
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
+{
+ struct pnfs_device *pdev = NULL;
+ u32 max_resp_sz;
+ int max_pages;
+ struct page **pages = NULL;
+ struct nfs4_file_layout_dsaddr *dsaddr = NULL;
+ int rc, i;
+ struct nfs_server *server = NFS_SERVER(inode);
+
+ /*
+ * Use the session max response size as the basis for setting
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ max_pages = max_resp_sz >> PAGE_SHIFT;
+ dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
+ __func__, inode, max_resp_sz, max_pages);
+
+ pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL);
+ if (pdev == NULL)
+ return NULL;
+
+ pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+ if (pages == NULL) {
+ kfree(pdev);
+ return NULL;
+ }
+ for (i = 0; i < max_pages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_free;
+ }
+
+ /* set pdev->area */
+ pdev->area = vmap(pages, max_pages, VM_MAP, PAGE_KERNEL);
+ if (!pdev->area)
+ goto out_free;
+
+ memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
+ pdev->layout_type = LAYOUT_NFSV4_1_FILES;
+ pdev->pages = pages;
+ pdev->pgbase = 0;
+ pdev->pglen = PAGE_SIZE * max_pages;
+ pdev->mincount = 0;
+
+ rc = nfs4_proc_getdeviceinfo(server, pdev);
+ dprintk("%s getdevice info returns %d\n", __func__, rc);
+ if (rc)
+ goto out_free;
+
+ /*
+ * Found new device, need to decode it and then add it to the
+ * list of known devices for this mountpoint.
+ */
+ dsaddr = decode_and_add_device(inode, pdev);
+out_free:
+ if (pdev->area != NULL)
+ vunmap(pdev->area);
+ for (i = 0; i < max_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ kfree(pdev);
+ dprintk("<-- %s dsaddr %p\n", __func__, dsaddr);
+ return dsaddr;
+}
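To make the sizing above concrete (assumed values, not from the patch): with a session fore-channel max_resp_sz of 64 KiB and 4 KiB pages, get_device_info() ends up with:

	u32 max_resp_sz = 65536;			/* hypothetical fc_attrs.max_resp_sz */
	int max_pages = max_resp_sz >> PAGE_SHIFT;	/* 16 pages when PAGE_SHIFT == 12 */
	size_t pglen = PAGE_SIZE * max_pages;		/* 64 KiB sent as gdia_maxcount */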
+
+struct nfs4_file_layout_dsaddr *
+nfs4_fl_find_get_deviceid(struct nfs_client *clp, struct nfs4_deviceid *id)
+{
+ struct pnfs_deviceid_node *d;
+
+ d = pnfs_find_get_deviceid(clp->cl_devid_cache, id);
+ return (d == NULL) ? NULL :
+ container_of(d, struct nfs4_file_layout_dsaddr, deviceid);
+}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 089da5b5d20..32c8758c99f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -55,6 +55,7 @@
#include "internal.h"
#include "iostat.h"
#include "callback.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -129,7 +130,8 @@ const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
| FATTR4_WORD0_MAXREAD
| FATTR4_WORD0_MAXWRITE
| FATTR4_WORD0_LEASE_TIME,
- 0
+ FATTR4_WORD1_TIME_DELTA
+ | FATTR4_WORD1_FS_LAYOUT_TYPES
};
const u32 nfs4_fs_locations_bitmap[2] = {
@@ -255,9 +257,6 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
nfs4_state_mark_reclaim_nograce(clp, state);
goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
- if (state == NULL)
- break;
- nfs4_state_mark_reclaim_reboot(clp, state);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_EXPIRED:
goto do_state_recovery;
@@ -334,10 +333,12 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
* Must be called while holding tbl->slot_tbl_lock
*/
static void
-nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
+nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
{
+ int free_slotid = free_slot - tbl->slots;
int slotid = free_slotid;
+ BUG_ON(slotid < 0 || slotid >= NFS4_MAX_SLOT_TABLE);
/* clear used bit in bitmap */
__clear_bit(slotid, tbl->used_slots);
@@ -379,7 +380,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
struct nfs4_slot_table *tbl;
tbl = &res->sr_session->fc_slot_table;
- if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
+ if (!res->sr_slot) {
/* just wake up the next guy waiting since
* we may have not consumed a slot after all */
dprintk("%s: No slot\n", __func__);
@@ -387,17 +388,15 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
}
spin_lock(&tbl->slot_tbl_lock);
- nfs4_free_slot(tbl, res->sr_slotid);
+ nfs4_free_slot(tbl, res->sr_slot);
nfs41_check_drain_session_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
- res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ res->sr_slot = NULL;
}
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
unsigned long timestamp;
- struct nfs4_slot_table *tbl;
- struct nfs4_slot *slot;
struct nfs_client *clp;
/*
@@ -410,17 +409,14 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *
res->sr_status = NFS_OK;
/* -ERESTARTSYS can result in skipping nfs41_sequence_setup */
- if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
+ if (!res->sr_slot)
goto out;
- tbl = &res->sr_session->fc_slot_table;
- slot = tbl->slots + res->sr_slotid;
-
/* Check the SEQUENCE operation status */
switch (res->sr_status) {
case 0:
/* Update the slot's sequence and clientid lease timer */
- ++slot->seq_nr;
+ ++res->sr_slot->seq_nr;
timestamp = res->sr_renewal_time;
clp = res->sr_session->clp;
do_renew_lease(clp, timestamp);
@@ -433,12 +429,14 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *
* returned NFS4ERR_DELAY as per Section 2.10.6.2
* of RFC5661.
*/
- dprintk("%s: slot=%d seq=%d: Operation in progress\n",
- __func__, res->sr_slotid, slot->seq_nr);
+ dprintk("%s: slot=%ld seq=%d: Operation in progress\n",
+ __func__,
+ res->sr_slot - res->sr_session->fc_slot_table.slots,
+ res->sr_slot->seq_nr);
goto out_retry;
default:
/* Just update the slot sequence no. */
- ++slot->seq_nr;
+ ++res->sr_slot->seq_nr;
}
out:
/* The session may be reset by one of the error handlers. */
@@ -505,10 +503,9 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
dprintk("--> %s\n", __func__);
/* slot already allocated? */
- if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
+ if (res->sr_slot != NULL)
return 0;
- res->sr_slotid = NFS4_MAX_SLOT_TABLE;
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
@@ -550,7 +547,7 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
res->sr_session = session;
- res->sr_slotid = slotid;
+ res->sr_slot = slot;
res->sr_renewal_time = jiffies;
res->sr_status_flags = 0;
/*
@@ -576,8 +573,9 @@ int nfs4_setup_sequence(const struct nfs_server *server,
goto out;
}
- dprintk("--> %s clp %p session %p sr_slotid %d\n",
- __func__, session->clp, session, res->sr_slotid);
+ dprintk("--> %s clp %p session %p sr_slot %ld\n",
+ __func__, session->clp, session, res->sr_slot ?
+ res->sr_slot - session->fc_slot_table.slots : -1);
ret = nfs41_setup_sequence(session, args, res, cache_reply,
task);
@@ -650,7 +648,7 @@ static int nfs4_call_sync_sequence(struct nfs_server *server,
.callback_data = &data
};
- res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ res->sr_slot = NULL;
if (privileged)
task_setup.callback_ops = &nfs41_call_priv_sync_ops;
task = rpc_run_task(&task_setup);
@@ -735,7 +733,6 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
- p->o_res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
@@ -1120,6 +1117,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
clear_bit(NFS_DELEGATED_STATE, &state->flags);
smp_rmb();
if (state->n_rdwr != 0) {
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
@@ -1127,6 +1125,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
return -ESTALE;
}
if (state->n_wronly != 0) {
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
if (ret != 0)
return ret;
@@ -1134,6 +1133,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
return -ESTALE;
}
if (state->n_rdonly != 0) {
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
if (ret != 0)
return ret;
@@ -1188,7 +1188,7 @@ static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state
int err;
do {
err = _nfs4_do_open_reclaim(ctx, state);
- if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED)
+ if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
@@ -1258,6 +1258,13 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+ case -EKEYEXPIRED:
+ /*
+ * User RPCSEC_GSS context has expired.
+ * We cannot recover this stateid now, so
+ * skip it and allow recovery thread to
+ * proceed.
+ */
case -ENOMEM:
err = 0;
goto out;
@@ -1605,7 +1612,6 @@ static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state
goto out;
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
- case -EKEYEXPIRED:
nfs4_handle_exception(server, err, &exception);
err = 0;
}
@@ -1975,7 +1981,6 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
- calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
path_get(path);
calldata->path = *path;
@@ -1998,120 +2003,17 @@ out:
return status;
}
-static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct nfs4_state *state, fmode_t fmode)
+static struct inode *
+nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
{
- struct file *filp;
- int ret;
-
- /* If the open_intent is for execute, we have an extra check to make */
- if (fmode & FMODE_EXEC) {
- ret = nfs_may_open(state->inode,
- state->owner->so_cred,
- nd->intent.open.flags);
- if (ret < 0)
- goto out_close;
- }
- filp = lookup_instantiate_filp(nd, path->dentry, NULL);
- if (!IS_ERR(filp)) {
- struct nfs_open_context *ctx;
- ctx = nfs_file_open_context(filp);
- ctx->state = state;
- return 0;
- }
- ret = PTR_ERR(filp);
-out_close:
- nfs4_close_sync(path, state, fmode & (FMODE_READ|FMODE_WRITE));
- return ret;
-}
-
-struct dentry *
-nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
-{
- struct path path = {
- .mnt = nd->path.mnt,
- .dentry = dentry,
- };
- struct dentry *parent;
- struct iattr attr;
- struct rpc_cred *cred;
struct nfs4_state *state;
- struct dentry *res;
- int open_flags = nd->intent.open.flags;
- fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
-
- if (nd->flags & LOOKUP_CREATE) {
- attr.ia_mode = nd->intent.open.create_mode;
- attr.ia_valid = ATTR_MODE;
- if (!IS_POSIXACL(dir))
- attr.ia_mode &= ~current_umask();
- } else {
- open_flags &= ~O_EXCL;
- attr.ia_valid = 0;
- BUG_ON(open_flags & O_CREAT);
- }
- cred = rpc_lookup_cred();
- if (IS_ERR(cred))
- return (struct dentry *)cred;
- parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
- nfs_block_sillyrename(parent);
- state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred);
- put_rpccred(cred);
- if (IS_ERR(state)) {
- if (PTR_ERR(state) == -ENOENT) {
- d_add(dentry, NULL);
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- }
- nfs_unblock_sillyrename(parent);
- return (struct dentry *)state;
- }
- res = d_add_unique(dentry, igrab(state->inode));
- if (res != NULL)
- path.dentry = res;
- nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir));
- nfs_unblock_sillyrename(parent);
- nfs4_intent_set_file(nd, &path, state, fmode);
- return res;
-}
-
-int
-nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
-{
- struct path path = {
- .mnt = nd->path.mnt,
- .dentry = dentry,
- };
- struct rpc_cred *cred;
- struct nfs4_state *state;
- fmode_t fmode = openflags & (FMODE_READ | FMODE_WRITE);
-
- cred = rpc_lookup_cred();
- if (IS_ERR(cred))
- return PTR_ERR(cred);
- state = nfs4_do_open(dir, &path, fmode, openflags, NULL, cred);
- put_rpccred(cred);
- if (IS_ERR(state)) {
- switch (PTR_ERR(state)) {
- case -EPERM:
- case -EACCES:
- case -EDQUOT:
- case -ENOSPC:
- case -EROFS:
- return PTR_ERR(state);
- default:
- goto out_drop;
- }
- }
- if (state->inode == dentry->d_inode) {
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- nfs4_intent_set_file(nd, &path, state, fmode);
- return 1;
- }
- nfs4_close_sync(&path, state, fmode);
-out_drop:
- d_drop(dentry);
- return 0;
+ state = nfs4_do_open(dir, &ctx->path, ctx->mode, open_flags, attr, ctx->cred);
+ if (IS_ERR(state))
+ return ERR_CAST(state);
+ ctx->state = state;
+ return igrab(state->inode);
}
static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
@@ -2568,36 +2470,34 @@ static int nfs4_proc_readlink(struct inode *inode, struct page *page,
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags, struct nameidata *nd)
+ int flags, struct nfs_open_context *ctx)
{
- struct path path = {
- .mnt = nd->path.mnt,
+ struct path my_path = {
.dentry = dentry,
};
+ struct path *path = &my_path;
struct nfs4_state *state;
- struct rpc_cred *cred;
- fmode_t fmode = flags & (FMODE_READ | FMODE_WRITE);
+ struct rpc_cred *cred = NULL;
+ fmode_t fmode = 0;
int status = 0;
- cred = rpc_lookup_cred();
- if (IS_ERR(cred)) {
- status = PTR_ERR(cred);
- goto out;
+ if (ctx != NULL) {
+ cred = ctx->cred;
+ path = &ctx->path;
+ fmode = ctx->mode;
}
- state = nfs4_do_open(dir, &path, fmode, flags, sattr, cred);
+ state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
d_drop(dentry);
if (IS_ERR(state)) {
status = PTR_ERR(state);
- goto out_putcred;
+ goto out;
}
d_add(dentry, igrab(state->inode));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0)
- status = nfs4_intent_set_file(nd, &path, state, fmode);
+ if (ctx != NULL)
+ ctx->state = state;
else
- nfs4_close_sync(&path, state, fmode);
-out_putcred:
- put_rpccred(cred);
+ nfs4_close_sync(path, state, fmode);
out:
return status;
}
@@ -2655,6 +2555,7 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
args->bitmask = server->cache_consistency_bitmask;
res->server = server;
+ res->seq_res.sr_slot = NULL;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
}
@@ -2671,18 +2572,46 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
return 1;
}
+static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+{
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_renameargs *arg = msg->rpc_argp;
+ struct nfs_renameres *res = msg->rpc_resp;
+
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
+ arg->bitmask = server->attr_bitmask;
+ res->server = server;
+}
+
+static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
+ struct inode *new_dir)
+{
+ struct nfs_renameres *res = task->tk_msg.rpc_resp;
+
+ if (!nfs4_sequence_done(task, &res->seq_res))
+ return 0;
+ if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
+ return 0;
+
+ update_changeattr(old_dir, &res->old_cinfo);
+ nfs_post_op_update_inode(old_dir, res->old_fattr);
+ update_changeattr(new_dir, &res->new_cinfo);
+ nfs_post_op_update_inode(new_dir, res->new_fattr);
+ return 1;
+}
+
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
struct nfs_server *server = NFS_SERVER(old_dir);
- struct nfs4_rename_arg arg = {
+ struct nfs_renameargs arg = {
.old_dir = NFS_FH(old_dir),
.new_dir = NFS_FH(new_dir),
.old_name = old_name,
.new_name = new_name,
.bitmask = server->attr_bitmask,
};
- struct nfs4_rename_res res = {
+ struct nfs_renameres res = {
.server = server,
};
struct rpc_message msg = {
@@ -2896,15 +2825,16 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
}
static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
- u64 cookie, struct page *page, unsigned int count, int plus)
+ u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
struct nfs4_readdir_arg args = {
.fh = NFS_FH(dir),
- .pages = &page,
+ .pages = pages,
.pgbase = 0,
.count = count,
.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
+ .plus = plus,
};
struct nfs4_readdir_res res;
struct rpc_message msg = {
@@ -2932,14 +2862,14 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
}
static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
- u64 cookie, struct page *page, unsigned int count, int plus)
+ u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct nfs4_exception exception = { };
int err;
do {
err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
_nfs4_proc_readdir(dentry, cred, cookie,
- page, count, plus),
+ pages, count, plus),
&exception);
} while (exception.retry);
return err;
@@ -3490,9 +3420,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
nfs4_state_mark_reclaim_nograce(clp, state);
goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
- if (state == NULL)
- break;
- nfs4_state_mark_reclaim_reboot(clp, state);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_EXPIRED:
goto do_state_recovery;
@@ -3626,7 +3553,6 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
case -NFS4ERR_RESOURCE:
/* The IBM lawyers misread another document! */
case -NFS4ERR_DELAY:
- case -EKEYEXPIRED:
err = nfs4_delay(clp->cl_rpcclient, &timeout);
}
} while (err == 0);
@@ -3721,7 +3647,6 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
memcpy(&data->stateid, stateid, sizeof(data->stateid));
data->res.fattr = &data->fattr;
data->res.server = server;
- data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
@@ -3874,7 +3799,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
- p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
@@ -4054,7 +3978,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
p->res.lock_seqid = p->arg.lock_seqid;
- p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
p->lsp = lsp;
p->server = server;
atomic_inc(&lsp->ls_count);
@@ -4241,7 +4164,7 @@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request
if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
return 0;
err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
- if (err != -NFS4ERR_DELAY && err != -EKEYEXPIRED)
+ if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
@@ -4266,7 +4189,6 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request
goto out;
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
- case -EKEYEXPIRED:
nfs4_handle_exception(server, err, &exception);
err = 0;
}
@@ -4412,13 +4334,21 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
err = 0;
goto out;
+ case -EKEYEXPIRED:
+ /*
+ * User RPCSEC_GSS context has expired.
+ * We cannot recover this stateid now, so
+ * skip it and allow recovery thread to
+ * proceed.
+ */
+ err = 0;
+ goto out;
case -ENOMEM:
case -NFS4ERR_DENIED:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
err = 0;
goto out;
case -NFS4ERR_DELAY:
- case -EKEYEXPIRED:
break;
}
err = nfs4_handle_exception(server, err, &exception);
@@ -4647,7 +4577,6 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
switch (task->tk_status) {
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
- case -EKEYEXPIRED:
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
@@ -4687,7 +4616,6 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
};
int status;
- res.lr_seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
dprintk("--> %s\n", __func__);
task = rpc_run_task(&task_setup);
@@ -4914,49 +4842,56 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
args->bc_attrs.max_reqs);
}
-static int _verify_channel_attr(char *chan, char *attr_name, u32 sent, u32 rcvd)
+static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
- if (rcvd <= sent)
- return 0;
- printk(KERN_WARNING "%s: Session INVALID: %s channel %s increased. "
- "sent=%u rcvd=%u\n", __func__, chan, attr_name, sent, rcvd);
- return -EINVAL;
+ struct nfs4_channel_attrs *sent = &args->fc_attrs;
+ struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
+
+ if (rcvd->headerpadsz > sent->headerpadsz)
+ return -EINVAL;
+ if (rcvd->max_resp_sz > sent->max_resp_sz)
+ return -EINVAL;
+ /*
+ * Our requested max_ops is the minimum we need; we're not
+ * prepared to break up compounds into smaller pieces than that.
+ * So, no point even trying to continue if the server won't
+ * cooperate:
+ */
+ if (rcvd->max_ops < sent->max_ops)
+ return -EINVAL;
+ if (rcvd->max_reqs == 0)
+ return -EINVAL;
+ return 0;
}
-#define _verify_fore_channel_attr(_name_) \
- _verify_channel_attr("fore", #_name_, \
- args->fc_attrs._name_, \
- session->fc_attrs._name_)
+static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+{
+ struct nfs4_channel_attrs *sent = &args->bc_attrs;
+ struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
-#define _verify_back_channel_attr(_name_) \
- _verify_channel_attr("back", #_name_, \
- args->bc_attrs._name_, \
- session->bc_attrs._name_)
+ if (rcvd->max_rqst_sz > sent->max_rqst_sz)
+ return -EINVAL;
+ if (rcvd->max_resp_sz < sent->max_resp_sz)
+ return -EINVAL;
+ if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
+ return -EINVAL;
+ /* These would render the backchannel useless: */
+ if (rcvd->max_ops == 0)
+ return -EINVAL;
+ if (rcvd->max_reqs == 0)
+ return -EINVAL;
+ return 0;
+}
-/*
- * The server is not allowed to increase the fore channel header pad size,
- * maximum response size, or maximum number of operations.
- *
- * The back channel attributes are only negotiatied down: We send what the
- * (back channel) server insists upon.
- */
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
struct nfs4_session *session)
{
- int ret = 0;
-
- ret |= _verify_fore_channel_attr(headerpadsz);
- ret |= _verify_fore_channel_attr(max_resp_sz);
- ret |= _verify_fore_channel_attr(max_ops);
-
- ret |= _verify_back_channel_attr(headerpadsz);
- ret |= _verify_back_channel_attr(max_rqst_sz);
- ret |= _verify_back_channel_attr(max_resp_sz);
- ret |= _verify_back_channel_attr(max_resp_sz_cached);
- ret |= _verify_back_channel_attr(max_ops);
- ret |= _verify_back_channel_attr(max_reqs);
+ int ret;
- return ret;
+ ret = nfs4_verify_fore_channel_attrs(args, session);
+ if (ret)
+ return ret;
+ return nfs4_verify_back_channel_attrs(args, session);
}
static int _nfs4_proc_create_session(struct nfs_client *clp)
@@ -5111,7 +5046,6 @@ static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client
{
switch(task->tk_status) {
case -NFS4ERR_DELAY:
- case -EKEYEXPIRED:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return -EAGAIN;
default:
@@ -5180,12 +5114,11 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_
if (!atomic_inc_not_zero(&clp->cl_count))
return ERR_PTR(-EIO);
- calldata = kmalloc(sizeof(*calldata), GFP_NOFS);
+ calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
if (calldata == NULL) {
nfs_put_client(clp);
return ERR_PTR(-ENOMEM);
}
- calldata->res.sr_slotid = NFS4_MAX_SLOT_TABLE;
msg.rpc_argp = &calldata->args;
msg.rpc_resp = &calldata->res;
calldata->clp = clp;
@@ -5254,7 +5187,6 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
case -NFS4ERR_WRONG_CRED: /* What to do here? */
break;
case -NFS4ERR_DELAY:
- case -EKEYEXPIRED:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return -EAGAIN;
default:
@@ -5317,7 +5249,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
goto out;
calldata->clp = clp;
calldata->arg.one_fs = 0;
- calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
msg.rpc_argp = &calldata->arg;
msg.rpc_resp = &calldata->res;
@@ -5333,6 +5264,147 @@ out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
}
+
+static void
+nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+ struct inode *ino = lgp->args.inode;
+ struct nfs_server *server = NFS_SERVER(ino);
+
+ dprintk("--> %s\n", __func__);
+ if (nfs4_setup_sequence(server, &lgp->args.seq_args,
+ &lgp->res.seq_res, 0, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+
+ dprintk("--> %s\n", __func__);
+
+ if (!nfs4_sequence_done(task, &lgp->res.seq_res))
+ return;
+
+ switch (task->tk_status) {
+ case 0:
+ break;
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
+ task->tk_status = -NFS4ERR_DELAY;
+ /* Fall through */
+ default:
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ }
+ lgp->status = task->tk_status;
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_layoutget_release(void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+
+ dprintk("--> %s\n", __func__);
+ put_layout_hdr(lgp->args.inode);
+ if (lgp->res.layout.buf != NULL)
+ free_page((unsigned long) lgp->res.layout.buf);
+ put_nfs_open_context(lgp->args.ctx);
+ kfree(calldata);
+ dprintk("<-- %s\n", __func__);
+}
+
+static const struct rpc_call_ops nfs4_layoutget_call_ops = {
+ .rpc_call_prepare = nfs4_layoutget_prepare,
+ .rpc_call_done = nfs4_layoutget_done,
+ .rpc_release = nfs4_layoutget_release,
+};
+
+int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+{
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
+ .rpc_argp = &lgp->args,
+ .rpc_resp = &lgp->res,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_layoutget_call_ops,
+ .callback_data = lgp,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status = 0;
+
+ dprintk("--> %s\n", __func__);
+
+ lgp->res.layout.buf = (void *)__get_free_page(GFP_NOFS);
+ if (lgp->res.layout.buf == NULL) {
+ nfs4_layoutget_release(lgp);
+ return -ENOMEM;
+ }
+
+ lgp->res.seq_res.sr_slot = NULL;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0)
+ goto out;
+ status = lgp->status;
+ if (status != 0)
+ goto out;
+ status = pnfs_layout_process(lgp);
+out:
+ rpc_put_task(task);
+ dprintk("<-- %s status=%d\n", __func__, status);
+ return status;
+}
+
+static int
+_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
+{
+ struct nfs4_getdeviceinfo_args args = {
+ .pdev = pdev,
+ };
+ struct nfs4_getdeviceinfo_res res = {
+ .pdev = pdev,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ dprintk("<-- %s status=%d\n", __func__, status);
+
+ return status;
+}
+
+int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_proc_getdeviceinfo(server, pdev),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
+
#endif /* CONFIG_NFS_V4_1 */
struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
@@ -5443,6 +5515,8 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.unlink_setup = nfs4_proc_unlink_setup,
.unlink_done = nfs4_proc_unlink_done,
.rename = nfs4_proc_rename,
+ .rename_setup = nfs4_proc_rename_setup,
+ .rename_done = nfs4_proc_rename_done,
.link = nfs4_proc_link,
.symlink = nfs4_proc_symlink,
.mkdir = nfs4_proc_mkdir,
@@ -5463,6 +5537,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.lock = nfs4_proc_lock,
.clear_acl_cache = nfs4_zap_acl_attr,
.close_context = nfs4_close_context,
+ .open_context = nfs4_atomic_open,
};
/*
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3e2f19b04c0..f575a312673 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -40,12 +40,13 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
+#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
+#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
@@ -53,6 +54,7 @@
#include "callback.h"
#include "delegation.h"
#include "internal.h"
+#include "pnfs.h"
#define OPENOWNER_POOL_SIZE 8
@@ -970,13 +972,13 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
/* Guard against delegation returns and new lock/unlock calls */
down_write(&nfsi->rwsem);
/* Protect inode->i_flock using the BKL */
- lock_kernel();
+ lock_flocks();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
- unlock_kernel();
+ unlock_flocks();
status = ops->recover_lock(state, fl);
switch (status) {
case 0:
@@ -1003,9 +1005,9 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
status = 0;
}
- lock_kernel();
+ lock_flocks();
}
- unlock_kernel();
+ unlock_flocks();
out:
up_write(&nfsi->rwsem);
return status;
@@ -1063,6 +1065,14 @@ restart:
/* Mark the file as being 'closed' */
state->state = 0;
break;
+ case -EKEYEXPIRED:
+ /*
+ * User RPCSEC_GSS context has expired.
+ * We cannot recover this stateid now, so
+ * skip it and allow recovery thread to
+ * proceed.
+ */
+ break;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_BAD_STATEID:
@@ -1138,16 +1148,14 @@ static void nfs4_reclaim_complete(struct nfs_client *clp,
(void)ops->reclaim_complete(clp);
}
-static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
+static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
struct rb_node *pos;
struct nfs4_state *state;
if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
- return;
-
- nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
+ return 0;
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
@@ -1161,6 +1169,14 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
}
nfs_delegation_reap_unclaimed(clp);
+ return 1;
+}
+
+static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
+{
+ if (!nfs4_state_clear_reclaim_reboot(clp))
+ return;
+ nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
}
static void nfs_delegation_clear_all(struct nfs_client *clp)
@@ -1175,6 +1191,14 @@ static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}
+static void nfs4_warn_keyexpired(const char *s)
+{
+ printk_ratelimited(KERN_WARNING "Error: state manager"
+ " encountered RPCSEC_GSS session"
+ " expired against NFSv4 server %s.\n",
+ s);
+}
+
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
@@ -1187,7 +1211,7 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- nfs4_state_end_reclaim_reboot(clp);
+ nfs4_state_clear_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_EXPIRED:
@@ -1204,6 +1228,10 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* Zero session reset errors */
return 0;
+ case -EKEYEXPIRED:
+ /* Nothing we can do */
+ nfs4_warn_keyexpired(clp->cl_hostname);
+ return 0;
}
return error;
}
@@ -1414,9 +1442,10 @@ static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
case -NFS4ERR_DELAY:
case -NFS4ERR_CLID_INUSE:
case -EAGAIN:
- case -EKEYEXPIRED:
break;
+ case -EKEYEXPIRED:
+ nfs4_warn_keyexpired(clp->cl_hostname);
case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
* in nfs4_exchange_id */
default:
@@ -1447,6 +1476,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
}
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
+ pnfs_destroy_all_layouts(clp);
}
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 08ef9129113..f313c4cce7e 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -52,6 +52,7 @@
#include <linux/nfs_idmap.h>
#include "nfs4_fs.h"
#include "internal.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -310,6 +311,19 @@ static int nfs4_stat_to_errno(int);
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
#define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4)
#define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4)
+#define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + 4 + \
+ XDR_QUADLEN(NFS4_DEVICEID4_SIZE))
+#define decode_getdeviceinfo_maxsz (op_decode_hdr_maxsz + \
+ 1 /* layout type */ + \
+ 1 /* opaque devaddr4 length */ + \
+ /* devaddr4 payload is read into page */ \
+ 1 /* notification bitmap length */ + \
+ 1 /* notification bitmap */)
+#define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \
+ encode_stateid_maxsz)
+#define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \
+ decode_stateid_maxsz + \
+ XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE))
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
@@ -699,6 +713,20 @@ static int nfs4_stat_to_errno(int);
#define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_reclaim_complete_maxsz)
+#define NFS4_enc_getdeviceinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz +\
+ encode_getdeviceinfo_maxsz)
+#define NFS4_dec_getdeviceinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_getdeviceinfo_maxsz)
+#define NFS4_enc_layoutget_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_layoutget_maxsz)
+#define NFS4_dec_layoutget_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_layoutget_maxsz)
const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
compound_encode_hdr_maxsz +
@@ -816,7 +844,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
if (iap->ia_valid & ATTR_MODE)
len += 4;
if (iap->ia_valid & ATTR_UID) {
- owner_namelen = nfs_map_uid_to_name(server->nfs_client, iap->ia_uid, owner_name);
+ owner_namelen = nfs_map_uid_to_name(server->nfs_client, iap->ia_uid, owner_name, IDMAP_NAMESZ);
if (owner_namelen < 0) {
dprintk("nfs: couldn't resolve uid %d to string\n",
iap->ia_uid);
@@ -828,7 +856,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
len += 4 + (XDR_QUADLEN(owner_namelen) << 2);
}
if (iap->ia_valid & ATTR_GID) {
- owner_grouplen = nfs_map_gid_to_group(server->nfs_client, iap->ia_gid, owner_group);
+ owner_grouplen = nfs_map_gid_to_group(server->nfs_client, iap->ia_gid, owner_group, IDMAP_NAMESZ);
if (owner_grouplen < 0) {
dprintk("nfs: couldn't resolve gid %d to string\n",
iap->ia_gid);
@@ -1385,24 +1413,35 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args,
static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr)
{
- uint32_t attrs[2] = {
- FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID,
- FATTR4_WORD1_MOUNTED_ON_FILEID,
- };
+ uint32_t attrs[2] = {0, 0};
+ uint32_t dircount = readdir->count >> 1;
__be32 *p;
+ if (readdir->plus) {
+ attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE|
+ FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE;
+ attrs[1] |= FATTR4_WORD1_MODE|FATTR4_WORD1_NUMLINKS|FATTR4_WORD1_OWNER|
+ FATTR4_WORD1_OWNER_GROUP|FATTR4_WORD1_RAWDEV|
+ FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS|
+ FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
+ dircount >>= 1;
+ }
+ attrs[0] |= FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID;
+ attrs[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+ /* Switch to mounted_on_fileid if the server supports it */
+ if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
+ attrs[0] &= ~FATTR4_WORD0_FILEID;
+ else
+ attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
+
p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20);
*p++ = cpu_to_be32(OP_READDIR);
p = xdr_encode_hyper(p, readdir->cookie);
p = xdr_encode_opaque_fixed(p, readdir->verifier.data, NFS4_VERIFIER_SIZE);
- *p++ = cpu_to_be32(readdir->count >> 1); /* We're not doing readdirplus */
+ *p++ = cpu_to_be32(dircount);
*p++ = cpu_to_be32(readdir->count);
*p++ = cpu_to_be32(2);
- /* Switch to mounted_on_fileid if the server supports it */
- if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
- attrs[0] &= ~FATTR4_WORD0_FILEID;
- else
- attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
+
*p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]);
*p = cpu_to_be32(attrs[1] & readdir->bitmask[1]);
hdr->nops++;
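For a sense of the dircount hint above (illustrative arithmetic only): with a readdir->count of 4096 bytes, the server is asked for dircount = 2048 on plain READDIR and 1024 when readdirplus attributes are requested, since each plus entry carries far more data:

	dircount = 4096 >> 1;	/* 2048 for plain READDIR */
	dircount >>= 1;		/* 1024 for readdirplus */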
@@ -1726,6 +1765,58 @@ static void encode_sequence(struct xdr_stream *xdr,
#endif /* CONFIG_NFS_V4_1 */
}
+#ifdef CONFIG_NFS_V4_1
+static void
+encode_getdeviceinfo(struct xdr_stream *xdr,
+ const struct nfs4_getdeviceinfo_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 16 + NFS4_DEVICEID4_SIZE);
+ *p++ = cpu_to_be32(OP_GETDEVICEINFO);
+ p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data,
+ NFS4_DEVICEID4_SIZE);
+ *p++ = cpu_to_be32(args->pdev->layout_type);
+ *p++ = cpu_to_be32(args->pdev->pglen); /* gdia_maxcount */
+ *p++ = cpu_to_be32(0); /* bitmap length 0 */
+ hdr->nops++;
+ hdr->replen += decode_getdeviceinfo_maxsz;
+}
+
+static void
+encode_layoutget(struct xdr_stream *xdr,
+ const struct nfs4_layoutget_args *args,
+ struct compound_hdr *hdr)
+{
+ nfs4_stateid stateid;
+ __be32 *p;
+
+ p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
+ *p++ = cpu_to_be32(OP_LAYOUTGET);
+ *p++ = cpu_to_be32(0); /* Signal layout available */
+ *p++ = cpu_to_be32(args->type);
+ *p++ = cpu_to_be32(args->range.iomode);
+ p = xdr_encode_hyper(p, args->range.offset);
+ p = xdr_encode_hyper(p, args->range.length);
+ p = xdr_encode_hyper(p, args->minlength);
+ pnfs_get_layout_stateid(&stateid, NFS_I(args->inode)->layout,
+ args->ctx->state);
+ p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE);
+ *p = cpu_to_be32(args->maxcount);
+
+ dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
+ __func__,
+ args->type,
+ args->range.iomode,
+ (unsigned long)args->range.offset,
+ (unsigned long)args->range.length,
+ args->maxcount);
+ hdr->nops++;
+ hdr->replen += decode_layoutget_maxsz;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
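For reference, the reserve_space(44 + NFS4_STATEID_SIZE) call in encode_layoutget() above covers the fixed-size LAYOUTGET4args fields; the byte breakdown (field names per RFC 5661) is:

	 4	OP_LAYOUTGET
	 4	loga_signal_layout_avail
	 4	loga_layout_type
	 4	loga_iomode
	 8	loga_offset
	 8	loga_length
	 8	loga_minlength
	 4	loga_maxcount
	--
	44	bytes, plus NFS4_STATEID_SIZE for loga_stateid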
/*
* END OF "GENERIC" ENCODE ROUTINES.
*/
@@ -1823,7 +1914,7 @@ static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs
/*
* Encode RENAME request
*/
-static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs4_rename_arg *args)
+static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs_renameargs *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
@@ -2543,6 +2634,51 @@ static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
return 0;
}
+/*
+ * Encode GETDEVICEINFO request
+ */
+static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_getdeviceinfo_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_getdeviceinfo(&xdr, args, &hdr);
+
+ /* Set up reply kvec. Subtract notification bitmap max size (2)
+ * so that the notification bitmap is put in the xdr_buf tail */
+ xdr_inline_pages(&req->rq_rcv_buf, (hdr.replen - 2) << 2,
+ args->pdev->pages, args->pdev->pgbase,
+ args->pdev->pglen);
+
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * Encode LAYOUTGET request
+ */
+static int nfs4_xdr_enc_layoutget(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_layoutget_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_putfh(&xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutget(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
#endif /* CONFIG_NFS_V4_1 */
static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -2676,7 +2812,10 @@ out_overflow:
static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
{
if (likely(bitmap[0] & FATTR4_WORD0_SUPPORTED_ATTRS)) {
- decode_attr_bitmap(xdr, bitmask);
+ int ret;
+ ret = decode_attr_bitmap(xdr, bitmask);
+ if (unlikely(ret < 0))
+ return ret;
bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS;
} else
bitmask[0] = bitmask[1] = 0;
@@ -2848,6 +2987,56 @@ out_overflow:
return -EIO;
}
+static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap)
+{
+ __be32 *p;
+
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_RDATTR_ERROR - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_RDATTR_ERROR)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh)
+{
+ __be32 *p;
+ int len;
+
+ if (fh != NULL)
+ memset(fh, 0, sizeof(*fh));
+
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEHANDLE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_FILEHANDLE)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ if (len > NFS4_FHSIZE)
+ return -EIO;
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (fh != NULL) {
+ memcpy(fh->data, p, len);
+ fh->size = len;
+ }
+ bitmap[0] &= ~FATTR4_WORD0_FILEHANDLE;
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
{
__be32 *p;
@@ -3521,6 +3710,24 @@ static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, s
return status;
}
+static int decode_attr_time_delta(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct timespec *time)
+{
+ int status = 0;
+
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_DELTA - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_TIME_DELTA)) {
+ status = decode_attr_time(xdr, time);
+ bitmap[1] &= ~FATTR4_WORD1_TIME_DELTA;
+ }
+ dprintk("%s: time_delta=%ld %ld\n", __func__, (long)time->tv_sec,
+ (long)time->tv_nsec);
+ return status;
+}
+
static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
{
int status = 0;
@@ -3744,29 +3951,14 @@ xdr_error:
return status;
}
-static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
+static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs_fattr *fattr, struct nfs_fh *fh,
const struct nfs_server *server, int may_sleep)
{
- __be32 *savep;
- uint32_t attrlen,
- bitmap[2] = {0},
- type;
int status;
umode_t fmode = 0;
uint64_t fileid;
-
- status = decode_op_hdr(xdr, OP_GETATTR);
- if (status < 0)
- goto xdr_error;
-
- status = decode_attr_bitmap(xdr, bitmap);
- if (status < 0)
- goto xdr_error;
-
- status = decode_attr_length(xdr, &attrlen, &savep);
- if (status < 0)
- goto xdr_error;
-
+ uint32_t type;
status = decode_attr_type(xdr, bitmap, &type);
if (status < 0)
@@ -3792,6 +3984,14 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
goto xdr_error;
fattr->valid |= status;
+ status = decode_attr_error(xdr, bitmap);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_attr_filehandle(xdr, bitmap, fh);
+ if (status < 0)
+ goto xdr_error;
+
status = decode_attr_fileid(xdr, bitmap, &fattr->fileid);
if (status < 0)
goto xdr_error;
@@ -3862,12 +4062,101 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
fattr->valid |= status;
}
+xdr_error:
+ dprintk("%s: xdr returned %d\n", __func__, -status);
+ return status;
+}
+
+static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr,
+ struct nfs_fh *fh, const struct nfs_server *server, int may_sleep)
+{
+ __be32 *savep;
+ uint32_t attrlen,
+ bitmap[2] = {0};
+ int status;
+
+ status = decode_op_hdr(xdr, OP_GETATTR);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_attr_bitmap(xdr, bitmap);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_attr_length(xdr, &attrlen, &savep);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server, may_sleep);
+ if (status < 0)
+ goto xdr_error;
+
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
dprintk("%s: xdr returned %d\n", __func__, -status);
return status;
}
+static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
+ const struct nfs_server *server, int may_sleep)
+{
+ return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep);
+}
+
+/*
+ * Decode potentially multiple layout types. Currently we only support
+ * one layout driver per file system.
+ */
+static int decode_first_pnfs_layout_type(struct xdr_stream *xdr,
+ uint32_t *layouttype)
+{
+ uint32_t *p;
+ int num;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ num = be32_to_cpup(p);
+
+ /* pNFS is not supported by the underlying file system */
+ if (num == 0) {
+ *layouttype = 0;
+ return 0;
+ }
+ if (num > 1)
+ printk(KERN_INFO "%s: Warning: Multiple pNFS layout drivers "
+ "per filesystem not supported\n", __func__);
+
+ /* Decode and set first layout type, move xdr->p past unused types */
+ p = xdr_inline_decode(xdr, num * 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *layouttype = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * The type of file system exported.
+ * Note we must ensure that layouttype is set in any non-error case.
+ */
+static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
+ uint32_t *layouttype)
+{
+ int status = 0;
+
+ dprintk("%s: bitmap is %x\n", __func__, bitmap[1]);
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_FS_LAYOUT_TYPES - 1U)))
+ return -EIO;
+ if (bitmap[1] & FATTR4_WORD1_FS_LAYOUT_TYPES) {
+ status = decode_first_pnfs_layout_type(xdr, layouttype);
+ bitmap[1] &= ~FATTR4_WORD1_FS_LAYOUT_TYPES;
+ } else
+ *layouttype = 0;
+ return status;
+}
static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
{
@@ -3894,6 +4183,12 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
if ((status = decode_attr_maxwrite(xdr, bitmap, &fsinfo->wtmax)) != 0)
goto xdr_error;
fsinfo->wtpref = fsinfo->wtmax;
+ status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta);
+ if (status != 0)
+ goto xdr_error;
+ status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype);
+ if (status != 0)
+ goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
@@ -3950,13 +4245,13 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
__be32 *p;
uint32_t namelen, type;
- p = xdr_inline_decode(xdr, 32);
+ p = xdr_inline_decode(xdr, 32); /* read 32 bytes */
if (unlikely(!p))
goto out_overflow;
- p = xdr_decode_hyper(p, &offset);
+ p = xdr_decode_hyper(p, &offset); /* read 2 8-byte long words */
p = xdr_decode_hyper(p, &length);
- type = be32_to_cpup(p++);
- if (fl != NULL) {
+ type = be32_to_cpup(p++); /* 4 byte read */
+ if (fl != NULL) { /* manipulate file lock */
fl->fl_start = (loff_t)offset;
fl->fl_end = fl->fl_start + (loff_t)length - 1;
if (length == ~(uint64_t)0)
@@ -3966,9 +4261,9 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
fl->fl_type = F_RDLCK;
fl->fl_pid = 0;
}
- p = xdr_decode_hyper(p, &clientid);
- namelen = be32_to_cpup(p);
- p = xdr_inline_decode(xdr, namelen);
+ p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */
+ namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */
+ p = xdr_inline_decode(xdr, namelen); /* variable size field */
if (likely(p))
return -NFS4ERR_DENIED;
out_overflow:
@@ -4200,12 +4495,9 @@ out_overflow:
static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct page *page = *rcvbuf->pages;
struct kvec *iov = rcvbuf->head;
size_t hdrlen;
u32 recvd, pglen = rcvbuf->page_len;
- __be32 *end, *entry, *p, *kaddr;
- unsigned int nr = 0;
int status;
status = decode_op_hdr(xdr, OP_READDIR);
@@ -4225,71 +4517,8 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
pglen = recvd;
xdr_read_pages(xdr, pglen);
- BUG_ON(pglen + readdir->pgbase > PAGE_CACHE_SIZE);
- kaddr = p = kmap_atomic(page, KM_USER0);
- end = p + ((pglen + readdir->pgbase) >> 2);
- entry = p;
-
- /* Make sure the packet actually has a value_follows and EOF entry */
- if ((entry + 1) > end)
- goto short_pkt;
-
- for (; *p++; nr++) {
- u32 len, attrlen, xlen;
- if (end - p < 3)
- goto short_pkt;
- dprintk("cookie = %Lu, ", *((unsigned long long *)p));
- p += 2; /* cookie */
- len = ntohl(*p++); /* filename length */
- if (len > NFS4_MAXNAMLEN) {
- dprintk("NFS: giant filename in readdir (len 0x%x)\n",
- len);
- goto err_unmap;
- }
- xlen = XDR_QUADLEN(len);
- if (end - p < xlen + 1)
- goto short_pkt;
- dprintk("filename = %*s\n", len, (char *)p);
- p += xlen;
- len = ntohl(*p++); /* bitmap length */
- if (end - p < len + 1)
- goto short_pkt;
- p += len;
- attrlen = XDR_QUADLEN(ntohl(*p++));
- if (end - p < attrlen + 2)
- goto short_pkt;
- p += attrlen; /* attributes */
- entry = p;
- }
- /*
- * Apparently some server sends responses that are a valid size, but
- * contain no entries, and have value_follows==0 and EOF==0. For
- * those, just set the EOF marker.
- */
- if (!nr && entry[1] == 0) {
- dprintk("NFS: readdir reply truncated!\n");
- entry[1] = 1;
- }
-out:
- kunmap_atomic(kaddr, KM_USER0);
+
return 0;
-short_pkt:
- /*
- * When we get a short packet there are 2 possibilities. We can
- * return an error, or fix up the response to look like a valid
- * response and return what we have so far. If there are no
- * entries and the packet was short, then return -EIO. If there
- * are valid entries in the response, return them and pretend that
- * the call was successful, but incomplete. The caller can retry the
- * readdir starting at the last cookie.
- */
- dprintk("%s: short packet at entry %d\n", __func__, nr);
- entry[0] = entry[1] = 0;
- if (nr)
- goto out;
-err_unmap:
- kunmap_atomic(kaddr, KM_USER0);
- return -errno_NFSERR_IO;
}
static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
@@ -4299,7 +4528,6 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
size_t hdrlen;
u32 len, recvd;
__be32 *p;
- char *kaddr;
int status;
status = decode_op_hdr(xdr, OP_READLINK);
@@ -4330,9 +4558,7 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
* and null-terminate the text (the VFS expects
* null-termination).
*/
- kaddr = (char *)kmap_atomic(rcvbuf->pages[0], KM_USER0);
- kaddr[len+rcvbuf->page_base] = '\0';
- kunmap_atomic(kaddr, KM_USER0);
+ xdr_terminate_string(rcvbuf, len);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -4668,7 +4894,6 @@ static int decode_sequence(struct xdr_stream *xdr,
struct rpc_rqst *rqstp)
{
#if defined(CONFIG_NFS_V4_1)
- struct nfs4_slot *slot;
struct nfs4_sessionid id;
u32 dummy;
int status;
@@ -4700,15 +4925,14 @@ static int decode_sequence(struct xdr_stream *xdr,
goto out_overflow;
/* seqid */
- slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
dummy = be32_to_cpup(p++);
- if (dummy != slot->seq_nr) {
+ if (dummy != res->sr_slot->seq_nr) {
dprintk("%s Invalid sequence number\n", __func__);
goto out_err;
}
/* slot id */
dummy = be32_to_cpup(p++);
- if (dummy != res->sr_slotid) {
+ if (dummy != res->sr_slot - res->sr_session->fc_slot_table.slots) {
dprintk("%s Invalid slot id\n", __func__);
goto out_err;
}
@@ -4731,6 +4955,134 @@ out_overflow:
#endif /* CONFIG_NFS_V4_1 */
}
+#if defined(CONFIG_NFS_V4_1)
+
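+/*
+ * Decode a GETDEVICEINFO reply. On NFS4ERR_TOOSMALL the server's required
+ * mincount is copied back to the caller; otherwise the layout type is
+ * checked against the request, the opaque device_addr4 is read into the
+ * reply pages, and any set notification bits (unsupported) yield -EIO.
+ */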
+static int decode_getdeviceinfo(struct xdr_stream *xdr,
+ struct pnfs_device *pdev)
+{
+ __be32 *p;
+ uint32_t len, type;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_GETDEVICEINFO);
+ if (status) {
+ if (status == -ETOOSMALL) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ pdev->mincount = be32_to_cpup(p);
+ dprintk("%s: Min count too small. mincnt = %u\n",
+ __func__, pdev->mincount);
+ }
+ return status;
+ }
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ type = be32_to_cpup(p++);
+ if (type != pdev->layout_type) {
+ dprintk("%s: layout mismatch req: %u pdev: %u\n",
+ __func__, pdev->layout_type, type);
+ return -EINVAL;
+ }
+ /*
+ * Get the length of the opaque device_addr4. xdr_read_pages places
+ * the opaque device_addr4 in the xdr_buf->pages (pnfs_device->pages)
+ * and places the remaining xdr data in xdr_buf->tail
+ */
+ pdev->mincount = be32_to_cpup(p);
+ xdr_read_pages(xdr, pdev->mincount); /* include space for the length */
+
+ /* Parse notification bitmap, verifying that it is zero. */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ if (len) {
+ int i;
+
+ p = xdr_inline_decode(xdr, 4 * len);
+ if (unlikely(!p))
+ goto out_overflow;
+ for (i = 0; i < len; i++, p++) {
+ if (be32_to_cpup(p)) {
+ dprintk("%s: notifications not supported\n",
+ __func__);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
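+/*
+ * Decode a LAYOUTGET reply: return_on_close flag, layout stateid, then the
+ * layout array. Only the first entry is used; any further entries are
+ * ignored.
+ */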
+static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
+ struct nfs4_layoutget_res *res)
+{
+ __be32 *p;
+ int status;
+ u32 layout_count;
+
+ status = decode_op_hdr(xdr, OP_LAYOUTGET);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 8 + NFS4_STATEID_SIZE);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->return_on_close = be32_to_cpup(p++);
+ p = xdr_decode_opaque_fixed(p, res->stateid.data, NFS4_STATEID_SIZE);
+ layout_count = be32_to_cpup(p);
+ if (!layout_count) {
+ dprintk("%s: server responded with empty layout array\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ p = xdr_inline_decode(xdr, 24);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &res->range.offset);
+ p = xdr_decode_hyper(p, &res->range.length);
+ res->range.iomode = be32_to_cpup(p++);
+ res->type = be32_to_cpup(p++);
+
+ status = decode_opaque_inline(xdr, &res->layout.len, (char **)&p);
+ if (unlikely(status))
+ return status;
+
+ dprintk("%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n",
+ __func__,
+ (unsigned long)res->range.offset,
+ (unsigned long)res->range.length,
+ res->range.iomode,
+ res->type,
+ res->layout.len);
+
+ /* nfs4_proc_layoutget allocated a single page */
+ if (res->layout.len > PAGE_SIZE)
+ return -ENOMEM;
+ memcpy(res->layout.buf, p, res->layout.len);
+
+ if (layout_count > 1) {
+ /* We only handle a length one array at the moment. Any
+ * further entries are just ignored. Note that this means
+ * the client may see a response that is less than the
+ * minimum it requested.
+ */
+ dprintk("%s: server responded with %d layouts, dropping tail\n",
+ __func__, layout_count);
+ }
+
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* END OF "GENERIC" DECODE ROUTINES.
*/
@@ -4873,7 +5225,7 @@ out:
/*
* Decode RENAME response
*/
-static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_rename_res *res)
+static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs_renameres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -5758,25 +6110,84 @@ static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
status = decode_reclaim_complete(&xdr, (void *)NULL);
return status;
}
+
+/*
+ * Decode GETDEVINFO response
+ */
+static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_getdeviceinfo_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status != 0)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status != 0)
+ goto out;
+ status = decode_getdeviceinfo(&xdr, res->pdev);
+out:
+ return status;
+}
+
+/*
+ * Decode LAYOUTGET response
+ */
+static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_layoutget_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(&xdr);
+ if (status)
+ goto out;
+ status = decode_layoutget(&xdr, rqstp, res);
+out:
+ return status;
+}
#endif /* CONFIG_NFS_V4_1 */
-__be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
+__be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ struct nfs_server *server, int plus)
{
uint32_t bitmap[2] = {0};
uint32_t len;
-
- if (!*p++) {
- if (!*p)
+ __be32 *p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!ntohl(*p++)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (!ntohl(*p++))
return ERR_PTR(-EAGAIN);
entry->eof = 1;
return ERR_PTR(-EBADCOOKIE);
}
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
entry->prev_cookie = entry->cookie;
p = xdr_decode_hyper(p, &entry->cookie);
entry->len = ntohl(*p++);
+
+ p = xdr_inline_decode(xdr, entry->len);
+ if (unlikely(!p))
+ goto out_overflow;
entry->name = (const char *) p;
- p += XDR_QUADLEN(entry->len);
/*
* In case the server doesn't return an inode number,
@@ -5784,32 +6195,33 @@ __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
* since glibc seems to choke on it...)
*/
entry->ino = 1;
+ entry->fattr->valid = 0;
- len = ntohl(*p++); /* bitmap length */
- if (len-- > 0) {
- bitmap[0] = ntohl(*p++);
- if (len-- > 0) {
- bitmap[1] = ntohl(*p++);
- p += len;
- }
- }
- len = XDR_QUADLEN(ntohl(*p++)); /* attribute buffer length */
- if (len > 0) {
- if (bitmap[0] & FATTR4_WORD0_RDATTR_ERROR) {
- bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
- /* Ignore the return value of rdattr_error for now */
- p++;
- len--;
- }
- if (bitmap[0] == 0 && bitmap[1] == FATTR4_WORD1_MOUNTED_ON_FILEID)
- xdr_decode_hyper(p, &entry->ino);
- else if (bitmap[0] == FATTR4_WORD0_FILEID)
- xdr_decode_hyper(p, &entry->ino);
- p += len;
- }
+ if (decode_attr_bitmap(xdr, bitmap) < 0)
+ goto out_overflow;
+
+ if (decode_attr_length(xdr, &len, &p) < 0)
+ goto out_overflow;
+
+ if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, server, 1) < 0)
+ goto out_overflow;
+ if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
+ entry->ino = entry->fattr->fileid;
+
+ if (verify_attr_len(xdr, p, len) < 0)
+ goto out_overflow;
+
+ p = xdr_inline_peek(xdr, 8);
+ if (p != NULL)
+ entry->eof = !p[0] && p[1];
+ else
+ entry->eof = 0;
- entry->eof = !p[0] && p[1];
return p;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return ERR_PTR(-EIO);
}
/*
@@ -5936,6 +6348,8 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(SEQUENCE, enc_sequence, dec_sequence),
PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
+ PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
+ PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index df101d9f546..460df365288 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -3,9 +3,10 @@
*
* Allow an NFS filesystem to be mounted as root. The way this works is:
* (1) Use the IP autoconfig mechanism to set local IP addresses and routes.
- * (2) Handle RPC negotiation with the system which replied to RARP or
- * was reported as a boot server by BOOTP or manually.
- * (3) The actual mounting is done later, when init() is running.
+ * (2) Construct the device string and the options string using DHCP
+ * option 17 and/or kernel command line options.
+ * (3) When mount_root() sets up the root file system, pass these strings
+ * to the NFS client's regular mount interface via sys_mount().
*
*
* Changes:
@@ -65,470 +66,243 @@
* Hua Qin : Support for mounting root file system via
* NFS over TCP.
* Fabian Frederick: Option parser rebuilt (using parser lib)
-*/
+ * Chuck Lever : Use super.c's text-based mount option parsing
+ * Chuck Lever : Add "nfsrootdebug".
+ */
#include <linux/types.h>
#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/fs.h>
#include <linux/init.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/xprtsock.h>
#include <linux/nfs.h>
#include <linux/nfs_fs.h>
-#include <linux/nfs_mount.h>
-#include <linux/in.h>
-#include <linux/major.h>
#include <linux/utsname.h>
-#include <linux/inet.h>
#include <linux/root_dev.h>
#include <net/ipconfig.h>
-#include <linux/parser.h>
#include "internal.h"
-/* Define this to allow debugging output */
-#undef NFSROOT_DEBUG
#define NFSDBG_FACILITY NFSDBG_ROOT
-/* Default port to use if server is not running a portmapper */
-#define NFS_MNT_PORT 627
-
/* Default path we try to mount. "%s" gets replaced by our IP address */
#define NFS_ROOT "/tftpboot/%s"
/* Parameters passed from the kernel command line */
-static char nfs_root_name[256] __initdata = "";
+static char nfs_root_parms[256] __initdata = "";
+
+/* Text-based mount options passed to super.c */
+static char nfs_root_options[256] __initdata = "";
/* Address of NFS server */
-static __be32 servaddr __initdata = 0;
+static __be32 servaddr __initdata = htonl(INADDR_NONE);
/* Name of directory to mount */
-static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = { 0, };
-
-/* NFS-related data */
-static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */
-static int nfs_port __initdata = 0; /* Port to connect to for NFS */
-static int mount_port __initdata = 0; /* Mount daemon port number */
-
-
-/***************************************************************************
-
- Parsing of options
-
- ***************************************************************************/
-
-enum {
- /* Options that take integer arguments */
- Opt_port, Opt_rsize, Opt_wsize, Opt_timeo, Opt_retrans, Opt_acregmin,
- Opt_acregmax, Opt_acdirmin, Opt_acdirmax,
- /* Options that take no arguments */
- Opt_soft, Opt_hard, Opt_intr,
- Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac,
- Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp,
- Opt_acl, Opt_noacl,
- /* Error token */
- Opt_err
-};
-
-static const match_table_t tokens __initconst = {
- {Opt_port, "port=%u"},
- {Opt_rsize, "rsize=%u"},
- {Opt_wsize, "wsize=%u"},
- {Opt_timeo, "timeo=%u"},
- {Opt_retrans, "retrans=%u"},
- {Opt_acregmin, "acregmin=%u"},
- {Opt_acregmax, "acregmax=%u"},
- {Opt_acdirmin, "acdirmin=%u"},
- {Opt_acdirmax, "acdirmax=%u"},
- {Opt_soft, "soft"},
- {Opt_hard, "hard"},
- {Opt_intr, "intr"},
- {Opt_nointr, "nointr"},
- {Opt_posix, "posix"},
- {Opt_noposix, "noposix"},
- {Opt_cto, "cto"},
- {Opt_nocto, "nocto"},
- {Opt_ac, "ac"},
- {Opt_noac, "noac"},
- {Opt_lock, "lock"},
- {Opt_nolock, "nolock"},
- {Opt_v2, "nfsvers=2"},
- {Opt_v2, "v2"},
- {Opt_v3, "nfsvers=3"},
- {Opt_v3, "v3"},
- {Opt_udp, "proto=udp"},
- {Opt_udp, "udp"},
- {Opt_tcp, "proto=tcp"},
- {Opt_tcp, "tcp"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_err, NULL}
-
-};
+static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = "";
+
+/* server:export path string passed to super.c */
+static char nfs_root_device[NFS_MAXPATHLEN + 1] __initdata = "";
/*
- * Parse option string.
+ * When the "nfsrootdebug" kernel command line option is specified,
+ * enable debugging messages for NFSROOT.
*/
-
-static int __init root_nfs_parse(char *name, char *buf)
+static int __init nfs_root_debug(char *__unused)
{
-
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
-
- if (!name)
- return 1;
-
- /* Set the NFS remote path */
- p = strsep(&name, ",");
- if (p[0] != '\0' && strcmp(p, "default") != 0)
- strlcpy(buf, p, NFS_MAXPATHLEN);
-
- while ((p = strsep (&name, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
- token = match_token(p, tokens, args);
-
- /* %u tokens only. Beware if you add new tokens! */
- if (token < Opt_soft && match_int(&args[0], &option))
- return 0;
- switch (token) {
- case Opt_port:
- nfs_port = option;
- break;
- case Opt_rsize:
- nfs_data.rsize = option;
- break;
- case Opt_wsize:
- nfs_data.wsize = option;
- break;
- case Opt_timeo:
- nfs_data.timeo = option;
- break;
- case Opt_retrans:
- nfs_data.retrans = option;
- break;
- case Opt_acregmin:
- nfs_data.acregmin = option;
- break;
- case Opt_acregmax:
- nfs_data.acregmax = option;
- break;
- case Opt_acdirmin:
- nfs_data.acdirmin = option;
- break;
- case Opt_acdirmax:
- nfs_data.acdirmax = option;
- break;
- case Opt_soft:
- nfs_data.flags |= NFS_MOUNT_SOFT;
- break;
- case Opt_hard:
- nfs_data.flags &= ~NFS_MOUNT_SOFT;
- break;
- case Opt_intr:
- case Opt_nointr:
- break;
- case Opt_posix:
- nfs_data.flags |= NFS_MOUNT_POSIX;
- break;
- case Opt_noposix:
- nfs_data.flags &= ~NFS_MOUNT_POSIX;
- break;
- case Opt_cto:
- nfs_data.flags &= ~NFS_MOUNT_NOCTO;
- break;
- case Opt_nocto:
- nfs_data.flags |= NFS_MOUNT_NOCTO;
- break;
- case Opt_ac:
- nfs_data.flags &= ~NFS_MOUNT_NOAC;
- break;
- case Opt_noac:
- nfs_data.flags |= NFS_MOUNT_NOAC;
- break;
- case Opt_lock:
- nfs_data.flags &= ~NFS_MOUNT_NONLM;
- break;
- case Opt_nolock:
- nfs_data.flags |= NFS_MOUNT_NONLM;
- break;
- case Opt_v2:
- nfs_data.flags &= ~NFS_MOUNT_VER3;
- break;
- case Opt_v3:
- nfs_data.flags |= NFS_MOUNT_VER3;
- break;
- case Opt_udp:
- nfs_data.flags &= ~NFS_MOUNT_TCP;
- break;
- case Opt_tcp:
- nfs_data.flags |= NFS_MOUNT_TCP;
- break;
- case Opt_acl:
- nfs_data.flags &= ~NFS_MOUNT_NOACL;
- break;
- case Opt_noacl:
- nfs_data.flags |= NFS_MOUNT_NOACL;
- break;
- default:
- printk(KERN_WARNING "Root-NFS: unknown "
- "option: %s\n", p);
- return 0;
- }
- }
-
+ nfs_debug |= NFSDBG_ROOT | NFSDBG_MOUNT;
return 1;
}
+__setup("nfsrootdebug", nfs_root_debug);
+
/*
- * Prepare the NFS data structure and parse all options.
+ * Parse NFS server and directory information passed on the kernel
+ * command line.
+ *
+ * nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]
+ *
+ * If there is a "%s" token in the <root-dir> string, it is replaced
+ * by the ASCII-representation of the client's IP address.
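+ *
+ * An illustrative example (server address, path and options are
+ * placeholders, not defaults):
+ *
+ * nfsroot=10.0.0.1:/export/client-root,nfsvers=3,proto=tcp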
*/
-static int __init root_nfs_name(char *name)
+static int __init nfs_root_setup(char *line)
{
- static char buf[NFS_MAXPATHLEN] __initdata;
- char *cp;
-
- /* Set some default values */
- memset(&nfs_data, 0, sizeof(nfs_data));
- nfs_port = -1;
- nfs_data.version = NFS_MOUNT_VERSION;
- nfs_data.flags = NFS_MOUNT_NONLM; /* No lockd in nfs root yet */
- nfs_data.rsize = NFS_DEF_FILE_IO_SIZE;
- nfs_data.wsize = NFS_DEF_FILE_IO_SIZE;
- nfs_data.acregmin = NFS_DEF_ACREGMIN;
- nfs_data.acregmax = NFS_DEF_ACREGMAX;
- nfs_data.acdirmin = NFS_DEF_ACDIRMIN;
- nfs_data.acdirmax = NFS_DEF_ACDIRMAX;
- strcpy(buf, NFS_ROOT);
-
- /* Process options received from the remote server */
- root_nfs_parse(root_server_path, buf);
-
- /* Override them by options set on kernel command-line */
- root_nfs_parse(name, buf);
-
- cp = utsname()->nodename;
- if (strlen(buf) + strlen(cp) > NFS_MAXPATHLEN) {
- printk(KERN_ERR "Root-NFS: Pathname for remote directory too long.\n");
- return -1;
+ ROOT_DEV = Root_NFS;
+
+ if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
+ strlcpy(nfs_root_parms, line, sizeof(nfs_root_parms));
+ } else {
+ size_t n = strlen(line) + sizeof(NFS_ROOT) - 1;
+ if (n >= sizeof(nfs_root_parms))
+ line[sizeof(nfs_root_parms) - sizeof(NFS_ROOT) - 2] = '\0';
+ sprintf(nfs_root_parms, NFS_ROOT, line);
}
- sprintf(nfs_export_path, buf, cp);
+
+ /*
+ * Extract the IP address of the NFS server containing our
+ * root file system, if one was specified.
+ *
+ * Note: root_nfs_parse_addr() removes the server-ip from
+ * nfs_root_parms, if it exists.
+ */
+ root_server_addr = root_nfs_parse_addr(nfs_root_parms);
return 1;
}
+__setup("nfsroot=", nfs_root_setup);
-/*
- * Get NFS server address.
- */
-static int __init root_nfs_addr(void)
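+/* Bounded string helpers; each returns -1 if @src does not fit in @dest. */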
+static int __init root_nfs_copy(char *dest, const char *src,
+ const size_t destlen)
{
- if ((servaddr = root_server_addr) == htonl(INADDR_NONE)) {
- printk(KERN_ERR "Root-NFS: No NFS server available, giving up.\n");
+ if (strlcpy(dest, src, destlen) > destlen)
return -1;
- }
+ return 0;
+}
- snprintf(nfs_data.hostname, sizeof(nfs_data.hostname),
- "%pI4", &servaddr);
+static int __init root_nfs_cat(char *dest, const char *src,
+ const size_t destlen)
+{
+ if (strlcat(dest, src, destlen) > destlen)
+ return -1;
return 0;
}
/*
- * Tell the user what's going on.
+ * Parse out root export path and mount options from
+ * passed-in string @incoming.
+ *
+ * Copy the export path into @exppath.
*/
-#ifdef NFSROOT_DEBUG
-static void __init root_nfs_print(void)
+static int __init root_nfs_parse_options(char *incoming, char *exppath,
+ const size_t exppathlen)
{
- printk(KERN_NOTICE "Root-NFS: Mounting %s on server %s as root\n",
- nfs_export_path, nfs_data.hostname);
- printk(KERN_NOTICE "Root-NFS: rsize = %d, wsize = %d, timeo = %d, retrans = %d\n",
- nfs_data.rsize, nfs_data.wsize, nfs_data.timeo, nfs_data.retrans);
- printk(KERN_NOTICE "Root-NFS: acreg (min,max) = (%d,%d), acdir (min,max) = (%d,%d)\n",
- nfs_data.acregmin, nfs_data.acregmax,
- nfs_data.acdirmin, nfs_data.acdirmax);
- printk(KERN_NOTICE "Root-NFS: nfsd port = %d, mountd port = %d, flags = %08x\n",
- nfs_port, mount_port, nfs_data.flags);
-}
-#endif
-
+ char *p;
-static int __init root_nfs_init(void)
-{
-#ifdef NFSROOT_DEBUG
- nfs_debug |= NFSDBG_ROOT;
-#endif
+ /*
+ * Set the NFS remote path
+ */
+ p = strsep(&incoming, ",");
+ if (*p != '\0' && strcmp(p, "default") != 0)
+ if (root_nfs_copy(exppath, p, exppathlen))
+ return -1;
/*
- * Decode the root directory path name and NFS options from
- * the kernel command line. This has to go here in order to
- * be able to use the client IP address for the remote root
- * directory (necessary for pure RARP booting).
+ * @incoming now points to the rest of the string; if it
+ * contains something, append it to our root options buffer
*/
- if (root_nfs_name(nfs_root_name) < 0 ||
- root_nfs_addr() < 0)
- return -1;
+ if (incoming != NULL && *incoming != '\0')
+ if (root_nfs_cat(nfs_root_options, incoming,
+ sizeof(nfs_root_options)))
+ return -1;
-#ifdef NFSROOT_DEBUG
- root_nfs_print();
-#endif
+ /*
+ * Append a trailing ',' (unless the string already ends with one)
+ * so that options added later are properly separated
+ */
+ if (nfs_root_options[0] != '\0' &&
+ nfs_root_options[strlen(nfs_root_options) - 1] != ',')
+ if (root_nfs_cat(nfs_root_options, ",",
+ sizeof(nfs_root_options)))
+ return -1;
return 0;
}
-
/*
- * Parse NFS server and directory information passed on the kernel
- * command line.
+ * Decode the export directory path name and NFS options from
+ * the kernel command line. This has to be done late in order to
+ * use a dynamically acquired client IP address for the remote
+ * root directory path.
+ *
+ * Returns zero if successful; otherwise -1 is returned.
*/
-static int __init nfs_root_setup(char *line)
+static int __init root_nfs_data(char *cmdline)
{
- ROOT_DEV = Root_NFS;
- if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
- strlcpy(nfs_root_name, line, sizeof(nfs_root_name));
- } else {
- int n = strlen(line) + sizeof(NFS_ROOT) - 1;
- if (n >= sizeof(nfs_root_name))
- line[sizeof(nfs_root_name) - sizeof(NFS_ROOT) - 2] = '\0';
- sprintf(nfs_root_name, NFS_ROOT, line);
+ char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
+ int len, retval = -1;
+ char *tmp = NULL;
+ const size_t tmplen = sizeof(nfs_export_path);
+
+ tmp = kzalloc(tmplen, GFP_KERNEL);
+ if (tmp == NULL)
+ goto out_nomem;
+ strcpy(tmp, NFS_ROOT);
+
+ if (root_server_path[0] != '\0') {
+ dprintk("Root-NFS: DHCPv4 option 17: %s\n",
+ root_server_path);
+ if (root_nfs_parse_options(root_server_path, tmp, tmplen))
+ goto out_optionstoolong;
}
- root_server_addr = root_nfs_parse_addr(nfs_root_name);
- return 1;
-}
-__setup("nfsroot=", nfs_root_setup);
-
-/***************************************************************************
-
- Routines to actually mount the root directory
+ if (cmdline[0] != '\0') {
+ dprintk("Root-NFS: nfsroot=%s\n", cmdline);
+ if (root_nfs_parse_options(cmdline, tmp, tmplen))
+ goto out_optionstoolong;
+ }
- ***************************************************************************/
+ /*
+ * Append mandatory options for nfsroot so they override
+ * what has come before
+ */
+ snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4",
+ &servaddr);
+ if (root_nfs_cat(nfs_root_options, addr_option,
+ sizeof(nfs_root_options)))
+ goto out_optionstoolong;
-/*
- * Construct sockaddr_in from address and port number.
- */
-static inline void
-set_sockaddr(struct sockaddr_in *sin, __be32 addr, __be16 port)
-{
- sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = addr;
- sin->sin_port = port;
-}
+ /*
+ * Set up nfs_root_device. For NFS mounts, this looks like
+ *
+ * server:/path
+ *
+ * At this point, utsname()->nodename contains our local
+ * IP address or hostname, set by ipconfig. If "%s" exists
+ * in tmp, substitute the nodename, then shovel the whole
+ * mess into nfs_root_device.
+ */
+ len = snprintf(nfs_export_path, sizeof(nfs_export_path),
+ tmp, utsname()->nodename);
+ if (len > (int)sizeof(nfs_export_path))
+ goto out_devnametoolong;
+ len = snprintf(nfs_root_device, sizeof(nfs_root_device),
+ "%pI4:%s", &servaddr, nfs_export_path);
+ if (len > (int)sizeof(nfs_root_device))
+ goto out_devnametoolong;
-/*
- * Query server portmapper for the port of a daemon program.
- */
-static int __init root_nfs_getport(int program, int version, int proto)
-{
- struct sockaddr_in sin;
+ retval = 0;
- printk(KERN_NOTICE "Looking up port of RPC %d/%d on %pI4\n",
- program, version, &servaddr);
- set_sockaddr(&sin, servaddr, 0);
- return rpcb_getport_sync(&sin, program, version, proto);
+out:
+ kfree(tmp);
+ return retval;
+out_nomem:
+ printk(KERN_ERR "Root-NFS: could not allocate memory\n");
+ goto out;
+out_optionstoolong:
+ printk(KERN_ERR "Root-NFS: mount options string too long\n");
+ goto out;
+out_devnametoolong:
+ printk(KERN_ERR "Root-NFS: root device name too long.\n");
+ goto out;
}
-
-/*
- * Use portmapper to find mountd and nfsd port numbers if not overriden
- * by the user. Use defaults if portmapper is not available.
- * XXX: Is there any nfs server with no portmapper?
+/**
+ * nfs_root_data - Return prepared 'data' for NFSROOT mount
+ * @root_device: OUT: address of string containing NFSROOT device
+ * @root_data: OUT: address of string containing NFSROOT mount options
+ *
+ * Returns zero and sets @root_device and @root_data if successful,
+ * otherwise -1 is returned.
*/
-static int __init root_nfs_ports(void)
+int __init nfs_root_data(char **root_device, char **root_data)
{
- int port;
- int nfsd_ver, mountd_ver;
- int nfsd_port, mountd_port;
- int proto;
-
- if (nfs_data.flags & NFS_MOUNT_VER3) {
- nfsd_ver = NFS3_VERSION;
- mountd_ver = NFS_MNT3_VERSION;
- nfsd_port = NFS_PORT;
- mountd_port = NFS_MNT_PORT;
- } else {
- nfsd_ver = NFS2_VERSION;
- mountd_ver = NFS_MNT_VERSION;
- nfsd_port = NFS_PORT;
- mountd_port = NFS_MNT_PORT;
- }
-
- proto = (nfs_data.flags & NFS_MOUNT_TCP) ? IPPROTO_TCP : IPPROTO_UDP;
-
- if (nfs_port < 0) {
- if ((port = root_nfs_getport(NFS_PROGRAM, nfsd_ver, proto)) < 0) {
- printk(KERN_ERR "Root-NFS: Unable to get nfsd port "
- "number from server, using default\n");
- port = nfsd_port;
- }
- nfs_port = port;
- dprintk("Root-NFS: Portmapper on server returned %d "
- "as nfsd port\n", port);
+ servaddr = root_server_addr;
+ if (servaddr == htonl(INADDR_NONE)) {
+ printk(KERN_ERR "Root-NFS: no NFS server address\n");
+ return -1;
}
- if ((port = root_nfs_getport(NFS_MNT_PROGRAM, mountd_ver, proto)) < 0) {
- printk(KERN_ERR "Root-NFS: Unable to get mountd port "
- "number from server, using default\n");
- port = mountd_port;
- }
- mount_port = port;
- dprintk("Root-NFS: mountd port is %d\n", port);
+ if (root_nfs_data(nfs_root_parms) < 0)
+ return -1;
+ *root_device = nfs_root_device;
+ *root_data = nfs_root_options;
return 0;
}
-
-
-/*
- * Get a file handle from the server for the directory which is to be
- * mounted.
- */
-static int __init root_nfs_get_handle(void)
-{
- struct sockaddr_in sin;
- unsigned int auth_flav_len = 0;
- struct nfs_mount_request request = {
- .sap = (struct sockaddr *)&sin,
- .salen = sizeof(sin),
- .dirpath = nfs_export_path,
- .version = (nfs_data.flags & NFS_MOUNT_VER3) ?
- NFS_MNT3_VERSION : NFS_MNT_VERSION,
- .protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
- XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP,
- .auth_flav_len = &auth_flav_len,
- };
- int status = -ENOMEM;
-
- request.fh = nfs_alloc_fhandle();
- if (!request.fh)
- goto out;
- set_sockaddr(&sin, servaddr, htons(mount_port));
- status = nfs_mount(&request);
- if (status < 0)
- printk(KERN_ERR "Root-NFS: Server returned error %d "
- "while mounting %s\n", status, nfs_export_path);
- else {
- nfs_data.root.size = request.fh->size;
- memcpy(&nfs_data.root.data, request.fh->data, request.fh->size);
- }
- nfs_free_fhandle(request.fh);
-out:
- return status;
-}
-
-/*
- * Get the NFS port numbers and file handle, and return the prepared 'data'
- * argument for mount() if everything went OK. Return NULL otherwise.
- */
-void * __init nfs_root_data(void)
-{
- if (root_nfs_init() < 0
- || root_nfs_ports() < 0
- || root_nfs_get_handle() < 0)
- return NULL;
- set_sockaddr((struct sockaddr_in *) &nfs_data.addr, servaddr, htons(nfs_port));
- return (void*)&nfs_data;
-}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
new file mode 100644
index 00000000000..db773428f95
--- /dev/null
+++ b/fs/nfs/pnfs.c
@@ -0,0 +1,783 @@
+/*
+ * pNFS functions to call and manage layout drivers.
+ *
+ * Copyright (c) 2002 [year of first publication]
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+#include "internal.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS
+
+/* Locking:
+ *
+ * pnfs_spinlock:
+ * protects pnfs_modules_tbl.
+ */
+static DEFINE_SPINLOCK(pnfs_spinlock);
+
+/*
+ * pnfs_modules_tbl holds all pnfs modules
+ */
+static LIST_HEAD(pnfs_modules_tbl);
+
+/* Return the registered pnfs layout driver module matching given id */
+static struct pnfs_layoutdriver_type *
+find_pnfs_driver_locked(u32 id)
+{
+ struct pnfs_layoutdriver_type *local;
+
+ list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
+ if (local->id == id)
+ goto out;
+ local = NULL;
+out:
+ dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
+ return local;
+}
+
+static struct pnfs_layoutdriver_type *
+find_pnfs_driver(u32 id)
+{
+ struct pnfs_layoutdriver_type *local;
+
+ spin_lock(&pnfs_spinlock);
+ local = find_pnfs_driver_locked(id);
+ spin_unlock(&pnfs_spinlock);
+ return local;
+}
+
+void
+unset_pnfs_layoutdriver(struct nfs_server *nfss)
+{
+ if (nfss->pnfs_curr_ld) {
+ nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
+ module_put(nfss->pnfs_curr_ld->owner);
+ }
+ nfss->pnfs_curr_ld = NULL;
+}
+
+/*
+ * Try to set the server's pnfs module to the pnfs layout type specified by id.
+ * Currently only one pNFS layout driver per filesystem is supported.
+ *
+ * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
+ */
+void
+set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
+{
+ struct pnfs_layoutdriver_type *ld_type = NULL;
+
+ if (id == 0)
+ goto out_no_driver;
+ if (!(server->nfs_client->cl_exchange_flags &
+ (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
+ printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
+ id, server->nfs_client->cl_exchange_flags);
+ goto out_no_driver;
+ }
+ ld_type = find_pnfs_driver(id);
+ if (!ld_type) {
+ request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
+ ld_type = find_pnfs_driver(id);
+ if (!ld_type) {
+ dprintk("%s: No pNFS module found for %u.\n",
+ __func__, id);
+ goto out_no_driver;
+ }
+ }
+ if (!try_module_get(ld_type->owner)) {
+ dprintk("%s: Could not grab reference on module\n", __func__);
+ goto out_no_driver;
+ }
+ server->pnfs_curr_ld = ld_type;
+ if (ld_type->set_layoutdriver(server)) {
+ printk(KERN_ERR
+ "%s: Error initializing mount point for layout driver %u.\n",
+ __func__, id);
+ module_put(ld_type->owner);
+ goto out_no_driver;
+ }
+ dprintk("%s: pNFS module for %u set\n", __func__, id);
+ return;
+
+out_no_driver:
+ dprintk("%s: Using NFSv4 I/O\n", __func__);
+ server->pnfs_curr_ld = NULL;
+}
+
+int
+pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
+{
+ int status = -EINVAL;
+ struct pnfs_layoutdriver_type *tmp;
+
+ if (ld_type->id == 0) {
+ printk(KERN_ERR "%s id 0 is reserved\n", __func__);
+ return status;
+ }
+ if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
+ printk(KERN_ERR "%s Layout driver must provide "
+ "alloc_lseg and free_lseg.\n", __func__);
+ return status;
+ }
+
+ spin_lock(&pnfs_spinlock);
+ tmp = find_pnfs_driver_locked(ld_type->id);
+ if (!tmp) {
+ list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
+ status = 0;
+ dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
+ ld_type->name);
+ } else {
+ printk(KERN_ERR "%s Module with id %d already loaded!\n",
+ __func__, ld_type->id);
+ }
+ spin_unlock(&pnfs_spinlock);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
+
+void
+pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
+{
+ dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
+ spin_lock(&pnfs_spinlock);
+ list_del(&ld_type->pnfs_tblid);
+ spin_unlock(&pnfs_spinlock);
+}
+EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
+
+/*
+ * pNFS client layout cache
+ */
+
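+/*
+ * Reference counting for the per-inode layout header. Both helpers must be
+ * called with the inode's i_lock held; the header is freed and detached from
+ * the nfs_inode on the final put.
+ */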
+static void
+get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+{
+ assert_spin_locked(&lo->inode->i_lock);
+ lo->refcount++;
+}
+
+static void
+put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+{
+ assert_spin_locked(&lo->inode->i_lock);
+ BUG_ON(lo->refcount == 0);
+
+ lo->refcount--;
+ if (!lo->refcount) {
+ dprintk("%s: freeing layout cache %p\n", __func__, lo);
+ BUG_ON(!list_empty(&lo->layouts));
+ NFS_I(lo->inode)->layout = NULL;
+ kfree(lo);
+ }
+}
+
+void
+put_layout_hdr(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ put_layout_hdr_locked(NFS_I(inode)->layout);
+ spin_unlock(&inode->i_lock);
+}
+
+static void
+init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
+{
+ INIT_LIST_HEAD(&lseg->fi_list);
+ kref_init(&lseg->kref);
+ lseg->layout = lo;
+}
+
+/* Called without i_lock held, as the free_lseg call may sleep */
+static void
+destroy_lseg(struct kref *kref)
+{
+ struct pnfs_layout_segment *lseg =
+ container_of(kref, struct pnfs_layout_segment, kref);
+ struct inode *ino = lseg->layout->inode;
+
+ dprintk("--> %s\n", __func__);
+ NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+ /* Matched by get_layout_hdr_locked in pnfs_insert_layout */
+ put_layout_hdr(ino);
+}
+
+static void
+put_lseg(struct pnfs_layout_segment *lseg)
+{
+ if (!lseg)
+ return;
+
+ dprintk("%s: lseg %p ref %d\n", __func__, lseg,
+ atomic_read(&lseg->kref.refcount));
+ kref_put(&lseg->kref, destroy_lseg);
+}
+
+static void
+pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+{
+ struct pnfs_layout_segment *lseg, *next;
+ struct nfs_client *clp;
+
+ dprintk("%s:Begin lo %p\n", __func__, lo);
+
+ assert_spin_locked(&lo->inode->i_lock);
+ list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
+ dprintk("%s: freeing lseg %p\n", __func__, lseg);
+ list_move(&lseg->fi_list, tmp_list);
+ }
+ clp = NFS_SERVER(lo->inode)->nfs_client;
+ spin_lock(&clp->cl_lock);
+ /* List does not take a reference, so no need for put here */
+ list_del_init(&lo->layouts);
+ spin_unlock(&clp->cl_lock);
+ write_seqlock(&lo->seqlock);
+ clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
+ write_sequnlock(&lo->seqlock);
+
+ dprintk("%s:Return\n", __func__);
+}
+
+static void
+pnfs_free_lseg_list(struct list_head *tmp_list)
+{
+ struct pnfs_layout_segment *lseg;
+
+ while (!list_empty(tmp_list)) {
+ lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
+ fi_list);
+ dprintk("%s calling put_lseg on %p\n", __func__, lseg);
+ list_del(&lseg->fi_list);
+ put_lseg(lseg);
+ }
+}
+
+void
+pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+ struct pnfs_layout_hdr *lo;
+ LIST_HEAD(tmp_list);
+
+ spin_lock(&nfsi->vfs_inode.i_lock);
+ lo = nfsi->layout;
+ if (lo) {
+ pnfs_clear_lseg_list(lo, &tmp_list);
+ /* Matched by refcount set to 1 in alloc_init_layout_hdr */
+ put_layout_hdr_locked(lo);
+ }
+ spin_unlock(&nfsi->vfs_inode.i_lock);
+ pnfs_free_lseg_list(&tmp_list);
+}
+
+/*
+ * Called by the state manager to remove all layouts established under an
+ * expired lease.
+ */
+void
+pnfs_destroy_all_layouts(struct nfs_client *clp)
+{
+ struct pnfs_layout_hdr *lo;
+ LIST_HEAD(tmp_list);
+
+ spin_lock(&clp->cl_lock);
+ list_splice_init(&clp->cl_layouts, &tmp_list);
+ spin_unlock(&clp->cl_lock);
+
+ while (!list_empty(&tmp_list)) {
+ lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
+ layouts);
+ dprintk("%s freeing layout for inode %lu\n", __func__,
+ lo->inode->i_ino);
+ pnfs_destroy_layout(NFS_I(lo->inode));
+ }
+}
+
+/* Update lo->stateid with the new stateid if it is more recent.
+ *
+ * lo->stateid could be the open stateid, in which case we just use what is given.
+ */
+static void
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *new)
+{
+ nfs4_stateid *old = &lo->stateid;
+ bool overwrite = false;
+
+ write_seqlock(&lo->seqlock);
+ if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
+ memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other)))
+ overwrite = true;
+ else {
+ u32 oldseq, newseq;
+
+ oldseq = be32_to_cpu(old->stateid.seqid);
+ newseq = be32_to_cpu(new->stateid.seqid);
+ if ((int)(newseq - oldseq) > 0)
+ overwrite = true;
+ }
+ if (overwrite)
+ memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
+ write_sequnlock(&lo->seqlock);
+}
+
+static void
+pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
+ struct nfs4_state *state)
+{
+ int seq;
+
+ dprintk("--> %s\n", __func__);
+ write_seqlock(&lo->seqlock);
+ do {
+ seq = read_seqbegin(&state->seqlock);
+ memcpy(lo->stateid.data, state->stateid.data,
+ sizeof(state->stateid.data));
+ } while (read_seqretry(&state->seqlock, seq));
+ set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
+ write_sequnlock(&lo->seqlock);
+ dprintk("<-- %s\n", __func__);
+}
+
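+/*
+ * Copy the current layout stateid into @dst. If no layout stateid has been
+ * established yet, seed it from the open stateid and retry under the seqlock.
+ */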
+void
+pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+ struct nfs4_state *open_state)
+{
+ int seq;
+
+ dprintk("--> %s\n", __func__);
+ do {
+ seq = read_seqbegin(&lo->seqlock);
+ if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
+ /* This will trigger retry of the read */
+ pnfs_layout_from_open_stateid(lo, open_state);
+ } else
+ memcpy(dst->data, lo->stateid.data,
+ sizeof(lo->stateid.data));
+ } while (read_seqretry(&lo->seqlock, seq));
+ dprintk("<-- %s\n", __func__);
+}
+
+/*
+ * Get layout from server.
+ * For now, assume that whole file layouts are requested:
+ * arg->offset: 0
+ * arg->length: all ones
+ */
+static struct pnfs_layout_segment *
+send_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs_open_context *ctx,
+ u32 iomode)
+{
+ struct inode *ino = lo->inode;
+ struct nfs_server *server = NFS_SERVER(ino);
+ struct nfs4_layoutget *lgp;
+ struct pnfs_layout_segment *lseg = NULL;
+
+ dprintk("--> %s\n", __func__);
+
+ BUG_ON(ctx == NULL);
+ lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
+ if (lgp == NULL) {
+ put_layout_hdr(lo->inode);
+ return NULL;
+ }
+ lgp->args.minlength = NFS4_MAX_UINT64;
+ lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
+ lgp->args.range.iomode = iomode;
+ lgp->args.range.offset = 0;
+ lgp->args.range.length = NFS4_MAX_UINT64;
+ lgp->args.type = server->pnfs_curr_ld->id;
+ lgp->args.inode = ino;
+ lgp->args.ctx = get_nfs_open_context(ctx);
+ lgp->lsegpp = &lseg;
+
+ /* Synchronously retrieve layout information from server and
+ * store in lseg.
+ */
+ nfs4_proc_layoutget(lgp);
+ if (!lseg) {
+ /* remember that LAYOUTGET failed and suspend trying */
+ set_bit(lo_fail_bit(iomode), &lo->state);
+ }
+ return lseg;
+}
+
+/*
+ * Compare two layout segments for sorting into layout cache.
+ * We want to preferentially return RW over RO layouts, so ensure those
+ * are seen first.
+ */
+static s64
+cmp_layout(u32 iomode1, u32 iomode2)
+{
+ /* read > read/write */
+ return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
+}
+
+static void
+pnfs_insert_layout(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_segment *lp;
+ int found = 0;
+
+ dprintk("%s:Begin\n", __func__);
+
+ assert_spin_locked(&lo->inode->i_lock);
+ if (list_empty(&lo->segs)) {
+ struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client;
+
+ spin_lock(&clp->cl_lock);
+ BUG_ON(!list_empty(&lo->layouts));
+ list_add_tail(&lo->layouts, &clp->cl_layouts);
+ spin_unlock(&clp->cl_lock);
+ }
+ list_for_each_entry(lp, &lo->segs, fi_list) {
+ if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
+ continue;
+ list_add_tail(&lseg->fi_list, &lp->fi_list);
+ dprintk("%s: inserted lseg %p "
+ "iomode %d offset %llu length %llu before "
+ "lp %p iomode %d offset %llu length %llu\n",
+ __func__, lseg, lseg->range.iomode,
+ lseg->range.offset, lseg->range.length,
+ lp, lp->range.iomode, lp->range.offset,
+ lp->range.length);
+ found = 1;
+ break;
+ }
+ if (!found) {
+ list_add_tail(&lseg->fi_list, &lo->segs);
+ dprintk("%s: inserted lseg %p "
+ "iomode %d offset %llu length %llu at tail\n",
+ __func__, lseg, lseg->range.iomode,
+ lseg->range.offset, lseg->range.length);
+ }
+ get_layout_hdr_locked(lo);
+
+ dprintk("%s:Return\n", __func__);
+}
+
+static struct pnfs_layout_hdr *
+alloc_init_layout_hdr(struct inode *ino)
+{
+ struct pnfs_layout_hdr *lo;
+
+ lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
+ if (!lo)
+ return NULL;
+ lo->refcount = 1;
+ INIT_LIST_HEAD(&lo->layouts);
+ INIT_LIST_HEAD(&lo->segs);
+ seqlock_init(&lo->seqlock);
+ lo->inode = ino;
+ return lo;
+}
+
+static struct pnfs_layout_hdr *
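+/*
+ * Return the inode's layout header, allocating one if none exists. The
+ * i_lock is dropped around the allocation, so the result is re-checked in
+ * case another thread installed a header in the meantime.
+ */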
+pnfs_find_alloc_layout(struct inode *ino)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_hdr *new = NULL;
+
+ dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
+
+ assert_spin_locked(&ino->i_lock);
+ if (nfsi->layout)
+ return nfsi->layout;
+
+ spin_unlock(&ino->i_lock);
+ new = alloc_init_layout_hdr(ino);
+ spin_lock(&ino->i_lock);
+
+ if (likely(nfsi->layout == NULL)) /* Won the race? */
+ nfsi->layout = new;
+ else
+ kfree(new);
+ return nfsi->layout;
+}
+
+/*
+ * iomode matching rules:
+ * iomode lseg match
+ * ----- ----- -----
+ * ANY READ true
+ * ANY RW true
+ * RW READ false
+ * RW RW true
+ * READ READ true
+ * READ RW true
+ */
+static int
+is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
+{
+ return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW);
+}
+
+/*
+ * lookup range in layout
+ */
+static struct pnfs_layout_segment *
+pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
+{
+ struct pnfs_layout_segment *lseg, *ret = NULL;
+
+ dprintk("%s:Begin\n", __func__);
+
+ assert_spin_locked(&lo->inode->i_lock);
+ list_for_each_entry(lseg, &lo->segs, fi_list) {
+ if (is_matching_lseg(lseg, iomode)) {
+ ret = lseg;
+ break;
+ }
+ if (cmp_layout(iomode, lseg->range.iomode) > 0)
+ break;
+ }
+
+ dprintk("%s:Return lseg %p ref %d\n",
+ __func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0);
+ return ret;
+}
+
+/*
+ * The layout segment is retrieved from the server if it is not cached.
+ * The appropriate layout segment is referenced and returned to the caller.
+ */
+struct pnfs_layout_segment *
+pnfs_update_layout(struct inode *ino,
+ struct nfs_open_context *ctx,
+ enum pnfs_iomode iomode)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_hdr *lo;
+ struct pnfs_layout_segment *lseg = NULL;
+
+ if (!pnfs_enabled_sb(NFS_SERVER(ino)))
+ return NULL;
+ spin_lock(&ino->i_lock);
+ lo = pnfs_find_alloc_layout(ino);
+ if (lo == NULL) {
+ dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
+ goto out_unlock;
+ }
+
+ /* Check to see if the layout for the given range already exists */
+ lseg = pnfs_has_layout(lo, iomode);
+ if (lseg) {
+ dprintk("%s: Using cached lseg %p for iomode %d)\n",
+ __func__, lseg, iomode);
+ goto out_unlock;
+ }
+
+ /* if LAYOUTGET already failed once we don't try again */
+ if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state))
+ goto out_unlock;
+
+ get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
+ spin_unlock(&ino->i_lock);
+
+ lseg = send_layoutget(lo, ctx, iomode);
+out:
+ dprintk("%s end, state 0x%lx lseg %p\n", __func__,
+ nfsi->layout->state, lseg);
+ return lseg;
+out_unlock:
+ spin_unlock(&ino->i_lock);
+ goto out;
+}
+
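+/*
+ * Called after a successful LAYOUTGET: have the layout driver turn the
+ * opaque layout into an lseg, insert it into the inode's layout cache and
+ * record the returned layout stateid.
+ */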
+int
+pnfs_layout_process(struct nfs4_layoutget *lgp)
+{
+ struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
+ struct nfs4_layoutget_res *res = &lgp->res;
+ struct pnfs_layout_segment *lseg;
+ struct inode *ino = lo->inode;
+ int status = 0;
+
+ /* Inject layout blob into I/O device driver */
+ lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
+ if (!lseg || IS_ERR(lseg)) {
+ if (!lseg)
+ status = -ENOMEM;
+ else
+ status = PTR_ERR(lseg);
+ dprintk("%s: Could not allocate layout: error %d\n",
+ __func__, status);
+ goto out;
+ }
+
+ spin_lock(&ino->i_lock);
+ init_lseg(lo, lseg);
+ lseg->range = res->range;
+ *lgp->lsegpp = lseg;
+ pnfs_insert_layout(lo, lseg);
+
+ /* Done processing layoutget. Set the layout stateid */
+ pnfs_set_layout_stateid(lo, &res->stateid);
+ spin_unlock(&ino->i_lock);
+out:
+ return status;
+}
+
+/*
+ * Device ID cache. Currently supports one layout type per struct nfs_client.
+ * Add layout type to the lookup key to expand to support multiple types.
+ */
+int
+pnfs_alloc_init_deviceid_cache(struct nfs_client *clp,
+ void (*free_callback)(struct pnfs_deviceid_node *))
+{
+ struct pnfs_deviceid_cache *c;
+
+ c = kzalloc(sizeof(struct pnfs_deviceid_cache), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+ spin_lock(&clp->cl_lock);
+ if (clp->cl_devid_cache != NULL) {
+ atomic_inc(&clp->cl_devid_cache->dc_ref);
+ dprintk("%s [kref [%d]]\n", __func__,
+ atomic_read(&clp->cl_devid_cache->dc_ref));
+ kfree(c);
+ } else {
+ /* kzalloc initializes hlists */
+ spin_lock_init(&c->dc_lock);
+ atomic_set(&c->dc_ref, 1);
+ c->dc_free_callback = free_callback;
+ clp->cl_devid_cache = c;
+ dprintk("%s [new]\n", __func__);
+ }
+ spin_unlock(&clp->cl_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_alloc_init_deviceid_cache);
+
+/*
+ * Called from pnfs_layoutdriver_type->free_lseg
+ * last layout segment reference frees deviceid
+ */
+void
+pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
+ struct pnfs_deviceid_node *devid)
+{
+ struct nfs4_deviceid *id = &devid->de_id;
+ struct pnfs_deviceid_node *d;
+ struct hlist_node *n;
+ long h = nfs4_deviceid_hash(id);
+
+ dprintk("%s [%d]\n", __func__, atomic_read(&devid->de_ref));
+ if (!atomic_dec_and_lock(&devid->de_ref, &c->dc_lock))
+ return;
+
+ hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[h], de_node)
+ if (!memcmp(&d->de_id, id, sizeof(*id))) {
+ hlist_del_rcu(&d->de_node);
+ spin_unlock(&c->dc_lock);
+ synchronize_rcu();
+ c->dc_free_callback(devid);
+ return;
+ }
+ spin_unlock(&c->dc_lock);
+ /* Why wasn't it found in the list? */
+ BUG();
+}
+EXPORT_SYMBOL_GPL(pnfs_put_deviceid);
+
+/* Find and reference a deviceid */
+struct pnfs_deviceid_node *
+pnfs_find_get_deviceid(struct pnfs_deviceid_cache *c, struct nfs4_deviceid *id)
+{
+ struct pnfs_deviceid_node *d;
+ struct hlist_node *n;
+ long hash = nfs4_deviceid_hash(id);
+
+ dprintk("--> %s hash %ld\n", __func__, hash);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[hash], de_node) {
+ if (!memcmp(&d->de_id, id, sizeof(*id))) {
+ if (!atomic_inc_not_zero(&d->de_ref)) {
+ goto fail;
+ } else {
+ rcu_read_unlock();
+ return d;
+ }
+ }
+ }
+fail:
+ rcu_read_unlock();
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pnfs_find_get_deviceid);
+
+/*
+ * Add a deviceid to the cache.
+ * GETDEVICEINFOs for same deviceid can race. If deviceid is found, discard new
+ */
+struct pnfs_deviceid_node *
+pnfs_add_deviceid(struct pnfs_deviceid_cache *c, struct pnfs_deviceid_node *new)
+{
+ struct pnfs_deviceid_node *d;
+ long hash = nfs4_deviceid_hash(&new->de_id);
+
+ dprintk("--> %s hash %ld\n", __func__, hash);
+ spin_lock(&c->dc_lock);
+ d = pnfs_find_get_deviceid(c, &new->de_id);
+ if (d) {
+ spin_unlock(&c->dc_lock);
+ dprintk("%s [discard]\n", __func__);
+ c->dc_free_callback(new);
+ return d;
+ }
+ INIT_HLIST_NODE(&new->de_node);
+ atomic_set(&new->de_ref, 1);
+ hlist_add_head_rcu(&new->de_node, &c->dc_deviceids[hash]);
+ spin_unlock(&c->dc_lock);
+ dprintk("%s [new]\n", __func__);
+ return new;
+}
+EXPORT_SYMBOL_GPL(pnfs_add_deviceid);
+
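+/*
+ * Drop a reference on the per-client device ID cache, freeing it once the
+ * last user goes away. The cache must be empty at that point.
+ */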
+void
+pnfs_put_deviceid_cache(struct nfs_client *clp)
+{
+ struct pnfs_deviceid_cache *local = clp->cl_devid_cache;
+
+ dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache);
+ if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
+ int i;
+ /* Verify cache is empty */
+ for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++)
+ BUG_ON(!hlist_empty(&local->dc_deviceids[i]));
+ clp->cl_devid_cache = NULL;
+ spin_unlock(&clp->cl_lock);
+ kfree(local);
+ }
+}
+EXPORT_SYMBOL_GPL(pnfs_put_deviceid_cache);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
new file mode 100644
index 00000000000..e12367d5048
--- /dev/null
+++ b/fs/nfs/pnfs.h
@@ -0,0 +1,189 @@
+/*
+ * pNFS client data structures.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#ifndef FS_NFS_PNFS_H
+#define FS_NFS_PNFS_H
+
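+/* A layout segment returned by LAYOUTGET, kept on the per-inode segment list. */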
+struct pnfs_layout_segment {
+ struct list_head fi_list;
+ struct pnfs_layout_range range;
+ struct kref kref;
+ struct pnfs_layout_hdr *layout;
+};
+
+#ifdef CONFIG_NFS_V4_1
+
+#define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
+
+enum {
+ NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */
+ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
+ NFS_LAYOUT_STATEID_SET, /* have a valid layout stateid */
+};
+
+/* Per-layout driver specific registration structure */
+struct pnfs_layoutdriver_type {
+ struct list_head pnfs_tblid;
+ const u32 id;
+ const char *name;
+ struct module *owner;
+ int (*set_layoutdriver) (struct nfs_server *);
+ int (*clear_layoutdriver) (struct nfs_server *);
+ struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr);
+ void (*free_lseg) (struct pnfs_layout_segment *lseg);
+};
+
+struct pnfs_layout_hdr {
+ unsigned long refcount;
+ struct list_head layouts; /* other client layouts */
+ struct list_head segs; /* layout segments list */
+ seqlock_t seqlock; /* Protects the stateid */
+ nfs4_stateid stateid;
+ unsigned long state;
+ struct inode *inode;
+};
+
+struct pnfs_device {
+ struct nfs4_deviceid dev_id;
+ unsigned int layout_type;
+ unsigned int mincount;
+ struct page **pages;
+ void *area;
+ unsigned int pgbase;
+ unsigned int pglen;
+};
+
+/*
+ * Device ID RCU cache. A device ID is unique per client ID and layout type.
+ */
+#define NFS4_DEVICE_ID_HASH_BITS 5
+#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
+#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
+
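+/* Fold the opaque device ID bytes (x = x * 37 + byte) into the hash table. */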
+static inline u32
+nfs4_deviceid_hash(struct nfs4_deviceid *id)
+{
+ unsigned char *cptr = (unsigned char *)id->data;
+ unsigned int nbytes = NFS4_DEVICEID4_SIZE;
+ u32 x = 0;
+
+ while (nbytes--) {
+ x *= 37;
+ x += *cptr++;
+ }
+ return x & NFS4_DEVICE_ID_HASH_MASK;
+}
+
+struct pnfs_deviceid_node {
+ struct hlist_node de_node;
+ struct nfs4_deviceid de_id;
+ atomic_t de_ref;
+};
+
+struct pnfs_deviceid_cache {
+ spinlock_t dc_lock;
+ atomic_t dc_ref;
+ void (*dc_free_callback)(struct pnfs_deviceid_node *);
+ struct hlist_head dc_deviceids[NFS4_DEVICE_ID_HASH_SIZE];
+};
+
+extern int pnfs_alloc_init_deviceid_cache(struct nfs_client *,
+ void (*free_callback)(struct pnfs_deviceid_node *));
+extern void pnfs_put_deviceid_cache(struct nfs_client *);
+extern struct pnfs_deviceid_node *pnfs_find_get_deviceid(
+ struct pnfs_deviceid_cache *,
+ struct nfs4_deviceid *);
+extern struct pnfs_deviceid_node *pnfs_add_deviceid(
+ struct pnfs_deviceid_cache *,
+ struct pnfs_deviceid_node *);
+extern void pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
+ struct pnfs_deviceid_node *devid);
+
+extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *);
+extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
+
+/* nfs4proc.c */
+extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
+ struct pnfs_device *dev);
+extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
+
+/* pnfs.c */
+struct pnfs_layout_segment *
+pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
+ enum pnfs_iomode access_type);
+void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
+void unset_pnfs_layoutdriver(struct nfs_server *);
+int pnfs_layout_process(struct nfs4_layoutget *lgp);
+void pnfs_destroy_layout(struct nfs_inode *);
+void pnfs_destroy_all_layouts(struct nfs_client *);
+void put_layout_hdr(struct inode *inode);
+void pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+ struct nfs4_state *open_state);
+
+
+static inline int lo_fail_bit(u32 iomode)
+{
+ return iomode == IOMODE_RW ?
+ NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
+}
+
+/* Return true if a layout driver is being used for this mountpoint */
+static inline int pnfs_enabled_sb(struct nfs_server *nfss)
+{
+ return nfss->pnfs_curr_ld != NULL;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
+{
+}
+
+static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+}
+
+static inline struct pnfs_layout_segment *
+pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
+ enum pnfs_iomode access_type)
+{
+ return NULL;
+}
+
+static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id)
+{
+}
+
+static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
+{
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+#endif /* FS_NFS_PNFS_H */
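
The pnfs_layoutdriver_type table above is the whole per-driver contract at this stage: a driver supplies alloc/free hooks for layout segments plus optional mount-time setup, and registers itself. A minimal sketch of such a module follows; the id, name and callbacks are hypothetical (the real files-layout driver arrives in later patches), but pnfs_register_layoutdriver()/pnfs_unregister_layoutdriver() are the interfaces declared above.

	#include <linux/module.h>
	#include <linux/slab.h>
	#include "pnfs.h"

	static struct pnfs_layout_segment *
	example_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr)
	{
		/* a real driver decodes lgr into a driver-private lseg here */
		return kzalloc(sizeof(struct pnfs_layout_segment), GFP_KERNEL);
	}

	static void example_free_lseg(struct pnfs_layout_segment *lseg)
	{
		kfree(lseg);
	}

	static struct pnfs_layoutdriver_type example_layoutdriver = {
		.id		= 0x80000001,	/* hypothetical layout type */
		.name		= "EXAMPLE_LAYOUT",
		.owner		= THIS_MODULE,
		.alloc_lseg	= example_alloc_lseg,
		.free_lseg	= example_free_lseg,
	};

	static int __init example_layout_init(void)
	{
		return pnfs_register_layoutdriver(&example_layoutdriver);
	}

	static void __exit example_layout_exit(void)
	{
		pnfs_unregister_layoutdriver(&example_layoutdriver);
	}

	module_init(example_layout_init);
	module_exit(example_layout_exit);
	MODULE_LICENSE("GPL");

Presumably set_pnfs_layoutdriver() uses the LAYOUT_NFSV4_1_MODULE_PREFIX alias ("nfs-layouttype4-<id>") to auto-load such a module when a server advertises a layout type that is not yet registered.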
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 611bec22f55..58e7f84fc1f 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -258,7 +258,7 @@ static void nfs_free_createdata(const struct nfs_createdata *data)
static int
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags, struct nameidata *nd)
+ int flags, struct nfs_open_context *ctx)
{
struct nfs_createdata *data;
struct rpc_message msg = {
@@ -365,17 +365,32 @@ static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
return 1;
}
+static void
+nfs_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+{
+ msg->rpc_proc = &nfs_procedures[NFSPROC_RENAME];
+}
+
+static int
+nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
+ struct inode *new_dir)
+{
+ if (nfs_async_handle_expired_key(task))
+ return 0;
+ nfs_mark_for_revalidate(old_dir);
+ nfs_mark_for_revalidate(new_dir);
+ return 1;
+}
+
static int
nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
struct inode *new_dir, struct qstr *new_name)
{
struct nfs_renameargs arg = {
- .fromfh = NFS_FH(old_dir),
- .fromname = old_name->name,
- .fromlen = old_name->len,
- .tofh = NFS_FH(new_dir),
- .toname = new_name->name,
- .tolen = new_name->len
+ .old_dir = NFS_FH(old_dir),
+ .old_name = old_name,
+ .new_dir = NFS_FH(new_dir),
+ .new_name = new_name,
};
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_RENAME],
@@ -519,14 +534,14 @@ nfs_proc_rmdir(struct inode *dir, struct qstr *name)
*/
static int
nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
- u64 cookie, struct page *page, unsigned int count, int plus)
+ u64 cookie, struct page **pages, unsigned int count, int plus)
{
struct inode *dir = dentry->d_inode;
struct nfs_readdirargs arg = {
.fh = NFS_FH(dir),
.cookie = cookie,
.count = count,
- .pages = &page,
+ .pages = pages,
};
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_READDIR],
@@ -705,6 +720,8 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.unlink_setup = nfs_proc_unlink_setup,
.unlink_done = nfs_proc_unlink_done,
.rename = nfs_proc_rename,
+ .rename_setup = nfs_proc_rename_setup,
+ .rename_done = nfs_proc_rename_done,
.link = nfs_proc_link,
.symlink = nfs_proc_symlink,
.mkdir = nfs_proc_mkdir,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 87adc274424..e4b62c6f5a6 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -25,6 +25,7 @@
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -46,7 +47,6 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
- p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
@@ -121,6 +121,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
len = nfs_page_length(page);
if (len == 0)
return nfs_return_empty_page(page);
+ pnfs_update_layout(inode, ctx, IOMODE_READ);
new = nfs_create_request(ctx, inode, page, 0, len);
if (IS_ERR(new)) {
unlock_page(page);
@@ -625,6 +626,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
+ pnfs_update_layout(inode, desc.ctx, IOMODE_READ);
if (rsize < PAGE_CACHE_SIZE)
nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
else
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f4cbf0c306c..3600ec700d5 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -100,6 +100,7 @@ enum {
Opt_addr, Opt_mountaddr, Opt_clientaddr,
Opt_lookupcache,
Opt_fscache_uniq,
+ Opt_local_lock,
/* Special mount options */
Opt_userspace, Opt_deprecated, Opt_sloppy,
@@ -171,6 +172,7 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_lookupcache, "lookupcache=%s" },
{ Opt_fscache_uniq, "fsc=%s" },
+ { Opt_local_lock, "local_lock=%s" },
{ Opt_err, NULL }
};
@@ -236,6 +238,22 @@ static match_table_t nfs_lookupcache_tokens = {
{ Opt_lookupcache_err, NULL }
};
+enum {
+ Opt_local_lock_all, Opt_local_lock_flock, Opt_local_lock_posix,
+ Opt_local_lock_none,
+
+ Opt_local_lock_err
+};
+
+static match_table_t nfs_local_lock_tokens = {
+ { Opt_local_lock_all, "all" },
+ { Opt_local_lock_flock, "flock" },
+ { Opt_local_lock_posix, "posix" },
+ { Opt_local_lock_none, "none" },
+
+ { Opt_local_lock_err, NULL }
+};
+
static void nfs_umount_begin(struct super_block *);
static int nfs_statfs(struct dentry *, struct kstatfs *);
@@ -622,6 +640,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
const struct proc_nfs_info *nfs_infop;
struct nfs_client *clp = nfss->nfs_client;
u32 version = clp->rpc_ops->version;
+ int local_flock, local_fcntl;
seq_printf(m, ",vers=%u", version);
seq_printf(m, ",rsize=%u", nfss->rsize);
@@ -670,6 +689,18 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
else
seq_printf(m, ",lookupcache=pos");
}
+
+ local_flock = nfss->flags & NFS_MOUNT_LOCAL_FLOCK;
+ local_fcntl = nfss->flags & NFS_MOUNT_LOCAL_FCNTL;
+
+ if (!local_flock && !local_fcntl)
+ seq_printf(m, ",local_lock=none");
+ else if (local_flock && local_fcntl)
+ seq_printf(m, ",local_lock=all");
+ else if (local_flock)
+ seq_printf(m, ",local_lock=flock");
+ else
+ seq_printf(m, ",local_lock=posix");
}
/*
@@ -1017,9 +1048,13 @@ static int nfs_parse_mount_options(char *raw,
break;
case Opt_lock:
mnt->flags &= ~NFS_MOUNT_NONLM;
+ mnt->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
break;
case Opt_nolock:
mnt->flags |= NFS_MOUNT_NONLM;
+ mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
break;
case Opt_v2:
mnt->flags &= ~NFS_MOUNT_VER3;
@@ -1420,6 +1455,34 @@ static int nfs_parse_mount_options(char *raw,
mnt->fscache_uniq = string;
mnt->options |= NFS_OPTION_FSCACHE;
break;
+ case Opt_local_lock:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ token = match_token(string, nfs_local_lock_tokens,
+ args);
+ kfree(string);
+ switch (token) {
+ case Opt_local_lock_all:
+ mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ break;
+ case Opt_local_lock_flock:
+ mnt->flags |= NFS_MOUNT_LOCAL_FLOCK;
+ break;
+ case Opt_local_lock_posix:
+ mnt->flags |= NFS_MOUNT_LOCAL_FCNTL;
+ break;
+ case Opt_local_lock_none:
+ mnt->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ break;
+ default:
+ dfprintk(MOUNT, "NFS: invalid "
+ "local_lock argument\n");
+ return 0;
+ };
+ break;
/*
* Special options
@@ -1825,6 +1888,12 @@ static int nfs_validate_mount_data(void *options,
if (!args->nfs_server.hostname)
goto out_nomem;
+ if (!(data->flags & NFS_MOUNT_NONLM))
+ args->flags &= ~(NFS_MOUNT_LOCAL_FLOCK|
+ NFS_MOUNT_LOCAL_FCNTL);
+ else
+ args->flags |= (NFS_MOUNT_LOCAL_FLOCK|
+ NFS_MOUNT_LOCAL_FCNTL);
/*
* The legacy version 6 binary mount data from userspace has a
* field used only to transport selinux information into the
@@ -2441,7 +2510,8 @@ static void nfs4_fill_super(struct super_block *sb)
static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
{
- args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3);
+ args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3|
+ NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
}
static int nfs4_validate_text_mount_data(void *options,
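
The local_lock= parsing above reduces to a two-bit mapping onto NFS_MOUNT_LOCAL_FLOCK and NFS_MOUNT_LOCAL_FCNTL; "nolock" now sets both bits and "lock" clears both, so the legacy behaviour is preserved. A compressed sketch of that mapping (illustration only; this helper is not part of the patch, which open-codes the switch):

	static unsigned int nfs_local_lock_flags(int token)
	{
		switch (token) {
		case Opt_local_lock_all:	/* "local_lock=all" == old "nolock" */
			return NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL;
		case Opt_local_lock_flock:	/* keep flock() locks local only */
			return NFS_MOUNT_LOCAL_FLOCK;
		case Opt_local_lock_posix:	/* keep fcntl()/POSIX locks local only */
			return NFS_MOUNT_LOCAL_FCNTL;
		default:			/* "local_lock=none": all locks go to the server */
			return 0;
		}
	}

nfs_show_mount_options() then reverses the same mapping when it prints local_lock= in /proc/mounts.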
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index ad4d2e787b2..978aaeb8a09 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -32,6 +32,7 @@ static ctl_table nfs_cb_sysctls[] = {
.extra1 = (int *)&nfs_set_port_min,
.extra2 = (int *)&nfs_set_port_max,
},
+#ifndef CONFIG_NFS_USE_NEW_IDMAPPER
{
.procname = "idmap_cache_timeout",
.data = &nfs_idmap_cache_timeout,
@@ -39,6 +40,7 @@ static ctl_table nfs_cb_sysctls[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+#endif /* CONFIG_NFS_USE_NEW_IDMAPPER */
#endif
{
.procname = "nfs_mountpoint_timeout",
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 2f84adaad42..9a16bad5d2e 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -13,9 +13,12 @@
#include <linux/nfs_fs.h>
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/namei.h>
#include "internal.h"
#include "nfs4_fs.h"
+#include "iostat.h"
+#include "delegation.h"
struct nfs_unlinkdata {
struct hlist_node list;
@@ -244,7 +247,7 @@ void nfs_unblock_sillyrename(struct dentry *dentry)
* @dir: parent directory of dentry
* @dentry: dentry to unlink
*/
-int
+static int
nfs_async_unlink(struct inode *dir, struct dentry *dentry)
{
struct nfs_unlinkdata *data;
@@ -259,7 +262,6 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry)
status = PTR_ERR(data->cred);
goto out_free;
}
- data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
data->res.dir_attr = &data->dir_attr;
status = -EBUSY;
@@ -303,3 +305,256 @@ nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
nfs_free_unlinkdata(data);
}
+
+/* Cancel a queued async unlink. Called when a sillyrename run fails. */
+static void
+nfs_cancel_async_unlink(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ struct nfs_unlinkdata *data = dentry->d_fsdata;
+
+ dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+ spin_unlock(&dentry->d_lock);
+ nfs_free_unlinkdata(data);
+ return;
+ }
+ spin_unlock(&dentry->d_lock);
+}
+
+struct nfs_renamedata {
+ struct nfs_renameargs args;
+ struct nfs_renameres res;
+ struct rpc_cred *cred;
+ struct inode *old_dir;
+ struct dentry *old_dentry;
+ struct nfs_fattr old_fattr;
+ struct inode *new_dir;
+ struct dentry *new_dentry;
+ struct nfs_fattr new_fattr;
+};
+
+/**
+ * nfs_async_rename_done - Sillyrename post-processing
+ * @task: rpc_task of the sillyrename
+ * @calldata: nfs_renamedata for the sillyrename
+ *
+ * Do the directory attribute updates and the d_move
+ */
+static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs_renamedata *data = calldata;
+ struct inode *old_dir = data->old_dir;
+ struct inode *new_dir = data->new_dir;
+
+ if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
+ nfs_restart_rpc(task, NFS_SERVER(old_dir)->nfs_client);
+ return;
+ }
+
+ if (task->tk_status != 0) {
+ nfs_cancel_async_unlink(data->old_dentry);
+ return;
+ }
+
+ nfs_set_verifier(data->old_dentry, nfs_save_change_attribute(old_dir));
+ d_move(data->old_dentry, data->new_dentry);
+}
+
+/**
+ * nfs_async_rename_release - Release the sillyrename data.
+ * @calldata: the struct nfs_renamedata to be released
+ */
+static void nfs_async_rename_release(void *calldata)
+{
+ struct nfs_renamedata *data = calldata;
+ struct super_block *sb = data->old_dir->i_sb;
+
+ if (data->old_dentry->d_inode)
+ nfs_mark_for_revalidate(data->old_dentry->d_inode);
+
+ dput(data->old_dentry);
+ dput(data->new_dentry);
+ iput(data->old_dir);
+ iput(data->new_dir);
+ nfs_sb_deactive(sb);
+ put_rpccred(data->cred);
+ kfree(data);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+static void nfs_rename_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_renamedata *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->old_dir);
+
+ if (nfs4_setup_sequence(server, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static const struct rpc_call_ops nfs_rename_ops = {
+ .rpc_call_done = nfs_async_rename_done,
+ .rpc_release = nfs_async_rename_release,
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_rename_prepare,
+#endif /* CONFIG_NFS_V4_1 */
+};
+
+/**
+ * nfs_async_rename - perform an asynchronous rename operation
+ * @old_dir: directory that currently holds the dentry to be renamed
+ * @new_dir: target directory for the rename
+ * @old_dentry: original dentry to be renamed
+ * @new_dentry: dentry to which the old_dentry should be renamed
+ *
+ * It's expected that valid references to the dentries and inodes are held
+ */
+static struct rpc_task *
+nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
+ struct dentry *old_dentry, struct dentry *new_dentry)
+{
+ struct nfs_renamedata *data;
+ struct rpc_message msg = { };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_message = &msg,
+ .callback_ops = &nfs_rename_ops,
+ .workqueue = nfsiod_workqueue,
+ .rpc_client = NFS_CLIENT(old_dir),
+ .flags = RPC_TASK_ASYNC,
+ };
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return ERR_PTR(-ENOMEM);
+ task_setup_data.callback_data = data,
+
+ data->cred = rpc_lookup_cred();
+ if (IS_ERR(data->cred)) {
+ struct rpc_task *task = ERR_CAST(data->cred);
+ kfree(data);
+ return task;
+ }
+
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ msg.rpc_cred = data->cred;
+
+ /* set up nfs_renamedata */
+ data->old_dir = old_dir;
+ atomic_inc(&old_dir->i_count);
+ data->new_dir = new_dir;
+ atomic_inc(&new_dir->i_count);
+ data->old_dentry = dget(old_dentry);
+ data->new_dentry = dget(new_dentry);
+ nfs_fattr_init(&data->old_fattr);
+ nfs_fattr_init(&data->new_fattr);
+
+ /* set up nfs_renameargs */
+ data->args.old_dir = NFS_FH(old_dir);
+ data->args.old_name = &old_dentry->d_name;
+ data->args.new_dir = NFS_FH(new_dir);
+ data->args.new_name = &new_dentry->d_name;
+
+ /* set up nfs_renameres */
+ data->res.old_fattr = &data->old_fattr;
+ data->res.new_fattr = &data->new_fattr;
+
+ nfs_sb_active(old_dir->i_sb);
+
+ NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dir);
+
+ return rpc_run_task(&task_setup_data);
+}
+
+/**
+ * nfs_sillyrename - Perform a silly-rename of a dentry
+ * @dir: inode of directory that contains dentry
+ * @dentry: dentry to be sillyrenamed
+ *
+ * NFSv2/3 is stateless and the server doesn't know when the client is
+ * holding a file open. To prevent application problems when a file is
+ * unlinked while it's still open, the client performs a "silly-rename".
+ * That is, it renames the file to a hidden file in the same directory,
+ * and only performs the unlink once the last reference to it is put.
+ *
+ * The final cleanup is done during dentry_iput.
+ */
+int
+nfs_sillyrename(struct inode *dir, struct dentry *dentry)
+{
+ static unsigned int sillycounter;
+ const int fileidsize = sizeof(NFS_FILEID(dentry->d_inode))*2;
+ const int countersize = sizeof(sillycounter)*2;
+ const int slen = sizeof(".nfs")+fileidsize+countersize-1;
+ char silly[slen+1];
+ struct dentry *sdentry;
+ struct rpc_task *task;
+ int error = -EIO;
+
+ dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ atomic_read(&dentry->d_count));
+ nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
+
+ /*
+ * We don't allow a dentry to be silly-renamed twice.
+ */
+ error = -EBUSY;
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ goto out;
+
+ sprintf(silly, ".nfs%*.*Lx",
+ fileidsize, fileidsize,
+ (unsigned long long)NFS_FILEID(dentry->d_inode));
+
+ /* Return delegation in anticipation of the rename */
+ nfs_inode_return_delegation(dentry->d_inode);
+
+ sdentry = NULL;
+ do {
+ char *suffix = silly + slen - countersize;
+
+ dput(sdentry);
+ sillycounter++;
+ sprintf(suffix, "%*.*x", countersize, countersize, sillycounter);
+
+ dfprintk(VFS, "NFS: trying to rename %s to %s\n",
+ dentry->d_name.name, silly);
+
+ sdentry = lookup_one_len(silly, dentry->d_parent, slen);
+ /*
+ * N.B. Better to return EBUSY here ... it could be
+ * dangerous to delete the file while it's in use.
+ */
+ if (IS_ERR(sdentry))
+ goto out;
+ } while (sdentry->d_inode != NULL); /* need negative lookup */
+
+ /* queue unlink first. Can't do this from rpc_release as it
+ * has to allocate memory
+ */
+ error = nfs_async_unlink(dir, dentry);
+ if (error)
+ goto out_dput;
+
+ /* run the rename task, undo unlink if it fails */
+ task = nfs_async_rename(dir, dir, dentry, sdentry);
+ if (IS_ERR(task)) {
+ error = -EBUSY;
+ nfs_cancel_async_unlink(dentry);
+ goto out_dput;
+ }
+
+ /* wait for the RPC task to complete, unless a SIGKILL intervenes */
+ error = rpc_wait_for_completion_task(task);
+ if (error == 0)
+ error = task->tk_status;
+ rpc_put_task(task);
+out_dput:
+ dput(sdentry);
+out:
+ return error;
+}
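
For a sense of the names nfs_sillyrename() generates: with a 64-bit fileid (16 hex digits) and a 32-bit sillycounter (8 hex digits), the buffer length works out as below (illustration only; the exact widths depend on the fileid and counter types for the build):

	/*
	 * slen = sizeof(".nfs") + fileidsize + countersize - 1
	 *      = 5 + 16 + 8 - 1 = 28
	 *
	 * so the candidate names look like
	 *      .nfs00000000001a2b3c0000002a
	 * ".nfs" + zero-padded fileid + zero-padded counter, with the
	 * counter bumped until lookup_one_len() returns a negative dentry.
	 */

If the rename RPC later fails, nfs_async_rename_done() calls nfs_cancel_async_unlink() so the queued unlink is dropped and the original name is left alone.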
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 874972d9427..605e292501f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -55,7 +55,6 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
- p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
return p;
}
@@ -75,7 +74,6 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
- p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
@@ -1433,15 +1431,17 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
int flags = FLUSH_SYNC;
int ret = 0;
- /* Don't commit yet if this is a non-blocking flush and there are
- * lots of outstanding writes for this mapping.
- */
- if (wbc->sync_mode == WB_SYNC_NONE &&
- nfsi->ncommit <= (nfsi->npages >> 1))
- goto out_mark_dirty;
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ /* Don't commit yet if this is a non-blocking flush and there
+ * are a lot of outstanding writes for this mapping.
+ */
+ if (nfsi->ncommit <= (nfsi->npages >> 1))
+ goto out_mark_dirty;
- if (wbc->nonblocking || wbc->for_background)
+ /* don't wait for the COMMIT response */
flags = 0;
+ }
+
ret = nfs_commit_inode(inode, flags);
if (ret >= 0) {
if (wbc->sync_mode == WB_SYNC_NONE) {
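
The restructured check keeps the old threshold but narrows the "don't wait for COMMIT" behaviour to WB_SYNC_NONE only. As a rough worked example: an inode with npages = 1000 cached pages and ncommit = 400 pages awaiting COMMIT is below the npages/2 = 500 threshold, so a background (WB_SYNC_NONE) flush just re-marks the inode dirty; once ncommit exceeds 500 the COMMIT is sent with flags = 0, i.e. without waiting for the reply, while WB_SYNC_ALL writeback still commits synchronously with FLUSH_SYNC.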
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 4264377552e..31a78fce473 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -2,6 +2,7 @@ config NFSD
tristate "NFS server support"
depends on INET
depends on FILE_LOCKING
+ depends on BKL # fix as soon as lockd is done
select LOCKD
select SUNRPC
select EXPORTFS
@@ -28,6 +29,18 @@ config NFSD
If unsure, say N.
+config NFSD_DEPRECATED
+ bool "Include support for deprecated syscall interface to NFSD"
+ depends on NFSD
+ default y
+ help
+ The syscall interface to nfsd was obsoleted in 2.6.0 by a new
+ filesystem based interface. The old interface is due for removal
+ in 2.6.40. If you wish to remove the interface before then,
+ say N.
+
+ If unsure, say Y.

+
config NFSD_V2_ACL
bool
depends on NFSD
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c2a4f71d87d..c0fcb7ab7f6 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -28,9 +28,6 @@
typedef struct auth_domain svc_client;
typedef struct svc_export svc_export;
-static void exp_do_unexport(svc_export *unexp);
-static int exp_verify_string(char *cp, int max);
-
/*
* We have two caches.
* One maps client+vfsmnt+dentry to export options - the export map
@@ -802,6 +799,7 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
return ek;
}
+#ifdef CONFIG_NFSD_DEPRECATED
static int exp_set_key(svc_client *clp, int fsid_type, u32 *fsidv,
struct svc_export *exp)
{
@@ -852,6 +850,7 @@ exp_get_fsid_key(svc_client *clp, int fsid)
return exp_find_key(clp, FSID_NUM, fsidv, NULL);
}
+#endif
static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
struct cache_req *reqp)
@@ -893,6 +892,7 @@ static struct svc_export *exp_parent(svc_client *clp, struct path *path)
return exp;
}
+#ifdef CONFIG_NFSD_DEPRECATED
/*
* Hashtable locking. Write locks are placed only by user processes
* wanting to modify export information.
@@ -925,6 +925,19 @@ exp_writeunlock(void)
{
up_write(&hash_sem);
}
+#else
+
+/* hash_sem not needed once deprecated interface is removed */
+void exp_readlock(void) {}
+static inline void exp_writelock(void){}
+void exp_readunlock(void) {}
+static inline void exp_writeunlock(void){}
+
+#endif
+
+#ifdef CONFIG_NFSD_DEPRECATED
+static void exp_do_unexport(svc_export *unexp);
+static int exp_verify_string(char *cp, int max);
static void exp_fsid_unhash(struct svc_export *exp)
{
@@ -935,10 +948,9 @@ static void exp_fsid_unhash(struct svc_export *exp)
ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid);
if (!IS_ERR(ek)) {
- ek->h.expiry_time = get_seconds()-1;
+ sunrpc_invalidate(&ek->h, &svc_expkey_cache);
cache_put(&ek->h, &svc_expkey_cache);
}
- svc_expkey_cache.nextcheck = get_seconds();
}
static int exp_fsid_hash(svc_client *clp, struct svc_export *exp)
@@ -973,10 +985,9 @@ static void exp_unhash(struct svc_export *exp)
ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
if (!IS_ERR(ek)) {
- ek->h.expiry_time = get_seconds()-1;
+ sunrpc_invalidate(&ek->h, &svc_expkey_cache);
cache_put(&ek->h, &svc_expkey_cache);
}
- svc_expkey_cache.nextcheck = get_seconds();
}
/*
@@ -1097,8 +1108,7 @@ out:
static void
exp_do_unexport(svc_export *unexp)
{
- unexp->h.expiry_time = get_seconds()-1;
- svc_export_cache.nextcheck = get_seconds();
+ sunrpc_invalidate(&unexp->h, &svc_export_cache);
exp_unhash(unexp);
exp_fsid_unhash(unexp);
}
@@ -1150,6 +1160,7 @@ out_unlock:
exp_writeunlock();
return err;
}
+#endif /* CONFIG_NFSD_DEPRECATED */
/*
* Obtain the root fh on behalf of a client.
@@ -1459,25 +1470,43 @@ static void show_secinfo_flags(struct seq_file *m, int flags)
show_expflags(m, flags, NFSEXP_SECINFO_FLAGS);
}
+static bool secinfo_flags_equal(int f, int g)
+{
+ f &= NFSEXP_SECINFO_FLAGS;
+ g &= NFSEXP_SECINFO_FLAGS;
+ return f == g;
+}
+
+static int show_secinfo_run(struct seq_file *m, struct exp_flavor_info **fp, struct exp_flavor_info *end)
+{
+ int flags;
+
+ flags = (*fp)->flags;
+ seq_printf(m, ",sec=%d", (*fp)->pseudoflavor);
+ (*fp)++;
+ while (*fp != end && secinfo_flags_equal(flags, (*fp)->flags)) {
+ seq_printf(m, ":%d", (*fp)->pseudoflavor);
+ (*fp)++;
+ }
+ return flags;
+}
+
static void show_secinfo(struct seq_file *m, struct svc_export *exp)
{
struct exp_flavor_info *f;
struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
- int lastflags = 0, first = 0;
+ int flags;
if (exp->ex_nflavors == 0)
return;
- for (f = exp->ex_flavors; f < end; f++) {
- if (first || f->flags != lastflags) {
- if (!first)
- show_secinfo_flags(m, lastflags);
- seq_printf(m, ",sec=%d", f->pseudoflavor);
- lastflags = f->flags;
- } else {
- seq_printf(m, ":%d", f->pseudoflavor);
- }
+ f = exp->ex_flavors;
+ flags = show_secinfo_run(m, &f, end);
+ if (!secinfo_flags_equal(flags, exp->ex_flags))
+ show_secinfo_flags(m, flags);
+ while (f != end) {
+ flags = show_secinfo_run(m, &f, end);
+ show_secinfo_flags(m, flags);
}
- show_secinfo_flags(m, lastflags);
}
static void exp_flags(struct seq_file *m, int flag, int fsid,
@@ -1532,6 +1561,7 @@ const struct seq_operations nfs_exports_op = {
.show = e_show,
};
+#ifdef CONFIG_NFSD_DEPRECATED
/*
* Add or modify a client.
* Change requests may involve the list of host addresses. The list of
@@ -1563,7 +1593,7 @@ exp_addclient(struct nfsctl_client *ncp)
/* Insert client into hashtable. */
for (i = 0; i < ncp->cl_naddr; i++) {
ipv6_addr_set_v4mapped(ncp->cl_addrlist[i].s_addr, &addr6);
- auth_unix_add_addr(&addr6, dom);
+ auth_unix_add_addr(&init_net, &addr6, dom);
}
auth_unix_forget_old(dom);
auth_domain_put(dom);
@@ -1621,6 +1651,7 @@ exp_verify_string(char *cp, int max)
printk(KERN_NOTICE "nfsd: couldn't validate string %s\n", cp);
return 0;
}
+#endif /* CONFIG_NFSD_DEPRECATED */
/*
* Initialize the exports module.
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 988cbb3a19b..143da2eecd7 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -41,7 +41,6 @@
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
-#define NFS4_STATEID_SIZE 16
/* Index of predefined Linux callback client operations */
@@ -248,10 +247,11 @@ encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
}
static void
-encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args,
+encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
+ struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
if (hdr->minorversion == 0)
return;
@@ -259,8 +259,8 @@ encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args,
RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);
WRITE32(OP_CB_SEQUENCE);
- WRITEMEM(args->cbs_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN);
- WRITE32(args->cbs_clp->cl_cb_seq_nr);
+ WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
+ WRITE32(ses->se_cb_seq_nr);
WRITE32(0); /* slotid, always 0 */
WRITE32(0); /* highest slotid always 0 */
WRITE32(0); /* cachethis always 0 */
@@ -280,18 +280,18 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
- struct nfs4_rpc_args *rpc_args)
+ struct nfsd4_callback *cb)
{
struct xdr_stream xdr;
- struct nfs4_delegation *args = rpc_args->args_op;
+ struct nfs4_delegation *args = cb->cb_op;
struct nfs4_cb_compound_hdr hdr = {
- .ident = args->dl_ident,
- .minorversion = rpc_args->args_seq.cbs_minorversion,
+ .ident = cb->cb_clp->cl_cb_ident,
+ .minorversion = cb->cb_minorversion,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_cb_compound_hdr(&xdr, &hdr);
- encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
+ encode_cb_sequence(&xdr, cb, &hdr);
encode_cb_recall(&xdr, args, &hdr);
encode_cb_nops(&hdr);
return 0;
@@ -339,15 +339,16 @@ decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
* with a single slot.
*/
static int
-decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res,
+decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
struct rpc_rqst *rqstp)
{
+ struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
struct nfs4_sessionid id;
int status;
u32 dummy;
__be32 *p;
- if (res->cbs_minorversion == 0)
+ if (cb->cb_minorversion == 0)
return 0;
status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
@@ -363,13 +364,12 @@ decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res,
READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
- if (memcmp(id.data, res->cbs_clp->cl_sessionid.data,
- NFS4_MAX_SESSIONID_LEN)) {
+ if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
dprintk("%s Invalid session id\n", __func__);
goto out;
}
READ32(dummy);
- if (dummy != res->cbs_clp->cl_cb_seq_nr) {
+ if (dummy != ses->se_cb_seq_nr) {
dprintk("%s Invalid sequence number\n", __func__);
goto out;
}
@@ -393,7 +393,7 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
- struct nfsd4_cb_sequence *seq)
+ struct nfsd4_callback *cb)
{
struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr;
@@ -403,8 +403,8 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
status = decode_cb_compound_hdr(&xdr, &hdr);
if (status)
goto out;
- if (seq) {
- status = decode_cb_sequence(&xdr, seq, rqstp);
+ if (cb) {
+ status = decode_cb_sequence(&xdr, cb, rqstp);
if (status)
goto out;
}
@@ -473,30 +473,34 @@ static int max_cb_time(void)
/* Reference counting, callback cleanup, etc., all look racy as heck.
* And why is cl_cb_set an atomic? */
-int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
+int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
struct rpc_timeout timeparms = {
.to_initval = max_cb_time(),
.to_retries = 0,
};
struct rpc_create_args args = {
- .protocol = XPRT_TRANSPORT_TCP,
- .address = (struct sockaddr *) &cb->cb_addr,
- .addrsize = cb->cb_addrlen,
+ .net = &init_net,
+ .address = (struct sockaddr *) &conn->cb_addr,
+ .addrsize = conn->cb_addrlen,
.timeout = &timeparms,
.program = &cb_program,
- .prognumber = cb->cb_prog,
.version = 0,
.authflavor = clp->cl_flavor,
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
- .client_name = clp->cl_principal,
};
struct rpc_clnt *client;
- if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
- return -EINVAL;
- if (cb->cb_minorversion) {
- args.bc_xprt = cb->cb_xprt;
+ if (clp->cl_minorversion == 0) {
+ if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
+ return -EINVAL;
+ args.client_name = clp->cl_principal;
+ args.prognumber = conn->cb_prog,
+ args.protocol = XPRT_TRANSPORT_TCP;
+ clp->cl_cb_ident = conn->cb_ident;
+ } else {
+ args.bc_xprt = conn->cb_xprt;
+ args.prognumber = clp->cl_cb_session->se_cb_prog;
args.protocol = XPRT_TRANSPORT_BC_TCP;
}
/* Create RPC client */
@@ -506,7 +510,7 @@ int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
PTR_ERR(client));
return PTR_ERR(client);
}
- nfsd4_set_callback_client(clp, client);
+ clp->cl_cb_client = client;
return 0;
}
@@ -519,7 +523,7 @@ static void warn_no_callback_path(struct nfs4_client *clp, int reason)
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
- struct nfs4_client *clp = calldata;
+ struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
if (task->tk_status)
warn_no_callback_path(clp, task->tk_status);
@@ -528,6 +532,8 @@ static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
}
static const struct rpc_call_ops nfsd4_cb_probe_ops = {
+ /* XXX: release method to ensure we set the cb channel down if
+ * necessary on early failure? */
.rpc_call_done = nfsd4_cb_probe_done,
};
@@ -543,38 +549,42 @@ int set_callback_cred(void)
return 0;
}
+static struct workqueue_struct *callback_wq;
-void do_probe_callback(struct nfs4_client *clp)
+static void do_probe_callback(struct nfs4_client *clp)
{
- struct rpc_message msg = {
- .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
- .rpc_argp = clp,
- .rpc_cred = callback_cred
- };
- int status;
+ struct nfsd4_callback *cb = &clp->cl_cb_null;
- status = rpc_call_async(clp->cl_cb_client, &msg,
- RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
- &nfsd4_cb_probe_ops, (void *)clp);
- if (status)
- warn_no_callback_path(clp, status);
+ cb->cb_op = NULL;
+ cb->cb_clp = clp;
+
+ cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
+ cb->cb_msg.rpc_argp = NULL;
+ cb->cb_msg.rpc_resp = NULL;
+ cb->cb_msg.rpc_cred = callback_cred;
+
+ cb->cb_ops = &nfsd4_cb_probe_ops;
+
+ queue_work(callback_wq, &cb->cb_work);
}
/*
- * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
+ * Poke the callback thread to process any updates to the callback
+ * parameters, and send a null probe.
*/
-void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
+void nfsd4_probe_callback(struct nfs4_client *clp)
{
- int status;
+ set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
+ do_probe_callback(clp);
+}
+void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
+{
BUG_ON(atomic_read(&clp->cl_cb_set));
- status = setup_callback_client(clp, cb);
- if (status) {
- warn_no_callback_path(clp, status);
- return;
- }
- do_probe_callback(clp);
+ spin_lock(&clp->cl_lock);
+ memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
+ spin_unlock(&clp->cl_lock);
}
/*
@@ -585,8 +595,7 @@ void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
struct rpc_task *task)
{
- struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
- u32 *ptr = (u32 *)clp->cl_sessionid.data;
+ u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
int status = 0;
dprintk("%s: %u:%u:%u:%u\n", __func__,
@@ -598,14 +607,6 @@ static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
status = -EAGAIN;
goto out;
}
-
- /*
- * We'll need the clp during XDR encoding and decoding,
- * and the sequence during decoding to verify the reply
- */
- args->args_seq.cbs_clp = clp;
- task->tk_msg.rpc_resp = &args->args_seq;
-
out:
dprintk("%s status=%d\n", __func__, status);
return status;
@@ -617,13 +618,13 @@ out:
*/
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs4_delegation *dp = calldata;
+ struct nfsd4_callback *cb = calldata;
+ struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
- struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
- u32 minorversion = clp->cl_cb_conn.cb_minorversion;
+ u32 minorversion = clp->cl_minorversion;
int status = 0;
- args->args_seq.cbs_minorversion = minorversion;
+ cb->cb_minorversion = minorversion;
if (minorversion) {
status = nfsd41_cb_setup_sequence(clp, task);
if (status) {
@@ -640,19 +641,20 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
- struct nfs4_delegation *dp = calldata;
+ struct nfsd4_callback *cb = calldata;
+ struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
dprintk("%s: minorversion=%d\n", __func__,
- clp->cl_cb_conn.cb_minorversion);
+ clp->cl_minorversion);
- if (clp->cl_cb_conn.cb_minorversion) {
+ if (clp->cl_minorversion) {
/* No need for lock, access serialized in nfsd4_cb_prepare */
- ++clp->cl_cb_seq_nr;
+ ++clp->cl_cb_session->se_cb_seq_nr;
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_wake_up_next(&clp->cl_cb_waitq);
dprintk("%s: freed slot, new seqid=%d\n", __func__,
- clp->cl_cb_seq_nr);
+ clp->cl_cb_session->se_cb_seq_nr);
/* We're done looking into the sequence information */
task->tk_msg.rpc_resp = NULL;
@@ -662,7 +664,8 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
- struct nfs4_delegation *dp = calldata;
+ struct nfsd4_callback *cb = calldata;
+ struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
struct nfs4_client *clp = dp->dl_client;
struct rpc_clnt *current_rpc_client = clp->cl_cb_client;
@@ -707,7 +710,8 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
static void nfsd4_cb_recall_release(void *calldata)
{
- struct nfs4_delegation *dp = calldata;
+ struct nfsd4_callback *cb = calldata;
+ struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
nfs4_put_delegation(dp);
}
@@ -718,8 +722,6 @@ static const struct rpc_call_ops nfsd4_cb_recall_ops = {
.rpc_release = nfsd4_cb_recall_release,
};
-static struct workqueue_struct *callback_wq;
-
int nfsd4_create_callback_queue(void)
{
callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
@@ -734,57 +736,88 @@ void nfsd4_destroy_callback_queue(void)
}
/* must be called under the state lock */
-void nfsd4_set_callback_client(struct nfs4_client *clp, struct rpc_clnt *new)
+void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
- struct rpc_clnt *old = clp->cl_cb_client;
-
- clp->cl_cb_client = new;
+ set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
/*
- * After this, any work that saw the old value of cl_cb_client will
- * be gone:
+ * Note this won't actually result in a null callback;
+ * instead, nfsd4_do_callback_rpc() will detect the killed
+ * client, destroy the rpc client, and stop:
*/
+ do_probe_callback(clp);
flush_workqueue(callback_wq);
- /* So we can safely shut it down: */
- if (old)
- rpc_shutdown_client(old);
}
-/*
- * called with dp->dl_count inc'ed.
- */
-static void _nfsd4_cb_recall(struct nfs4_delegation *dp)
+void nfsd4_release_cb(struct nfsd4_callback *cb)
{
- struct nfs4_client *clp = dp->dl_client;
- struct rpc_clnt *clnt = clp->cl_cb_client;
- struct nfs4_rpc_args *args = &dp->dl_recall.cb_args;
- struct rpc_message msg = {
- .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
- .rpc_cred = callback_cred
- };
+ if (cb->cb_ops->rpc_release)
+ cb->cb_ops->rpc_release(cb);
+}
- if (clnt == NULL) {
- nfs4_put_delegation(dp);
- return; /* Client is shutting down; give up. */
+void nfsd4_process_cb_update(struct nfsd4_callback *cb)
+{
+ struct nfs4_cb_conn conn;
+ struct nfs4_client *clp = cb->cb_clp;
+ int err;
+
+ /*
+ * This is either an update, or the client dying; in either case,
+ * kill the old client:
+ */
+ if (clp->cl_cb_client) {
+ rpc_shutdown_client(clp->cl_cb_client);
+ clp->cl_cb_client = NULL;
}
+ if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
+ return;
+ spin_lock(&clp->cl_lock);
+ /*
+ * Only serialized callback code is allowed to clear these
+ * flags; main nfsd code can only set them:
+ */
+ BUG_ON(!clp->cl_cb_flags);
+ clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
+ memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
+ spin_unlock(&clp->cl_lock);
- args->args_op = dp;
- msg.rpc_argp = args;
- dp->dl_retries = 1;
- rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &nfsd4_cb_recall_ops, dp);
+ err = setup_callback_client(clp, &conn);
+ if (err)
+ warn_no_callback_path(clp, err);
}
void nfsd4_do_callback_rpc(struct work_struct *w)
{
- /* XXX: for now, just send off delegation recall. */
- /* In future, generalize to handle any sort of callback. */
- struct nfsd4_callback *c = container_of(w, struct nfsd4_callback, cb_work);
- struct nfs4_delegation *dp = container_of(c, struct nfs4_delegation, dl_recall);
+ struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
+ struct nfs4_client *clp = cb->cb_clp;
+ struct rpc_clnt *clnt;
- _nfsd4_cb_recall(dp);
-}
+ if (clp->cl_cb_flags)
+ nfsd4_process_cb_update(cb);
+ clnt = clp->cl_cb_client;
+ if (!clnt) {
+ /* Callback channel broken, or client killed; give up: */
+ nfsd4_release_cb(cb);
+ return;
+ }
+ rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
+ cb->cb_ops, cb);
+}
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
+ struct nfsd4_callback *cb = &dp->dl_recall;
+
+ dp->dl_retries = 1;
+ cb->cb_op = dp;
+ cb->cb_clp = dp->dl_client;
+ cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
+ cb->cb_msg.rpc_argp = cb;
+ cb->cb_msg.rpc_resp = cb;
+ cb->cb_msg.rpc_cred = callback_cred;
+
+ cb->cb_ops = &nfsd4_cb_recall_ops;
+ dp->dl_retries = 1;
+
queue_work(callback_wq, &dp->dl_recall.cb_work);
}
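
With this change every callback goes through the same machinery: fill in the struct nfsd4_callback embedded in the owning object, point cb_msg and cb_ops at the operation-specific pieces, and queue cb_work on callback_wq, where nfsd4_do_callback_rpc() handles connection updates, client teardown and the actual rpc_call_async(). A sketch of what a future callback type would do (illustration only; the procedure and call_ops shown are just the existing probe ones):

	static void nfsd4_cb_example(struct nfs4_client *clp, struct nfsd4_callback *cb)
	{
		cb->cb_op = NULL;		/* operation-private payload, if any */
		cb->cb_clp = clp;

		cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
		cb->cb_msg.rpc_argp = cb;
		cb->cb_msg.rpc_resp = cb;
		cb->cb_msg.rpc_cred = callback_cred;

		cb->cb_ops = &nfsd4_cb_probe_ops;	/* per-operation rpc_call_ops */

		queue_work(callback_wq, &cb->cb_work);	/* runs nfsd4_do_callback_rpc */
	}

nfsd4_cb_recall() above is exactly this pattern with the CB_RECALL procedure and nfsd4_cb_recall_ops.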
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index c78dbf49342..f0695e815f0 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -482,109 +482,26 @@ nfsd_idmap_shutdown(void)
cache_unregister(&nametoid_cache);
}
-/*
- * Deferred request handling
- */
-
-struct idmap_defer_req {
- struct cache_req req;
- struct cache_deferred_req deferred_req;
- wait_queue_head_t waitq;
- atomic_t count;
-};
-
-static inline void
-put_mdr(struct idmap_defer_req *mdr)
-{
- if (atomic_dec_and_test(&mdr->count))
- kfree(mdr);
-}
-
-static inline void
-get_mdr(struct idmap_defer_req *mdr)
-{
- atomic_inc(&mdr->count);
-}
-
-static void
-idmap_revisit(struct cache_deferred_req *dreq, int toomany)
-{
- struct idmap_defer_req *mdr =
- container_of(dreq, struct idmap_defer_req, deferred_req);
-
- wake_up(&mdr->waitq);
- put_mdr(mdr);
-}
-
-static struct cache_deferred_req *
-idmap_defer(struct cache_req *req)
-{
- struct idmap_defer_req *mdr =
- container_of(req, struct idmap_defer_req, req);
-
- mdr->deferred_req.revisit = idmap_revisit;
- get_mdr(mdr);
- return (&mdr->deferred_req);
-}
-
-static inline int
-do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
- struct cache_detail *detail, struct ent **item,
- struct idmap_defer_req *mdr)
-{
- *item = lookup_fn(key);
- if (!*item)
- return -ENOMEM;
- return cache_check(detail, &(*item)->h, &mdr->req);
-}
-
-static inline int
-do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
- struct ent *key, struct cache_detail *detail,
- struct ent **item)
-{
- int ret = -ENOMEM;
-
- *item = lookup_fn(key);
- if (!*item)
- goto out_err;
- ret = -ETIMEDOUT;
- if (!test_bit(CACHE_VALID, &(*item)->h.flags)
- || (*item)->h.expiry_time < get_seconds()
- || detail->flush_time > (*item)->h.last_refresh)
- goto out_put;
- ret = -ENOENT;
- if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
- goto out_put;
- return 0;
-out_put:
- cache_put(&(*item)->h, detail);
-out_err:
- *item = NULL;
- return ret;
-}
-
static int
idmap_lookup(struct svc_rqst *rqstp,
struct ent *(*lookup_fn)(struct ent *), struct ent *key,
struct cache_detail *detail, struct ent **item)
{
- struct idmap_defer_req *mdr;
int ret;
- mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
- if (!mdr)
+ *item = lookup_fn(key);
+ if (!*item)
return -ENOMEM;
- atomic_set(&mdr->count, 1);
- init_waitqueue_head(&mdr->waitq);
- mdr->req.defer = idmap_defer;
- ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
- if (ret == -EAGAIN) {
- wait_event_interruptible_timeout(mdr->waitq,
- test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
- ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
+ retry:
+ ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);
+
+ if (ret == -ETIMEDOUT) {
+ struct ent *prev_item = *item;
+ *item = lookup_fn(key);
+ if (*item != prev_item)
+ goto retry;
+ cache_put(&(*item)->h, detail);
}
- put_mdr(mdr);
return ret;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 59ec449b0c7..0cdfd022bb7 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1031,8 +1031,11 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
resp->cstate.session = NULL;
fh_init(&resp->cstate.current_fh, NFS4_FHSIZE);
fh_init(&resp->cstate.save_fh, NFS4_FHSIZE);
- /* Use the deferral mechanism only for NFSv4.0 compounds */
- rqstp->rq_usedeferral = (args->minorversion == 0);
+ /*
+ * Don't use the deferral mechanism for NFSv4; compounds make it
+ * too hard to avoid non-idempotency problems.
+ */
+ rqstp->rq_usedeferral = 0;
/*
* According to RFC3010, this takes precedence over all other errors.
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index cf0d2ffb3c8..9019e8ec9dc 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -33,7 +33,7 @@
*/
#include <linux/file.h>
-#include <linux/smp_lock.h>
+#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
@@ -207,7 +207,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
{
struct nfs4_delegation *dp;
struct nfs4_file *fp = stp->st_file;
- struct nfs4_cb_conn *cb = &stp->st_stateowner->so_client->cl_cb_conn;
dprintk("NFSD alloc_init_deleg\n");
/*
@@ -234,7 +233,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
nfs4_file_get_access(fp, O_RDONLY);
dp->dl_flock = NULL;
dp->dl_type = type;
- dp->dl_ident = cb->cb_ident;
dp->dl_stateid.si_boot = boot_time;
dp->dl_stateid.si_stateownerid = current_delegid++;
dp->dl_stateid.si_fileid = 0;
@@ -535,171 +533,258 @@ gen_sessionid(struct nfsd4_session *ses)
*/
#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
+static void
+free_session_slots(struct nfsd4_session *ses)
+{
+ int i;
+
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++)
+ kfree(ses->se_slots[i]);
+}
+
/*
- * Give the client the number of ca_maxresponsesize_cached slots it
- * requests, of size bounded by NFSD_SLOT_CACHE_SIZE,
- * NFSD_MAX_MEM_PER_SESSION, and nfsd_drc_max_mem. Do not allow more
- * than NFSD_MAX_SLOTS_PER_SESSION.
- *
- * If we run out of reserved DRC memory we should (up to a point)
+ * We don't actually need to cache the rpc and session headers, so we
+ * can allocate a little less for each slot:
+ */
+static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
+{
+ return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+}
+
+static int nfsd4_sanitize_slot_size(u32 size)
+{
+ size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
+ size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);
+
+ return size;
+}
+
+/*
+ * XXX: If we run out of reserved DRC memory we could (up to a point)
* re-negotiate active sessions and reduce their slot usage to make
* room for new connections. For now we just fail the create session.
*/
-static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan)
+static int nfsd4_get_drc_mem(int slotsize, u32 num)
{
- int mem, size = fchan->maxresp_cached;
+ int avail;
- if (fchan->maxreqs < 1)
- return nfserr_inval;
+ num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
- if (size < NFSD_MIN_HDR_SEQ_SZ)
- size = NFSD_MIN_HDR_SEQ_SZ;
- size -= NFSD_MIN_HDR_SEQ_SZ;
- if (size > NFSD_SLOT_CACHE_SIZE)
- size = NFSD_SLOT_CACHE_SIZE;
-
- /* bound the maxreqs by NFSD_MAX_MEM_PER_SESSION */
- mem = fchan->maxreqs * size;
- if (mem > NFSD_MAX_MEM_PER_SESSION) {
- fchan->maxreqs = NFSD_MAX_MEM_PER_SESSION / size;
- if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
- fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
- mem = fchan->maxreqs * size;
- }
+ spin_lock(&nfsd_drc_lock);
+ avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
+ nfsd_drc_max_mem - nfsd_drc_mem_used);
+ num = min_t(int, num, avail / slotsize);
+ nfsd_drc_mem_used += num * slotsize;
+ spin_unlock(&nfsd_drc_lock);
+ return num;
+}
+
+static void nfsd4_put_drc_mem(int slotsize, int num)
+{
spin_lock(&nfsd_drc_lock);
- /* bound the total session drc memory ussage */
- if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) {
- fchan->maxreqs = (nfsd_drc_max_mem - nfsd_drc_mem_used) / size;
- mem = fchan->maxreqs * size;
- }
- nfsd_drc_mem_used += mem;
+ nfsd_drc_mem_used -= slotsize * num;
spin_unlock(&nfsd_drc_lock);
+}
- if (fchan->maxreqs == 0)
- return nfserr_jukebox;
+static struct nfsd4_session *alloc_session(int slotsize, int numslots)
+{
+ struct nfsd4_session *new;
+ int mem, i;
- fchan->maxresp_cached = size + NFSD_MIN_HDR_SEQ_SZ;
- return 0;
+ BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
+ + sizeof(struct nfsd4_session) > PAGE_SIZE);
+ mem = numslots * sizeof(struct nfsd4_slot *);
+
+ new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
+ if (!new)
+ return NULL;
+ /* allocate each struct nfsd4_slot and data cache in one piece */
+ for (i = 0; i < numslots; i++) {
+ mem = sizeof(struct nfsd4_slot) + slotsize;
+ new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
+ if (!new->se_slots[i])
+ goto out_free;
+ }
+ return new;
+out_free:
+ while (i--)
+ kfree(new->se_slots[i]);
+ kfree(new);
+ return NULL;
}
-/*
- * fchan holds the client values on input, and the server values on output
- * sv_max_mesg is the maximum payload plus one page for overhead.
- */
-static int init_forechannel_attrs(struct svc_rqst *rqstp,
- struct nfsd4_channel_attrs *session_fchan,
- struct nfsd4_channel_attrs *fchan)
+static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
{
- int status = 0;
- __u32 maxcount = nfsd_serv->sv_max_mesg;
+ u32 maxrpc = nfsd_serv->sv_max_mesg;
- /* headerpadsz set to zero in encode routine */
+ new->maxreqs = numslots;
+ new->maxresp_cached = slotsize + NFSD_MIN_HDR_SEQ_SZ;
+ new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
+ new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
+ new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
+}
- /* Use the client's max request and max response size if possible */
- if (fchan->maxreq_sz > maxcount)
- fchan->maxreq_sz = maxcount;
- session_fchan->maxreq_sz = fchan->maxreq_sz;
+static void free_conn(struct nfsd4_conn *c)
+{
+ svc_xprt_put(c->cn_xprt);
+ kfree(c);
+}
- if (fchan->maxresp_sz > maxcount)
- fchan->maxresp_sz = maxcount;
- session_fchan->maxresp_sz = fchan->maxresp_sz;
+static void nfsd4_conn_lost(struct svc_xpt_user *u)
+{
+ struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
+ struct nfs4_client *clp = c->cn_session->se_client;
- /* Use the client's maxops if possible */
- if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND)
- fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND;
- session_fchan->maxops = fchan->maxops;
+ spin_lock(&clp->cl_lock);
+ if (!list_empty(&c->cn_persession)) {
+ list_del(&c->cn_persession);
+ free_conn(c);
+ }
+ spin_unlock(&clp->cl_lock);
+}
- /* FIXME: Error means no more DRC pages so the server should
- * recover pages from existing sessions. For now fail session
- * creation.
- */
- status = set_forechannel_drc_size(fchan);
+static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
+{
+ struct nfsd4_conn *conn;
- session_fchan->maxresp_cached = fchan->maxresp_cached;
- session_fchan->maxreqs = fchan->maxreqs;
+ conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
+ if (!conn)
+ return NULL;
+ svc_xprt_get(rqstp->rq_xprt);
+ conn->cn_xprt = rqstp->rq_xprt;
+ conn->cn_flags = flags;
+ INIT_LIST_HEAD(&conn->cn_xpt_user.list);
+ return conn;
+}
- dprintk("%s status %d\n", __func__, status);
- return status;
+static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
+{
+ conn->cn_session = ses;
+ list_add(&conn->cn_persession, &ses->se_conns);
}
-static void
-free_session_slots(struct nfsd4_session *ses)
+static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
- int i;
+ struct nfs4_client *clp = ses->se_client;
- for (i = 0; i < ses->se_fchannel.maxreqs; i++)
- kfree(ses->se_slots[i]);
+ spin_lock(&clp->cl_lock);
+ __nfsd4_hash_conn(conn, ses);
+ spin_unlock(&clp->cl_lock);
}
-/*
- * We don't actually need to cache the rpc and session headers, so we
- * can allocate a little less for each slot:
- */
-static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
+static void nfsd4_register_conn(struct nfsd4_conn *conn)
{
- return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
+ conn->cn_xpt_user.callback = nfsd4_conn_lost;
+ register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
-static int
-alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
- struct nfsd4_create_session *cses)
+static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
{
- struct nfsd4_session *new, tmp;
- struct nfsd4_slot *sp;
- int idx, slotsize, cachesize, i;
- int status;
+ struct nfsd4_conn *conn;
+ u32 flags = NFS4_CDFC4_FORE;
- memset(&tmp, 0, sizeof(tmp));
+ if (ses->se_flags & SESSION4_BACK_CHAN)
+ flags |= NFS4_CDFC4_BACK;
+ conn = alloc_conn(rqstp, flags);
+ if (!conn)
+ return nfserr_jukebox;
+ nfsd4_hash_conn(conn, ses);
+ nfsd4_register_conn(conn);
+ return nfs_ok;
+}
- /* FIXME: For now, we just accept the client back channel attributes. */
- tmp.se_bchannel = cses->back_channel;
- status = init_forechannel_attrs(rqstp, &tmp.se_fchannel,
- &cses->fore_channel);
- if (status)
- goto out;
+static void nfsd4_del_conns(struct nfsd4_session *s)
+{
+ struct nfs4_client *clp = s->se_client;
+ struct nfsd4_conn *c;
- BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot)
- + sizeof(struct nfsd4_session) > PAGE_SIZE);
+ spin_lock(&clp->cl_lock);
+ while (!list_empty(&s->se_conns)) {
+ c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
+ list_del_init(&c->cn_persession);
+ spin_unlock(&clp->cl_lock);
- status = nfserr_jukebox;
- /* allocate struct nfsd4_session and slot table pointers in one piece */
- slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *);
- new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL);
- if (!new)
- goto out;
+ unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
+ free_conn(c);
- memcpy(new, &tmp, sizeof(*new));
+ spin_lock(&clp->cl_lock);
+ }
+ spin_unlock(&clp->cl_lock);
+}
- /* allocate each struct nfsd4_slot and data cache in one piece */
- cachesize = slot_bytes(&new->se_fchannel);
- for (i = 0; i < new->se_fchannel.maxreqs; i++) {
- sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL);
- if (!sp)
- goto out_free;
- new->se_slots[i] = sp;
+void free_session(struct kref *kref)
+{
+ struct nfsd4_session *ses;
+ int mem;
+
+ ses = container_of(kref, struct nfsd4_session, se_ref);
+ nfsd4_del_conns(ses);
+ spin_lock(&nfsd_drc_lock);
+ mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
+ nfsd_drc_mem_used -= mem;
+ spin_unlock(&nfsd_drc_lock);
+ free_session_slots(ses);
+ kfree(ses);
+}
+
+static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
+{
+ struct nfsd4_session *new;
+ struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
+ int numslots, slotsize;
+ int status;
+ int idx;
+
+ /*
+ * Note decreasing slot size below client's request may
+ * make it difficult for client to function correctly, whereas
+ * decreasing the number of slots will (just?) affect
+ * performance. When short on memory we therefore prefer to
+ * decrease number of slots instead of their size.
+ */
+ slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
+ numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
+
+ new = alloc_session(slotsize, numslots);
+ if (!new) {
+ nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
+ return NULL;
}
+ init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
new->se_client = clp;
gen_sessionid(new);
- idx = hash_sessionid(&new->se_sessionid);
- memcpy(clp->cl_sessionid.data, new->se_sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
+ INIT_LIST_HEAD(&new->se_conns);
+
+ new->se_cb_seq_nr = 1;
new->se_flags = cses->flags;
+ new->se_cb_prog = cses->callback_prog;
kref_init(&new->se_ref);
+ idx = hash_sessionid(&new->se_sessionid);
spin_lock(&client_lock);
list_add(&new->se_hash, &sessionid_hashtbl[idx]);
list_add(&new->se_perclnt, &clp->cl_sessions);
spin_unlock(&client_lock);
- status = nfs_ok;
-out:
- return status;
-out_free:
- free_session_slots(new);
- kfree(new);
- goto out;
+ status = nfsd4_new_conn(rqstp, new);
+ /* whoops: benny points out, status is ignored! (err, or bogus) */
+ if (status) {
+ free_session(&new->se_ref);
+ return NULL;
+ }
+ if (!clp->cl_cb_session && (cses->flags & SESSION4_BACK_CHAN)) {
+ struct sockaddr *sa = svc_addr(rqstp);
+
+ clp->cl_cb_session = new;
+ clp->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
+ svc_xprt_get(rqstp->rq_xprt);
+ rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
+ clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
+ nfsd4_probe_callback(clp);
+ }
+ return new;
}
/* caller must hold client_lock */
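
The net effect of nfsd4_sanitize_slot_size()/nfsd4_get_drc_mem() is easiest to see with numbers. Assuming NFSD_SLOT_CACHE_SIZE is 1024 and NFSD_MAX_SLOTS_PER_SESSION is 160 (their values elsewhere in this tree; not shown in this patch), and using NFSD_MIN_HDR_SEQ_SZ = 24 + 12 + 44 = 80 from above:

	/*
	 * client asks: maxresp_cached = 2128, maxreqs = 64
	 *
	 * slotsize = nfsd4_sanitize_slot_size(2128)
	 *          = min(2128 - 80, 1024) = 1024
	 * numslots = nfsd4_get_drc_mem(1024, 64)
	 *          = min(64, 160, remaining DRC memory (per-session and
	 *            global caps) / 1024)
	 *
	 * init_forechannel_attrs() then reports back
	 *          maxreqs = numslots, maxresp_cached = 1024 + 80
	 */

i.e. under memory pressure the server hands back fewer slots rather than smaller ones, which is exactly the trade-off described in the comment in alloc_init_session().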
@@ -731,21 +816,6 @@ unhash_session(struct nfsd4_session *ses)
list_del(&ses->se_perclnt);
}
-void
-free_session(struct kref *kref)
-{
- struct nfsd4_session *ses;
- int mem;
-
- ses = container_of(kref, struct nfsd4_session, se_ref);
- spin_lock(&nfsd_drc_lock);
- mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
- nfsd_drc_mem_used -= mem;
- spin_unlock(&nfsd_drc_lock);
- free_session_slots(ses);
- kfree(ses);
-}
-
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
@@ -812,6 +882,13 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
static inline void
free_client(struct nfs4_client *clp)
{
+ while (!list_empty(&clp->cl_sessions)) {
+ struct nfsd4_session *ses;
+ ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
+ se_perclnt);
+ list_del(&ses->se_perclnt);
+ nfsd4_put_session(ses);
+ }
if (clp->cl_cred.cr_group_info)
put_group_info(clp->cl_cred.cr_group_info);
kfree(clp->cl_principal);
@@ -838,15 +915,12 @@ release_session_client(struct nfsd4_session *session)
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
+ struct nfsd4_session *ses;
+
mark_client_expired(clp);
list_del(&clp->cl_lru);
- while (!list_empty(&clp->cl_sessions)) {
- struct nfsd4_session *ses;
- ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
- se_perclnt);
- unhash_session(ses);
- nfsd4_put_session(ses);
- }
+ list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
+ list_del_init(&ses->se_hash);
}
static void
@@ -875,7 +949,7 @@ expire_client(struct nfs4_client *clp)
sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient);
release_openowner(sop);
}
- nfsd4_set_callback_client(clp, NULL);
+ nfsd4_shutdown_callback(clp);
if (clp->cl_cb_conn.cb_xprt)
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
list_del(&clp->cl_idhash);
@@ -960,6 +1034,8 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
if (clp == NULL)
return NULL;
+ INIT_LIST_HEAD(&clp->cl_sessions);
+
princ = svc_gss_principal(rqstp);
if (princ) {
clp->cl_principal = kstrdup(princ, GFP_KERNEL);
@@ -976,8 +1052,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
INIT_LIST_HEAD(&clp->cl_strhash);
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
- INIT_LIST_HEAD(&clp->cl_sessions);
INIT_LIST_HEAD(&clp->cl_lru);
+ spin_lock_init(&clp->cl_lock);
+ INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
clp->cl_time = get_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
@@ -986,7 +1063,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
clp->cl_flavor = rqstp->rq_flavor;
copy_cred(&clp->cl_cred, &rqstp->rq_cred);
gen_confirm(clp);
-
+ clp->cl_cb_session = NULL;
return clp;
}
@@ -1098,7 +1175,7 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval,
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
{
- struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
+ struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
unsigned short expected_family;
/* Currently, we only support tcp and tcp6 for the callback channel */
@@ -1111,24 +1188,23 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
else
goto out_err;
- cb->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
+ conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
se->se_callback_addr_len,
- (struct sockaddr *) &cb->cb_addr,
- sizeof(cb->cb_addr));
+ (struct sockaddr *)&conn->cb_addr,
+ sizeof(conn->cb_addr));
- if (!cb->cb_addrlen || cb->cb_addr.ss_family != expected_family)
+ if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
goto out_err;
- if (cb->cb_addr.ss_family == AF_INET6)
- ((struct sockaddr_in6 *) &cb->cb_addr)->sin6_scope_id = scopeid;
+ if (conn->cb_addr.ss_family == AF_INET6)
+ ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
- cb->cb_minorversion = 0;
- cb->cb_prog = se->se_callback_prog;
- cb->cb_ident = se->se_callback_ident;
+ conn->cb_prog = se->se_callback_prog;
+ conn->cb_ident = se->se_callback_ident;
return;
out_err:
- cb->cb_addr.ss_family = AF_UNSPEC;
- cb->cb_addrlen = 0;
+ conn->cb_addr.ss_family = AF_UNSPEC;
+ conn->cb_addrlen = 0;
dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
"will not receive delegations\n",
clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
@@ -1415,7 +1491,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
{
struct sockaddr *sa = svc_addr(rqstp);
struct nfs4_client *conf, *unconf;
+ struct nfsd4_session *new;
struct nfsd4_clid_slot *cs_slot = NULL;
+ bool confirm_me = false;
int status = 0;
nfs4_lock_state();
@@ -1438,7 +1516,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
cs_slot->sl_seqid, cr_ses->seqid);
goto out;
}
- cs_slot->sl_seqid++;
} else if (unconf) {
if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
!rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
@@ -1451,25 +1528,10 @@ nfsd4_create_session(struct svc_rqst *rqstp,
if (status) {
/* an unconfirmed replay returns misordered */
status = nfserr_seq_misordered;
- goto out_cache;
+ goto out;
}
- cs_slot->sl_seqid++; /* from 0 to 1 */
- move_to_confirmed(unconf);
-
- if (cr_ses->flags & SESSION4_BACK_CHAN) {
- unconf->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
- svc_xprt_get(rqstp->rq_xprt);
- rpc_copy_addr(
- (struct sockaddr *)&unconf->cl_cb_conn.cb_addr,
- sa);
- unconf->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
- unconf->cl_cb_conn.cb_minorversion =
- cstate->minorversion;
- unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog;
- unconf->cl_cb_seq_nr = 1;
- nfsd4_probe_callback(unconf, &unconf->cl_cb_conn);
- }
+ confirm_me = true;
conf = unconf;
} else {
status = nfserr_stale_clientid;
@@ -1477,22 +1539,30 @@ nfsd4_create_session(struct svc_rqst *rqstp,
}
/*
+ * XXX: we should probably set this at creation time, and check
+ * for consistent minorversion use throughout:
+ */
+ conf->cl_minorversion = 1;
+ /*
* We do not support RDMA or persistent sessions
*/
cr_ses->flags &= ~SESSION4_PERSIST;
cr_ses->flags &= ~SESSION4_RDMA;
- status = alloc_init_session(rqstp, conf, cr_ses);
- if (status)
+ status = nfserr_jukebox;
+ new = alloc_init_session(rqstp, conf, cr_ses);
+ if (!new)
goto out;
-
- memcpy(cr_ses->sessionid.data, conf->cl_sessionid.data,
+ status = nfs_ok;
+ memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
NFS4_MAX_SESSIONID_LEN);
+ cs_slot->sl_seqid++;
cr_ses->seqid = cs_slot->sl_seqid;
-out_cache:
/* cache solo and embedded create sessions under the state lock */
nfsd4_cache_create_session(cr_ses, cs_slot, status);
+ if (confirm_me)
+ move_to_confirmed(conf);
out:
nfs4_unlock_state();
dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1546,8 +1616,11 @@ nfsd4_destroy_session(struct svc_rqst *r,
nfs4_lock_state();
/* wait for callbacks */
- nfsd4_set_callback_client(ses->se_client, NULL);
+ nfsd4_shutdown_callback(ses->se_client);
nfs4_unlock_state();
+
+ nfsd4_del_conns(ses);
+
nfsd4_put_session(ses);
status = nfs_ok;
out:
@@ -1555,6 +1628,36 @@ out:
return status;
}
+static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
+{
+ struct nfsd4_conn *c;
+
+ list_for_each_entry(c, &s->se_conns, cn_persession) {
+ if (c->cn_xprt == xpt) {
+ return c;
+ }
+ }
+ return NULL;
+}
+
+static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
+{
+ struct nfs4_client *clp = ses->se_client;
+ struct nfsd4_conn *c;
+
+ spin_lock(&clp->cl_lock);
+ c = __nfsd4_find_conn(new->cn_xprt, ses);
+ if (c) {
+ spin_unlock(&clp->cl_lock);
+ free_conn(new);
+ return;
+ }
+ __nfsd4_hash_conn(new, ses);
+ spin_unlock(&clp->cl_lock);
+ nfsd4_register_conn(new);
+ return;
+}
+
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
@@ -1563,11 +1666,20 @@ nfsd4_sequence(struct svc_rqst *rqstp,
struct nfsd4_compoundres *resp = rqstp->rq_resp;
struct nfsd4_session *session;
struct nfsd4_slot *slot;
+ struct nfsd4_conn *conn;
int status;
if (resp->opcnt != 1)
return nfserr_sequence_pos;
+ /*
+ * Will be either used or freed by nfsd4_sequence_check_conn
+ * below.
+ */
+ conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
+ if (!conn)
+ return nfserr_jukebox;
+
spin_lock(&client_lock);
status = nfserr_badsession;
session = find_in_sessionid_hashtbl(&seq->sessionid);
@@ -1599,6 +1711,9 @@ nfsd4_sequence(struct svc_rqst *rqstp,
if (status)
goto out;
+ nfsd4_sequence_check_conn(conn, session);
+ conn = NULL;
+
/* Success! bump slot seqid */
slot->sl_inuse = true;
slot->sl_seqid = seq->seqid;
@@ -1613,6 +1728,7 @@ out:
nfsd4_get_session(cstate->session);
atomic_inc(&session->se_client->cl_refcount);
}
+ kfree(conn);
spin_unlock(&client_lock);
dprintk("%s: return %d\n", __func__, ntohl(status));
return status;
@@ -1747,6 +1863,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
gen_clid(new);
}
+ /*
+ * XXX: we should probably set this at creation time, and check
+ * for consistent minorversion use throughout:
+ */
+ new->cl_minorversion = 0;
gen_callback(new, setclid, rpc_get_scope_id(sa));
add_to_unconfirmed(new, strhashval);
setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
@@ -1807,7 +1928,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
status = nfserr_clid_inuse;
else {
atomic_set(&conf->cl_cb_set, 0);
- nfsd4_probe_callback(conf, &unconf->cl_cb_conn);
+ nfsd4_change_callback(conf, &unconf->cl_cb_conn);
+ nfsd4_probe_callback(conf);
expire_client(unconf);
status = nfs_ok;
@@ -1841,7 +1963,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
}
move_to_confirmed(unconf);
conf = unconf;
- nfsd4_probe_callback(conf, &conf->cl_cb_conn);
+ nfsd4_probe_callback(conf);
status = nfs_ok;
}
} else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
@@ -2944,7 +3066,11 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
if (STALE_STATEID(stateid))
goto out;
- status = nfserr_bad_stateid;
+ /*
+ * We assume that any stateid that has the current boot time,
+ * but that we can't find, is expired:
+ */
+ status = nfserr_expired;
if (is_delegation_stateid(stateid)) {
dp = find_delegation_stateid(ino, stateid);
if (!dp)
@@ -2964,6 +3090,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
stp = find_stateid(stateid, flags);
if (!stp)
goto out;
+ status = nfserr_bad_stateid;
if (nfs4_check_fh(current_fh, stp))
goto out;
if (!stp->st_stateowner->so_confirmed)
@@ -3038,8 +3165,9 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
* a replayed close:
*/
sop = search_close_lru(stateid->si_stateownerid, flags);
+ /* It's not stale; let's assume it's expired: */
if (sop == NULL)
- return nfserr_bad_stateid;
+ return nfserr_expired;
*sopp = sop;
goto check_replay;
}
@@ -3304,6 +3432,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_bad_stateid;
if (!is_delegation_stateid(stateid))
goto out;
+ status = nfserr_expired;
dp = find_delegation_stateid(inode, stateid);
if (!dp)
goto out;
@@ -3895,7 +4024,7 @@ check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner)
struct inode *inode = filp->fi_inode;
int status = 0;
- lock_kernel();
+ lock_flocks();
for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
status = 1;
@@ -3903,7 +4032,7 @@ check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner)
}
}
out:
- unlock_kernel();
+ unlock_flocks();
return status;
}
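
As an aside, the free_session()/nfsd4_put_session() pairing in the nfs4state.c hunks above is the standard kref idiom: the last put invokes the release callback, which recovers the containing object with container_of() and frees it. A minimal sketch of that idiom follows; the names are illustrative, not from the patch.

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_session {
	struct kref ref;
	/* ... per-session state ... */
};

/* kref release callback: runs exactly once, on the final put */
static void demo_free_session(struct kref *kref)
{
	struct demo_session *s = container_of(kref, struct demo_session, ref);

	/* tear down per-session resources here, then free the object */
	kfree(s);
}

static struct demo_session *demo_alloc_session(void)
{
	struct demo_session *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (s)
		kref_init(&s->ref);	/* reference count starts at 1 */
	return s;
}

static void demo_put_session(struct demo_session *s)
{
	kref_put(&s->ref, demo_free_session);
}
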
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 1a468bbd330..f35a94a0402 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1805,19 +1805,23 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
goto out_nfserr;
}
}
- if ((buflen -= 16) < 0)
- goto out_resource;
- if (unlikely(bmval2)) {
+ if (bmval2) {
+ if ((buflen -= 16) < 0)
+ goto out_resource;
WRITE32(3);
WRITE32(bmval0);
WRITE32(bmval1);
WRITE32(bmval2);
- } else if (likely(bmval1)) {
+ } else if (bmval1) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
WRITE32(2);
WRITE32(bmval0);
WRITE32(bmval1);
} else {
+ if ((buflen -= 8) < 0)
+ goto out_resource;
WRITE32(1);
WRITE32(bmval0);
}
@@ -1828,15 +1832,17 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
u32 word1 = nfsd_suppattrs1(minorversion);
u32 word2 = nfsd_suppattrs2(minorversion);
- if ((buflen -= 12) < 0)
- goto out_resource;
if (!aclsupport)
word0 &= ~FATTR4_WORD0_ACL;
if (!word2) {
+ if ((buflen -= 12) < 0)
+ goto out_resource;
WRITE32(2);
WRITE32(word0);
WRITE32(word1);
} else {
+ if ((buflen -= 16) < 0)
+ goto out_resource;
WRITE32(3);
WRITE32(word0);
WRITE32(word1);
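
The nfs4xdr.c hunk above moves the buflen reservation into each branch so that exactly as many bytes are reserved as that branch will emit with WRITE32(). A simplified sketch of the same pattern, assuming a plain byte buffer and ignoring the big-endian conversion the real macros perform:

#include <stdint.h>
#include <string.h>

/* Returns bytes written, or -1 if the remaining buffer cannot hold them. */
static int encode_words(uint8_t *p, int buflen, const uint32_t *words, int n)
{
	int need = 4 * n;

	if ((buflen -= need) < 0)	/* reserve before writing, as in the hunk */
		return -1;
	memcpy(p, words, need);		/* stand-in for the WRITE32() sequence */
	return need;
}
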
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index b53b1d042f1..d6dc3f61f8b 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -22,6 +22,7 @@
*/
enum {
NFSD_Root = 1,
+#ifdef CONFIG_NFSD_DEPRECATED
NFSD_Svc,
NFSD_Add,
NFSD_Del,
@@ -29,6 +30,7 @@ enum {
NFSD_Unexport,
NFSD_Getfd,
NFSD_Getfs,
+#endif
NFSD_List,
NFSD_Export_features,
NFSD_Fh,
@@ -54,6 +56,7 @@ enum {
/*
* write() for these nodes.
*/
+#ifdef CONFIG_NFSD_DEPRECATED
static ssize_t write_svc(struct file *file, char *buf, size_t size);
static ssize_t write_add(struct file *file, char *buf, size_t size);
static ssize_t write_del(struct file *file, char *buf, size_t size);
@@ -61,6 +64,7 @@ static ssize_t write_export(struct file *file, char *buf, size_t size);
static ssize_t write_unexport(struct file *file, char *buf, size_t size);
static ssize_t write_getfd(struct file *file, char *buf, size_t size);
static ssize_t write_getfs(struct file *file, char *buf, size_t size);
+#endif
static ssize_t write_filehandle(struct file *file, char *buf, size_t size);
static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size);
static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size);
@@ -76,6 +80,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
#endif
static ssize_t (*write_op[])(struct file *, char *, size_t) = {
+#ifdef CONFIG_NFSD_DEPRECATED
[NFSD_Svc] = write_svc,
[NFSD_Add] = write_add,
[NFSD_Del] = write_del,
@@ -83,6 +88,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
[NFSD_Unexport] = write_unexport,
[NFSD_Getfd] = write_getfd,
[NFSD_Getfs] = write_getfs,
+#endif
[NFSD_Fh] = write_filehandle,
[NFSD_FO_UnlockIP] = write_unlock_ip,
[NFSD_FO_UnlockFS] = write_unlock_fs,
@@ -121,6 +127,14 @@ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *bu
static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
{
+ static int warned;
+ if (file->f_dentry->d_name.name[0] == '.' && !warned) {
+ printk(KERN_INFO
+ "Warning: \"%s\" uses deprecated NFSD interface: %s."
+ " This will be removed in 2.6.40\n",
+ current->comm, file->f_dentry->d_name.name);
+ warned = 1;
+ }
if (! file->private_data) {
/* An attempt to read a transaction file without writing
* causes a 0-byte write so that the file can return
@@ -137,6 +151,7 @@ static const struct file_operations transaction_ops = {
.write = nfsctl_transaction_write,
.read = nfsctl_transaction_read,
.release = simple_transaction_release,
+ .llseek = default_llseek,
};
static int exports_open(struct inode *inode, struct file *file)
@@ -186,6 +201,7 @@ static const struct file_operations pool_stats_operations = {
* payload - write methods
*/
+#ifdef CONFIG_NFSD_DEPRECATED
/**
* write_svc - Start kernel's NFSD server
*
@@ -401,7 +417,7 @@ static ssize_t write_getfs(struct file *file, char *buf, size_t size)
ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &in6);
- clp = auth_unix_lookup(&in6);
+ clp = auth_unix_lookup(&init_net, &in6);
if (!clp)
err = -EPERM;
else {
@@ -464,7 +480,7 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &in6);
- clp = auth_unix_lookup(&in6);
+ clp = auth_unix_lookup(&init_net, &in6);
if (!clp)
err = -EPERM;
else {
@@ -481,6 +497,7 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
out:
return err;
}
+#endif /* CONFIG_NFSD_DEPRECATED */
/**
* write_unlock_ip - Release all locks used by a client
@@ -999,12 +1016,12 @@ static ssize_t __write_ports_addxprt(char *buf)
if (err != 0)
return err;
- err = svc_create_xprt(nfsd_serv, transport,
+ err = svc_create_xprt(nfsd_serv, transport, &init_net,
PF_INET, port, SVC_SOCK_ANONYMOUS);
if (err < 0)
goto out_err;
- err = svc_create_xprt(nfsd_serv, transport,
+ err = svc_create_xprt(nfsd_serv, transport, &init_net,
PF_INET6, port, SVC_SOCK_ANONYMOUS);
if (err < 0 && err != -EAFNOSUPPORT)
goto out_close;
@@ -1355,6 +1372,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
{
static struct tree_descr nfsd_files[] = {
+#ifdef CONFIG_NFSD_DEPRECATED
[NFSD_Svc] = {".svc", &transaction_ops, S_IWUSR},
[NFSD_Add] = {".add", &transaction_ops, S_IWUSR},
[NFSD_Del] = {".del", &transaction_ops, S_IWUSR},
@@ -1362,6 +1380,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
[NFSD_Unexport] = {".unexport", &transaction_ops, S_IWUSR},
[NFSD_Getfd] = {".getfd", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Getfs] = {".getfs", &transaction_ops, S_IWUSR|S_IRUSR},
+#endif
[NFSD_List] = {"exports", &exports_operations, S_IRUGO},
[NFSD_Export_features] = {"export_features",
&export_features_operations, S_IRUGO},
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index b76ac3a82e3..6b641cf2c19 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -249,7 +249,7 @@ extern time_t nfsd4_grace;
#define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
-#define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */
+#define NFSD_LAUNDROMAT_MINTIMEOUT 1 /* seconds */
/*
* The following attributes are currently not supported by the NFSv4 server:
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index e2c43464f23..2bae1d86f5f 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -16,6 +16,7 @@
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#include <linux/seq_file.h>
+#include <net/net_namespace.h>
#include "nfsd.h"
#include "cache.h"
#include "vfs.h"
@@ -186,12 +187,12 @@ static int nfsd_init_socks(int port)
if (!list_empty(&nfsd_serv->sv_permsocks))
return 0;
- error = svc_create_xprt(nfsd_serv, "udp", PF_INET, port,
+ error = svc_create_xprt(nfsd_serv, "udp", &init_net, PF_INET, port,
SVC_SOCK_DEFAULTS);
if (error < 0)
return error;
- error = svc_create_xprt(nfsd_serv, "tcp", PF_INET, port,
+ error = svc_create_xprt(nfsd_serv, "tcp", &init_net, PF_INET, port,
SVC_SOCK_DEFAULTS);
if (error < 0)
return error;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 322518c88e4..39adc27b068 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -35,6 +35,7 @@
#ifndef _NFSD4_STATE_H
#define _NFSD4_STATE_H
+#include <linux/sunrpc/svc_xprt.h>
#include <linux/nfsd/nfsfh.h>
#include "nfsfh.h"
@@ -64,19 +65,12 @@ typedef struct {
(s)->si_fileid, \
(s)->si_generation
-struct nfsd4_cb_sequence {
- /* args/res */
- u32 cbs_minorversion;
- struct nfs4_client *cbs_clp;
-};
-
-struct nfs4_rpc_args {
- void *args_op;
- struct nfsd4_cb_sequence args_seq;
-};
-
struct nfsd4_callback {
- struct nfs4_rpc_args cb_args;
+ void *cb_op;
+ struct nfs4_client *cb_clp;
+ u32 cb_minorversion;
+ struct rpc_message cb_msg;
+ const struct rpc_call_ops *cb_ops;
struct work_struct cb_work;
};
@@ -91,7 +85,6 @@ struct nfs4_delegation {
u32 dl_type;
time_t dl_time;
/* For recall: */
- u32 dl_ident;
stateid_t dl_stateid;
struct knfsd_fh dl_fh;
int dl_retries;
@@ -103,8 +96,8 @@ struct nfs4_cb_conn {
/* SETCLIENTID info */
struct sockaddr_storage cb_addr;
size_t cb_addrlen;
- u32 cb_prog;
- u32 cb_minorversion;
+ u32 cb_prog; /* used only in 4.0 case;
+ per-session otherwise */
u32 cb_ident; /* minorversion 0 only */
struct svc_xprt *cb_xprt; /* minorversion 1 only */
};
@@ -160,6 +153,15 @@ struct nfsd4_clid_slot {
struct nfsd4_create_session sl_cr_ses;
};
+struct nfsd4_conn {
+ struct list_head cn_persession;
+ struct svc_xprt *cn_xprt;
+ struct svc_xpt_user cn_xpt_user;
+ struct nfsd4_session *cn_session;
+/* CDFC4_FORE, CDFC4_BACK: */
+ unsigned char cn_flags;
+};
+
struct nfsd4_session {
struct kref se_ref;
struct list_head se_hash; /* hash by sessionid */
@@ -169,6 +171,9 @@ struct nfsd4_session {
struct nfs4_sessionid se_sessionid;
struct nfsd4_channel_attrs se_fchannel;
struct nfsd4_channel_attrs se_bchannel;
+ struct list_head se_conns;
+ u32 se_cb_prog;
+ u32 se_cb_seq_nr;
struct nfsd4_slot *se_slots[]; /* forward channel slots */
};
@@ -221,24 +226,32 @@ struct nfs4_client {
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
u32 cl_firststate; /* recovery dir creation */
+ u32 cl_minorversion;
/* for v4.0 and v4.1 callbacks: */
struct nfs4_cb_conn cl_cb_conn;
+#define NFSD4_CLIENT_CB_UPDATE 1
+#define NFSD4_CLIENT_KILL 2
+ unsigned long cl_cb_flags;
struct rpc_clnt *cl_cb_client;
+ u32 cl_cb_ident;
atomic_t cl_cb_set;
+ struct nfsd4_callback cl_cb_null;
+ struct nfsd4_session *cl_cb_session;
+
+ /* for all client information that callback code might need: */
+ spinlock_t cl_lock;
/* for nfs41 */
struct list_head cl_sessions;
struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */
u32 cl_exchange_flags;
- struct nfs4_sessionid cl_sessionid;
/* number of rpc's in progress over an associated session: */
atomic_t cl_refcount;
/* for nfs41 callbacks */
/* We currently support a single back channel with a single slot */
unsigned long cl_cb_slot_busy;
- u32 cl_cb_seq_nr;
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
};
@@ -440,12 +453,13 @@ extern int nfs4_in_grace(void);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
extern void nfs4_free_stateowner(struct kref *kref);
extern int set_callback_cred(void);
-extern void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
+extern void nfsd4_probe_callback(struct nfs4_client *clp);
+extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
extern void nfsd4_do_callback_rpc(struct work_struct *);
extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
extern int nfsd4_create_callback_queue(void);
extern void nfsd4_destroy_callback_queue(void);
-extern void nfsd4_set_callback_client(struct nfs4_client *, struct rpc_clnt *);
+extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
extern void nfsd4_init_recdir(char *recdir_name);
diff --git a/fs/nilfs2/Makefile b/fs/nilfs2/Makefile
index df3e62c1ddc..85c98737a14 100644
--- a/fs/nilfs2/Makefile
+++ b/fs/nilfs2/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_NILFS2_FS) += nilfs2.o
nilfs2-y := inode.o file.o dir.o super.o namei.o page.o mdt.o \
btnode.o bmap.o btree.o direct.o dat.o recovery.o \
the_nilfs.o segbuf.o segment.o cpfile.o sufile.o \
- ifile.o alloc.o gcinode.o ioctl.o gcdat.o
+ ifile.o alloc.o gcinode.o ioctl.o
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 3dbdc1d356b..8b782b062ba 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -533,18 +533,20 @@ void nilfs_bmap_init_gc(struct nilfs_bmap *bmap)
nilfs_btree_init_gc(bmap);
}
-void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
+void nilfs_bmap_save(const struct nilfs_bmap *bmap,
+ struct nilfs_bmap_store *store)
{
- memcpy(gcbmap, bmap, sizeof(*bmap));
- init_rwsem(&gcbmap->b_sem);
- lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
- gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode;
+ memcpy(store->data, bmap->b_u.u_data, sizeof(store->data));
+ store->last_allocated_key = bmap->b_last_allocated_key;
+ store->last_allocated_ptr = bmap->b_last_allocated_ptr;
+ store->state = bmap->b_state;
}
-void nilfs_bmap_commit_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
+void nilfs_bmap_restore(struct nilfs_bmap *bmap,
+ const struct nilfs_bmap_store *store)
{
- memcpy(bmap, gcbmap, sizeof(*bmap));
- init_rwsem(&bmap->b_sem);
- lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
- bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
+ memcpy(bmap->b_u.u_data, store->data, sizeof(store->data));
+ bmap->b_last_allocated_key = store->last_allocated_key;
+ bmap->b_last_allocated_ptr = store->last_allocated_ptr;
+ bmap->b_state = store->state;
}
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index a20569b1992..bde1c0aa2e1 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -135,6 +135,12 @@ struct nilfs_bmap {
/* state */
#define NILFS_BMAP_DIRTY 0x00000001
+struct nilfs_bmap_store {
+ __le64 data[NILFS_BMAP_SIZE / sizeof(__le64)];
+ __u64 last_allocated_key;
+ __u64 last_allocated_ptr;
+ int state;
+};
int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *);
int nilfs_bmap_read(struct nilfs_bmap *, struct nilfs_inode *);
@@ -153,9 +159,9 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *, __u64, int, __u64 *);
int nilfs_bmap_mark(struct nilfs_bmap *, __u64, int);
void nilfs_bmap_init_gc(struct nilfs_bmap *);
-void nilfs_bmap_init_gcdat(struct nilfs_bmap *, struct nilfs_bmap *);
-void nilfs_bmap_commit_gcdat(struct nilfs_bmap *, struct nilfs_bmap *);
+void nilfs_bmap_save(const struct nilfs_bmap *, struct nilfs_bmap_store *);
+void nilfs_bmap_restore(struct nilfs_bmap *, const struct nilfs_bmap_store *);
static inline int nilfs_bmap_lookup(struct nilfs_bmap *bmap, __u64 key,
__u64 *ptr)
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index f78ab1044d1..5115814cb74 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -37,15 +37,7 @@
void nilfs_btnode_cache_init_once(struct address_space *btnc)
{
- memset(btnc, 0, sizeof(*btnc));
- INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
- spin_lock_init(&btnc->tree_lock);
- INIT_LIST_HEAD(&btnc->private_list);
- spin_lock_init(&btnc->private_lock);
-
- spin_lock_init(&btnc->i_mmap_lock);
- INIT_RAW_PRIO_TREE_ROOT(&btnc->i_mmap);
- INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
+ nilfs_mapping_init_once(btnc);
}
static const struct address_space_operations def_btnode_aops = {
@@ -55,12 +47,7 @@ static const struct address_space_operations def_btnode_aops = {
void nilfs_btnode_cache_init(struct address_space *btnc,
struct backing_dev_info *bdi)
{
- btnc->host = NULL; /* can safely set to host inode ? */
- btnc->flags = 0;
- mapping_set_gfp_mask(btnc, GFP_NOFS);
- btnc->assoc_mapping = NULL;
- btnc->backing_dev_info = bdi;
- btnc->a_ops = &def_btnode_aops;
+ nilfs_mapping_init(btnc, bdi, &def_btnode_aops);
}
void nilfs_btnode_cache_clear(struct address_space *btnc)
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 18737818db6..5ff15a8a102 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -863,26 +863,19 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
*/
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
- struct the_nilfs *nilfs;
int ret;
- nilfs = NILFS_MDT(cpfile)->mi_nilfs;
-
switch (mode) {
case NILFS_CHECKPOINT:
- /*
- * Check for protecting existing snapshot mounts:
- * ns_mount_mutex is used to make this operation atomic and
- * exclusive with a new mount job. Though it doesn't cover
- * umount, it's enough for the purpose.
- */
- if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
- /* Current implementation does not have to protect
- plain read-only mounts since they are exclusive
- with a read/write mount and are protected from the
- cleaner. */
+ if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
+ /*
+ * Current implementation does not have to protect
+ * plain read-only mounts since they are exclusive
+ * with a read/write mount and are protected from the
+ * cleaner.
+ */
ret = -EBUSY;
- } else
+ else
ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
return ret;
case NILFS_SNAPSHOT:
@@ -933,27 +926,40 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
}
/**
- * nilfs_cpfile_read - read cpfile inode
- * @cpfile: cpfile inode
- * @raw_inode: on-disk cpfile inode
- */
-int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode)
-{
- return nilfs_read_inode_common(cpfile, raw_inode);
-}
-
-/**
- * nilfs_cpfile_new - create cpfile
- * @nilfs: nilfs object
+ * nilfs_cpfile_read - read or get cpfile inode
+ * @sb: super block instance
* @cpsize: size of a checkpoint entry
+ * @raw_inode: on-disk cpfile inode
+ * @inodep: buffer to store the inode
*/
-struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize)
+int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
+ struct nilfs_inode *raw_inode, struct inode **inodep)
{
struct inode *cpfile;
+ int err;
+
+ cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
+ if (unlikely(!cpfile))
+ return -ENOMEM;
+ if (!(cpfile->i_state & I_NEW))
+ goto out;
+
+ err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
+ if (err)
+ goto failed;
- cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0);
- if (cpfile)
- nilfs_mdt_set_entry_size(cpfile, cpsize,
- sizeof(struct nilfs_cpfile_header));
- return cpfile;
+ nilfs_mdt_set_entry_size(cpfile, cpsize,
+ sizeof(struct nilfs_cpfile_header));
+
+ err = nilfs_read_inode_common(cpfile, raw_inode);
+ if (err)
+ goto failed;
+
+ unlock_new_inode(cpfile);
+ out:
+ *inodep = cpfile;
+ return 0;
+ failed:
+ iget_failed(cpfile);
+ return err;
}
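
The nilfs_cpfile_read() conversion above (and the matching dat.c and ifile.c changes later in this series) replaces a private constructor with the usual VFS get-or-create idiom: look the inode up, initialize it only when I_NEW is set, then either unlock_new_inode() on success or iget_failed() on error. A minimal sketch of that shape, with illustrative names and the nilfs-specific setup elided:

#include <linux/fs.h>

static int demo_metadata_read(struct super_block *sb, unsigned long ino,
			      struct inode **inodep)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);	/* the patch uses nilfs_iget_locked() */
	if (!inode)
		return -ENOMEM;
	if (!(inode->i_state & I_NEW))
		goto out;		/* someone else already initialized it */

	err = 0;	/* ... one-time setup (mdt init, raw inode read) goes here ... */
	if (err) {
		iget_failed(inode);	/* marks the inode bad and wakes waiters */
		return err;
	}
	unlock_new_inode(inode);	/* clears I_NEW and wakes waiters */
out:
	*inodep = inode;
	return 0;
}
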
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index bc0809e0ab4..a242b9a314f 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -40,7 +40,7 @@ int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *);
ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned,
size_t);
-int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode);
-struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize);
+int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
+ struct nilfs_inode *raw_inode, struct inode **inodep);
#endif /* _NILFS_CPFILE_H */
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 01314675568..49c844dab33 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -36,6 +36,7 @@
struct nilfs_dat_info {
struct nilfs_mdt_info mi;
struct nilfs_palloc_cache palloc_cache;
+ struct nilfs_shadow_map shadow;
};
static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
@@ -102,7 +103,8 @@ void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
nilfs_palloc_abort_alloc_entry(dat, req);
}
-void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req)
+static void nilfs_dat_commit_free(struct inode *dat,
+ struct nilfs_palloc_req *req)
{
struct nilfs_dat_entry *entry;
void *kaddr;
@@ -327,6 +329,23 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
if (ret < 0)
return ret;
+
+ /*
+ * The given disk block number (blocknr) is not yet written to
+ * the device at this point.
+ *
+ * To prevent nilfs_dat_translate() from returning the
+ * uncommited block number, this makes a copy of the entry
+ * buffer and redirects nilfs_dat_translate() to the copy.
+ */
+ if (!buffer_nilfs_redirected(entry_bh)) {
+ ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
+ if (ret) {
+ brelse(entry_bh);
+ return ret;
+ }
+ }
+
kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
@@ -371,7 +390,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
*/
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
- struct buffer_head *entry_bh;
+ struct buffer_head *entry_bh, *bh;
struct nilfs_dat_entry *entry;
sector_t blocknr;
void *kaddr;
@@ -381,6 +400,15 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
if (ret < 0)
return ret;
+ if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
+ bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
+ if (bh) {
+ WARN_ON(!buffer_uptodate(bh));
+ brelse(entry_bh);
+ entry_bh = bh;
+ }
+ }
+
kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
blocknr = le64_to_cpu(entry->de_blocknr);
@@ -436,38 +464,48 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
}
/**
- * nilfs_dat_read - read dat inode
- * @dat: dat inode
- * @raw_inode: on-disk dat inode
- */
-int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
-{
- return nilfs_read_inode_common(dat, raw_inode);
-}
-
-/**
- * nilfs_dat_new - create dat file
- * @nilfs: nilfs object
+ * nilfs_dat_read - read or get dat inode
+ * @sb: super block instance
* @entry_size: size of a dat entry
+ * @raw_inode: on-disk dat inode
+ * @inodep: buffer to store the inode
*/
-struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
+int nilfs_dat_read(struct super_block *sb, size_t entry_size,
+ struct nilfs_inode *raw_inode, struct inode **inodep)
{
static struct lock_class_key dat_lock_key;
struct inode *dat;
struct nilfs_dat_info *di;
int err;
- dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, sizeof(*di));
- if (dat) {
- err = nilfs_palloc_init_blockgroup(dat, entry_size);
- if (unlikely(err)) {
- nilfs_mdt_destroy(dat);
- return NULL;
- }
+ dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
+ if (unlikely(!dat))
+ return -ENOMEM;
+ if (!(dat->i_state & I_NEW))
+ goto out;
- di = NILFS_DAT_I(dat);
- lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
- nilfs_palloc_setup_cache(dat, &di->palloc_cache);
- }
- return dat;
+ err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
+ if (err)
+ goto failed;
+
+ err = nilfs_palloc_init_blockgroup(dat, entry_size);
+ if (err)
+ goto failed;
+
+ di = NILFS_DAT_I(dat);
+ lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
+ nilfs_palloc_setup_cache(dat, &di->palloc_cache);
+ nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+
+ err = nilfs_read_inode_common(dat, raw_inode);
+ if (err)
+ goto failed;
+
+ unlock_new_inode(dat);
+ out:
+ *inodep = dat;
+ return 0;
+ failed:
+ iget_failed(dat);
+ return err;
}
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index d31c3aab0ef..cbd8e973250 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -53,7 +53,7 @@ int nilfs_dat_freev(struct inode *, __u64 *, size_t);
int nilfs_dat_move(struct inode *, __u64, sector_t);
ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t);
-int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode);
-struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size);
+int nilfs_dat_read(struct super_block *sb, size_t entry_size,
+ struct nilfs_inode *raw_inode, struct inode **inodep);
#endif /* _NILFS_DAT_H */
diff --git a/fs/nilfs2/export.h b/fs/nilfs2/export.h
new file mode 100644
index 00000000000..a71cc412b65
--- /dev/null
+++ b/fs/nilfs2/export.h
@@ -0,0 +1,17 @@
+#ifndef NILFS_EXPORT_H
+#define NILFS_EXPORT_H
+
+#include <linux/exportfs.h>
+
+extern const struct export_operations nilfs_export_ops;
+
+struct nilfs_fid {
+ u64 cno;
+ u64 ino;
+ u32 gen;
+
+ u32 parent_gen;
+ u64 parent_ino;
+} __attribute__ ((packed));
+
+#endif
diff --git a/fs/nilfs2/gcdat.c b/fs/nilfs2/gcdat.c
deleted file mode 100644
index 84a45d1d546..00000000000
--- a/fs/nilfs2/gcdat.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * gcdat.c - NILFS shadow DAT inode for GC
- *
- * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Written by Seiji Kihara <kihara@osrg.net>, Amagai Yoshiji <amagai@osrg.net>,
- * and Ryusuke Konishi <ryusuke@osrg.net>.
- *
- */
-
-#include <linux/buffer_head.h>
-#include "nilfs.h"
-#include "page.h"
-#include "mdt.h"
-
-int nilfs_init_gcdat_inode(struct the_nilfs *nilfs)
-{
- struct inode *dat = nilfs->ns_dat, *gcdat = nilfs->ns_gc_dat;
- struct nilfs_inode_info *dii = NILFS_I(dat), *gii = NILFS_I(gcdat);
- int err;
-
- gcdat->i_state = 0;
- gcdat->i_blocks = dat->i_blocks;
- gii->i_flags = dii->i_flags;
- gii->i_state = dii->i_state | (1 << NILFS_I_GCDAT);
- gii->i_cno = 0;
- nilfs_bmap_init_gcdat(gii->i_bmap, dii->i_bmap);
- err = nilfs_copy_dirty_pages(gcdat->i_mapping, dat->i_mapping);
- if (unlikely(err))
- return err;
-
- return nilfs_copy_dirty_pages(&gii->i_btnode_cache,
- &dii->i_btnode_cache);
-}
-
-void nilfs_commit_gcdat_inode(struct the_nilfs *nilfs)
-{
- struct inode *dat = nilfs->ns_dat, *gcdat = nilfs->ns_gc_dat;
- struct nilfs_inode_info *dii = NILFS_I(dat), *gii = NILFS_I(gcdat);
- struct address_space *mapping = dat->i_mapping;
- struct address_space *gmapping = gcdat->i_mapping;
-
- down_write(&NILFS_MDT(dat)->mi_sem);
- dat->i_blocks = gcdat->i_blocks;
- dii->i_flags = gii->i_flags;
- dii->i_state = gii->i_state & ~(1 << NILFS_I_GCDAT);
-
- nilfs_bmap_commit_gcdat(gii->i_bmap, dii->i_bmap);
-
- nilfs_palloc_clear_cache(dat);
- nilfs_palloc_clear_cache(gcdat);
- nilfs_clear_dirty_pages(mapping);
- nilfs_copy_back_pages(mapping, gmapping);
- /* note: mdt dirty flags should be cleared by segctor. */
-
- nilfs_clear_dirty_pages(&dii->i_btnode_cache);
- nilfs_copy_back_pages(&dii->i_btnode_cache, &gii->i_btnode_cache);
-
- up_write(&NILFS_MDT(dat)->mi_sem);
-}
-
-void nilfs_clear_gcdat_inode(struct the_nilfs *nilfs)
-{
- struct inode *gcdat = nilfs->ns_gc_dat;
- struct nilfs_inode_info *gii = NILFS_I(gcdat);
-
- gcdat->i_state = I_FREEING | I_CLEAR;
- gii->i_flags = 0;
-
- nilfs_palloc_clear_cache(gcdat);
- truncate_inode_pages(gcdat->i_mapping, 0);
- truncate_inode_pages(&gii->i_btnode_cache, 0);
-}
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index bed3a783129..33ad25ddd5c 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -28,13 +28,6 @@
* gcinodes), and this file provides lookup function of the dummy
* inodes and their buffer read function.
*
- * Since NILFS2 keeps up multiple checkpoints/snapshots across GC, it
- * has to treat blocks that belong to a same file but have different
- * checkpoint numbers. To avoid interference among generations, dummy
- * inodes are managed separately from actual inodes, and their lookup
- * function (nilfs_gc_iget) is designed to be specified with a
- * checkpoint number argument as well as an inode number.
- *
* Buffers and pages held by the dummy inodes will be released each
* time after they are copied to a new log. Dirty blocks made on the
* current generation and the blocks to be moved by GC never overlap
@@ -175,125 +168,46 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
}
nilfs_btnode_mark_dirty(bh);
} else {
- nilfs_mdt_mark_buffer_dirty(bh);
+ nilfs_mark_buffer_dirty(bh);
}
return 0;
}
-/*
- * nilfs_init_gccache() - allocate and initialize gc_inode hash table
- * @nilfs - the_nilfs
- *
- * Return Value: On success, 0.
- * On error, a negative error code is returned.
- */
-int nilfs_init_gccache(struct the_nilfs *nilfs)
+int nilfs_init_gcinode(struct inode *inode)
{
- int loop;
-
- BUG_ON(nilfs->ns_gc_inodes_h);
-
- INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
-
- nilfs->ns_gc_inodes_h =
- kmalloc(sizeof(struct hlist_head) * NILFS_GCINODE_HASH_SIZE,
- GFP_NOFS);
- if (nilfs->ns_gc_inodes_h == NULL)
- return -ENOMEM;
-
- for (loop = 0; loop < NILFS_GCINODE_HASH_SIZE; loop++)
- INIT_HLIST_HEAD(&nilfs->ns_gc_inodes_h[loop]);
- return 0;
-}
-
-/*
- * nilfs_destroy_gccache() - free gc_inode hash table
- * @nilfs - the nilfs
- */
-void nilfs_destroy_gccache(struct the_nilfs *nilfs)
-{
- if (nilfs->ns_gc_inodes_h) {
- nilfs_remove_all_gcinode(nilfs);
- kfree(nilfs->ns_gc_inodes_h);
- nilfs->ns_gc_inodes_h = NULL;
- }
-}
-
-static struct inode *alloc_gcinode(struct the_nilfs *nilfs, ino_t ino,
- __u64 cno)
-{
- struct inode *inode;
- struct nilfs_inode_info *ii;
-
- inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS, 0);
- if (!inode)
- return NULL;
+ struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
- inode->i_op = NULL;
- inode->i_fop = NULL;
+ inode->i_mode = S_IFREG;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
inode->i_mapping->a_ops = &def_gcinode_aops;
+ inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
- ii = NILFS_I(inode);
- ii->i_cno = cno;
ii->i_flags = 0;
- ii->i_state = 1 << NILFS_I_GCINODE;
- ii->i_bh = NULL;
nilfs_bmap_init_gc(ii->i_bmap);
- return inode;
-}
-
-static unsigned long ihash(ino_t ino, __u64 cno)
-{
- return hash_long((unsigned long)((ino << 2) + cno),
- NILFS_GCINODE_HASH_BITS);
-}
-
-/*
- * nilfs_gc_iget() - find or create gc inode with specified (ino,cno)
- */
-struct inode *nilfs_gc_iget(struct the_nilfs *nilfs, ino_t ino, __u64 cno)
-{
- struct hlist_head *head = nilfs->ns_gc_inodes_h + ihash(ino, cno);
- struct hlist_node *node;
- struct inode *inode;
-
- hlist_for_each_entry(inode, node, head, i_hash) {
- if (inode->i_ino == ino && NILFS_I(inode)->i_cno == cno)
- return inode;
- }
+ /*
+ * Add the inode to GC inode list. Garbage Collection
+ * is serialized and no two processes manipulate the
+ * list simultaneously.
+ */
+ igrab(inode);
+ list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes);
- inode = alloc_gcinode(nilfs, ino, cno);
- if (likely(inode)) {
- hlist_add_head(&inode->i_hash, head);
- list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes);
- }
- return inode;
-}
-
-/*
- * nilfs_clear_gcinode() - clear and free a gc inode
- */
-void nilfs_clear_gcinode(struct inode *inode)
-{
- nilfs_mdt_destroy(inode);
+ return 0;
}
-/*
- * nilfs_remove_all_gcinode() - remove all inodes from the_nilfs
+/**
+ * nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes
*/
-void nilfs_remove_all_gcinode(struct the_nilfs *nilfs)
+void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
{
- struct hlist_head *head = nilfs->ns_gc_inodes_h;
- struct hlist_node *node, *n;
- struct inode *inode;
- int loop;
+ struct list_head *head = &nilfs->ns_gc_inodes;
+ struct nilfs_inode_info *ii;
- for (loop = 0; loop < NILFS_GCINODE_HASH_SIZE; loop++, head++) {
- hlist_for_each_entry_safe(inode, node, n, head, i_hash) {
- hlist_del_init(&inode->i_hash);
- list_del_init(&NILFS_I(inode)->i_dirty);
- nilfs_clear_gcinode(inode); /* might sleep */
- }
+ while (!list_empty(head)) {
+ ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
+ list_del_init(&ii->i_dirty);
+ iput(&ii->vfs_inode);
}
}
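
The gcinode.c hunk above drops the private hash table: GC inodes are now pinned on ns_gc_inodes with igrab() and torn down with a list_first_entry()/iput() drain loop. A small sketch of that drain idiom, with a toy item type:

#include <linux/list.h>

struct demo_item {
	struct list_head link;
};

static void demo_drain(struct list_head *head)
{
	struct demo_item *it;

	while (!list_empty(head)) {
		it = list_first_entry(head, struct demo_item, link);
		list_del_init(&it->link);
		/* drop the reference taken when the item was queued,
		 * e.g. iput() for the pinned GC inodes above */
	}
}
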
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 922d9dd42c8..9f8a2da67f9 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -161,25 +161,46 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
}
/**
- * nilfs_ifile_new - create inode file
- * @sbi: nilfs_sb_info struct
+ * nilfs_ifile_read - read or get ifile inode
+ * @sb: super block instance
+ * @root: root object
* @inode_size: size of an inode
+ * @raw_inode: on-disk ifile inode
+ * @inodep: buffer to store the inode
*/
-struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size)
+int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root,
+ size_t inode_size, struct nilfs_inode *raw_inode,
+ struct inode **inodep)
{
struct inode *ifile;
int err;
- ifile = nilfs_mdt_new(sbi->s_nilfs, sbi->s_super, NILFS_IFILE_INO,
- sizeof(struct nilfs_ifile_info));
- if (ifile) {
- err = nilfs_palloc_init_blockgroup(ifile, inode_size);
- if (unlikely(err)) {
- nilfs_mdt_destroy(ifile);
- return NULL;
- }
- nilfs_palloc_setup_cache(ifile,
- &NILFS_IFILE_I(ifile)->palloc_cache);
- }
- return ifile;
+ ifile = nilfs_iget_locked(sb, root, NILFS_IFILE_INO);
+ if (unlikely(!ifile))
+ return -ENOMEM;
+ if (!(ifile->i_state & I_NEW))
+ goto out;
+
+ err = nilfs_mdt_init(ifile, NILFS_MDT_GFP,
+ sizeof(struct nilfs_ifile_info));
+ if (err)
+ goto failed;
+
+ err = nilfs_palloc_init_blockgroup(ifile, inode_size);
+ if (err)
+ goto failed;
+
+ nilfs_palloc_setup_cache(ifile, &NILFS_IFILE_I(ifile)->palloc_cache);
+
+ err = nilfs_read_inode_common(ifile, raw_inode);
+ if (err)
+ goto failed;
+
+ unlock_new_inode(ifile);
+ out:
+ *inodep = ifile;
+ return 0;
+ failed:
+ iget_failed(ifile);
+ return err;
}
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index cbca32e498f..59b6f2b51df 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -49,6 +49,8 @@ int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **);
int nilfs_ifile_delete_inode(struct inode *, ino_t);
int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **);
-struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size);
+int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root,
+ size_t inode_size, struct nilfs_inode *raw_inode,
+ struct inode **inodep);
#endif /* _NILFS_IFILE_H */
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index eccb2f2e231..71d4bc8464e 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -34,6 +34,12 @@
#include "cpfile.h"
#include "ifile.h"
+struct nilfs_iget_args {
+ u64 ino;
+ __u64 cno;
+ struct nilfs_root *root;
+ int for_gc;
+};
/**
* nilfs_get_block() - get a file block on the filesystem (callback function)
@@ -279,6 +285,7 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
struct nilfs_sb_info *sbi = NILFS_SB(sb);
struct inode *inode;
struct nilfs_inode_info *ii;
+ struct nilfs_root *root;
int err = -ENOMEM;
ino_t ino;
@@ -289,15 +296,17 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
mapping_set_gfp_mask(inode->i_mapping,
mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+ root = NILFS_I(dir)->i_root;
ii = NILFS_I(inode);
ii->i_state = 1 << NILFS_I_NEW;
+ ii->i_root = root;
- err = nilfs_ifile_create_inode(sbi->s_ifile, &ino, &ii->i_bh);
+ err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
if (unlikely(err))
goto failed_ifile_create_inode;
/* reference count of i_bh inherits from nilfs_mdt_read_block() */
- atomic_inc(&sbi->s_inodes_count);
+ atomic_inc(&root->inodes_count);
inode_init_owner(inode, dir, mode);
inode->i_ino = ino;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
@@ -320,7 +329,6 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
/* ii->i_file_acl = 0; */
/* ii->i_dir_acl = 0; */
ii->i_dir_start_lookup = 0;
- ii->i_cno = 0;
nilfs_set_inode_flags(inode);
spin_lock(&sbi->s_next_gen_lock);
inode->i_generation = sbi->s_next_generation++;
@@ -350,16 +358,6 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
return ERR_PTR(err);
}
-void nilfs_free_inode(struct inode *inode)
-{
- struct super_block *sb = inode->i_sb;
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
-
- /* XXX: check error code? Is there any thing I can do? */
- (void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
- atomic_dec(&sbi->s_inodes_count);
-}
-
void nilfs_set_inode_flags(struct inode *inode)
{
unsigned int flags = NILFS_I(inode)->i_flags;
@@ -410,7 +408,6 @@ int nilfs_read_inode_common(struct inode *inode,
0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
ii->i_dir_start_lookup = 0;
- ii->i_cno = 0;
inode->i_generation = le32_to_cpu(raw_inode->i_generation);
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
@@ -424,7 +421,8 @@ int nilfs_read_inode_common(struct inode *inode,
return 0;
}
-static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
+static int __nilfs_read_inode(struct super_block *sb,
+ struct nilfs_root *root, unsigned long ino,
struct inode *inode)
{
struct nilfs_sb_info *sbi = NILFS_SB(sb);
@@ -434,11 +432,11 @@ static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
int err;
down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */
- err = nilfs_ifile_get_inode_block(sbi->s_ifile, ino, &bh);
+ err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
if (unlikely(err))
goto bad_inode;
- raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, bh);
+ raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
err = nilfs_read_inode_common(inode, raw_inode);
if (err)
@@ -461,14 +459,14 @@ static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
inode, inode->i_mode,
huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
}
- nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
+ nilfs_ifile_unmap_inode(root->ifile, ino, bh);
brelse(bh);
up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */
nilfs_set_inode_flags(inode);
return 0;
failed_unmap:
- nilfs_ifile_unmap_inode(sbi->s_ifile, ino, bh);
+ nilfs_ifile_unmap_inode(root->ifile, ino, bh);
brelse(bh);
bad_inode:
@@ -476,18 +474,95 @@ static int __nilfs_read_inode(struct super_block *sb, unsigned long ino,
return err;
}
-struct inode *nilfs_iget(struct super_block *sb, unsigned long ino)
+static int nilfs_iget_test(struct inode *inode, void *opaque)
+{
+ struct nilfs_iget_args *args = opaque;
+ struct nilfs_inode_info *ii;
+
+ if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
+ return 0;
+
+ ii = NILFS_I(inode);
+ if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
+ return !args->for_gc;
+
+ return args->for_gc && args->cno == ii->i_cno;
+}
+
+static int nilfs_iget_set(struct inode *inode, void *opaque)
+{
+ struct nilfs_iget_args *args = opaque;
+
+ inode->i_ino = args->ino;
+ if (args->for_gc) {
+ NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
+ NILFS_I(inode)->i_cno = args->cno;
+ NILFS_I(inode)->i_root = NULL;
+ } else {
+ if (args->root && args->ino == NILFS_ROOT_INO)
+ nilfs_get_root(args->root);
+ NILFS_I(inode)->i_root = args->root;
+ }
+ return 0;
+}
+
+struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino)
+{
+ struct nilfs_iget_args args = {
+ .ino = ino, .root = root, .cno = 0, .for_gc = 0
+ };
+
+ return ilookup5(sb, ino, nilfs_iget_test, &args);
+}
+
+struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino)
+{
+ struct nilfs_iget_args args = {
+ .ino = ino, .root = root, .cno = 0, .for_gc = 0
+ };
+
+ return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
+}
+
+struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino)
{
struct inode *inode;
int err;
- inode = iget_locked(sb, ino);
+ inode = nilfs_iget_locked(sb, root, ino);
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
- err = __nilfs_read_inode(sb, ino, inode);
+ err = __nilfs_read_inode(sb, root, ino, inode);
+ if (unlikely(err)) {
+ iget_failed(inode);
+ return ERR_PTR(err);
+ }
+ unlock_new_inode(inode);
+ return inode;
+}
+
+struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
+ __u64 cno)
+{
+ struct nilfs_iget_args args = {
+ .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
+ };
+ struct inode *inode;
+ int err;
+
+ inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
+ if (unlikely(!inode))
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ err = nilfs_init_gcinode(inode);
if (unlikely(err)) {
iget_failed(inode);
return ERR_PTR(err);
@@ -528,21 +603,20 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
ino_t ino = inode->i_ino;
struct nilfs_inode_info *ii = NILFS_I(inode);
- struct super_block *sb = inode->i_sb;
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
+ struct inode *ifile = ii->i_root->ifile;
struct nilfs_inode *raw_inode;
- raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);
+ raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
- memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
+ memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
nilfs_write_inode_common(inode, raw_inode, 0);
/* XXX: call with has_bmap = 0 is a workaround to avoid
deadlock of bmap. This delays update of i_bmap to just
before writing */
- nilfs_ifile_unmap_inode(sbi->s_ifile, ino, ibh);
+ nilfs_ifile_unmap_inode(ifile, ino, ibh);
}
#define NILFS_MAX_TRUNCATE_BLOCKS 16384 /* 64MB for 4KB block */
@@ -617,6 +691,7 @@ void nilfs_truncate(struct inode *inode)
static void nilfs_clear_inode(struct inode *inode)
{
struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
/*
* Free resources allocated in nilfs_read_inode(), here.
@@ -625,10 +700,16 @@ static void nilfs_clear_inode(struct inode *inode)
brelse(ii->i_bh);
ii->i_bh = NULL;
+ if (mdi && mdi->mi_palloc_cache)
+ nilfs_palloc_destroy_cache(inode);
+
if (test_bit(NILFS_I_BMAP, &ii->i_state))
nilfs_bmap_clear(ii->i_bmap);
nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+
+ if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
+ nilfs_put_root(ii->i_root);
}
void nilfs_evict_inode(struct inode *inode)
@@ -637,7 +718,7 @@ void nilfs_evict_inode(struct inode *inode)
struct super_block *sb = inode->i_sb;
struct nilfs_inode_info *ii = NILFS_I(inode);
- if (inode->i_nlink || unlikely(is_bad_inode(inode))) {
+ if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
@@ -649,12 +730,16 @@ void nilfs_evict_inode(struct inode *inode)
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
+ /* TODO: some of the following operations may fail. */
nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);
end_writeback(inode);
+
+ nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
+ atomic_dec(&ii->i_root->inodes_count);
+
nilfs_clear_inode(inode);
- nilfs_free_inode(inode);
- /* nilfs_free_inode() marks inode buffer dirty */
+
if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
nilfs_transaction_commit(sb);
@@ -700,6 +785,17 @@ out_err:
return err;
}
+int nilfs_permission(struct inode *inode, int mask)
+{
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
+
+ if ((mask & MAY_WRITE) && root &&
+ root->cno != NILFS_CPTREE_CURRENT_CNO)
+ return -EROFS; /* snapshot is not writable */
+
+ return generic_permission(inode, mask, NULL);
+}
+
int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
struct buffer_head **pbh)
{
@@ -709,8 +805,8 @@ int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
spin_lock(&sbi->s_inode_lock);
if (ii->i_bh == NULL) {
spin_unlock(&sbi->s_inode_lock);
- err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
- pbh);
+ err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
+ inode->i_ino, pbh);
if (unlikely(err))
return err;
spin_lock(&sbi->s_inode_lock);
@@ -790,7 +886,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
}
nilfs_update_inode(inode, ibh);
nilfs_mdt_mark_buffer_dirty(ibh);
- nilfs_mdt_mark_dirty(sbi->s_ifile);
+ nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
brelse(ibh);
return 0;
}
@@ -808,6 +904,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
void nilfs_dirty_inode(struct inode *inode)
{
struct nilfs_transaction_info ti;
+ struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
if (is_bad_inode(inode)) {
nilfs_warning(inode->i_sb, __func__,
@@ -815,6 +912,10 @@ void nilfs_dirty_inode(struct inode *inode)
dump_stack();
return;
}
+ if (mdi) {
+ nilfs_mdt_mark_dirty(inode);
+ return;
+ }
nilfs_transaction_begin(inode->i_sb, &ti, 0);
nilfs_mark_inode_dirty(inode);
nilfs_transaction_commit(inode->i_sb); /* never fails */
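
The inode.c changes above key inode lookup on more than the inode number (the nilfs root, and for GC inodes a checkpoint number) by pairing iget5_locked() with test/set callbacks. A reduced sketch of that idiom, using a toy key of an inode number plus an opaque namespace pointer; the names are illustrative:

#include <linux/fs.h>

struct demo_key {
	unsigned long ino;
	void *ns;		/* extra key component beyond the inode number */
};

static int demo_iget_test(struct inode *inode, void *opaque)
{
	struct demo_key *key = opaque;

	return inode->i_ino == key->ino && inode->i_private == key->ns;
}

static int demo_iget_set(struct inode *inode, void *opaque)
{
	struct demo_key *key = opaque;

	inode->i_ino = key->ino;
	inode->i_private = key->ns;	/* record the extra key on the new inode */
	return 0;
}

static struct inode *demo_iget(struct super_block *sb, unsigned long ino, void *ns)
{
	struct demo_key key = { .ino = ino, .ns = ns };

	/* hash on ino; test/set disambiguate inodes that share that number */
	return iget5_locked(sb, ino, demo_iget_test, demo_iget_set, &key);
}
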
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index f90a33d9a5b..3e90f86d5bf 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -22,7 +22,6 @@
#include <linux/fs.h>
#include <linux/wait.h>
-#include <linux/smp_lock.h> /* lock_kernel(), unlock_kernel() */
#include <linux/slab.h>
#include <linux/capability.h> /* capable() */
#include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */
@@ -118,7 +117,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
goto out;
- mutex_lock(&nilfs->ns_mount_mutex);
+ down_read(&inode->i_sb->s_umount);
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
@@ -128,7 +127,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
else
nilfs_transaction_commit(inode->i_sb); /* never fails */
- mutex_unlock(&nilfs->ns_mount_mutex);
+ up_read(&inode->i_sb->s_umount);
out:
mnt_drop_write(filp->f_path.mnt);
return ret;
@@ -334,7 +333,7 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode,
return 0;
}
-static int nilfs_ioctl_move_blocks(struct the_nilfs *nilfs,
+static int nilfs_ioctl_move_blocks(struct super_block *sb,
struct nilfs_argv *argv, void *buf)
{
size_t nmembs = argv->v_nmembs;
@@ -349,7 +348,7 @@ static int nilfs_ioctl_move_blocks(struct the_nilfs *nilfs,
for (i = 0, vdesc = buf; i < nmembs; ) {
ino = vdesc->vd_ino;
cno = vdesc->vd_cno;
- inode = nilfs_gc_iget(nilfs, ino, cno);
+ inode = nilfs_iget_for_gc(sb, ino, cno);
if (unlikely(inode == NULL)) {
ret = -ENOMEM;
goto failed;
@@ -357,11 +356,15 @@ static int nilfs_ioctl_move_blocks(struct the_nilfs *nilfs,
do {
ret = nilfs_ioctl_move_inode_block(inode, vdesc,
&buffers);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
+ iput(inode);
goto failed;
+ }
vdesc++;
} while (++i < nmembs &&
vdesc->vd_ino == ino && vdesc->vd_cno == cno);
+
+ iput(inode); /* The inode still remains on the GC inode list */
}
list_for_each_entry_safe(bh, n, &buffers, b_assoc_buffers) {
@@ -567,7 +570,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
}
/*
- * nilfs_ioctl_move_blocks() will call nilfs_gc_iget(),
+ * nilfs_ioctl_move_blocks() will call nilfs_iget_for_gc(),
 * which operates on an inode list without blocking.
 * To protect the list from concurrent operations,
 * nilfs_ioctl_move_blocks() should be an atomic operation.
@@ -577,15 +580,16 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
goto out_free;
}
- ret = nilfs_ioctl_move_blocks(nilfs, &argv[0], kbufs[0]);
+ vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+ ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
if (ret < 0)
printk(KERN_ERR "NILFS: GC failed during preparation: "
"cannot read source blocks: err=%d\n", ret);
else
ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
- if (ret < 0)
- nilfs_remove_all_gcinode(nilfs);
+ nilfs_remove_all_gcinodes(nilfs);
clear_nilfs_gc_running(nilfs);
out_free:
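
nilfs_ioctl_move_blocks() now takes the super block, looks the GC inode up once per run of descriptors sharing the same (ino, cno) pair, and drops its own reference with iput() once the group is processed, relying on the GC inode list to keep the inode alive. The userspace sketch below illustrates only that grouping pattern; struct vdesc_model and process_group() are invented names, not the kernel loop.

/* Grouping consecutive descriptors by (ino, cno); illustrative only. */
#include <stdio.h>

struct vdesc_model {
	unsigned long ino;
	unsigned long long cno;
};

static void process_group(const struct vdesc_model *v, size_t n)
{
	printf("inode %lu@%llu: %zu blocks\n", v->ino, v->cno, n);
}

int main(void)
{
	struct vdesc_model buf[] = {
		{ 12, 3 }, { 12, 3 }, { 12, 3 }, { 30, 3 }, { 30, 4 },
	};
	size_t nmembs = sizeof(buf) / sizeof(buf[0]);
	size_t i = 0;

	while (i < nmembs) {
		size_t start = i;
		unsigned long ino = buf[i].ino;
		unsigned long long cno = buf[i].cno;

		/* the kernel loop does its iget here, once per group */
		do {
			i++;
		} while (i < nmembs &&
			 buf[i].ino == ino && buf[i].cno == cno);

		process_group(&buf[start], i - start);
		/* ...and its matching iput here; the GC inode list keeps
		 * the inode itself alive until cleanup */
	}
	return 0;
}
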
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index d01aff4957d..39a5b84e2c9 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -36,7 +36,6 @@
#define NILFS_MDT_MAX_RA_BLOCKS (16 - 1)
-#define INIT_UNUSED_INODE_FIELDS
static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
@@ -78,25 +77,11 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
struct buffer_head *,
void *))
{
- struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs;
struct super_block *sb = inode->i_sb;
struct nilfs_transaction_info ti;
struct buffer_head *bh;
int err;
- if (!sb) {
- /*
- * Make sure this function is not called from any
- * read-only context.
- */
- if (!nilfs->ns_writer) {
- WARN_ON(1);
- err = -EROFS;
- goto out;
- }
- sb = nilfs->ns_writer->s_super;
- }
-
nilfs_transaction_begin(sb, &ti, 0);
err = -ENOMEM;
@@ -112,7 +97,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
if (buffer_uptodate(bh))
goto failed_bh;
- bh->b_bdev = nilfs->ns_bdev;
+ bh->b_bdev = sb->s_bdev;
err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
if (likely(!err)) {
get_bh(bh);
@@ -129,7 +114,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
err = nilfs_transaction_commit(sb);
else
nilfs_transaction_abort(sb);
- out:
+
return err;
}
@@ -167,9 +152,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
unlock_buffer(bh);
goto failed_bh;
}
- bh->b_bdev = NILFS_MDT(inode)->mi_nilfs->ns_bdev;
- bh->b_blocknr = (sector_t)blknum;
- set_buffer_mapped(bh);
+ map_bh(bh, inode->i_sb, (sector_t)blknum);
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
@@ -398,35 +381,24 @@ int nilfs_mdt_fetch_dirty(struct inode *inode)
static int
nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
{
- struct inode *inode = container_of(page->mapping,
- struct inode, i_data);
- struct super_block *sb = inode->i_sb;
- struct the_nilfs *nilfs = NILFS_MDT(inode)->mi_nilfs;
- struct nilfs_sb_info *writer = NULL;
+ struct inode *inode;
+ struct super_block *sb;
int err = 0;
redirty_page_for_writepage(wbc, page);
unlock_page(page);
- if (page->mapping->assoc_mapping)
- return 0; /* Do not request flush for shadow page cache */
- if (!sb) {
- down_read(&nilfs->ns_writer_sem);
- writer = nilfs->ns_writer;
- if (!writer) {
- up_read(&nilfs->ns_writer_sem);
- return -EROFS;
- }
- sb = writer->s_super;
- }
+ inode = page->mapping->host;
+ if (!inode)
+ return 0;
+
+ sb = inode->i_sb;
if (wbc->sync_mode == WB_SYNC_ALL)
err = nilfs_construct_segment(sb);
else if (wbc->for_reclaim)
nilfs_flush_segment(sb, inode->i_ino);
- if (writer)
- up_read(&nilfs->ns_writer_sem);
return err;
}
@@ -439,105 +411,27 @@ static const struct address_space_operations def_mdt_aops = {
static const struct inode_operations def_mdt_iops;
static const struct file_operations def_mdt_fops;
-/*
- * NILFS2 uses pseudo inodes for meta data files such as DAT, cpfile, sufile,
- * ifile, or gcinodes. This allows the B-tree code and segment constructor
- * to treat them like regular files, and this helps to simplify the
- * implementation.
- * On the other hand, some of the pseudo inodes have an irregular point:
- * They don't have valid inode->i_sb pointer because their lifetimes are
- * longer than those of the super block structs; they may continue for
- * several consecutive mounts/umounts. This would need discussions.
- */
-/**
- * nilfs_mdt_new_common - allocate a pseudo inode for metadata file
- * @nilfs: nilfs object
- * @sb: super block instance the metadata file belongs to
- * @ino: inode number
- * @gfp_mask: gfp mask for data pages
- * @objsz: size of the private object attached to inode->i_private
- */
-struct inode *
-nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
- ino_t ino, gfp_t gfp_mask, size_t objsz)
+
+int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
{
- struct inode *inode = nilfs_alloc_inode_common(nilfs);
+ struct nilfs_mdt_info *mi;
- if (!inode)
- return NULL;
- else {
- struct address_space * const mapping = &inode->i_data;
- struct nilfs_mdt_info *mi;
-
- mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
- if (!mi) {
- nilfs_destroy_inode(inode);
- return NULL;
- }
- mi->mi_nilfs = nilfs;
- init_rwsem(&mi->mi_sem);
-
- inode->i_sb = sb; /* sb may be NULL for some meta data files */
- inode->i_blkbits = nilfs->ns_blocksize_bits;
- inode->i_flags = 0;
- atomic_set(&inode->i_count, 1);
- inode->i_nlink = 1;
- inode->i_ino = ino;
- inode->i_mode = S_IFREG;
- inode->i_private = mi;
-
-#ifdef INIT_UNUSED_INODE_FIELDS
- atomic_set(&inode->i_writecount, 0);
- inode->i_size = 0;
- inode->i_blocks = 0;
- inode->i_bytes = 0;
- inode->i_generation = 0;
-#ifdef CONFIG_QUOTA
- memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
-#endif
- inode->i_pipe = NULL;
- inode->i_bdev = NULL;
- inode->i_cdev = NULL;
- inode->i_rdev = 0;
-#ifdef CONFIG_SECURITY
- inode->i_security = NULL;
-#endif
- inode->dirtied_when = 0;
-
- INIT_LIST_HEAD(&inode->i_list);
- INIT_LIST_HEAD(&inode->i_sb_list);
- inode->i_state = 0;
-#endif
-
- spin_lock_init(&inode->i_lock);
- mutex_init(&inode->i_mutex);
- init_rwsem(&inode->i_alloc_sem);
-
- mapping->host = NULL; /* instead of inode */
- mapping->flags = 0;
- mapping_set_gfp_mask(mapping, gfp_mask);
- mapping->assoc_mapping = NULL;
- mapping->backing_dev_info = nilfs->ns_bdi;
-
- inode->i_mapping = mapping;
- }
+ mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
+ if (!mi)
+ return -ENOMEM;
- return inode;
-}
+ init_rwsem(&mi->mi_sem);
+ inode->i_private = mi;
-struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb,
- ino_t ino, size_t objsz)
-{
- struct inode *inode;
-
- inode = nilfs_mdt_new_common(nilfs, sb, ino, NILFS_MDT_GFP, objsz);
- if (!inode)
- return NULL;
+ inode->i_mode = S_IFREG;
+ mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
+ inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
inode->i_op = &def_mdt_iops;
inode->i_fop = &def_mdt_fops;
inode->i_mapping->a_ops = &def_mdt_aops;
- return inode;
+
+ return 0;
}
void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
@@ -550,34 +444,159 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}
-void nilfs_mdt_set_shadow(struct inode *orig, struct inode *shadow)
+static const struct address_space_operations shadow_map_aops = {
+ .sync_page = block_sync_page,
+};
+
+/**
+ * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
+ * @inode: inode of the metadata file
+ * @shadow: shadow mapping
+ */
+int nilfs_mdt_setup_shadow_map(struct inode *inode,
+ struct nilfs_shadow_map *shadow)
{
- shadow->i_mapping->assoc_mapping = orig->i_mapping;
- NILFS_I(shadow)->i_btnode_cache.assoc_mapping =
- &NILFS_I(orig)->i_btnode_cache;
+ struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+ struct backing_dev_info *bdi = inode->i_sb->s_bdi;
+
+ INIT_LIST_HEAD(&shadow->frozen_buffers);
+ nilfs_mapping_init_once(&shadow->frozen_data);
+ nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
+ nilfs_mapping_init_once(&shadow->frozen_btnodes);
+ nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
+ mi->mi_shadow = shadow;
+ return 0;
}
-static void nilfs_mdt_clear(struct inode *inode)
+/**
+ * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
+ * @inode: inode of the metadata file
+ */
+int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
+ struct nilfs_mdt_info *mi = NILFS_MDT(inode);
struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct nilfs_shadow_map *shadow = mi->mi_shadow;
+ int ret;
- invalidate_mapping_pages(inode->i_mapping, 0, -1);
- truncate_inode_pages(inode->i_mapping, 0);
+ ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+ if (ret)
+ goto out;
+
+ ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
+ &ii->i_btnode_cache);
+ if (ret)
+ goto out;
- if (test_bit(NILFS_I_BMAP, &ii->i_state))
- nilfs_bmap_clear(ii->i_bmap);
- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+ nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
+ out:
+ return ret;
}
-void nilfs_mdt_destroy(struct inode *inode)
+int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
{
- struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+ struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
+ struct buffer_head *bh_frozen;
+ struct page *page;
+ int blkbits = inode->i_blkbits;
+ int ret = -ENOMEM;
+
+ page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
+ if (!page)
+ return ret;
+
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, 1 << blkbits, 0);
+
+ bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);
+ if (bh_frozen) {
+ if (!buffer_uptodate(bh_frozen))
+ nilfs_copy_buffer(bh_frozen, bh);
+ if (list_empty(&bh_frozen->b_assoc_buffers)) {
+ list_add_tail(&bh_frozen->b_assoc_buffers,
+ &shadow->frozen_buffers);
+ set_buffer_nilfs_redirected(bh);
+ } else {
+ brelse(bh_frozen); /* already frozen */
+ }
+ ret = 0;
+ }
+ unlock_page(page);
+ page_cache_release(page);
+ return ret;
+}
+
+struct buffer_head *
+nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
+{
+ struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
+ struct buffer_head *bh_frozen = NULL;
+ struct page *page;
+ int n;
+
+ page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
+ if (page) {
+ if (page_has_buffers(page)) {
+ n = bh_offset(bh) >> inode->i_blkbits;
+ bh_frozen = nilfs_page_get_nth_block(page, n);
+ }
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ return bh_frozen;
+}
+
+static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
+{
+ struct list_head *head = &shadow->frozen_buffers;
+ struct buffer_head *bh;
+
+ while (!list_empty(head)) {
+ bh = list_first_entry(head, struct buffer_head,
+ b_assoc_buffers);
+ list_del_init(&bh->b_assoc_buffers);
+ brelse(bh); /* drop ref-count to make it releasable */
+ }
+}
+
+/**
+ * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
+ * @inode: inode of the metadata file
+ */
+void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
+{
+ struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+ struct nilfs_inode_info *ii = NILFS_I(inode);
+ struct nilfs_shadow_map *shadow = mi->mi_shadow;
+
+ down_write(&mi->mi_sem);
- if (mdi->mi_palloc_cache)
- nilfs_palloc_destroy_cache(inode);
- nilfs_mdt_clear(inode);
+ if (mi->mi_palloc_cache)
+ nilfs_palloc_clear_cache(inode);
+
+ nilfs_clear_dirty_pages(inode->i_mapping);
+ nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+
+ nilfs_clear_dirty_pages(&ii->i_btnode_cache);
+ nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+
+ nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
+
+ up_write(&mi->mi_sem);
+}
+
+/**
+ * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
+ * @inode: inode of the metadata file
+ */
+void nilfs_mdt_clear_shadow_map(struct inode *inode)
+{
+ struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+ struct nilfs_shadow_map *shadow = mi->mi_shadow;
- kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
- kfree(mdi);
- nilfs_destroy_inode(inode);
+ down_write(&mi->mi_sem);
+ nilfs_release_frozen_buffers(shadow);
+ truncate_inode_pages(&shadow->frozen_data, 0);
+ truncate_inode_pages(&shadow->frozen_btnodes, 0);
+ up_write(&mi->mi_sem);
}
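
The shadow-map helpers above replace the old GC DAT inode: before segment cleaning, the DAT's dirty pages and bmap state are frozen into a shadow copy, which is discarded on success or copied back if the operation fails. The following is a deliberately tiny userspace model of that save/restore idea; every name is invented and no kernel structure is implied.

/* Save/restore model behind the shadow map; illustrative only. */
#include <stdio.h>
#include <string.h>

struct state  { char data[16]; };
struct shadow { struct state frozen; };

static void save_to_shadow(struct shadow *sh, const struct state *live)
{
	sh->frozen = *live;			/* freeze a copy aside */
}

static void restore_from_shadow(struct state *live, const struct shadow *sh)
{
	*live = sh->frozen;			/* roll back on failure */
}

static int risky_update(struct state *live)
{
	strcpy(live->data, "garbled");		/* partial update... */
	return -1;				/* ...that fails */
}

int main(void)
{
	struct state live = { "checkpoint-17" };
	struct shadow sh;

	save_to_shadow(&sh, &live);
	if (risky_update(&live) < 0)
		restore_from_shadow(&live, &sh);
	printf("%s\n", live.data);		/* prints "checkpoint-17" */
	return 0;
}
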
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index 6c4bbb0470f..b13734bf352 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -28,26 +28,33 @@
#include "nilfs.h"
#include "page.h"
+struct nilfs_shadow_map {
+ struct nilfs_bmap_store bmap_store;
+ struct address_space frozen_data;
+ struct address_space frozen_btnodes;
+ struct list_head frozen_buffers;
+};
+
/**
* struct nilfs_mdt_info - on-memory private data of meta data files
- * @mi_nilfs: back pointer to the_nilfs struct
* @mi_sem: reader/writer semaphore for meta data operations
* @mi_bgl: per-blockgroup locking
* @mi_entry_size: size of an entry
* @mi_first_entry_offset: offset to the first entry
* @mi_entries_per_block: number of entries in a block
* @mi_palloc_cache: persistent object allocator cache
+ * @mi_shadow: shadow of bmap and page caches
* @mi_blocks_per_group: number of blocks in a group
* @mi_blocks_per_desc_block: number of blocks per descriptor block
*/
struct nilfs_mdt_info {
- struct the_nilfs *mi_nilfs;
struct rw_semaphore mi_sem;
struct blockgroup_lock *mi_bgl;
unsigned mi_entry_size;
unsigned mi_first_entry_offset;
unsigned long mi_entries_per_block;
struct nilfs_palloc_cache *mi_palloc_cache;
+ struct nilfs_shadow_map *mi_shadow;
unsigned long mi_blocks_per_group;
unsigned long mi_blocks_per_desc_block;
};
@@ -59,9 +66,7 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode)
static inline struct the_nilfs *NILFS_I_NILFS(struct inode *inode)
{
- struct super_block *sb = inode->i_sb;
-
- return sb ? NILFS_SB(sb)->s_nilfs : NILFS_MDT(inode)->mi_nilfs;
+ return NILFS_SB(inode->i_sb)->s_nilfs;
}
/* Default GFP flags using highmem */
@@ -76,14 +81,17 @@ int nilfs_mdt_forget_block(struct inode *, unsigned long);
int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long);
int nilfs_mdt_fetch_dirty(struct inode *);
-struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t,
- size_t);
-struct inode *nilfs_mdt_new_common(struct the_nilfs *, struct super_block *,
- ino_t, gfp_t, size_t);
-void nilfs_mdt_destroy(struct inode *);
+int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz);
void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned);
-void nilfs_mdt_set_shadow(struct inode *, struct inode *);
+int nilfs_mdt_setup_shadow_map(struct inode *inode,
+ struct nilfs_shadow_map *shadow);
+int nilfs_mdt_save_to_shadow_map(struct inode *inode);
+void nilfs_mdt_restore_from_shadow_map(struct inode *inode);
+void nilfs_mdt_clear_shadow_map(struct inode *inode);
+int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh);
+struct buffer_head *nilfs_mdt_get_frozen_buffer(struct inode *inode,
+ struct buffer_head *bh);
#define nilfs_mdt_mark_buffer_dirty(bh) nilfs_mark_buffer_dirty(bh)
@@ -100,7 +108,7 @@ static inline void nilfs_mdt_clear_dirty(struct inode *inode)
static inline __u64 nilfs_mdt_cno(struct inode *inode)
{
- return NILFS_MDT(inode)->mi_nilfs->ns_cno;
+ return NILFS_I_NILFS(inode)->ns_cno;
}
#define nilfs_mdt_bgl_lock(inode, bg) \
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index ad6ed2cf19b..185d1607cb0 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -40,7 +40,11 @@
#include <linux/pagemap.h>
#include "nilfs.h"
+#include "export.h"
+#define NILFS_FID_SIZE_NON_CONNECTABLE \
+ (offsetof(struct nilfs_fid, parent_gen) / 4)
+#define NILFS_FID_SIZE_CONNECTABLE (sizeof(struct nilfs_fid) / 4)
static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
{
@@ -70,29 +74,13 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
ino = nilfs_inode_by_name(dir, &dentry->d_name);
inode = NULL;
if (ino) {
- inode = nilfs_iget(dir->i_sb, ino);
+ inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
-struct dentry *nilfs_get_parent(struct dentry *child)
-{
- unsigned long ino;
- struct inode *inode;
- struct qstr dotdot = {.name = "..", .len = 2};
-
- ino = nilfs_inode_by_name(child->d_inode, &dotdot);
- if (!ino)
- return ERR_PTR(-ENOENT);
-
- inode = nilfs_iget(child->d_inode->i_sb, ino);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
- return d_obtain_alias(inode);
-}
-
/*
* By the time this is called, we already have created
* the directory cache entry for the new file, but it
@@ -468,6 +456,115 @@ out:
return err;
}
+/*
+ * Export operations
+ */
+static struct dentry *nilfs_get_parent(struct dentry *child)
+{
+ unsigned long ino;
+ struct inode *inode;
+ struct qstr dotdot = {.name = "..", .len = 2};
+ struct nilfs_root *root;
+
+ ino = nilfs_inode_by_name(child->d_inode, &dotdot);
+ if (!ino)
+ return ERR_PTR(-ENOENT);
+
+ root = NILFS_I(child->d_inode)->i_root;
+
+ inode = nilfs_iget(child->d_inode->i_sb, root, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ return d_obtain_alias(inode);
+}
+
+static struct dentry *nilfs_get_dentry(struct super_block *sb, u64 cno,
+ u64 ino, u32 gen)
+{
+ struct nilfs_root *root;
+ struct inode *inode;
+
+ if (ino < NILFS_FIRST_INO(sb) && ino != NILFS_ROOT_INO)
+ return ERR_PTR(-ESTALE);
+
+ root = nilfs_lookup_root(NILFS_SB(sb)->s_nilfs, cno);
+ if (!root)
+ return ERR_PTR(-ESTALE);
+
+ inode = nilfs_iget(sb, root, ino);
+ nilfs_put_root(root);
+
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (gen && inode->i_generation != gen) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
+ return d_obtain_alias(inode);
+}
+
+static struct dentry *nilfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
+ int fh_len, int fh_type)
+{
+ struct nilfs_fid *fid = (struct nilfs_fid *)fh;
+
+ if ((fh_len != NILFS_FID_SIZE_NON_CONNECTABLE &&
+ fh_len != NILFS_FID_SIZE_CONNECTABLE) ||
+ (fh_type != FILEID_NILFS_WITH_PARENT &&
+ fh_type != FILEID_NILFS_WITHOUT_PARENT))
+ return NULL;
+
+ return nilfs_get_dentry(sb, fid->cno, fid->ino, fid->gen);
+}
+
+static struct dentry *nilfs_fh_to_parent(struct super_block *sb, struct fid *fh,
+ int fh_len, int fh_type)
+{
+ struct nilfs_fid *fid = (struct nilfs_fid *)fh;
+
+ if (fh_len != NILFS_FID_SIZE_CONNECTABLE ||
+ fh_type != FILEID_NILFS_WITH_PARENT)
+ return NULL;
+
+ return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen);
+}
+
+static int nilfs_encode_fh(struct dentry *dentry, __u32 *fh, int *lenp,
+ int connectable)
+{
+ struct nilfs_fid *fid = (struct nilfs_fid *)fh;
+ struct inode *inode = dentry->d_inode;
+ struct nilfs_root *root = NILFS_I(inode)->i_root;
+ int type;
+
+ if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE ||
+ (connectable && *lenp < NILFS_FID_SIZE_CONNECTABLE))
+ return 255;
+
+ fid->cno = root->cno;
+ fid->ino = inode->i_ino;
+ fid->gen = inode->i_generation;
+
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
+
+ spin_lock(&dentry->d_lock);
+ parent = dentry->d_parent->d_inode;
+ fid->parent_ino = parent->i_ino;
+ fid->parent_gen = parent->i_generation;
+ spin_unlock(&dentry->d_lock);
+
+ type = FILEID_NILFS_WITH_PARENT;
+ *lenp = NILFS_FID_SIZE_CONNECTABLE;
+ } else {
+ type = FILEID_NILFS_WITHOUT_PARENT;
+ *lenp = NILFS_FID_SIZE_NON_CONNECTABLE;
+ }
+
+ return type;
+}
+
const struct inode_operations nilfs_dir_inode_operations = {
.create = nilfs_create,
.lookup = nilfs_lookup,
@@ -491,4 +588,12 @@ const struct inode_operations nilfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
+ .permission = nilfs_permission,
+};
+
+const struct export_operations nilfs_export_ops = {
+ .encode_fh = nilfs_encode_fh,
+ .fh_to_dentry = nilfs_fh_to_dentry,
+ .fh_to_parent = nilfs_fh_to_parent,
+ .get_parent = nilfs_get_parent,
};
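
The export operations above replace the generic file-handle helpers with NILFS-specific ones so that a handle also identifies the checkpoint (cno) the dentry belongs to; handle lengths are counted in 32-bit words, and the connectable variant additionally carries the parent's inode number and generation. Below is a hypothetical userspace sketch of that layout and length check; struct fid_model and the type constants are stand-ins, not the definitions from export.h.

/* Hypothetical sketch of the file-handle layout; names are invented. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fid_model {
	uint64_t cno;		/* checkpoint number of the tree */
	uint64_t ino;		/* inode number */
	uint32_t gen;		/* inode generation */
	uint32_t parent_gen;	/* parent generation (connectable only) */
	uint64_t parent_ino;	/* parent inode (connectable only) */
};

enum {				/* invented type values */
	FID_MODEL_WITHOUT_PARENT = 1,
	FID_MODEL_WITH_PARENT = 2,
};

static int encode_fh_model(const struct fid_model *fid, unsigned *lenp,
			   int connectable)
{
	unsigned need = connectable ?
		sizeof(*fid) / 4 :			    /* 8 words */
		offsetof(struct fid_model, parent_gen) / 4; /* 5 words */

	if (*lenp < need)
		return 255;	/* caller's buffer is too small */
	*lenp = need;
	(void)fid;		/* a real encoder would copy the fields out */
	return connectable ? FID_MODEL_WITH_PARENT : FID_MODEL_WITHOUT_PARENT;
}

int main(void)
{
	struct fid_model fid = { .cno = 5, .ino = 12, .gen = 1,
				 .parent_ino = 2, .parent_gen = 1 };
	unsigned len = 16;	/* space available, in 32-bit words */
	int type = encode_fh_model(&fid, &len, 1);

	printf("type=%d len=%u words\n", type, len);
	return 0;
}
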
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index d3d54046e5f..f7560da5a56 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -59,6 +59,7 @@ struct nilfs_inode_info {
#endif
struct buffer_head *i_bh; /* i_bh contains a new or dirty
disk inode */
+ struct nilfs_root *i_root;
struct inode vfs_inode;
};
@@ -100,7 +101,6 @@ enum {
NILFS_I_INODE_DIRTY, /* write_inode is requested */
NILFS_I_BMAP, /* has bmap and btnode_cache */
NILFS_I_GCINODE, /* inode for GC, on memory only */
- NILFS_I_GCDAT, /* shadow DAT, on memory only */
};
/*
@@ -192,7 +192,7 @@ static inline int nilfs_doing_construction(void)
static inline struct inode *nilfs_dat_inode(const struct the_nilfs *nilfs)
{
- return nilfs_doing_gc() ? nilfs->ns_gc_dat : nilfs->ns_dat;
+ return nilfs->ns_dat;
}
/*
@@ -200,12 +200,9 @@ static inline struct inode *nilfs_dat_inode(const struct the_nilfs *nilfs)
*/
#ifdef CONFIG_NILFS_POSIX_ACL
#error "NILFS: not yet supported POSIX ACL"
-extern int nilfs_permission(struct inode *, int, struct nameidata *);
extern int nilfs_acl_chmod(struct inode *);
extern int nilfs_init_acl(struct inode *, struct inode *);
#else
-#define nilfs_permission NULL
-
static inline int nilfs_acl_chmod(struct inode *inode)
{
return 0;
@@ -247,11 +244,19 @@ extern int nilfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern void nilfs_set_inode_flags(struct inode *);
extern int nilfs_read_inode_common(struct inode *, struct nilfs_inode *);
extern void nilfs_write_inode_common(struct inode *, struct nilfs_inode *, int);
-extern struct inode *nilfs_iget(struct super_block *, unsigned long);
+struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino);
+struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino);
+struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
+ unsigned long ino);
+extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
+ unsigned long ino, __u64 cno);
extern void nilfs_update_inode(struct inode *, struct buffer_head *);
extern void nilfs_truncate(struct inode *);
extern void nilfs_evict_inode(struct inode *);
extern int nilfs_setattr(struct dentry *, struct iattr *);
+int nilfs_permission(struct inode *inode, int mask);
extern int nilfs_load_inode_block(struct nilfs_sb_info *, struct inode *,
struct buffer_head **);
extern int nilfs_inode_dirty(struct inode *);
@@ -260,11 +265,7 @@ extern int nilfs_set_file_dirty(struct nilfs_sb_info *, struct inode *,
extern int nilfs_mark_inode_dirty(struct inode *);
extern void nilfs_dirty_inode(struct inode *);
-/* namei.c */
-extern struct dentry *nilfs_get_parent(struct dentry *);
-
/* super.c */
-extern struct inode *nilfs_alloc_inode_common(struct the_nilfs *);
extern struct inode *nilfs_alloc_inode(struct super_block *);
extern void nilfs_destroy_inode(struct inode *);
extern void nilfs_error(struct super_block *, const char *, const char *, ...)
@@ -283,8 +284,9 @@ extern struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *,
int flip);
extern int nilfs_commit_super(struct nilfs_sb_info *, int);
extern int nilfs_cleanup_super(struct nilfs_sb_info *);
-extern int nilfs_attach_checkpoint(struct nilfs_sb_info *, __u64);
-extern void nilfs_detach_checkpoint(struct nilfs_sb_info *);
+int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno, int curr_mnt,
+ struct nilfs_root **root);
+int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno);
/* gcinode.c */
int nilfs_gccache_submit_read_data(struct inode *, sector_t, sector_t, __u64,
@@ -292,16 +294,8 @@ int nilfs_gccache_submit_read_data(struct inode *, sector_t, sector_t, __u64,
int nilfs_gccache_submit_read_node(struct inode *, sector_t, __u64,
struct buffer_head **);
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *);
-int nilfs_init_gccache(struct the_nilfs *);
-void nilfs_destroy_gccache(struct the_nilfs *);
-void nilfs_clear_gcinode(struct inode *);
-struct inode *nilfs_gc_iget(struct the_nilfs *, ino_t, __u64);
-void nilfs_remove_all_gcinode(struct the_nilfs *);
-
-/* gcdat.c */
-int nilfs_init_gcdat_inode(struct the_nilfs *);
-void nilfs_commit_gcdat_inode(struct the_nilfs *);
-void nilfs_clear_gcdat_inode(struct the_nilfs *);
+int nilfs_init_gcinode(struct inode *inode);
+void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs);
/*
* Inodes and files operations
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index aab11db2cb0..a6c3c2e817f 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -79,8 +79,8 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
{
int blkbits = inode->i_blkbits;
pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
- struct page *page, *opage;
- struct buffer_head *bh, *obh;
+ struct page *page;
+ struct buffer_head *bh;
page = grab_cache_page(mapping, index);
if (unlikely(!page))
@@ -92,30 +92,6 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode,
page_cache_release(page);
return NULL;
}
- if (!buffer_uptodate(bh) && mapping->assoc_mapping != NULL) {
- /*
- * Shadow page cache uses assoc_mapping to point its original
- * page cache. The following code tries the original cache
- * if the given cache is a shadow and it didn't hit.
- */
- opage = find_lock_page(mapping->assoc_mapping, index);
- if (!opage)
- return bh;
-
- obh = __nilfs_get_page_block(opage, blkoff, index, blkbits,
- b_state);
- if (buffer_uptodate(obh)) {
- nilfs_copy_buffer(bh, obh);
- if (buffer_dirty(obh)) {
- nilfs_mark_buffer_dirty(bh);
- if (!buffer_nilfs_node(bh) && NILFS_MDT(inode))
- nilfs_mdt_mark_dirty(inode);
- }
- }
- brelse(obh);
- unlock_page(opage);
- page_cache_release(opage);
- }
return bh;
}
@@ -131,6 +107,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
lock_buffer(bh);
clear_buffer_nilfs_volatile(bh);
clear_buffer_nilfs_checked(bh);
+ clear_buffer_nilfs_redirected(bh);
clear_buffer_dirty(bh);
if (nilfs_page_buffers_clean(page))
__nilfs_clear_page_dirty(page);
@@ -483,6 +460,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
clear_buffer_dirty(bh);
clear_buffer_nilfs_volatile(bh);
clear_buffer_nilfs_checked(bh);
+ clear_buffer_nilfs_redirected(bh);
clear_buffer_uptodate(bh);
clear_buffer_mapped(bh);
unlock_buffer(bh);
@@ -513,6 +491,31 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
}
return nc;
}
+
+void nilfs_mapping_init_once(struct address_space *mapping)
+{
+ memset(mapping, 0, sizeof(*mapping));
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ spin_lock_init(&mapping->tree_lock);
+ INIT_LIST_HEAD(&mapping->private_list);
+ spin_lock_init(&mapping->private_lock);
+
+ spin_lock_init(&mapping->i_mmap_lock);
+ INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+ INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+}
+
+void nilfs_mapping_init(struct address_space *mapping,
+ struct backing_dev_info *bdi,
+ const struct address_space_operations *aops)
+{
+ mapping->host = NULL;
+ mapping->flags = 0;
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
+ mapping->assoc_mapping = NULL;
+ mapping->backing_dev_info = bdi;
+ mapping->a_ops = aops;
+}
/*
* NILFS2 needs clear_page_dirty() in the following two cases:
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index f53d8da41ed..fb9e8a8a203 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -35,12 +35,14 @@ enum {
BH_NILFS_Node,
BH_NILFS_Volatile,
BH_NILFS_Checked,
+ BH_NILFS_Redirected,
};
BUFFER_FNS(NILFS_Allocated, nilfs_allocated) /* nilfs private buffers */
BUFFER_FNS(NILFS_Node, nilfs_node) /* nilfs node buffers */
BUFFER_FNS(NILFS_Volatile, nilfs_volatile)
BUFFER_FNS(NILFS_Checked, nilfs_checked) /* buffer is verified */
+BUFFER_FNS(NILFS_Redirected, nilfs_redirected) /* redirected to a copy */
void nilfs_mark_buffer_dirty(struct buffer_head *bh);
@@ -59,6 +61,10 @@ void nilfs_free_private_page(struct page *);
int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
void nilfs_copy_back_pages(struct address_space *, struct address_space *);
void nilfs_clear_dirty_pages(struct address_space *);
+void nilfs_mapping_init_once(struct address_space *mapping);
+void nilfs_mapping_init(struct address_space *mapping,
+ struct backing_dev_info *bdi,
+ const struct address_space_operations *aops);
unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned);
#define NILFS_PAGE_BUG(page, m, a...) \
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index d0c35ef39f6..5d2711c28da 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -440,7 +440,6 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
segnum[2] = ri->ri_segnum;
segnum[3] = ri->ri_nextnum;
- nilfs_attach_writer(nilfs, sbi);
/*
* Releasing the next segment of the latest super root.
* The next segment is invalidated by this recovery.
@@ -480,7 +479,6 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
failed:
/* No need to recover sufile because it will be destroyed on error */
- nilfs_detach_writer(nilfs, sbi);
return err;
}
@@ -504,6 +502,7 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
struct nilfs_sb_info *sbi,
+ struct nilfs_root *root,
struct list_head *head,
unsigned long *nr_salvaged_blocks)
{
@@ -515,7 +514,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
int err = 0, err2 = 0;
list_for_each_entry_safe(rb, n, head, list) {
- inode = nilfs_iget(sbi->s_super, rb->ino);
+ inode = nilfs_iget(sbi->s_super, root, rb->ino);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
inode = NULL;
@@ -578,6 +577,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
*/
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
struct nilfs_sb_info *sbi,
+ struct nilfs_root *root,
struct nilfs_recovery_info *ri)
{
struct buffer_head *bh_sum = NULL;
@@ -597,7 +597,6 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
};
int state = RF_INIT_ST;
- nilfs_attach_writer(nilfs, sbi);
pseg_start = ri->ri_lsegs_start;
seg_seq = ri->ri_lsegs_start_seq;
segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
@@ -649,7 +648,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
goto failed;
if (flags & NILFS_SS_LOGEND) {
err = nilfs_recover_dsync_blocks(
- nilfs, sbi, &dsync_blocks,
+ nilfs, sbi, root, &dsync_blocks,
&nsalvaged_blocks);
if (unlikely(err))
goto failed;
@@ -688,7 +687,6 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
out:
brelse(bh_sum);
dispose_recovery_list(&dsync_blocks);
- nilfs_detach_writer(nilfs, sbi);
return err;
confused:
@@ -746,19 +744,20 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
struct nilfs_sb_info *sbi,
struct nilfs_recovery_info *ri)
{
+ struct nilfs_root *root;
int err;
if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
return 0;
- err = nilfs_attach_checkpoint(sbi, ri->ri_cno);
+ err = nilfs_attach_checkpoint(sbi, ri->ri_cno, true, &root);
if (unlikely(err)) {
printk(KERN_ERR
"NILFS: error loading the latest checkpoint.\n");
return err;
}
- err = nilfs_do_roll_forward(nilfs, sbi, ri);
+ err = nilfs_do_roll_forward(nilfs, sbi, root, ri);
if (unlikely(err))
goto failed;
@@ -770,7 +769,7 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
goto failed;
}
- err = nilfs_attach_segment_constructor(sbi);
+ err = nilfs_attach_segment_constructor(sbi, root);
if (unlikely(err))
goto failed;
@@ -788,7 +787,7 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
}
failed:
- nilfs_detach_checkpoint(sbi);
+ nilfs_put_root(root);
return err;
}
diff --git a/fs/nilfs2/sb.h b/fs/nilfs2/sb.h
index 0776ccc2504..35a07157b98 100644
--- a/fs/nilfs2/sb.h
+++ b/fs/nilfs2/sb.h
@@ -42,11 +42,6 @@ struct nilfs_sc_info;
* NILFS super-block data in memory
*/
struct nilfs_sb_info {
- /* Snapshot status */
- __u64 s_snapshot_cno; /* Checkpoint number */
- atomic_t s_inodes_count;
- atomic_t s_blocks_count; /* Reserved (might be deleted) */
-
/* Mount options */
unsigned long s_mount_opt;
uid_t s_resuid;
@@ -59,8 +54,6 @@ struct nilfs_sb_info {
/* Fundamental members */
struct super_block *s_super; /* reverse pointer to super_block */
struct the_nilfs *s_nilfs;
- struct list_head s_list; /* list head for nilfs->ns_supers */
- atomic_t s_count; /* reference count */
/* Segment constructor */
struct list_head s_dirty_files; /* dirty files list */
@@ -68,9 +61,6 @@ struct nilfs_sb_info {
spinlock_t s_inode_lock; /* Lock for the nilfs inode.
It covers s_dirty_files list */
- /* Metadata files */
- struct inode *s_ifile; /* index file inode */
-
/* Inode allocator */
spinlock_t s_next_gen_lock;
u32 s_next_generation;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 4588fb9e93d..0f83e93935b 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -371,7 +371,8 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
struct bio *bio = wi->bio;
int err;
- if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
+ if (segbuf->sb_nbio > 0 &&
+ bdi_write_congested(segbuf->sb_super->s_bdi)) {
wait_for_completion(&segbuf->sb_bio_event);
segbuf->sb_nbio--;
if (unlikely(atomic_read(&segbuf->sb_err))) {
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 9fd051a33c4..d926af62617 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -191,6 +191,8 @@ int nilfs_transaction_begin(struct super_block *sb,
if (ret > 0)
return 0;
+ vfs_check_frozen(sb, SB_FREEZE_WRITE);
+
sbi = NILFS_SB(sb);
nilfs = sbi->s_nilfs;
down_read(&nilfs->ns_segctor_sem);
@@ -366,8 +368,7 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
if (nilfs_doing_gc())
flags = NILFS_SS_GC;
- err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime,
- sci->sc_sbi->s_nilfs->ns_cno);
+ err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
if (unlikely(err))
return err;
@@ -440,17 +441,26 @@ static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
struct nilfs_finfo *finfo;
struct nilfs_inode_info *ii;
struct nilfs_segment_buffer *segbuf;
+ __u64 cno;
if (sci->sc_blk_cnt == 0)
return;
ii = NILFS_I(inode);
+
+ if (test_bit(NILFS_I_GCINODE, &ii->i_state))
+ cno = ii->i_cno;
+ else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
+ cno = 0;
+ else
+ cno = sci->sc_cno;
+
finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
sizeof(*finfo));
finfo->fi_ino = cpu_to_le64(inode->i_ino);
finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
- finfo->fi_cno = cpu_to_le64(ii->i_cno);
+ finfo->fi_cno = cpu_to_le64(cno);
segbuf = sci->sc_curseg;
segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
@@ -755,12 +765,12 @@ static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
}
}
-static int nilfs_test_metadata_dirty(struct nilfs_sb_info *sbi)
+static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
+ struct nilfs_root *root)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
int ret = 0;
- if (nilfs_mdt_fetch_dirty(sbi->s_ifile))
+ if (nilfs_mdt_fetch_dirty(root->ifile))
ret++;
if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
ret++;
@@ -785,7 +795,7 @@ static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
struct nilfs_sb_info *sbi = sci->sc_sbi;
int ret = 0;
- if (nilfs_test_metadata_dirty(sbi))
+ if (nilfs_test_metadata_dirty(sbi->s_nilfs, sci->sc_root))
set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
spin_lock(&sbi->s_inode_lock);
@@ -801,7 +811,7 @@ static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct the_nilfs *nilfs = sbi->s_nilfs;
- nilfs_mdt_clear_dirty(sbi->s_ifile);
+ nilfs_mdt_clear_dirty(sci->sc_root->ifile);
nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
nilfs_mdt_clear_dirty(nilfs->ns_sufile);
nilfs_mdt_clear_dirty(nilfs_dat_inode(nilfs));
@@ -848,9 +858,9 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
raw_cp->cp_snapshot_list.ssl_next = 0;
raw_cp->cp_snapshot_list.ssl_prev = 0;
raw_cp->cp_inodes_count =
- cpu_to_le64(atomic_read(&sbi->s_inodes_count));
+ cpu_to_le64(atomic_read(&sci->sc_root->inodes_count));
raw_cp->cp_blocks_count =
- cpu_to_le64(atomic_read(&sbi->s_blocks_count));
+ cpu_to_le64(atomic_read(&sci->sc_root->blocks_count));
raw_cp->cp_nblk_inc =
cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
@@ -861,7 +871,8 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
else
nilfs_checkpoint_set_minor(raw_cp);
- nilfs_write_inode_common(sbi->s_ifile, &raw_cp->cp_ifile_inode, 1);
+ nilfs_write_inode_common(sci->sc_root->ifile,
+ &raw_cp->cp_ifile_inode, 1);
nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
return 0;
@@ -886,13 +897,12 @@ static void nilfs_fill_in_file_bmap(struct inode *ifile,
}
}
-static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci,
- struct inode *ifile)
+static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
struct nilfs_inode_info *ii;
list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
- nilfs_fill_in_file_bmap(ifile, ii);
+ nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
set_bit(NILFS_I_COLLECTED, &ii->i_state);
}
}
@@ -1135,7 +1145,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
/* Fall through */
case NILFS_ST_IFILE:
- err = nilfs_segctor_scan_file(sci, sbi->s_ifile,
+ err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
&nilfs_sc_file_ops);
if (unlikely(err))
break;
@@ -1900,6 +1910,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_redirected(bh);
if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
@@ -1936,11 +1947,9 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
nilfs_drop_collected_inodes(&sci->sc_dirty_files);
- if (nilfs_doing_gc()) {
+ if (nilfs_doing_gc())
nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
- if (update_sr)
- nilfs_commit_gcdat_inode(nilfs);
- } else
+ else
nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
sci->sc_nblk_inc += sci->sc_nblk_this_inc;
@@ -1976,7 +1985,7 @@ static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
struct nilfs_sb_info *sbi)
{
struct nilfs_inode_info *ii, *n;
- __u64 cno = sbi->s_nilfs->ns_cno;
+ struct inode *ifile = sci->sc_root->ifile;
spin_lock(&sbi->s_inode_lock);
retry:
@@ -1987,14 +1996,14 @@ static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
spin_unlock(&sbi->s_inode_lock);
err = nilfs_ifile_get_inode_block(
- sbi->s_ifile, ii->vfs_inode.i_ino, &ibh);
+ ifile, ii->vfs_inode.i_ino, &ibh);
if (unlikely(err)) {
nilfs_warning(sbi->s_super, __func__,
"failed to get inode block.\n");
return err;
}
nilfs_mdt_mark_buffer_dirty(ibh);
- nilfs_mdt_mark_dirty(sbi->s_ifile);
+ nilfs_mdt_mark_dirty(ifile);
spin_lock(&sbi->s_inode_lock);
if (likely(!ii->i_bh))
ii->i_bh = ibh;
@@ -2002,7 +2011,6 @@ static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
brelse(ibh);
goto retry;
}
- ii->i_cno = cno;
clear_bit(NILFS_I_QUEUED, &ii->i_state);
set_bit(NILFS_I_BUSY, &ii->i_state);
@@ -2011,8 +2019,6 @@ static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
}
spin_unlock(&sbi->s_inode_lock);
- NILFS_I(sbi->s_ifile)->i_cno = cno;
-
return 0;
}
@@ -2021,19 +2027,13 @@ static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
{
struct nilfs_transaction_info *ti = current->journal_info;
struct nilfs_inode_info *ii, *n;
- __u64 cno = sbi->s_nilfs->ns_cno;
spin_lock(&sbi->s_inode_lock);
list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
- test_bit(NILFS_I_DIRTY, &ii->i_state)) {
- /* The current checkpoint number (=nilfs->ns_cno) is
- changed between check-in and check-out only if the
- super root is written out. So, we can update i_cno
- for the inodes that remain in the dirty list. */
- ii->i_cno = cno;
+ test_bit(NILFS_I_DIRTY, &ii->i_state))
continue;
- }
+
clear_bit(NILFS_I_BUSY, &ii->i_state);
brelse(ii->i_bh);
ii->i_bh = NULL;
@@ -2054,12 +2054,13 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
int err;
sci->sc_stage.scnt = NILFS_ST_INIT;
+ sci->sc_cno = nilfs->ns_cno;
err = nilfs_segctor_check_in_files(sci, sbi);
if (unlikely(err))
goto out;
- if (nilfs_test_metadata_dirty(sbi))
+ if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
if (nilfs_segctor_clean(sci))
@@ -2091,7 +2092,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
goto failed;
if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
- nilfs_segctor_fill_in_file_bmap(sci, sbi->s_ifile);
+ nilfs_segctor_fill_in_file_bmap(sci);
if (mode == SC_LSEG_SR &&
sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
@@ -2452,9 +2453,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
list_for_each_entry_safe(ii, n, head, i_dirty) {
if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
continue;
- hlist_del_init(&ii->vfs_inode.i_hash);
list_del_init(&ii->i_dirty);
- nilfs_clear_gcinode(&ii->vfs_inode);
+ iput(&ii->vfs_inode);
}
}
@@ -2472,13 +2472,15 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
nilfs_transaction_lock(sbi, &ti, 1);
- err = nilfs_init_gcdat_inode(nilfs);
+ err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
if (unlikely(err))
goto out_unlock;
err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
- if (unlikely(err))
+ if (unlikely(err)) {
+ nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
goto out_unlock;
+ }
sci->sc_freesegs = kbufs[4];
sci->sc_nfreesegs = argv[4].v_nmembs;
@@ -2510,7 +2512,7 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
out_unlock:
sci->sc_freesegs = NULL;
sci->sc_nfreesegs = 0;
- nilfs_clear_gcdat_inode(nilfs);
+ nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
nilfs_transaction_unlock(sbi);
return err;
}
@@ -2672,6 +2674,8 @@ static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
}
static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
+ __acquires(&sci->sc_state_lock)
+ __releases(&sci->sc_state_lock)
{
sci->sc_state |= NILFS_SEGCTOR_QUIT;
@@ -2686,7 +2690,8 @@ static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
/*
* Setup & clean-up functions
*/
-static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
+static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi,
+ struct nilfs_root *root)
{
struct nilfs_sc_info *sci;
@@ -2697,6 +2702,9 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
sci->sc_sbi = sbi;
sci->sc_super = sbi->s_super;
+ nilfs_get_root(root);
+ sci->sc_root = root;
+
init_waitqueue_head(&sci->sc_wait_request);
init_waitqueue_head(&sci->sc_wait_daemon);
init_waitqueue_head(&sci->sc_wait_task);
@@ -2771,6 +2779,8 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
WARN_ON(!list_empty(&sci->sc_segbufs));
WARN_ON(!list_empty(&sci->sc_write_logs));
+ nilfs_put_root(sci->sc_root);
+
down_write(&sbi->s_nilfs->ns_segctor_sem);
del_timer_sync(&sci->sc_timer);
@@ -2780,6 +2790,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
/**
* nilfs_attach_segment_constructor - attach a segment constructor
* @sbi: nilfs_sb_info
+ * @root: root object of the current filesystem tree
*
* nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
* initializes it, and starts the segment constructor.
@@ -2789,9 +2800,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
*
* %-ENOMEM - Insufficient memory available.
*/
-int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
+int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
+ struct nilfs_root *root)
{
- struct the_nilfs *nilfs = sbi->s_nilfs;
int err;
if (NILFS_SC(sbi)) {
@@ -2803,14 +2814,12 @@ int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi)
nilfs_detach_segment_constructor(sbi);
}
- sbi->s_sc_info = nilfs_segctor_new(sbi);
+ sbi->s_sc_info = nilfs_segctor_new(sbi, root);
if (!sbi->s_sc_info)
return -ENOMEM;
- nilfs_attach_writer(nilfs, sbi);
err = nilfs_segctor_start_thread(NILFS_SC(sbi));
if (err) {
- nilfs_detach_writer(nilfs, sbi);
kfree(sbi->s_sc_info);
sbi->s_sc_info = NULL;
}
@@ -2847,5 +2856,4 @@ void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
up_write(&nilfs->ns_segctor_sem);
nilfs_dispose_list(sbi, &garbage_list, 1);
- nilfs_detach_writer(nilfs, sbi);
}
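
With per-root segment construction, the checkpoint number written into each finfo entry is now chosen per inode: GC inodes keep the checkpoint they were read from, root metadata files record 0, and ordinary inodes use sc_cno, the value of ns_cno captured when the construction pass started. A hedged sketch of that three-way selection follows; the enum and helper are invented for illustration.

/* Sketch of the finfo checkpoint-number selection; names are invented. */
#include <stdio.h>

enum itype_model { GC_INODE, ROOT_METADATA, REGULAR };

static unsigned long long finfo_cno_model(enum itype_model t,
					  unsigned long long i_cno,
					  unsigned long long sc_cno)
{
	if (t == GC_INODE)
		return i_cno;	/* checkpoint the blocks were read from */
	if (t == ROOT_METADATA)
		return 0;	/* DAT/cpfile/sufile are not per-checkpoint */
	return sc_cno;		/* ns_cno captured for this construction pass */
}

int main(void)
{
	printf("%llu %llu %llu\n",
	       finfo_cno_model(GC_INODE, 11, 20),
	       finfo_cno_model(ROOT_METADATA, 0, 20),
	       finfo_cno_model(REGULAR, 0, 20));
	return 0;
}
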
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 17c487bd815..cd8056e7cbe 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -29,6 +29,8 @@
#include <linux/nilfs2_fs.h>
#include "sb.h"
+struct nilfs_root;
+
/**
* struct nilfs_recovery_info - Recovery information
* @ri_need_recovery: Recovery status
@@ -87,6 +89,7 @@ struct nilfs_segsum_pointer {
* struct nilfs_sc_info - Segment constructor information
* @sc_super: Back pointer to super_block struct
* @sc_sbi: Back pointer to nilfs_sb_info struct
+ * @sc_root: root object of the current filesystem tree
* @sc_nblk_inc: Block count of current generation
* @sc_dirty_files: List of files to be written
* @sc_gc_inodes: List of GC inodes having blocks to be written
@@ -107,6 +110,7 @@ struct nilfs_segsum_pointer {
* @sc_datablk_cnt: Data block count of a file
* @sc_nblk_this_inc: Number of blocks included in the current logical segment
* @sc_seg_ctime: Creation time
+ * @sc_cno: checkpoint number of current log
* @sc_flags: Internal flags
* @sc_state_lock: spinlock for sc_state and so on
* @sc_state: Segctord state flags
@@ -128,6 +132,7 @@ struct nilfs_segsum_pointer {
struct nilfs_sc_info {
struct super_block *sc_super;
struct nilfs_sb_info *sc_sbi;
+ struct nilfs_root *sc_root;
unsigned long sc_nblk_inc;
@@ -156,7 +161,7 @@ struct nilfs_sc_info {
unsigned long sc_datablk_cnt;
unsigned long sc_nblk_this_inc;
time_t sc_seg_ctime;
-
+ __u64 sc_cno;
unsigned long sc_flags;
spinlock_t sc_state_lock;
@@ -230,7 +235,8 @@ extern void nilfs_flush_segment(struct super_block *, ino_t);
extern int nilfs_clean_segments(struct super_block *, struct nilfs_argv *,
void **);
-extern int nilfs_attach_segment_constructor(struct nilfs_sb_info *);
+int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
+ struct nilfs_root *root);
extern void nilfs_detach_segment_constructor(struct nilfs_sb_info *);
/* recovery.c */
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 3c6cc6005c2..1d6f488ccae 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -505,7 +505,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
struct buffer_head *header_bh;
struct nilfs_sufile_header *header;
- struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
+ struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
void *kaddr;
int ret;
@@ -583,7 +583,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
struct nilfs_segment_usage *su;
struct nilfs_suinfo *si = buf;
size_t susz = NILFS_MDT(sufile)->mi_entry_size;
- struct the_nilfs *nilfs = NILFS_MDT(sufile)->mi_nilfs;
+ struct the_nilfs *nilfs = NILFS_I_NILFS(sufile);
void *kaddr;
unsigned long nsegs, segusages_per_block;
ssize_t n;
@@ -635,46 +635,55 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
}
/**
- * nilfs_sufile_read - read sufile inode
- * @sufile: sufile inode
+ * nilfs_sufile_read - read or get sufile inode
+ * @sb: super block instance
+ * @susize: size of a segment usage entry
* @raw_inode: on-disk sufile inode
+ * @inodep: buffer to store the inode
*/
-int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
+int nilfs_sufile_read(struct super_block *sb, size_t susize,
+ struct nilfs_inode *raw_inode, struct inode **inodep)
{
- struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+ struct inode *sufile;
+ struct nilfs_sufile_info *sui;
struct buffer_head *header_bh;
struct nilfs_sufile_header *header;
void *kaddr;
- int ret;
+ int err;
- ret = nilfs_read_inode_common(sufile, raw_inode);
- if (ret < 0)
- return ret;
+ sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
+ if (unlikely(!sufile))
+ return -ENOMEM;
+ if (!(sufile->i_state & I_NEW))
+ goto out;
- ret = nilfs_sufile_get_header_block(sufile, &header_bh);
- if (!ret) {
- kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
- header = kaddr + bh_offset(header_bh);
- sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
- kunmap_atomic(kaddr, KM_USER0);
- brelse(header_bh);
- }
- return ret;
-}
+ err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
+ if (err)
+ goto failed;
-/**
- * nilfs_sufile_new - create sufile
- * @nilfs: nilfs object
- * @susize: size of a segment usage entry
- */
-struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
-{
- struct inode *sufile;
+ nilfs_mdt_set_entry_size(sufile, susize,
+ sizeof(struct nilfs_sufile_header));
+
+ err = nilfs_read_inode_common(sufile, raw_inode);
+ if (err)
+ goto failed;
+
+ err = nilfs_sufile_get_header_block(sufile, &header_bh);
+ if (err)
+ goto failed;
- sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO,
- sizeof(struct nilfs_sufile_info));
- if (sufile)
- nilfs_mdt_set_entry_size(sufile, susize,
- sizeof(struct nilfs_sufile_header));
- return sufile;
+ sui = NILFS_SUI(sufile);
+ kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+ header = kaddr + bh_offset(header_bh);
+ sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
+ kunmap_atomic(kaddr, KM_USER0);
+ brelse(header_bh);
+
+ unlock_new_inode(sufile);
+ out:
+ *inodep = sufile;
+ return 0;
+ failed:
+ iget_failed(sufile);
+ return err;
}
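
nilfs_sufile_read() now follows the iget_locked() pattern instead of allocating a private pseudo inode: if the inode is already cached it is returned as-is, otherwise it is initialized from the raw on-disk inode and either published with unlock_new_inode() or torn down with iget_failed(). The userspace sketch below models only that get-or-initialize-once control flow, with invented names, a single-slot "cache", and none of the kernel locking.

/* Get-or-initialize-once flow; illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct obj_model {
	int value;
};

static struct obj_model *cache;		/* stands in for the inode cache */

static int get_or_init(int raw_value, struct obj_model **out)
{
	struct obj_model *o;

	if (cache) {			/* !(inode->i_state & I_NEW) */
		*out = cache;
		return 0;
	}
	o = calloc(1, sizeof(*o));
	if (!o)
		return -1;
	if (raw_value < 0) {		/* "on-disk" data is invalid */
		free(o);		/* the iget_failed() analogue */
		return -1;
	}
	o->value = raw_value;		/* one-time initialization */
	cache = o;			/* the unlock_new_inode() analogue */
	*out = o;
	return 0;
}

int main(void)
{
	struct obj_model *a, *b;

	get_or_init(42, &a);
	get_or_init(7, &b);		/* returns the cached object */
	printf("%d %d same=%d\n", a->value, b->value, a == b);
	return 0;
}
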
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 15163b8aff7..a943fbacb45 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -31,7 +31,7 @@
static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
{
- return NILFS_MDT(sufile)->mi_nilfs->ns_nsegments;
+ return NILFS_I_NILFS(sufile)->ns_nsegments;
}
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile);
@@ -61,8 +61,8 @@ void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
struct buffer_head *);
-int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode);
-struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize);
+int nilfs_sufile_read(struct super_block *sb, size_t susize,
+ struct nilfs_inode *raw_inode, struct inode **inodep);
/**
* nilfs_sufile_scrap - make a segment garbage
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 922263393c7..35ae03c0db8 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -45,14 +45,13 @@
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/crc32.h>
-#include <linux/smp_lock.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/kobject.h>
-#include <linux/exportfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include "nilfs.h"
+#include "export.h"
#include "mdt.h"
#include "alloc.h"
#include "btree.h"
@@ -69,11 +68,12 @@ MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
"(NILFS)");
MODULE_LICENSE("GPL");
-struct kmem_cache *nilfs_inode_cachep;
+static struct kmem_cache *nilfs_inode_cachep;
struct kmem_cache *nilfs_transaction_cachep;
struct kmem_cache *nilfs_segbuf_cachep;
struct kmem_cache *nilfs_btree_path_cache;
+static int nilfs_setup_super(struct nilfs_sb_info *sbi, int is_mount);
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
static void nilfs_set_error(struct nilfs_sb_info *sbi)
@@ -147,7 +147,7 @@ void nilfs_warning(struct super_block *sb, const char *function,
}
-struct inode *nilfs_alloc_inode_common(struct the_nilfs *nilfs)
+struct inode *nilfs_alloc_inode(struct super_block *sb)
{
struct nilfs_inode_info *ii;
@@ -156,18 +156,20 @@ struct inode *nilfs_alloc_inode_common(struct the_nilfs *nilfs)
return NULL;
ii->i_bh = NULL;
ii->i_state = 0;
+ ii->i_cno = 0;
ii->vfs_inode.i_version = 1;
- nilfs_btnode_cache_init(&ii->i_btnode_cache, nilfs->ns_bdi);
+ nilfs_btnode_cache_init(&ii->i_btnode_cache, sb->s_bdi);
return &ii->vfs_inode;
}
-struct inode *nilfs_alloc_inode(struct super_block *sb)
-{
- return nilfs_alloc_inode_common(NILFS_SB(sb)->s_nilfs);
-}
-
void nilfs_destroy_inode(struct inode *inode)
{
+ struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+
+ if (mdi) {
+ kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
+ kfree(mdi);
+ }
kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
}
@@ -178,17 +180,9 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
retry:
set_buffer_dirty(nilfs->ns_sbh[0]);
-
if (nilfs_test_opt(sbi, BARRIER)) {
err = __sync_dirty_buffer(nilfs->ns_sbh[0],
- WRITE_SYNC | WRITE_BARRIER);
- if (err == -EOPNOTSUPP) {
- nilfs_warning(sbi->s_super, __func__,
- "barrier-based sync failed. "
- "disabling barriers\n");
- nilfs_clear_opt(sbi, BARRIER);
- goto retry;
- }
+ WRITE_SYNC | WRITE_FLUSH_FUA);
} else {
err = sync_dirty_buffer(nilfs->ns_sbh[0]);
}
@@ -342,8 +336,6 @@ static void nilfs_put_super(struct super_block *sb)
struct nilfs_sb_info *sbi = NILFS_SB(sb);
struct the_nilfs *nilfs = sbi->s_nilfs;
- lock_kernel();
-
nilfs_detach_segment_constructor(sbi);
if (!(sb->s_flags & MS_RDONLY)) {
@@ -351,18 +343,15 @@ static void nilfs_put_super(struct super_block *sb)
nilfs_cleanup_super(sbi);
up_write(&nilfs->ns_sem);
}
- down_write(&nilfs->ns_super_sem);
- if (nilfs->ns_current == sbi)
- nilfs->ns_current = NULL;
- up_write(&nilfs->ns_super_sem);
- nilfs_detach_checkpoint(sbi);
- put_nilfs(sbi->s_nilfs);
+ iput(nilfs->ns_sufile);
+ iput(nilfs->ns_cpfile);
+ iput(nilfs->ns_dat);
+
+ destroy_nilfs(nilfs);
sbi->s_super = NULL;
sb->s_fs_info = NULL;
- nilfs_put_sbinfo(sbi);
-
- unlock_kernel();
+ kfree(sbi);
}
static int nilfs_sync_fs(struct super_block *sb, int wait)
@@ -389,21 +378,22 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
return err;
}
-int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
+int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno, int curr_mnt,
+ struct nilfs_root **rootp)
{
struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct nilfs_root *root;
struct nilfs_checkpoint *raw_cp;
struct buffer_head *bh_cp;
- int err;
+ int err = -ENOMEM;
- down_write(&nilfs->ns_super_sem);
- list_add(&sbi->s_list, &nilfs->ns_supers);
- up_write(&nilfs->ns_super_sem);
+ root = nilfs_find_or_create_root(
+ nilfs, curr_mnt ? NILFS_CPTREE_CURRENT_CNO : cno);
+ if (!root)
+ return err;
- err = -ENOMEM;
- sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
- if (!sbi->s_ifile)
- goto delist;
+ if (root->ifile)
+ goto reuse; /* already attached checkpoint */
down_read(&nilfs->ns_segctor_sem);
err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
@@ -419,45 +409,64 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
}
goto failed;
}
- err = nilfs_read_inode_common(sbi->s_ifile, &raw_cp->cp_ifile_inode);
- if (unlikely(err))
+
+ err = nilfs_ifile_read(sbi->s_super, root, nilfs->ns_inode_size,
+ &raw_cp->cp_ifile_inode, &root->ifile);
+ if (err)
goto failed_bh;
- atomic_set(&sbi->s_inodes_count, le64_to_cpu(raw_cp->cp_inodes_count));
- atomic_set(&sbi->s_blocks_count, le64_to_cpu(raw_cp->cp_blocks_count));
+
+ atomic_set(&root->inodes_count, le64_to_cpu(raw_cp->cp_inodes_count));
+ atomic_set(&root->blocks_count, le64_to_cpu(raw_cp->cp_blocks_count));
nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp);
+
+ reuse:
+ *rootp = root;
return 0;
failed_bh:
nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, cno, bh_cp);
failed:
- nilfs_mdt_destroy(sbi->s_ifile);
- sbi->s_ifile = NULL;
+ nilfs_put_root(root);
- delist:
- down_write(&nilfs->ns_super_sem);
- list_del_init(&sbi->s_list);
- up_write(&nilfs->ns_super_sem);
+ return err;
+}
+static int nilfs_freeze(struct super_block *sb)
+{
+ struct nilfs_sb_info *sbi = NILFS_SB(sb);
+ struct the_nilfs *nilfs = sbi->s_nilfs;
+ int err;
+
+ if (sb->s_flags & MS_RDONLY)
+ return 0;
+
+ /* Mark super block clean */
+ down_write(&nilfs->ns_sem);
+ err = nilfs_cleanup_super(sbi);
+ up_write(&nilfs->ns_sem);
return err;
}
-void nilfs_detach_checkpoint(struct nilfs_sb_info *sbi)
+static int nilfs_unfreeze(struct super_block *sb)
{
+ struct nilfs_sb_info *sbi = NILFS_SB(sb);
struct the_nilfs *nilfs = sbi->s_nilfs;
- nilfs_mdt_destroy(sbi->s_ifile);
- sbi->s_ifile = NULL;
- down_write(&nilfs->ns_super_sem);
- list_del_init(&sbi->s_list);
- up_write(&nilfs->ns_super_sem);
+ if (sb->s_flags & MS_RDONLY)
+ return 0;
+
+ down_write(&nilfs->ns_sem);
+ nilfs_setup_super(sbi, false);
+ up_write(&nilfs->ns_sem);
+ return 0;
}
static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
- struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct nilfs_root *root = NILFS_I(dentry->d_inode)->i_root;
+ struct the_nilfs *nilfs = root->nilfs;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
unsigned long long blocks;
unsigned long overhead;
@@ -493,7 +502,7 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bfree = nfreeblocks;
buf->f_bavail = (buf->f_bfree >= nrsvblocks) ?
(buf->f_bfree - nrsvblocks) : 0;
- buf->f_files = atomic_read(&sbi->s_inodes_count);
+ buf->f_files = atomic_read(&root->inodes_count);
buf->f_ffree = 0; /* nilfs_count_free_inodes(sb); */
buf->f_namelen = NILFS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
@@ -506,12 +515,12 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
struct super_block *sb = vfs->mnt_sb;
struct nilfs_sb_info *sbi = NILFS_SB(sb);
+ struct nilfs_root *root = NILFS_I(vfs->mnt_root->d_inode)->i_root;
if (!nilfs_test_opt(sbi, BARRIER))
seq_puts(seq, ",nobarrier");
- if (nilfs_test_opt(sbi, SNAPSHOT))
- seq_printf(seq, ",cp=%llu",
- (unsigned long long int)sbi->s_snapshot_cno);
+ if (root->cno != NILFS_CPTREE_CURRENT_CNO)
+ seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno);
if (nilfs_test_opt(sbi, ERRORS_PANIC))
seq_puts(seq, ",errors=panic");
if (nilfs_test_opt(sbi, ERRORS_CONT))
@@ -537,6 +546,8 @@ static const struct super_operations nilfs_sops = {
.put_super = nilfs_put_super,
/* .write_super = nilfs_write_super, */
.sync_fs = nilfs_sync_fs,
+ .freeze_fs = nilfs_freeze,
+ .unfreeze_fs = nilfs_unfreeze,
/* .write_super_lockfs */
/* .unlockfs */
.statfs = nilfs_statfs,
@@ -545,48 +556,6 @@ static const struct super_operations nilfs_sops = {
.show_options = nilfs_show_options
};
-static struct inode *
-nilfs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation)
-{
- struct inode *inode;
-
- if (ino < NILFS_FIRST_INO(sb) && ino != NILFS_ROOT_INO &&
- ino != NILFS_SKETCH_INO)
- return ERR_PTR(-ESTALE);
-
- inode = nilfs_iget(sb, ino);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
- if (generation && inode->i_generation != generation) {
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
-
- return inode;
-}
-
-static struct dentry *
-nilfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len,
- int fh_type)
-{
- return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
- nilfs_nfs_get_inode);
-}
-
-static struct dentry *
-nilfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len,
- int fh_type)
-{
- return generic_fh_to_parent(sb, fid, fh_len, fh_type,
- nilfs_nfs_get_inode);
-}
-
-static const struct export_operations nilfs_export_ops = {
- .fh_to_dentry = nilfs_fh_to_dentry,
- .fh_to_parent = nilfs_fh_to_parent,
- .get_parent = nilfs_get_parent,
-};
-
enum {
Opt_err_cont, Opt_err_panic, Opt_err_ro,
Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
@@ -612,7 +581,6 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
struct nilfs_sb_info *sbi = NILFS_SB(sb);
char *p;
substring_t args[MAX_OPT_ARGS];
- int option;
if (!options)
return 1;
@@ -650,30 +618,12 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
nilfs_write_opt(sbi, ERROR_MODE, ERRORS_CONT);
break;
case Opt_snapshot:
- if (match_int(&args[0], &option) || option <= 0)
- return 0;
if (is_remount) {
- if (!nilfs_test_opt(sbi, SNAPSHOT)) {
- printk(KERN_ERR
- "NILFS: cannot change regular "
- "mount to snapshot.\n");
- return 0;
- } else if (option != sbi->s_snapshot_cno) {
- printk(KERN_ERR
- "NILFS: cannot remount to a "
- "different snapshot.\n");
- return 0;
- }
- break;
- }
- if (!(sb->s_flags & MS_RDONLY)) {
- printk(KERN_ERR "NILFS: cannot mount snapshot "
- "read/write. A read-only option is "
- "required.\n");
+ printk(KERN_ERR
+ "NILFS: \"%s\" option is invalid "
+ "for remount.\n", p);
return 0;
}
- sbi->s_snapshot_cno = option;
- nilfs_set_opt(sbi, SNAPSHOT);
break;
case Opt_norecovery:
nilfs_set_opt(sbi, NORECOVERY);
@@ -701,7 +651,7 @@ nilfs_set_default_options(struct nilfs_sb_info *sbi,
NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
}
-static int nilfs_setup_super(struct nilfs_sb_info *sbi)
+static int nilfs_setup_super(struct nilfs_sb_info *sbi, int is_mount)
{
struct the_nilfs *nilfs = sbi->s_nilfs;
struct nilfs_super_block **sbp;
@@ -713,6 +663,9 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi)
if (!sbp)
return -EIO;
+ if (!is_mount)
+ goto skip_mount_setup;
+
max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count);
mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);
@@ -729,9 +682,11 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi)
sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);
sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1);
+ sbp[0]->s_mtime = cpu_to_le64(get_seconds());
+
+skip_mount_setup:
sbp[0]->s_state =
cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
- sbp[0]->s_mtime = cpu_to_le64(get_seconds());
/* synchronize sbp[1] with sbp[0] */
memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
@@ -798,22 +753,156 @@ int nilfs_check_feature_compatibility(struct super_block *sb,
return 0;
}
+static int nilfs_get_root_dentry(struct super_block *sb,
+ struct nilfs_root *root,
+ struct dentry **root_dentry)
+{
+ struct inode *inode;
+ struct dentry *dentry;
+ int ret = 0;
+
+ inode = nilfs_iget(sb, root, NILFS_ROOT_INO);
+ if (IS_ERR(inode)) {
+ printk(KERN_ERR "NILFS: get root inode failed\n");
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+ if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) {
+ iput(inode);
+ printk(KERN_ERR "NILFS: corrupt root inode.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (root->cno == NILFS_CPTREE_CURRENT_CNO) {
+ dentry = d_find_alias(inode);
+ if (!dentry) {
+ dentry = d_alloc_root(inode);
+ if (!dentry) {
+ iput(inode);
+ ret = -ENOMEM;
+ goto failed_dentry;
+ }
+ } else {
+ iput(inode);
+ }
+ } else {
+ dentry = d_obtain_alias(inode);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto failed_dentry;
+ }
+ }
+ *root_dentry = dentry;
+ out:
+ return ret;
+
+ failed_dentry:
+ printk(KERN_ERR "NILFS: get root dentry failed\n");
+ goto out;
+}
+
+static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
+ struct dentry **root_dentry)
+{
+ struct the_nilfs *nilfs = NILFS_SB(s)->s_nilfs;
+ struct nilfs_root *root;
+ int ret;
+
+ down_read(&nilfs->ns_segctor_sem);
+ ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
+ up_read(&nilfs->ns_segctor_sem);
+ if (ret < 0) {
+ ret = (ret == -ENOENT) ? -EINVAL : ret;
+ goto out;
+ } else if (!ret) {
+ printk(KERN_ERR "NILFS: The specified checkpoint is "
+ "not a snapshot (checkpoint number=%llu).\n",
+ (unsigned long long)cno);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nilfs_attach_checkpoint(NILFS_SB(s), cno, false, &root);
+ if (ret) {
+ printk(KERN_ERR "NILFS: error loading snapshot "
+ "(checkpoint number=%llu).\n",
+ (unsigned long long)cno);
+ goto out;
+ }
+ ret = nilfs_get_root_dentry(s, root, root_dentry);
+ nilfs_put_root(root);
+ out:
+ return ret;
+}
+
+static int nilfs_tree_was_touched(struct dentry *root_dentry)
+{
+ return atomic_read(&root_dentry->d_count) > 1;
+}
+
+/**
+ * nilfs_try_to_shrink_tree() - try to shrink dentries of a checkpoint
+ * @root_dentry: root dentry of the tree to be shrunk
+ *
+ * This function returns true if the tree was in use.
+ */
+static int nilfs_try_to_shrink_tree(struct dentry *root_dentry)
+{
+ if (have_submounts(root_dentry))
+ return true;
+ shrink_dcache_parent(root_dentry);
+ return nilfs_tree_was_touched(root_dentry);
+}
+
+int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
+{
+ struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
+ struct nilfs_root *root;
+ struct inode *inode;
+ struct dentry *dentry;
+ int ret;
+
+ if (cno < 0 || cno > nilfs->ns_cno)
+ return false;
+
+ if (cno >= nilfs_last_cno(nilfs))
+ return true; /* protect recent checkpoints */
+
+ ret = false;
+ root = nilfs_lookup_root(NILFS_SB(sb)->s_nilfs, cno);
+ if (root) {
+ inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO);
+ if (inode) {
+ dentry = d_find_alias(inode);
+ if (dentry) {
+ if (nilfs_tree_was_touched(dentry))
+ ret = nilfs_try_to_shrink_tree(dentry);
+ dput(dentry);
+ }
+ iput(inode);
+ }
+ nilfs_put_root(root);
+ }
+ return ret;
+}
+
/**
* nilfs_fill_super() - initialize a super block instance
* @sb: super_block
* @data: mount options
* @silent: silent mode flag
- * @nilfs: the_nilfs struct
*
* This function is called exclusively by nilfs->ns_mount_mutex.
* So, the recovery process is protected from other simultaneous mounts.
*/
static int
-nilfs_fill_super(struct super_block *sb, void *data, int silent,
- struct the_nilfs *nilfs)
+nilfs_fill_super(struct super_block *sb, void *data, int silent)
{
+ struct the_nilfs *nilfs;
struct nilfs_sb_info *sbi;
- struct inode *root;
+ struct nilfs_root *fsroot;
+ struct backing_dev_info *bdi;
__u64 cno;
int err;
@@ -822,19 +911,21 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
return -ENOMEM;
sb->s_fs_info = sbi;
+ sbi->s_super = sb;
- get_nilfs(nilfs);
+ nilfs = alloc_nilfs(sb->s_bdev);
+ if (!nilfs) {
+ err = -ENOMEM;
+ goto failed_sbi;
+ }
sbi->s_nilfs = nilfs;
- sbi->s_super = sb;
- atomic_set(&sbi->s_count, 1);
err = init_nilfs(nilfs, sbi, (char *)data);
if (err)
- goto failed_sbi;
+ goto failed_nilfs;
spin_lock_init(&sbi->s_inode_lock);
INIT_LIST_HEAD(&sbi->s_dirty_files);
- INIT_LIST_HEAD(&sbi->s_list);
/*
* Following initialization is overlapped because
@@ -850,94 +941,59 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
sb->s_export_op = &nilfs_export_ops;
sb->s_root = NULL;
sb->s_time_gran = 1;
- sb->s_bdi = nilfs->ns_bdi;
+
+ bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
+ sb->s_bdi = bdi ? : &default_backing_dev_info;
err = load_nilfs(nilfs, sbi);
if (err)
- goto failed_sbi;
+ goto failed_nilfs;
cno = nilfs_last_cno(nilfs);
-
- if (sb->s_flags & MS_RDONLY) {
- if (nilfs_test_opt(sbi, SNAPSHOT)) {
- down_read(&nilfs->ns_segctor_sem);
- err = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile,
- sbi->s_snapshot_cno);
- up_read(&nilfs->ns_segctor_sem);
- if (err < 0) {
- if (err == -ENOENT)
- err = -EINVAL;
- goto failed_sbi;
- }
- if (!err) {
- printk(KERN_ERR
- "NILFS: The specified checkpoint is "
- "not a snapshot "
- "(checkpoint number=%llu).\n",
- (unsigned long long)sbi->s_snapshot_cno);
- err = -EINVAL;
- goto failed_sbi;
- }
- cno = sbi->s_snapshot_cno;
- }
- }
-
- err = nilfs_attach_checkpoint(sbi, cno);
+ err = nilfs_attach_checkpoint(sbi, cno, true, &fsroot);
if (err) {
- printk(KERN_ERR "NILFS: error loading a checkpoint"
- " (checkpoint number=%llu).\n", (unsigned long long)cno);
- goto failed_sbi;
+ printk(KERN_ERR "NILFS: error loading last checkpoint "
+ "(checkpoint number=%llu).\n", (unsigned long long)cno);
+ goto failed_unload;
}
if (!(sb->s_flags & MS_RDONLY)) {
- err = nilfs_attach_segment_constructor(sbi);
+ err = nilfs_attach_segment_constructor(sbi, fsroot);
if (err)
goto failed_checkpoint;
}
- root = nilfs_iget(sb, NILFS_ROOT_INO);
- if (IS_ERR(root)) {
- printk(KERN_ERR "NILFS: get root inode failed\n");
- err = PTR_ERR(root);
- goto failed_segctor;
- }
- if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
- iput(root);
- printk(KERN_ERR "NILFS: corrupt root inode.\n");
- err = -EINVAL;
- goto failed_segctor;
- }
- sb->s_root = d_alloc_root(root);
- if (!sb->s_root) {
- iput(root);
- printk(KERN_ERR "NILFS: get root dentry failed\n");
- err = -ENOMEM;
+ err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root);
+ if (err)
goto failed_segctor;
- }
+
+ nilfs_put_root(fsroot);
if (!(sb->s_flags & MS_RDONLY)) {
down_write(&nilfs->ns_sem);
- nilfs_setup_super(sbi);
+ nilfs_setup_super(sbi, true);
up_write(&nilfs->ns_sem);
}
- down_write(&nilfs->ns_super_sem);
- if (!nilfs_test_opt(sbi, SNAPSHOT))
- nilfs->ns_current = sbi;
- up_write(&nilfs->ns_super_sem);
-
return 0;
failed_segctor:
nilfs_detach_segment_constructor(sbi);
failed_checkpoint:
- nilfs_detach_checkpoint(sbi);
+ nilfs_put_root(fsroot);
+
+ failed_unload:
+ iput(nilfs->ns_sufile);
+ iput(nilfs->ns_cpfile);
+ iput(nilfs->ns_dat);
+
+ failed_nilfs:
+ destroy_nilfs(nilfs);
failed_sbi:
- put_nilfs(nilfs);
sb->s_fs_info = NULL;
- nilfs_put_sbinfo(sbi);
+ kfree(sbi);
return err;
}
@@ -947,15 +1003,10 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
struct the_nilfs *nilfs = sbi->s_nilfs;
unsigned long old_sb_flags;
struct nilfs_mount_options old_opts;
- int was_snapshot, err;
-
- lock_kernel();
+ int err;
- down_write(&nilfs->ns_super_sem);
old_sb_flags = sb->s_flags;
old_opts.mount_opt = sbi->s_mount_opt;
- old_opts.snapshot_cno = sbi->s_snapshot_cno;
- was_snapshot = nilfs_test_opt(sbi, SNAPSHOT);
if (!parse_options(data, sb, 1)) {
err = -EINVAL;
@@ -964,11 +1015,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
err = -EINVAL;
- if (was_snapshot && !(*flags & MS_RDONLY)) {
- printk(KERN_ERR "NILFS (device %s): cannot remount snapshot "
- "read/write.\n", sb->s_id);
- goto restore_opts;
- }
if (!nilfs_valid_fs(nilfs)) {
printk(KERN_WARNING "NILFS (device %s): couldn't "
@@ -993,6 +1039,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
up_write(&nilfs->ns_sem);
} else {
__u64 features;
+ struct nilfs_root *root;
/*
* Mounting a RDONLY partition read-write, so reread and
@@ -1014,25 +1061,21 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags &= ~MS_RDONLY;
- err = nilfs_attach_segment_constructor(sbi);
+ root = NILFS_I(sb->s_root->d_inode)->i_root;
+ err = nilfs_attach_segment_constructor(sbi, root);
if (err)
goto restore_opts;
down_write(&nilfs->ns_sem);
- nilfs_setup_super(sbi);
+ nilfs_setup_super(sbi, true);
up_write(&nilfs->ns_sem);
}
out:
- up_write(&nilfs->ns_super_sem);
- unlock_kernel();
return 0;
restore_opts:
sb->s_flags = old_sb_flags;
sbi->s_mount_opt = old_opts.mount_opt;
- sbi->s_snapshot_cno = old_opts.snapshot_cno;
- up_write(&nilfs->ns_super_sem);
- unlock_kernel();
return err;
}
@@ -1052,7 +1095,7 @@ static int nilfs_identify(char *data, struct nilfs_super_data *sd)
{
char *p, *options = data;
substring_t args[MAX_OPT_ARGS];
- int option, token;
+ int token;
int ret = 0;
do {
@@ -1060,16 +1103,18 @@ static int nilfs_identify(char *data, struct nilfs_super_data *sd)
if (p != NULL && *p) {
token = match_token(p, tokens, args);
if (token == Opt_snapshot) {
- if (!(sd->flags & MS_RDONLY))
+ if (!(sd->flags & MS_RDONLY)) {
ret++;
- else {
- ret = match_int(&args[0], &option);
- if (!ret) {
- if (option > 0)
- sd->cno = option;
- else
- ret++;
- }
+ } else {
+ sd->cno = simple_strtoull(args[0].from,
+ NULL, 0);
+ /*
+ * No need to see the end pointer;
+ * match_token() has done syntax
+ * checking.
+ */
+ if (sd->cno == 0)
+ ret++;
}
}
if (ret)
@@ -1086,18 +1131,14 @@ static int nilfs_identify(char *data, struct nilfs_super_data *sd)
static int nilfs_set_bdev_super(struct super_block *s, void *data)
{
- struct nilfs_super_data *sd = data;
-
- s->s_bdev = sd->bdev;
+ s->s_bdev = data;
s->s_dev = s->s_bdev->bd_dev;
return 0;
}
static int nilfs_test_bdev_super(struct super_block *s, void *data)
{
- struct nilfs_super_data *sd = data;
-
- return sd->sbi && s->s_fs_info == (void *)sd->sbi;
+ return (void *)s->s_bdev == data;
}
static int
@@ -1107,8 +1148,8 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
struct nilfs_super_data sd;
struct super_block *s;
fmode_t mode = FMODE_READ;
- struct the_nilfs *nilfs;
- int err, need_to_close = 1;
+ struct dentry *root_dentry;
+ int err, s_new = false;
if (!(flags & MS_RDONLY))
mode |= FMODE_WRITE;
@@ -1117,12 +1158,6 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
if (IS_ERR(sd.bdev))
return PTR_ERR(sd.bdev);
- /*
- * To get mount instance using sget() vfs-routine, NILFS needs
- * much more information than normal filesystems to identify mount
- * instance. For snapshot mounts, not only a mount type (ro-mount
- * or rw-mount) but also a checkpoint number is required.
- */
sd.cno = 0;
sd.flags = flags;
if (nilfs_identify((char *)data, &sd)) {
@@ -1130,94 +1165,86 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
goto failed;
}
- nilfs = find_or_create_nilfs(sd.bdev);
- if (!nilfs) {
- err = -ENOMEM;
- goto failed;
- }
-
- mutex_lock(&nilfs->ns_mount_mutex);
-
- if (!sd.cno) {
- /*
- * Check if an exclusive mount exists or not.
- * Snapshot mounts coexist with a current mount
- * (i.e. rw-mount or ro-mount), whereas rw-mount and
- * ro-mount are mutually exclusive.
- */
- down_read(&nilfs->ns_super_sem);
- if (nilfs->ns_current &&
- ((nilfs->ns_current->s_super->s_flags ^ flags)
- & MS_RDONLY)) {
- up_read(&nilfs->ns_super_sem);
- err = -EBUSY;
- goto failed_unlock;
- }
- up_read(&nilfs->ns_super_sem);
- }
-
- /*
- * Find existing nilfs_sb_info struct
- */
- sd.sbi = nilfs_find_sbinfo(nilfs, !(flags & MS_RDONLY), sd.cno);
-
/*
- * Get super block instance holding the nilfs_sb_info struct.
- * A new instance is allocated if no existing mount is present or
- * existing instance has been unmounted.
+ * once the super is inserted into the list by sget, s_umount
+ * will protect the lockfs code from trying to start a snapshot
+ * while we are mounting
*/
- s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, &sd);
- if (sd.sbi)
- nilfs_put_sbinfo(sd.sbi);
-
+ mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
+ if (sd.bdev->bd_fsfreeze_count > 0) {
+ mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
+ err = -EBUSY;
+ goto failed;
+ }
+ s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev);
+ mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
if (IS_ERR(s)) {
err = PTR_ERR(s);
- goto failed_unlock;
+ goto failed;
}
if (!s->s_root) {
char b[BDEVNAME_SIZE];
+ s_new = true;
+
/* New superblock instance created */
s->s_flags = flags;
s->s_mode = mode;
strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(sd.bdev));
- err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0,
- nilfs);
+ err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
if (err)
- goto cancel_new;
+ goto failed_super;
s->s_flags |= MS_ACTIVE;
- need_to_close = 0;
+ } else if (!sd.cno) {
+ int busy = false;
+
+ if (nilfs_tree_was_touched(s->s_root)) {
+ busy = nilfs_try_to_shrink_tree(s->s_root);
+ if (busy && (flags ^ s->s_flags) & MS_RDONLY) {
+ printk(KERN_ERR "NILFS: the device already "
+ "has a %s mount.\n",
+ (s->s_flags & MS_RDONLY) ?
+ "read-only" : "read/write");
+ err = -EBUSY;
+ goto failed_super;
+ }
+ }
+ if (!busy) {
+ /*
+ * Try remount to setup mount states if the current
+ * tree is not mounted and only snapshots use this sb.
+ */
+ err = nilfs_remount(s, &flags, data);
+ if (err)
+ goto failed_super;
+ }
}
- mutex_unlock(&nilfs->ns_mount_mutex);
- put_nilfs(nilfs);
- if (need_to_close)
- close_bdev_exclusive(sd.bdev, mode);
- simple_set_mnt(mnt, s);
- return 0;
+ if (sd.cno) {
+ err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
+ if (err)
+ goto failed_super;
+ } else {
+ root_dentry = dget(s->s_root);
+ }
- failed_unlock:
- mutex_unlock(&nilfs->ns_mount_mutex);
- put_nilfs(nilfs);
- failed:
- close_bdev_exclusive(sd.bdev, mode);
+ if (!s_new)
+ close_bdev_exclusive(sd.bdev, mode);
- return err;
+ mnt->mnt_sb = s;
+ mnt->mnt_root = root_dentry;
+ return 0;
- cancel_new:
- /* Abandoning the newly allocated superblock */
- mutex_unlock(&nilfs->ns_mount_mutex);
- put_nilfs(nilfs);
+ failed_super:
deactivate_locked_super(s);
- /*
- * deactivate_locked_super() invokes close_bdev_exclusive().
- * We must finish all post-cleaning before this call;
- * put_nilfs() needs the block device.
- */
+
+ failed:
+ if (!s_new)
+ close_bdev_exclusive(sd.bdev, mode);
return err;
}
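The nilfs_identify() change above parses the cp= snapshot option with simple_strtoull() and treats checkpoint number 0 as invalid, relying on match_token() for syntax checking. A rough user-space sketch of the same rule follows; it additionally checks the end pointer, and parse_cp_option() and the sample inputs are illustrative names only, not kernel code.

/* Sketch: parse a cp=<n> value as an unsigned 64-bit checkpoint number. */
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_cp_option(const char *arg, uint64_t *cno)
{
	char *end;

	errno = 0;
	*cno = strtoull(arg, &end, 0);
	if (errno || end == arg || *end != '\0')
		return -1;		/* malformed or out-of-range number */
	if (*cno == 0)
		return -1;		/* 0 is reserved for the current tree */
	return 0;
}

int main(void)
{
	const char *samples[] = { "7", "0", "abc", "18446744073709551615" };
	uint64_t cno;

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("cp=%s -> %s\n", samples[i],
		       parse_cp_option(samples[i], &cno) ? "invalid" : "ok");
	return 0;
}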
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ba7c10c917f..0254be2d73c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -35,9 +35,6 @@
#include "segbuf.h"
-static LIST_HEAD(nilfs_objects);
-static DEFINE_SPINLOCK(nilfs_lock);
-
static int nilfs_valid_sb(struct nilfs_super_block *sbp);
void nilfs_set_last_segment(struct the_nilfs *nilfs,
@@ -61,16 +58,13 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs,
}
/**
- * alloc_nilfs - allocate the_nilfs structure
+ * alloc_nilfs - allocate a nilfs object
* @bdev: block device to which the_nilfs is related
*
- * alloc_nilfs() allocates memory for the_nilfs and
- * initializes its reference count and locks.
- *
* Return Value: On success, pointer to the_nilfs is returned.
* On error, NULL is returned.
*/
-static struct the_nilfs *alloc_nilfs(struct block_device *bdev)
+struct the_nilfs *alloc_nilfs(struct block_device *bdev)
{
struct the_nilfs *nilfs;
@@ -79,103 +73,38 @@ static struct the_nilfs *alloc_nilfs(struct block_device *bdev)
return NULL;
nilfs->ns_bdev = bdev;
- atomic_set(&nilfs->ns_count, 1);
atomic_set(&nilfs->ns_ndirtyblks, 0);
init_rwsem(&nilfs->ns_sem);
- init_rwsem(&nilfs->ns_super_sem);
- mutex_init(&nilfs->ns_mount_mutex);
- init_rwsem(&nilfs->ns_writer_sem);
- INIT_LIST_HEAD(&nilfs->ns_list);
- INIT_LIST_HEAD(&nilfs->ns_supers);
+ INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
spin_lock_init(&nilfs->ns_last_segment_lock);
- nilfs->ns_gc_inodes_h = NULL;
+ nilfs->ns_cptree = RB_ROOT;
+ spin_lock_init(&nilfs->ns_cptree_lock);
init_rwsem(&nilfs->ns_segctor_sem);
return nilfs;
}
/**
- * find_or_create_nilfs - find or create nilfs object
- * @bdev: block device to which the_nilfs is related
- *
- * find_nilfs() looks up an existent nilfs object created on the
- * device and gets the reference count of the object. If no nilfs object
- * is found on the device, a new nilfs object is allocated.
- *
- * Return Value: On success, pointer to the nilfs object is returned.
- * On error, NULL is returned.
- */
-struct the_nilfs *find_or_create_nilfs(struct block_device *bdev)
-{
- struct the_nilfs *nilfs, *new = NULL;
-
- retry:
- spin_lock(&nilfs_lock);
- list_for_each_entry(nilfs, &nilfs_objects, ns_list) {
- if (nilfs->ns_bdev == bdev) {
- get_nilfs(nilfs);
- spin_unlock(&nilfs_lock);
- if (new)
- put_nilfs(new);
- return nilfs; /* existing object */
- }
- }
- if (new) {
- list_add_tail(&new->ns_list, &nilfs_objects);
- spin_unlock(&nilfs_lock);
- return new; /* new object */
- }
- spin_unlock(&nilfs_lock);
-
- new = alloc_nilfs(bdev);
- if (new)
- goto retry;
- return NULL; /* insufficient memory */
-}
-
-/**
- * put_nilfs - release a reference to the_nilfs
- * @nilfs: the_nilfs structure to be released
- *
- * put_nilfs() decrements a reference counter of the_nilfs.
- * If the reference count reaches zero, the_nilfs is freed.
+ * destroy_nilfs - destroy nilfs object
+ * @nilfs: nilfs object to be released
*/
-void put_nilfs(struct the_nilfs *nilfs)
+void destroy_nilfs(struct the_nilfs *nilfs)
{
- spin_lock(&nilfs_lock);
- if (!atomic_dec_and_test(&nilfs->ns_count)) {
- spin_unlock(&nilfs_lock);
- return;
- }
- list_del_init(&nilfs->ns_list);
- spin_unlock(&nilfs_lock);
-
- /*
- * Increment of ns_count never occurs below because the caller
- * of get_nilfs() holds at least one reference to the_nilfs.
- * Thus its exclusion control is not required here.
- */
-
might_sleep();
- if (nilfs_loaded(nilfs)) {
- nilfs_mdt_destroy(nilfs->ns_sufile);
- nilfs_mdt_destroy(nilfs->ns_cpfile);
- nilfs_mdt_destroy(nilfs->ns_dat);
- nilfs_mdt_destroy(nilfs->ns_gc_dat);
- }
if (nilfs_init(nilfs)) {
- nilfs_destroy_gccache(nilfs);
brelse(nilfs->ns_sbh[0]);
brelse(nilfs->ns_sbh[1]);
}
kfree(nilfs);
}
-static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block)
+static int nilfs_load_super_root(struct the_nilfs *nilfs,
+ struct super_block *sb, sector_t sr_block)
{
struct buffer_head *bh_sr;
struct nilfs_super_root *raw_sr;
struct nilfs_super_block **sbp = nilfs->ns_sbp;
+ struct nilfs_inode *rawi;
unsigned dat_entry_size, segment_usage_size, checkpoint_size;
unsigned inode_size;
int err;
@@ -192,40 +121,22 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block)
inode_size = nilfs->ns_inode_size;
- err = -ENOMEM;
- nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size);
- if (unlikely(!nilfs->ns_dat))
+ rawi = (void *)bh_sr->b_data + NILFS_SR_DAT_OFFSET(inode_size);
+ err = nilfs_dat_read(sb, dat_entry_size, rawi, &nilfs->ns_dat);
+ if (err)
goto failed;
- nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size);
- if (unlikely(!nilfs->ns_gc_dat))
+ rawi = (void *)bh_sr->b_data + NILFS_SR_CPFILE_OFFSET(inode_size);
+ err = nilfs_cpfile_read(sb, checkpoint_size, rawi, &nilfs->ns_cpfile);
+ if (err)
goto failed_dat;
- nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size);
- if (unlikely(!nilfs->ns_cpfile))
- goto failed_gc_dat;
-
- nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size);
- if (unlikely(!nilfs->ns_sufile))
+ rawi = (void *)bh_sr->b_data + NILFS_SR_SUFILE_OFFSET(inode_size);
+ err = nilfs_sufile_read(sb, segment_usage_size, rawi,
+ &nilfs->ns_sufile);
+ if (err)
goto failed_cpfile;
- nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
-
- err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data +
- NILFS_SR_DAT_OFFSET(inode_size));
- if (unlikely(err))
- goto failed_sufile;
-
- err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data +
- NILFS_SR_CPFILE_OFFSET(inode_size));
- if (unlikely(err))
- goto failed_sufile;
-
- err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data +
- NILFS_SR_SUFILE_OFFSET(inode_size));
- if (unlikely(err))
- goto failed_sufile;
-
raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);
@@ -233,17 +144,11 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block)
brelse(bh_sr);
return err;
- failed_sufile:
- nilfs_mdt_destroy(nilfs->ns_sufile);
-
failed_cpfile:
- nilfs_mdt_destroy(nilfs->ns_cpfile);
-
- failed_gc_dat:
- nilfs_mdt_destroy(nilfs->ns_gc_dat);
+ iput(nilfs->ns_cpfile);
failed_dat:
- nilfs_mdt_destroy(nilfs->ns_dat);
+ iput(nilfs->ns_dat);
goto failed;
}
@@ -306,15 +211,6 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
int valid_fs = nilfs_valid_fs(nilfs);
int err;
- if (nilfs_loaded(nilfs)) {
- if (valid_fs ||
- ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY)))
- return 0;
- printk(KERN_ERR "NILFS: the filesystem is in an incomplete "
- "recovery state.\n");
- return -EINVAL;
- }
-
if (!valid_fs) {
printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
if (s_flags & MS_RDONLY) {
@@ -375,7 +271,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
goto scan_error;
}
- err = nilfs_load_super_root(nilfs, ri.ri_super_root);
+ err = nilfs_load_super_root(nilfs, sbi->s_super, ri.ri_super_root);
if (unlikely(err)) {
printk(KERN_ERR "NILFS: error loading super root.\n");
goto failed;
@@ -443,10 +339,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
goto failed;
failed_unload:
- nilfs_mdt_destroy(nilfs->ns_cpfile);
- nilfs_mdt_destroy(nilfs->ns_sufile);
- nilfs_mdt_destroy(nilfs->ns_dat);
- nilfs_mdt_destroy(nilfs->ns_gc_dat);
+ iput(nilfs->ns_cpfile);
+ iput(nilfs->ns_sufile);
+ iput(nilfs->ns_dat);
failed:
nilfs_clear_recovery_info(&ri);
@@ -468,8 +363,8 @@ static unsigned long long nilfs_max_size(unsigned int blkbits)
static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
struct nilfs_super_block *sbp)
{
- if (le32_to_cpu(sbp->s_rev_level) != NILFS_CURRENT_REV) {
- printk(KERN_ERR "NILFS: revision mismatch "
+ if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
+ printk(KERN_ERR "NILFS: unsupported revision "
"(superblock rev.=%d.%d, current rev.=%d.%d). "
"Please check the version of mkfs.nilfs.\n",
le32_to_cpu(sbp->s_rev_level),
@@ -631,12 +526,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
*
* init_nilfs() performs common initialization per block device (e.g.
* reading the super block, getting disk layout information, initializing
- * shared fields in the_nilfs). It takes on some portion of the jobs
- * typically done by a fill_super() routine. This division arises from
- * the nature that multiple NILFS instances may be simultaneously
- * mounted on a device.
- * For multiple mounts on the same device, only the first mount
- * invokes these tasks.
+ * shared fields in the_nilfs).
*
* Return Value: On success, 0 is returned. On error, a negative error
* code is returned.
@@ -645,32 +535,10 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
{
struct super_block *sb = sbi->s_super;
struct nilfs_super_block *sbp;
- struct backing_dev_info *bdi;
int blocksize;
int err;
down_write(&nilfs->ns_sem);
- if (nilfs_init(nilfs)) {
- /* Load values from existing the_nilfs */
- sbp = nilfs->ns_sbp[0];
- err = nilfs_store_magic_and_option(sb, sbp, data);
- if (err)
- goto out;
-
- err = nilfs_check_feature_compatibility(sb, sbp);
- if (err)
- goto out;
-
- blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
- if (sb->s_blocksize != blocksize &&
- !sb_set_blocksize(sb, blocksize)) {
- printk(KERN_ERR "NILFS: blocksize %d unfit to device\n",
- blocksize);
- err = -EINVAL;
- }
- sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);
- goto out;
- }
blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
if (!blocksize) {
@@ -729,18 +597,10 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);
- bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
- nilfs->ns_bdi = bdi ? : &default_backing_dev_info;
-
err = nilfs_store_log_cursor(nilfs, sbp);
if (err)
goto failed_sbh;
- /* Initialize gcinode cache */
- err = nilfs_init_gccache(nilfs);
- if (err)
- goto failed_sbh;
-
set_nilfs_init(nilfs);
err = 0;
out:
@@ -775,9 +635,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
ret = blkdev_issue_discard(nilfs->ns_bdev,
start * sects_per_block,
nblocks * sects_per_block,
- GFP_NOFS,
- BLKDEV_IFL_WAIT |
- BLKDEV_IFL_BARRIER);
+ GFP_NOFS, 0);
if (ret < 0)
return ret;
nblocks = 0;
@@ -787,8 +645,7 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
ret = blkdev_issue_discard(nilfs->ns_bdev,
start * sects_per_block,
nblocks * sects_per_block,
- GFP_NOFS,
- BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+ GFP_NOFS, 0);
return ret;
}
@@ -815,79 +672,92 @@ int nilfs_near_disk_full(struct the_nilfs *nilfs)
return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
}
-/**
- * nilfs_find_sbinfo - find existing nilfs_sb_info structure
- * @nilfs: nilfs object
- * @rw_mount: mount type (non-zero value for read/write mount)
- * @cno: checkpoint number (zero for read-only mount)
- *
- * nilfs_find_sbinfo() returns the nilfs_sb_info structure which
- * @rw_mount and @cno (in case of snapshots) matched. If no instance
- * was found, NULL is returned. Although the super block instance can
- * be unmounted after this function returns, the nilfs_sb_info struct
- * is kept on memory until nilfs_put_sbinfo() is called.
- */
-struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *nilfs,
- int rw_mount, __u64 cno)
+struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno)
{
- struct nilfs_sb_info *sbi;
-
- down_read(&nilfs->ns_super_sem);
- /*
- * The SNAPSHOT flag and sb->s_flags are supposed to be
- * protected with nilfs->ns_super_sem.
- */
- sbi = nilfs->ns_current;
- if (rw_mount) {
- if (sbi && !(sbi->s_super->s_flags & MS_RDONLY))
- goto found; /* read/write mount */
- else
- goto out;
- } else if (cno == 0) {
- if (sbi && (sbi->s_super->s_flags & MS_RDONLY))
- goto found; /* read-only mount */
- else
- goto out;
+ struct rb_node *n;
+ struct nilfs_root *root;
+
+ spin_lock(&nilfs->ns_cptree_lock);
+ n = nilfs->ns_cptree.rb_node;
+ while (n) {
+ root = rb_entry(n, struct nilfs_root, rb_node);
+
+ if (cno < root->cno) {
+ n = n->rb_left;
+ } else if (cno > root->cno) {
+ n = n->rb_right;
+ } else {
+ atomic_inc(&root->count);
+ spin_unlock(&nilfs->ns_cptree_lock);
+ return root;
+ }
}
+ spin_unlock(&nilfs->ns_cptree_lock);
- list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
- if (nilfs_test_opt(sbi, SNAPSHOT) &&
- sbi->s_snapshot_cno == cno)
- goto found; /* snapshot mount */
- }
- out:
- up_read(&nilfs->ns_super_sem);
return NULL;
-
- found:
- atomic_inc(&sbi->s_count);
- up_read(&nilfs->ns_super_sem);
- return sbi;
}
-int nilfs_checkpoint_is_mounted(struct the_nilfs *nilfs, __u64 cno,
- int snapshot_mount)
+struct nilfs_root *
+nilfs_find_or_create_root(struct the_nilfs *nilfs, __u64 cno)
{
- struct nilfs_sb_info *sbi;
- int ret = 0;
+ struct rb_node **p, *parent;
+ struct nilfs_root *root, *new;
- down_read(&nilfs->ns_super_sem);
- if (cno == 0 || cno > nilfs->ns_cno)
- goto out_unlock;
+ root = nilfs_lookup_root(nilfs, cno);
+ if (root)
+ return root;
- list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
- if (sbi->s_snapshot_cno == cno &&
- (!snapshot_mount || nilfs_test_opt(sbi, SNAPSHOT))) {
- /* exclude read-only mounts */
- ret++;
- break;
+ new = kmalloc(sizeof(*root), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ spin_lock(&nilfs->ns_cptree_lock);
+
+ p = &nilfs->ns_cptree.rb_node;
+ parent = NULL;
+
+ while (*p) {
+ parent = *p;
+ root = rb_entry(parent, struct nilfs_root, rb_node);
+
+ if (cno < root->cno) {
+ p = &(*p)->rb_left;
+ } else if (cno > root->cno) {
+ p = &(*p)->rb_right;
+ } else {
+ atomic_inc(&root->count);
+ spin_unlock(&nilfs->ns_cptree_lock);
+ kfree(new);
+ return root;
}
}
- /* for protecting recent checkpoints */
- if (cno >= nilfs_last_cno(nilfs))
- ret++;
- out_unlock:
- up_read(&nilfs->ns_super_sem);
- return ret;
+ new->cno = cno;
+ new->ifile = NULL;
+ new->nilfs = nilfs;
+ atomic_set(&new->count, 1);
+ atomic_set(&new->inodes_count, 0);
+ atomic_set(&new->blocks_count, 0);
+
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, &nilfs->ns_cptree);
+
+ spin_unlock(&nilfs->ns_cptree_lock);
+
+ return new;
+}
+
+void nilfs_put_root(struct nilfs_root *root)
+{
+ if (atomic_dec_and_test(&root->count)) {
+ struct the_nilfs *nilfs = root->nilfs;
+
+ spin_lock(&nilfs->ns_cptree_lock);
+ rb_erase(&root->rb_node, &nilfs->ns_cptree);
+ spin_unlock(&nilfs->ns_cptree_lock);
+ if (root->ifile)
+ iput(root->ifile);
+
+ kfree(root);
+ }
}
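nilfs_find_or_create_root() above follows a common pattern: look up the checkpoint under ns_cptree_lock, allocate a candidate outside the lock, then re-walk the tree under the lock and discard the candidate if a racing caller inserted the same checkpoint first. Below is a self-contained user-space sketch of that pattern, substituting an unbalanced binary search tree and a pthread mutex for the kernel rb-tree and spinlock; all names are illustrative.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct root {
	uint64_t cno;
	int refcount;			/* protected by tree_lock in this toy model */
	struct root *left, *right;
};

static struct root *tree;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold tree_lock. Takes a reference on the node it returns. */
static struct root *lookup_root(uint64_t cno)
{
	struct root *r = tree;

	while (r) {
		if (cno < r->cno)
			r = r->left;
		else if (cno > r->cno)
			r = r->right;
		else {
			r->refcount++;
			return r;
		}
	}
	return NULL;
}

static struct root *find_or_create_root(uint64_t cno)
{
	struct root *r, *new;

	pthread_mutex_lock(&tree_lock);
	r = lookup_root(cno);
	pthread_mutex_unlock(&tree_lock);
	if (r)
		return r;

	/* Allocate outside the lock, then re-walk: a racing caller may have
	 * inserted the same checkpoint number in the meantime. */
	new = calloc(1, sizeof(*new));
	if (!new)
		return NULL;
	new->cno = cno;
	new->refcount = 1;

	pthread_mutex_lock(&tree_lock);
	{
		struct root **p = &tree;

		while (*p) {
			if (cno < (*p)->cno)
				p = &(*p)->left;
			else if (cno > (*p)->cno)
				p = &(*p)->right;
			else {
				(*p)->refcount++;
				r = *p;
				pthread_mutex_unlock(&tree_lock);
				free(new);	/* lost the race */
				return r;
			}
		}
		*p = new;		/* link the new node into the tree */
	}
	pthread_mutex_unlock(&tree_lock);
	return new;
}

int main(void)
{
	struct root *a = find_or_create_root(5);
	struct root *b = find_or_create_root(5);

	printf("same node: %s, refcount=%d\n",
	       a == b ? "yes" : "no", a ? a->refcount : 0);
	return 0;
}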
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index f785a7b0ab9..69226e14b74 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/buffer_head.h>
+#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
@@ -45,22 +46,13 @@ enum {
/**
* struct the_nilfs - struct to supervise multiple nilfs mount points
* @ns_flags: flags
- * @ns_count: reference count
- * @ns_list: list head for nilfs_list
* @ns_bdev: block device
- * @ns_bdi: backing dev info
- * @ns_writer: back pointer to writable nilfs_sb_info
* @ns_sem: semaphore for shared states
- * @ns_super_sem: semaphore for global operations across super block instances
- * @ns_mount_mutex: mutex protecting mount process of nilfs
- * @ns_writer_sem: semaphore protecting ns_writer attach/detach
- * @ns_current: back pointer to current mount
* @ns_sbh: buffer heads of on-disk super blocks
* @ns_sbp: pointers to super block data
* @ns_sbwtime: previous write time of super block
* @ns_sbwcount: write count of super block
* @ns_sbsize: size of valid data in super block
- * @ns_supers: list of nilfs super block structs
* @ns_seg_seq: segment sequence counter
* @ns_segnum: index number of the latest full segment.
* @ns_nextnum: index number of the full segment index to be used next
@@ -79,9 +71,9 @@ enum {
* @ns_dat: DAT file inode
* @ns_cpfile: checkpoint file inode
* @ns_sufile: segusage file inode
- * @ns_gc_dat: shadow inode of the DAT file inode for GC
+ * @ns_cptree: rb-tree of all mounted checkpoints (nilfs_root)
+ * @ns_cptree_lock: lock protecting @ns_cptree
* @ns_gc_inodes: dummy inodes to keep live blocks
- * @ns_gc_inodes_h: hash list to keep dummy inode holding live blocks
* @ns_blocksize_bits: bit length of block size
* @ns_blocksize: block size
* @ns_nsegments: number of segments in filesystem
@@ -95,22 +87,9 @@ enum {
*/
struct the_nilfs {
unsigned long ns_flags;
- atomic_t ns_count;
- struct list_head ns_list;
struct block_device *ns_bdev;
- struct backing_dev_info *ns_bdi;
- struct nilfs_sb_info *ns_writer;
struct rw_semaphore ns_sem;
- struct rw_semaphore ns_super_sem;
- struct mutex ns_mount_mutex;
- struct rw_semaphore ns_writer_sem;
-
- /*
- * components protected by ns_super_sem
- */
- struct nilfs_sb_info *ns_current;
- struct list_head ns_supers;
/*
* used for
@@ -163,11 +142,13 @@ struct the_nilfs {
struct inode *ns_dat;
struct inode *ns_cpfile;
struct inode *ns_sufile;
- struct inode *ns_gc_dat;
- /* GC inode list and hash table head */
+ /* Checkpoint tree */
+ struct rb_root ns_cptree;
+ spinlock_t ns_cptree_lock;
+
+ /* GC inode list */
struct list_head ns_gc_inodes;
- struct hlist_head *ns_gc_inodes_h;
/* Disk layout information (static) */
unsigned int ns_blocksize_bits;
@@ -182,9 +163,6 @@ struct the_nilfs {
u32 ns_crc_seed;
};
-#define NILFS_GCINODE_HASH_BITS 8
-#define NILFS_GCINODE_HASH_SIZE (1<<NILFS_GCINODE_HASH_BITS)
-
#define THE_NILFS_FNS(bit, name) \
static inline void set_nilfs_##name(struct the_nilfs *nilfs) \
{ \
@@ -205,6 +183,32 @@ THE_NILFS_FNS(DISCONTINUED, discontinued)
THE_NILFS_FNS(GC_RUNNING, gc_running)
THE_NILFS_FNS(SB_DIRTY, sb_dirty)
+/**
+ * struct nilfs_root - nilfs root object
+ * @cno: checkpoint number
+ * @rb_node: red-black tree node
+ * @count: refcount of this structure
+ * @nilfs: nilfs object
+ * @ifile: inode file
+ * @root: root inode
+ * @inodes_count: number of inodes
+ * @blocks_count: number of blocks (Reserved)
+ */
+struct nilfs_root {
+ __u64 cno;
+ struct rb_node rb_node;
+
+ atomic_t count;
+ struct the_nilfs *nilfs;
+ struct inode *ifile;
+
+ atomic_t inodes_count;
+ atomic_t blocks_count;
+};
+
+/* Special checkpoint number */
+#define NILFS_CPTREE_CURRENT_CNO 0
+
/* Minimum interval of periodical update of superblocks (in seconds) */
#define NILFS_SB_FREQ 10
@@ -221,46 +225,25 @@ static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs)
}
void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
-struct the_nilfs *find_or_create_nilfs(struct block_device *);
-void put_nilfs(struct the_nilfs *);
+struct the_nilfs *alloc_nilfs(struct block_device *bdev);
+void destroy_nilfs(struct the_nilfs *nilfs);
int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *);
int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *);
int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t);
int nilfs_count_free_blocks(struct the_nilfs *, sector_t *);
+struct nilfs_root *nilfs_lookup_root(struct the_nilfs *nilfs, __u64 cno);
+struct nilfs_root *nilfs_find_or_create_root(struct the_nilfs *nilfs,
+ __u64 cno);
+void nilfs_put_root(struct nilfs_root *root);
struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64);
-int nilfs_checkpoint_is_mounted(struct the_nilfs *, __u64, int);
int nilfs_near_disk_full(struct the_nilfs *);
void nilfs_fall_back_super_block(struct the_nilfs *);
void nilfs_swap_super_block(struct the_nilfs *);
-static inline void get_nilfs(struct the_nilfs *nilfs)
-{
- /* Caller must have at least one reference of the_nilfs. */
- atomic_inc(&nilfs->ns_count);
-}
-
-static inline void
-nilfs_attach_writer(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
-{
- down_write(&nilfs->ns_writer_sem);
- nilfs->ns_writer = sbi;
- up_write(&nilfs->ns_writer_sem);
-}
-
-static inline void
-nilfs_detach_writer(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
-{
- down_write(&nilfs->ns_writer_sem);
- if (sbi == nilfs->ns_writer)
- nilfs->ns_writer = NULL;
- up_write(&nilfs->ns_writer_sem);
-}
-
-static inline void nilfs_put_sbinfo(struct nilfs_sb_info *sbi)
+static inline void nilfs_get_root(struct nilfs_root *root)
{
- if (atomic_dec_and_test(&sbi->s_count))
- kfree(sbi);
+ atomic_inc(&root->count);
}
static inline int nilfs_valid_fs(struct the_nilfs *nilfs)
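The nilfs_root lifetime introduced above is a plain reference count: nilfs_get_root() increments it and nilfs_put_root() destroys the object on the last drop, also unlinking it from ns_cptree and releasing the ifile inode. A minimal user-space sketch of the get/put idiom using C11 atomics follows; the obj_* names are illustrative, and the tree unlink and iput steps are omitted.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int count;
	/* payload ... */
};

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_init(&o->count, 1);	/* creator holds one reference */
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->count, 1);
}

static void obj_put(struct obj *o)
{
	/* Free only when the last reference is dropped, analogous to
	 * atomic_dec_and_test() in the kernel version. */
	if (atomic_fetch_sub(&o->count, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = obj_alloc();

	obj_get(o);	/* second user takes a reference */
	obj_put(o);	/* first put: object stays alive */
	obj_put(o);	/* last put: object is freed */
	printf("done\n");
	return 0;
}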
diff --git a/fs/no-block.c b/fs/no-block.c
index d269a93d346..6e40e42a43d 100644
--- a/fs/no-block.c
+++ b/fs/no-block.c
@@ -19,4 +19,5 @@ static int no_blkdev_open(struct inode * inode, struct file * filp)
const struct file_operations def_blk_fops = {
.open = no_blkdev_open,
+ .llseek = noop_llseek,
};
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 5ed8e58d7bf..bbcb98e7fcc 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -433,6 +433,7 @@ static const struct file_operations fanotify_fops = {
.release = fanotify_release,
.unlocked_ioctl = fanotify_ioctl,
.compat_ioctl = fanotify_ioctl,
+ .llseek = noop_llseek,
};
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index bf7f6d776c3..24edc1185d5 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -344,6 +344,7 @@ static const struct file_operations inotify_fops = {
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
+ .llseek = noop_llseek,
};
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 512806171bf..19c5180f8a2 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -30,7 +30,6 @@
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/moduleparam.h>
-#include <linux/smp_lock.h>
#include <linux/bitmap.h>
#include "sysctl.h"
@@ -445,7 +444,6 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
ntfs_debug("Entering with remount options string: %s", opt);
- lock_kernel();
#ifndef NTFS_RW
/* For read-only compiled driver, enforce read-only flag. */
*flags |= MS_RDONLY;
@@ -469,18 +467,15 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
if (NVolErrors(vol)) {
ntfs_error(sb, "Volume has errors and is read-only%s",
es);
- unlock_kernel();
return -EROFS;
}
if (vol->vol_flags & VOLUME_IS_DIRTY) {
ntfs_error(sb, "Volume is dirty and read-only%s", es);
- unlock_kernel();
return -EROFS;
}
if (vol->vol_flags & VOLUME_MODIFIED_BY_CHKDSK) {
ntfs_error(sb, "Volume has been modified by chkdsk "
"and is read-only%s", es);
- unlock_kernel();
return -EROFS;
}
if (vol->vol_flags & VOLUME_MUST_MOUNT_RO_MASK) {
@@ -488,13 +483,11 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
"(0x%x) and is read-only%s",
(unsigned)le16_to_cpu(vol->vol_flags),
es);
- unlock_kernel();
return -EROFS;
}
if (ntfs_set_volume_flags(vol, VOLUME_IS_DIRTY)) {
ntfs_error(sb, "Failed to set dirty bit in volume "
"information flags%s", es);
- unlock_kernel();
return -EROFS;
}
#if 0
@@ -514,21 +507,18 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
ntfs_error(sb, "Failed to empty journal $LogFile%s",
es);
NVolSetErrors(vol);
- unlock_kernel();
return -EROFS;
}
if (!ntfs_mark_quotas_out_of_date(vol)) {
ntfs_error(sb, "Failed to mark quotas out of date%s",
es);
NVolSetErrors(vol);
- unlock_kernel();
return -EROFS;
}
if (!ntfs_stamp_usnjrnl(vol)) {
ntfs_error(sb, "Failed to stamp transation log "
"($UsnJrnl)%s", es);
NVolSetErrors(vol);
- unlock_kernel();
return -EROFS;
}
} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
@@ -544,11 +534,9 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
// TODO: Deal with *flags.
- if (!parse_options(vol, opt)) {
- unlock_kernel();
+ if (!parse_options(vol, opt))
return -EINVAL;
- }
- unlock_kernel();
+
ntfs_debug("Done.");
return 0;
}
@@ -2261,8 +2249,6 @@ static void ntfs_put_super(struct super_block *sb)
ntfs_debug("Entering.");
- lock_kernel();
-
#ifdef NTFS_RW
/*
* Commit all inodes while they are still open in case some of them
@@ -2433,8 +2419,6 @@ static void ntfs_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
kfree(vol);
-
- unlock_kernel();
}
/**
@@ -2772,8 +2756,6 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
init_rwsem(&vol->mftbmp_lock);
init_rwsem(&vol->lcnbmp_lock);
- unlock_kernel();
-
/* By default, enable sparse support. */
NVolSetSparseEnabled(vol);
@@ -2940,7 +2922,6 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
}
mutex_unlock(&ntfs_lock);
sb->s_export_op = &ntfs_export_ops;
- lock_kernel();
lockdep_on();
return 0;
}
@@ -3057,7 +3038,6 @@ iput_tmp_ino_err_out_now:
}
/* Errors at this stage are irrelevant. */
err_out_now:
- lock_kernel();
sb->s_fs_info = NULL;
kfree(vol);
ntfs_debug("Failed, returning -EINVAL.");
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 96fa7ebc530..15fdbdf9eb4 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -129,7 +129,7 @@ struct o2net_node {
struct o2net_sock_container {
struct kref sc_kref;
- /* the next two are vaild for the life time of the sc */
+ /* the next two are valid for the life time of the sc */
struct socket *sc_sock;
struct o2nm_node *sc_node;
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index c2903b84bb7..a7ebd9d42dc 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -612,6 +612,7 @@ static const struct file_operations dlmfs_file_operations = {
.poll = dlmfs_file_poll,
.read = dlmfs_file_read,
.write = dlmfs_file_write,
+ .llseek = default_llseek,
};
static const struct inode_operations dlmfs_dir_inode_operations = {
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 9e8cc4346b7..1ca6867935b 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -187,8 +187,7 @@ static int ocfs2_sync_file(struct file *file, int datasync)
* platter
*/
if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
- NULL, BLKDEV_IFL_WAIT);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
goto bail;
}
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 2dc57bca068..252e7c82f92 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -22,7 +22,6 @@
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/reboot.h>
#include <asm/uaccess.h>
@@ -612,12 +611,10 @@ static int ocfs2_control_open(struct inode *inode, struct file *file)
return -ENOMEM;
p->op_this_node = -1;
- lock_kernel();
mutex_lock(&ocfs2_control_lock);
file->private_data = p;
list_add(&p->op_list, &ocfs2_control_private_list);
mutex_unlock(&ocfs2_control_lock);
- unlock_kernel();
return 0;
}
@@ -628,6 +625,7 @@ static const struct file_operations ocfs2_control_fops = {
.read = ocfs2_control_read,
.write = ocfs2_control_write,
.owner = THIS_MODULE,
+ .llseek = default_llseek,
};
static struct miscdevice ocfs2_control_device = {
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index a8a0ca44f88..56f0cb39582 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -630,8 +630,6 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
struct ocfs2_super *osb = OCFS2_SB(sb);
u32 tmp;
- lock_kernel();
-
if (!ocfs2_parse_options(sb, data, &parsed_options, 1) ||
!ocfs2_check_set_options(sb, &parsed_options)) {
ret = -EINVAL;
@@ -739,7 +737,6 @@ unlock_osb:
MS_POSIXACL : 0);
}
out:
- unlock_kernel();
return ret;
}
@@ -1696,13 +1693,9 @@ static void ocfs2_put_super(struct super_block *sb)
{
mlog_entry("(0x%p)\n", sb);
- lock_kernel();
-
ocfs2_sync_blockdev(sb);
ocfs2_dismount_volume(sb, 0);
- unlock_kernel();
-
mlog_exit_void();
}
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 79fbf3f390f..0a8b0ad0c7e 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -352,6 +352,7 @@ static void part_release(struct device *dev)
{
struct hd_struct *p = dev_to_part(dev);
free_part_stats(p);
+ free_part_info(p);
kfree(p);
}
@@ -401,7 +402,8 @@ static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
whole_disk_show, NULL);
struct hd_struct *add_partition(struct gendisk *disk, int partno,
- sector_t start, sector_t len, int flags)
+ sector_t start, sector_t len, int flags,
+ struct partition_meta_info *info)
{
struct hd_struct *p;
dev_t devt = MKDEV(0, 0);
@@ -438,6 +440,14 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
p->partno = partno;
p->policy = get_disk_ro(disk);
+ if (info) {
+ struct partition_meta_info *pinfo = alloc_part_info(disk);
+ if (!pinfo)
+ goto out_free_stats;
+ memcpy(pinfo, info, sizeof(*info));
+ p->info = pinfo;
+ }
+
dname = dev_name(ddev);
if (isdigit(dname[strlen(dname) - 1]))
dev_set_name(pdev, "%sp%d", dname, partno);
@@ -451,7 +461,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
err = blk_alloc_devt(p, &devt);
if (err)
- goto out_free_stats;
+ goto out_free_info;
pdev->devt = devt;
/* delay uevent until 'holders' subdir is created */
@@ -481,6 +491,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
return p;
+out_free_info:
+ free_part_info(p);
out_free_stats:
free_part_stats(p);
out_free:
@@ -513,14 +525,14 @@ void register_disk(struct gendisk *disk)
if (device_add(ddev))
return;
-#ifndef CONFIG_SYSFS_DEPRECATED
- err = sysfs_create_link(block_depr, &ddev->kobj,
- kobject_name(&ddev->kobj));
- if (err) {
- device_del(ddev);
- return;
+ if (!sysfs_deprecated) {
+ err = sysfs_create_link(block_depr, &ddev->kobj,
+ kobject_name(&ddev->kobj));
+ if (err) {
+ device_del(ddev);
+ return;
+ }
}
-#endif
disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
@@ -642,6 +654,7 @@ rescan:
/* add partitions */
for (p = 1; p < state->limit; p++) {
sector_t size, from;
+ struct partition_meta_info *info = NULL;
size = state->parts[p].size;
if (!size)
@@ -675,8 +688,12 @@ rescan:
size = get_capacity(disk) - from;
}
}
+
+ if (state->parts[p].has_info)
+ info = &state->parts[p].info;
part = add_partition(disk, p, from, size,
- state->parts[p].flags);
+ state->parts[p].flags,
+ &state->parts[p].info);
if (IS_ERR(part)) {
printk(KERN_ERR " %s: p%d could not be added: %ld\n",
disk->disk_name, p, -PTR_ERR(part));
@@ -737,8 +754,7 @@ void del_gendisk(struct gendisk *disk)
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
disk->driverfs_dev = NULL;
-#ifndef CONFIG_SYSFS_DEPRECATED
- sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
-#endif
+ if (!sysfs_deprecated)
+ sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
device_del(disk_to_dev(disk));
}
diff --git a/fs/partitions/check.h b/fs/partitions/check.h
index 8e4e103ba21..d68bf4dc3bc 100644
--- a/fs/partitions/check.h
+++ b/fs/partitions/check.h
@@ -1,5 +1,6 @@
#include <linux/pagemap.h>
#include <linux/blkdev.h>
+#include <linux/genhd.h>
/*
* add_gd_partition adds a partitions details to the devices partition
@@ -12,6 +13,8 @@ struct parsed_partitions {
sector_t from;
sector_t size;
int flags;
+ bool has_info;
+ struct partition_meta_info info;
} parts[DISK_MAX_PARTS];
int next;
int limit;
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index dbb44d4bb8a..ac0ccb5026a 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -94,6 +94,7 @@
*
************************************************************/
#include <linux/crc32.h>
+#include <linux/ctype.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include "check.h"
@@ -604,6 +605,7 @@ int efi_partition(struct parsed_partitions *state)
gpt_entry *ptes = NULL;
u32 i;
unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
+ u8 unparsed_guid[37];
if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
kfree(gpt);
@@ -614,6 +616,9 @@ int efi_partition(struct parsed_partitions *state)
pr_debug("GUID Partition Table is valid! Yea!\n");
for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
+ struct partition_meta_info *info;
+ unsigned label_count = 0;
+ unsigned label_max;
u64 start = le64_to_cpu(ptes[i].starting_lba);
u64 size = le64_to_cpu(ptes[i].ending_lba) -
le64_to_cpu(ptes[i].starting_lba) + 1ULL;
@@ -627,6 +632,26 @@ int efi_partition(struct parsed_partitions *state)
if (!efi_guidcmp(ptes[i].partition_type_guid,
PARTITION_LINUX_RAID_GUID))
state->parts[i + 1].flags = ADDPART_FLAG_RAID;
+
+ info = &state->parts[i + 1].info;
+ /* Instead of doing a manual swap to big endian, reuse the
+ * common ASCII hex format as the interim.
+ */
+ efi_guid_unparse(&ptes[i].unique_partition_guid, unparsed_guid);
+ part_pack_uuid(unparsed_guid, info->uuid);
+
+ /* Naively convert UTF16-LE to 7 bits. */
+ label_max = min(sizeof(info->volname) - 1,
+ sizeof(ptes[i].partition_name));
+ info->volname[label_max] = 0;
+ while (label_count < label_max) {
+ u8 c = ptes[i].partition_name[label_count] & 0xff;
+ if (c && !isprint(c))
+ c = '!';
+ info->volname[label_count] = c;
+ label_count++;
+ }
+ state->parts[i + 1].has_info = true;
}
kfree(ptes);
kfree(gpt);
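The label conversion added to efi_partition() keeps only the low byte of each UTF-16LE code unit of the partition name and substitutes '!' for non-printable results. A user-space sketch of that transformation is below; utf16le_to_ascii() and the sample data are illustrative, not part of the kernel interface.

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

/* Convert up to n UTF-16LE code units to a NUL-terminated ASCII string,
 * replacing anything non-printable with '!'. */
static void utf16le_to_ascii(const uint16_t *src, size_t n,
			     char *dst, size_t dstlen)
{
	size_t i, max;

	if (!dstlen)
		return;
	max = dstlen - 1;
	if (n < max)
		max = n;
	for (i = 0; i < max; i++) {
		unsigned char c = src[i] & 0xff;	/* drop the high byte */

		if (c && !isprint(c))
			c = '!';
		dst[i] = c;
	}
	dst[i] = '\0';
}

int main(void)
{
	/* "Dataé" encoded as UTF-16LE code units, including the terminator */
	const uint16_t name[] = { 'D', 'a', 't', 'a', 0x00e9, 0 };
	char label[16];

	utf16le_to_ascii(name, sizeof(name) / sizeof(name[0]),
			 label, sizeof(label));
	printf("volname: %s\n", label);	/* prints "Data!" in the C locale */
	return 0;
}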
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 5bf8a04b5d9..789c625c7aa 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -5,7 +5,7 @@
* Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
- * Documentation is available at http://www.linux-ntfs.org/content/view/19/37/
+ * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
diff --git a/fs/partitions/ldm.h b/fs/partitions/ldm.h
index d1fb50b28d8..374242c0971 100644
--- a/fs/partitions/ldm.h
+++ b/fs/partitions/ldm.h
@@ -5,7 +5,7 @@
* Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
- * Documentation is available at http://www.linux-ntfs.org/content/view/19/37/
+ * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/fs/pipe.c b/fs/pipe.c
index 279eef96c51..37eb1ebeaa9 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -382,7 +382,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
error = ops->confirm(pipe, buf);
if (error) {
if (!ret)
- error = ret;
+ ret = error;
break;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8e4addaa542..dc5d5f51f3f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1151,6 +1151,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
static const struct file_operations proc_oom_score_adj_operations = {
.read = oom_score_adj_read,
.write = oom_score_adj_write,
+ .llseek = default_llseek,
};
#ifdef CONFIG_AUDITSYSCALL
@@ -2039,11 +2040,13 @@ static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
static const struct file_operations proc_fdinfo_file_operations = {
.open = nonseekable_open,
.read = proc_fdinfo_read,
+ .llseek = no_llseek,
};
static const struct file_operations proc_fd_operations = {
.read = generic_read_dir,
.readdir = proc_readfd,
+ .llseek = default_llseek,
};
/*
@@ -2112,6 +2115,7 @@ static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
static const struct file_operations proc_fdinfo_operations = {
.read = generic_read_dir,
.readdir = proc_readfdinfo,
+ .llseek = default_llseek,
};
/*
@@ -2343,6 +2347,7 @@ static int proc_attr_dir_readdir(struct file * filp,
static const struct file_operations proc_attr_dir_operations = {
.read = generic_read_dir,
.readdir = proc_attr_dir_readdir,
+ .llseek = default_llseek,
};
static struct dentry *proc_attr_dir_lookup(struct inode *dir,
@@ -2751,6 +2756,7 @@ static int proc_tgid_base_readdir(struct file * filp,
static const struct file_operations proc_tgid_base_operations = {
.read = generic_read_dir,
.readdir = proc_tgid_base_readdir,
+ .llseek = default_llseek,
};
static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
@@ -3088,6 +3094,7 @@ static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *den
static const struct file_operations proc_tid_base_operations = {
.read = generic_read_dir,
.readdir = proc_tid_base_readdir,
+ .llseek = default_llseek,
};
static const struct inode_operations proc_tid_base_inode_operations = {
@@ -3324,4 +3331,5 @@ static const struct inode_operations proc_task_inode_operations = {
static const struct file_operations proc_task_operations = {
.read = generic_read_dir,
.readdir = proc_task_readdir,
+ .llseek = default_llseek,
};
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 5be436ea088..2fc52552271 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -364,6 +364,7 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
static const struct file_operations proc_sys_file_operations = {
.read = proc_sys_read,
.write = proc_sys_write,
+ .llseek = default_llseek,
};
static const struct file_operations proc_sys_dir_file_operations = {
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 4258384ed22..93d99b31632 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -179,6 +179,7 @@ static int proc_root_readdir(struct file * filp,
static const struct file_operations proc_root_operations = {
.read = generic_read_dir,
.readdir = proc_root_readdir,
+ .llseek = default_llseek,
};
/*
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 1dbca4e8cc1..871e25ed006 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -539,6 +539,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
const struct file_operations proc_clear_refs_operations = {
.write = clear_refs_write,
+ .llseek = noop_llseek,
};
struct pagemapread {
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 6e8fc62b40a..7b0329468a5 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -11,7 +11,6 @@
* 20-06-1998 by Frank Denis : Linux 2.1.99+ & dcache support.
*/
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include "qnx4.h"
@@ -29,8 +28,6 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
QNX4DEBUG((KERN_INFO "qnx4_readdir:i_size = %ld\n", (long) inode->i_size));
QNX4DEBUG((KERN_INFO "filp->f_pos = %ld\n", (long) filp->f_pos));
- lock_kernel();
-
while (filp->f_pos < inode->i_size) {
blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS );
bh = sb_bread(inode->i_sb, blknum);
@@ -71,7 +68,6 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
brelse(bh);
}
out:
- unlock_kernel();
return 0;
}
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 16829722be9..01bad30026f 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/highuid.h>
-#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
@@ -157,8 +156,6 @@ static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf)
struct super_block *sb = dentry->d_sb;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
- lock_kernel();
-
buf->f_type = sb->s_magic;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size) * 8;
@@ -168,8 +165,6 @@ static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
- unlock_kernel();
-
return 0;
}
@@ -283,7 +278,6 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
goto outi;
brelse(bh);
-
return 0;
outi:
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
index 58703ebba87..275327b5615 100644
--- a/fs/qnx4/namei.c
+++ b/fs/qnx4/namei.c
@@ -12,7 +12,6 @@
* 04-07-1998 by Frank Denis : first step for rmdir/unlink.
*/
-#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include "qnx4.h"
@@ -109,7 +108,6 @@ struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nam
int len = dentry->d_name.len;
struct inode *foundinode = NULL;
- lock_kernel();
if (!(bh = qnx4_find_entry(len, dir, name, &de, &ino)))
goto out;
/* The entry is linked, let's get the real info */
@@ -123,13 +121,11 @@ struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nam
foundinode = qnx4_iget(dir->i_sb, ino);
if (IS_ERR(foundinode)) {
- unlock_kernel();
QNX4DEBUG((KERN_ERR "qnx4: lookup->iget -> error %ld\n",
PTR_ERR(foundinode)));
return ERR_CAST(foundinode);
}
out:
- unlock_kernel();
d_add(dentry, foundinode);
return NULL;
diff --git a/fs/read_write.c b/fs/read_write.c
index 74e36586e4d..e757ef26e4c 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -124,7 +124,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
{
loff_t retval;
- lock_kernel();
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (origin) {
case SEEK_END:
offset += i_size_read(file->f_path.dentry->d_inode);
@@ -145,7 +145,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
retval = offset;
}
out:
- unlock_kernel();
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return retval;
}
EXPORT_SYMBOL(default_llseek);
@@ -156,7 +156,6 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
fn = no_llseek;
if (file->f_mode & FMODE_LSEEK) {
- fn = default_llseek;
if (file->f_op && file->f_op->llseek)
fn = file->f_op->llseek;
}
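
Editorial note on the hunk above: with the default_llseek() fallback removed, a file_operations that lacks an explicit .llseek now resolves to no_llseek() and seeks fail with -ESPIPE, which is why the other hunks in this series add explicit .llseek initializers. A minimal sketch of how vfs_llseek() reads after this change, assuming the surrounding 2.6.36-era declarations of no_llseek() and FMODE_LSEEK:

loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
{
        loff_t (*fn)(struct file *, loff_t, int);

        fn = no_llseek;                 /* default: seeking is not supported */
        if (file->f_mode & FMODE_LSEEK) {
                /* only an explicit ->llseek method is honoured now */
                if (file->f_op && file->f_op->llseek)
                        fn = file->f_op->llseek;
        }
        return fn(file, offset, origin);
}
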
diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig
index 513f431038f..7cd46666ba2 100644
--- a/fs/reiserfs/Kconfig
+++ b/fs/reiserfs/Kconfig
@@ -10,7 +10,8 @@ config REISERFS_FS
In general, ReiserFS is as fast as ext2, but is very efficient with
large directories and small files. Additional patches are needed
- for NFS and quotas, please see <http://www.namesys.com/> for links.
+ for NFS and quotas, please see
+ <https://reiser4.wiki.kernel.org/index.php/Main_Page> for links.
It is more easily extended to have features currently found in
database and keyword search systems than block allocation based file
@@ -18,7 +19,8 @@ config REISERFS_FS
plugins consistent with our motto ``It takes more than a license to
make source code open.''
- Read <http://www.namesys.com/> to learn more about reiserfs.
+ Read <https://reiser4.wiki.kernel.org/index.php/Main_Page>
+ to learn more about reiserfs.
Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com.
diff --git a/fs/reiserfs/README b/fs/reiserfs/README
index 14e8c9d460e..e2f7a264e3f 100644
--- a/fs/reiserfs/README
+++ b/fs/reiserfs/README
@@ -43,7 +43,7 @@ to address the fair crediting issue in the next GPL version.)
[END LICENSING]
Reiserfs is a file system based on balanced tree algorithms, which is
-described at http://devlinux.com/namesys.
+described at https://reiser4.wiki.kernel.org/index.php/Main_Page
Stop reading here. Go there, then return.
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 6846371498b..91f080cc76c 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -152,8 +152,7 @@ static int reiserfs_sync_file(struct file *filp, int datasync)
barrier_done = reiserfs_commit_for_inode(inode);
reiserfs_write_unlock(inode->i_sb);
if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
- BLKDEV_IFL_WAIT);
+ blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
if (barrier_done < 0)
return barrier_done;
return (err < 0) ? -EIO : 0;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 812e2c05aa2..076c8b19468 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -138,13 +138,6 @@ static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
return 0;
}
-static void disable_barrier(struct super_block *s)
-{
- REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
- printk("reiserfs: disabling flush barriers on %s\n",
- reiserfs_bdevname(s));
-}
-
static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
*sb)
{
@@ -677,30 +670,6 @@ static void submit_ordered_buffer(struct buffer_head *bh)
submit_bh(WRITE, bh);
}
-static int submit_barrier_buffer(struct buffer_head *bh)
-{
- get_bh(bh);
- bh->b_end_io = reiserfs_end_ordered_io;
- clear_buffer_dirty(bh);
- if (!buffer_uptodate(bh))
- BUG();
- return submit_bh(WRITE_BARRIER, bh);
-}
-
-static void check_barrier_completion(struct super_block *s,
- struct buffer_head *bh)
-{
- if (buffer_eopnotsupp(bh)) {
- clear_buffer_eopnotsupp(bh);
- disable_barrier(s);
- set_buffer_uptodate(bh);
- set_buffer_dirty(bh);
- reiserfs_write_unlock(s);
- sync_dirty_buffer(bh);
- reiserfs_write_lock(s);
- }
-}
-
#define CHUNK_SIZE 32
struct buffer_chunk {
struct buffer_head *bh[CHUNK_SIZE];
@@ -1009,7 +978,6 @@ static int flush_commit_list(struct super_block *s,
struct buffer_head *tbh = NULL;
unsigned int trans_id = jl->j_trans_id;
struct reiserfs_journal *journal = SB_JOURNAL(s);
- int barrier = 0;
int retval = 0;
int write_len;
@@ -1094,24 +1062,6 @@ static int flush_commit_list(struct super_block *s,
}
atomic_dec(&journal->j_async_throttle);
- /* We're skipping the commit if there's an error */
- if (retval || reiserfs_is_journal_aborted(journal))
- barrier = 0;
-
- /* wait on everything written so far before writing the commit
- * if we are in barrier mode, send the commit down now
- */
- barrier = reiserfs_barrier_flush(s);
- if (barrier) {
- int ret;
- lock_buffer(jl->j_commit_bh);
- ret = submit_barrier_buffer(jl->j_commit_bh);
- if (ret == -EOPNOTSUPP) {
- set_buffer_uptodate(jl->j_commit_bh);
- disable_barrier(s);
- barrier = 0;
- }
- }
for (i = 0; i < (jl->j_len + 1); i++) {
bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
(jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
@@ -1143,27 +1093,22 @@ static int flush_commit_list(struct super_block *s,
BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
- if (!barrier) {
- /* If there was a write error in the journal - we can't commit
- * this transaction - it will be invalid and, if successful,
- * will just end up propagating the write error out to
- * the file system. */
- if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
- if (buffer_dirty(jl->j_commit_bh))
- BUG();
- mark_buffer_dirty(jl->j_commit_bh) ;
- reiserfs_write_unlock(s);
- sync_dirty_buffer(jl->j_commit_bh) ;
- reiserfs_write_lock(s);
- }
- } else {
+ /* If there was a write error in the journal - we can't commit
+ * this transaction - it will be invalid and, if successful,
+ * will just end up propagating the write error out to
+ * the file system. */
+ if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
+ if (buffer_dirty(jl->j_commit_bh))
+ BUG();
+ mark_buffer_dirty(jl->j_commit_bh) ;
reiserfs_write_unlock(s);
- wait_on_buffer(jl->j_commit_bh);
+ if (reiserfs_barrier_flush(s))
+ __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
+ else
+ sync_dirty_buffer(jl->j_commit_bh);
reiserfs_write_lock(s);
}
- check_barrier_completion(s, jl->j_commit_bh);
-
/* If there was a write error in the journal - we can't commit this
* transaction - it will be invalid and, if successful, will just end
* up propagating the write error out to the filesystem. */
@@ -1319,26 +1264,15 @@ static int _update_journal_header_block(struct super_block *sb,
jh->j_first_unflushed_offset = cpu_to_le32(offset);
jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
- if (reiserfs_barrier_flush(sb)) {
- int ret;
- lock_buffer(journal->j_header_bh);
- ret = submit_barrier_buffer(journal->j_header_bh);
- if (ret == -EOPNOTSUPP) {
- set_buffer_uptodate(journal->j_header_bh);
- disable_barrier(sb);
- goto sync;
- }
- reiserfs_write_unlock(sb);
- wait_on_buffer(journal->j_header_bh);
- reiserfs_write_lock(sb);
- check_barrier_completion(sb, journal->j_header_bh);
- } else {
- sync:
- set_buffer_dirty(journal->j_header_bh);
- reiserfs_write_unlock(sb);
+ set_buffer_dirty(journal->j_header_bh);
+ reiserfs_write_unlock(sb);
+
+ if (reiserfs_barrier_flush(sb))
+ __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
+ else
sync_dirty_buffer(journal->j_header_bh);
- reiserfs_write_lock(sb);
- }
+
+ reiserfs_write_lock(sb);
if (!buffer_uptodate(journal->j_header_bh)) {
reiserfs_warning(sb, "journal-837",
"IO error during journal replay");
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 42d21354689..268580535c9 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -282,6 +282,7 @@ error:
static const struct file_operations romfs_dir_operations = {
.read = generic_read_dir,
.readdir = romfs_readdir,
+ .llseek = default_llseek,
};
static const struct inode_operations romfs_dir_inode_operations = {
diff --git a/fs/seq_file.c b/fs/seq_file.c
index e1f437be6c3..0e7cb1395a9 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -131,7 +131,7 @@ Eoverflow:
*/
ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
- struct seq_file *m = (struct seq_file *)file->private_data;
+ struct seq_file *m = file->private_data;
size_t copied = 0;
loff_t pos;
size_t n;
@@ -280,7 +280,7 @@ EXPORT_SYMBOL(seq_read);
*/
loff_t seq_lseek(struct file *file, loff_t offset, int origin)
{
- struct seq_file *m = (struct seq_file *)file->private_data;
+ struct seq_file *m = file->private_data;
loff_t retval = -EINVAL;
mutex_lock(&m->lock);
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(seq_lseek);
*/
int seq_release(struct inode *inode, struct file *file)
{
- struct seq_file *m = (struct seq_file *)file->private_data;
+ struct seq_file *m = file->private_data;
kfree(m->buf);
kfree(m);
return 0;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index bdd4496ae67..492465b451d 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -216,6 +216,7 @@ static const struct file_operations signalfd_fops = {
.release = signalfd_release,
.poll = signalfd_poll,
.read = signalfd_read,
+ .llseek = noop_llseek,
};
SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
diff --git a/fs/smbfs/Kconfig b/fs/smbfs/Kconfig
index e668127c8b2..2bc24a8c403 100644
--- a/fs/smbfs/Kconfig
+++ b/fs/smbfs/Kconfig
@@ -1,5 +1,6 @@
config SMB_FS
tristate "SMB file system support (OBSOLETE, please use CIFS)"
+ depends on BKL # probably unfixable
depends on INET
select NLS
help
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 450c9194198..8fc5e50e142 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -501,6 +501,8 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
void *mem;
static int warn_count;
+ lock_kernel();
+
if (warn_count < 5) {
warn_count++;
printk(KERN_EMERG "smbfs is deprecated and will be removed"
@@ -621,6 +623,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
smb_new_dentry(sb->s_root);
+ unlock_kernel();
return 0;
out_no_root:
@@ -643,9 +646,11 @@ out_wrong_data:
out_no_data:
printk(KERN_ERR "smb_fill_super: missing data argument\n");
out_fail:
+ unlock_kernel();
return -EINVAL;
out_no_server:
printk(KERN_ERR "smb_fill_super: cannot allocate struct smb_sb_info\n");
+ unlock_kernel();
return -ENOMEM;
}
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c
index 12b933ac658..0dc340aa2be 100644
--- a/fs/squashfs/dir.c
+++ b/fs/squashfs/dir.c
@@ -230,5 +230,6 @@ failed_read:
const struct file_operations squashfs_dir_ops = {
.read = generic_read_dir,
- .readdir = squashfs_readdir
+ .readdir = squashfs_readdir,
+ .llseek = default_llseek,
};
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 88b4f860665..07a4f115604 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -30,7 +30,6 @@
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/init.h>
@@ -354,8 +353,6 @@ static int squashfs_remount(struct super_block *sb, int *flags, char *data)
static void squashfs_put_super(struct super_block *sb)
{
- lock_kernel();
-
if (sb->s_fs_info) {
struct squashfs_sb_info *sbi = sb->s_fs_info;
squashfs_cache_delete(sbi->block_cache);
@@ -370,8 +367,6 @@ static void squashfs_put_super(struct super_block *sb)
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
-
- unlock_kernel();
}
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 4e321f7353f..a4759833d62 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -179,30 +179,14 @@ static void bin_vma_open(struct vm_area_struct *vma)
struct bin_buffer *bb = file->private_data;
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
- if (!bb->vm_ops || !bb->vm_ops->open)
- return;
-
- if (!sysfs_get_active(attr_sd))
- return;
-
- bb->vm_ops->open(vma);
-
- sysfs_put_active(attr_sd);
-}
-
-static void bin_vma_close(struct vm_area_struct *vma)
-{
- struct file *file = vma->vm_file;
- struct bin_buffer *bb = file->private_data;
- struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
-
- if (!bb->vm_ops || !bb->vm_ops->close)
+ if (!bb->vm_ops)
return;
if (!sysfs_get_active(attr_sd))
return;
- bb->vm_ops->close(vma);
+ if (bb->vm_ops->open)
+ bb->vm_ops->open(vma);
sysfs_put_active(attr_sd);
}
@@ -214,13 +198,15 @@ static int bin_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
int ret;
- if (!bb->vm_ops || !bb->vm_ops->fault)
+ if (!bb->vm_ops)
return VM_FAULT_SIGBUS;
if (!sysfs_get_active(attr_sd))
return VM_FAULT_SIGBUS;
- ret = bb->vm_ops->fault(vma, vmf);
+ ret = VM_FAULT_SIGBUS;
+ if (bb->vm_ops->fault)
+ ret = bb->vm_ops->fault(vma, vmf);
sysfs_put_active(attr_sd);
return ret;
@@ -236,13 +222,12 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (!bb->vm_ops)
return VM_FAULT_SIGBUS;
- if (!bb->vm_ops->page_mkwrite)
- return 0;
-
if (!sysfs_get_active(attr_sd))
return VM_FAULT_SIGBUS;
- ret = bb->vm_ops->page_mkwrite(vma, vmf);
+ ret = 0;
+ if (bb->vm_ops->page_mkwrite)
+ ret = bb->vm_ops->page_mkwrite(vma, vmf);
sysfs_put_active(attr_sd);
return ret;
@@ -256,13 +241,15 @@ static int bin_access(struct vm_area_struct *vma, unsigned long addr,
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
int ret;
- if (!bb->vm_ops || !bb->vm_ops->access)
+ if (!bb->vm_ops)
return -EINVAL;
if (!sysfs_get_active(attr_sd))
return -EINVAL;
- ret = bb->vm_ops->access(vma, addr, buf, len, write);
+ ret = -EINVAL;
+ if (bb->vm_ops->access)
+ ret = bb->vm_ops->access(vma, addr, buf, len, write);
sysfs_put_active(attr_sd);
return ret;
@@ -276,13 +263,15 @@ static int bin_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
int ret;
- if (!bb->vm_ops || !bb->vm_ops->set_policy)
+ if (!bb->vm_ops)
return 0;
if (!sysfs_get_active(attr_sd))
return -EINVAL;
- ret = bb->vm_ops->set_policy(vma, new);
+ ret = 0;
+ if (bb->vm_ops->set_policy)
+ ret = bb->vm_ops->set_policy(vma, new);
sysfs_put_active(attr_sd);
return ret;
@@ -296,13 +285,15 @@ static struct mempolicy *bin_get_policy(struct vm_area_struct *vma,
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
struct mempolicy *pol;
- if (!bb->vm_ops || !bb->vm_ops->get_policy)
+ if (!bb->vm_ops)
return vma->vm_policy;
if (!sysfs_get_active(attr_sd))
return vma->vm_policy;
- pol = bb->vm_ops->get_policy(vma, addr);
+ pol = vma->vm_policy;
+ if (bb->vm_ops->get_policy)
+ pol = bb->vm_ops->get_policy(vma, addr);
sysfs_put_active(attr_sd);
return pol;
@@ -316,13 +307,15 @@ static int bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
int ret;
- if (!bb->vm_ops || !bb->vm_ops->migrate)
+ if (!bb->vm_ops)
return 0;
if (!sysfs_get_active(attr_sd))
return 0;
- ret = bb->vm_ops->migrate(vma, from, to, flags);
+ ret = 0;
+ if (bb->vm_ops->migrate)
+ ret = bb->vm_ops->migrate(vma, from, to, flags);
sysfs_put_active(attr_sd);
return ret;
@@ -331,7 +324,6 @@ static int bin_migrate(struct vm_area_struct *vma, const nodemask_t *from,
static const struct vm_operations_struct bin_vm_ops = {
.open = bin_vma_open,
- .close = bin_vma_close,
.fault = bin_fault,
.page_mkwrite = bin_page_mkwrite,
.access = bin_access,
@@ -377,6 +369,14 @@ static int mmap(struct file *file, struct vm_area_struct *vma)
if (bb->mmapped && bb->vm_ops != vma->vm_ops)
goto out_put;
+ /*
+ * It is not possible to successfully wrap close.
+ * So error if someone is trying to use close.
+ */
+ rc = -EINVAL;
+ if (vma->vm_ops && vma->vm_ops->close)
+ goto out_put;
+
rc = 0;
bb->mmapped = 1;
bb->vm_ops = vma->vm_ops;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index b86ab8eff79..8c4fc1425b3 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -144,6 +144,7 @@ static const struct file_operations timerfd_fops = {
.release = timerfd_release,
.poll = timerfd_poll,
.read = timerfd_read,
+ .llseek = noop_llseek,
};
static struct file *timerfd_fget(int fd)
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 37fa7ed062d..02429d81ca3 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -63,7 +63,9 @@ static int do_commit(struct ubifs_info *c)
struct ubifs_lp_stats lst;
dbg_cmt("start");
- if (c->ro_media) {
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+
+ if (c->ro_error) {
err = -EROFS;
goto out_up;
}
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index c2a68baa782..0bee4dbffc3 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2239,6 +2239,162 @@ out_free:
return err;
}
+/**
+ * dbg_check_data_nodes_order - check that list of data nodes is sorted.
+ * @c: UBIFS file-system description object
+ * @head: the list of nodes ('struct ubifs_scan_node' objects)
+ *
+ * This function returns zero if the list of data nodes is sorted correctly,
+ * and %-EINVAL if not.
+ */
+int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
+{
+ struct list_head *cur;
+ struct ubifs_scan_node *sa, *sb;
+
+ if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ return 0;
+
+ for (cur = head->next; cur->next != head; cur = cur->next) {
+ ino_t inuma, inumb;
+ uint32_t blka, blkb;
+
+ cond_resched();
+ sa = container_of(cur, struct ubifs_scan_node, list);
+ sb = container_of(cur->next, struct ubifs_scan_node, list);
+
+ if (sa->type != UBIFS_DATA_NODE) {
+ ubifs_err("bad node type %d", sa->type);
+ dbg_dump_node(c, sa->node);
+ return -EINVAL;
+ }
+ if (sb->type != UBIFS_DATA_NODE) {
+ ubifs_err("bad node type %d", sb->type);
+ dbg_dump_node(c, sb->node);
+ return -EINVAL;
+ }
+
+ inuma = key_inum(c, &sa->key);
+ inumb = key_inum(c, &sb->key);
+
+ if (inuma < inumb)
+ continue;
+ if (inuma > inumb) {
+ ubifs_err("larger inum %lu goes before inum %lu",
+ (unsigned long)inuma, (unsigned long)inumb);
+ goto error_dump;
+ }
+
+ blka = key_block(c, &sa->key);
+ blkb = key_block(c, &sb->key);
+
+ if (blka > blkb) {
+ ubifs_err("larger block %u goes before %u", blka, blkb);
+ goto error_dump;
+ }
+ if (blka == blkb) {
+ ubifs_err("two data nodes for the same block");
+ goto error_dump;
+ }
+ }
+
+ return 0;
+
+error_dump:
+ dbg_dump_node(c, sa->node);
+ dbg_dump_node(c, sb->node);
+ return -EINVAL;
+}
+
+/**
+ * dbg_check_nondata_nodes_order - check that list of non-data nodes is sorted.
+ * @c: UBIFS file-system description object
+ * @head: the list of nodes ('struct ubifs_scan_node' objects)
+ *
+ * This function returns zero if the list of non-data nodes is sorted correctly,
+ * and %-EINVAL if not.
+ */
+int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
+{
+ struct list_head *cur;
+ struct ubifs_scan_node *sa, *sb;
+
+ if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ return 0;
+
+ for (cur = head->next; cur->next != head; cur = cur->next) {
+ ino_t inuma, inumb;
+ uint32_t hasha, hashb;
+
+ cond_resched();
+ sa = container_of(cur, struct ubifs_scan_node, list);
+ sb = container_of(cur->next, struct ubifs_scan_node, list);
+
+ if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
+ sa->type != UBIFS_XENT_NODE) {
+ ubifs_err("bad node type %d", sa->type);
+ dbg_dump_node(c, sa->node);
+ return -EINVAL;
+ }
+ if (sb->type != UBIFS_INO_NODE && sb->type != UBIFS_DENT_NODE &&
+ sb->type != UBIFS_XENT_NODE) {
+ ubifs_err("bad node type %d", sb->type);
+ dbg_dump_node(c, sb->node);
+ return -EINVAL;
+ }
+
+ if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
+ ubifs_err("non-inode node goes before inode node");
+ goto error_dump;
+ }
+
+ if (sa->type == UBIFS_INO_NODE && sb->type != UBIFS_INO_NODE)
+ continue;
+
+ if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
+ /* Inode nodes are sorted in descending size order */
+ if (sa->len < sb->len) {
+ ubifs_err("smaller inode node goes first");
+ goto error_dump;
+ }
+ continue;
+ }
+
+ /*
+ * This is either a dentry or xentry, which should be sorted in
+ * ascending (parent ino, hash) order.
+ */
+ inuma = key_inum(c, &sa->key);
+ inumb = key_inum(c, &sb->key);
+
+ if (inuma < inumb)
+ continue;
+ if (inuma > inumb) {
+ ubifs_err("larger inum %lu goes before inum %lu",
+ (unsigned long)inuma, (unsigned long)inumb);
+ goto error_dump;
+ }
+
+ hasha = key_block(c, &sa->key);
+ hashb = key_block(c, &sb->key);
+
+ if (hasha > hashb) {
+ ubifs_err("larger hash %u goes before %u", hasha, hashb);
+ goto error_dump;
+ }
+ }
+
+ return 0;
+
+error_dump:
+ ubifs_msg("dumping first node");
+ dbg_dump_node(c, sa->node);
+ ubifs_msg("dumping second node");
+ dbg_dump_node(c, sb->node);
+ return -EINVAL;
+}
+
static int invocation_cnt;
int dbg_force_in_the_gaps(void)
@@ -2625,6 +2781,7 @@ static const struct file_operations dfs_fops = {
.open = open_debugfs_file,
.write = write_debugfs_file,
.owner = THIS_MODULE,
+ .llseek = default_llseek,
};
/**
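
As a reference for the two checkers above (and for the gc.c comparators changed later in this series), this is the ordering they expect after list_sort(); the key values below are invented for illustration only:

/*
 * Illustration only -- invented key values, not part of the patch.
 *
 * Data nodes: strictly ascending by (inode number, block number),
 * duplicates are an error:
 *
 *   (ino 5, blk 0) -> (ino 5, blk 7) -> (ino 9, blk 2)    sorted, OK
 *   (ino 9, blk 2) -> (ino 5, blk 0)                      -EINVAL: inum decreases
 *   (ino 5, blk 3) -> (ino 5, blk 3)                      -EINVAL: duplicate block
 *
 * Non-data nodes: all inode nodes first, in descending length, then
 * dentry/xattr-entry nodes ascending by (parent inum, name hash):
 *
 *   ino(len 160) -> ino(len 120) -> dent(ino 5, hash 0x01)
 *      -> dent(ino 5, hash 0x2a) -> xent(ino 7, hash 0x03)    sorted, OK
 *   dent(ino 5, hash 0x01) -> ino(len 120)                -EINVAL: inode after dentry
 */
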
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 29d960101ea..69ebe472915 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -324,6 +324,8 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
int row, int col);
int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
loff_t size);
+int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head);
+int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head);
/* Force the use of in-the-gaps method for testing */
@@ -465,6 +467,8 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
#define dbg_check_lprops(c) 0
#define dbg_check_lpt_nodes(c, cnode, row, col) 0
#define dbg_check_inode_size(c, inode, size) 0
+#define dbg_check_data_nodes_order(c, head) 0
+#define dbg_check_nondata_nodes_order(c, head) 0
#define dbg_force_in_the_gaps_enabled 0
#define dbg_force_in_the_gaps() 0
#define dbg_failure_mode 0
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 03ae894c45d..d77db7e3648 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -433,8 +433,9 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
struct page *page;
ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
+ ubifs_assert(!c->ro_media && !c->ro_mount);
- if (unlikely(c->ro_media))
+ if (unlikely(c->ro_error))
return -EROFS;
/* Try out the fast-path part first */
@@ -1439,9 +1440,9 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vm
dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
i_size_read(inode));
- ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));
+ ubifs_assert(!c->ro_media && !c->ro_mount);
- if (unlikely(c->ro_media))
+ if (unlikely(c->ro_error))
return VM_FAULT_SIGBUS; /* -EROFS */
/*
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 918d1582ca0..151f1088282 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -125,10 +125,16 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
struct ubifs_scan_node *sa, *sb;
cond_resched();
+ if (a == b)
+ return 0;
+
sa = list_entry(a, struct ubifs_scan_node, list);
sb = list_entry(b, struct ubifs_scan_node, list);
+
ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
+ ubifs_assert(sa->type == UBIFS_DATA_NODE);
+ ubifs_assert(sb->type == UBIFS_DATA_NODE);
inuma = key_inum(c, &sa->key);
inumb = key_inum(c, &sb->key);
@@ -157,28 +163,40 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
*/
int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
{
- int typea, typeb;
ino_t inuma, inumb;
struct ubifs_info *c = priv;
struct ubifs_scan_node *sa, *sb;
cond_resched();
+ if (a == b)
+ return 0;
+
sa = list_entry(a, struct ubifs_scan_node, list);
sb = list_entry(b, struct ubifs_scan_node, list);
- typea = key_type(c, &sa->key);
- typeb = key_type(c, &sb->key);
- ubifs_assert(typea != UBIFS_DATA_KEY && typeb != UBIFS_DATA_KEY);
+
+ ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
+ key_type(c, &sb->key) != UBIFS_DATA_KEY);
+ ubifs_assert(sa->type != UBIFS_DATA_NODE &&
+ sb->type != UBIFS_DATA_NODE);
/* Inodes go before directory entries */
- if (typea == UBIFS_INO_KEY) {
- if (typeb == UBIFS_INO_KEY)
+ if (sa->type == UBIFS_INO_NODE) {
+ if (sb->type == UBIFS_INO_NODE)
return sb->len - sa->len;
return -1;
}
- if (typeb == UBIFS_INO_KEY)
+ if (sb->type == UBIFS_INO_NODE)
return 1;
- ubifs_assert(typea == UBIFS_DENT_KEY && typeb == UBIFS_DENT_KEY);
+ ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
+ key_type(c, &sa->key) == UBIFS_XENT_KEY);
+ ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
+ key_type(c, &sb->key) == UBIFS_XENT_KEY);
+ ubifs_assert(sa->type == UBIFS_DENT_NODE ||
+ sa->type == UBIFS_XENT_NODE);
+ ubifs_assert(sb->type == UBIFS_DENT_NODE ||
+ sb->type == UBIFS_XENT_NODE);
+
inuma = key_inum(c, &sa->key);
inumb = key_inum(c, &sb->key);
@@ -224,17 +242,33 @@ int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
struct list_head *nondata, int *min)
{
+ int err;
struct ubifs_scan_node *snod, *tmp;
*min = INT_MAX;
/* Separate data nodes and non-data nodes */
list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
- int err;
+ ubifs_assert(snod->type == UBIFS_INO_NODE ||
+ snod->type == UBIFS_DATA_NODE ||
+ snod->type == UBIFS_DENT_NODE ||
+ snod->type == UBIFS_XENT_NODE ||
+ snod->type == UBIFS_TRUN_NODE);
+
+ if (snod->type != UBIFS_INO_NODE &&
+ snod->type != UBIFS_DATA_NODE &&
+ snod->type != UBIFS_DENT_NODE &&
+ snod->type != UBIFS_XENT_NODE) {
+ /* Probably truncation node, zap it */
+ list_del(&snod->list);
+ kfree(snod);
+ continue;
+ }
- ubifs_assert(snod->type != UBIFS_IDX_NODE);
- ubifs_assert(snod->type != UBIFS_REF_NODE);
- ubifs_assert(snod->type != UBIFS_CS_NODE);
+ ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
+ key_type(c, &snod->key) == UBIFS_INO_KEY ||
+ key_type(c, &snod->key) == UBIFS_DENT_KEY ||
+ key_type(c, &snod->key) == UBIFS_XENT_KEY);
err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
snod->offs, 0);
@@ -258,6 +292,13 @@ static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
/* Sort data and non-data nodes */
list_sort(c, &sleb->nodes, &data_nodes_cmp);
list_sort(c, nondata, &nondata_nodes_cmp);
+
+ err = dbg_check_data_nodes_order(c, &sleb->nodes);
+ if (err)
+ return err;
+ err = dbg_check_nondata_nodes_order(c, nondata);
+ if (err)
+ return err;
return 0;
}
@@ -575,13 +616,14 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
ubifs_assert_cmt_locked(c);
+ ubifs_assert(!c->ro_media && !c->ro_mount);
if (ubifs_gc_should_commit(c))
return -EAGAIN;
mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
- if (c->ro_media) {
+ if (c->ro_error) {
ret = -EROFS;
goto out_unlock;
}
@@ -677,14 +719,12 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
ret = ubifs_garbage_collect_leb(c, &lp);
if (ret < 0) {
- if (ret == -EAGAIN || ret == -ENOSPC) {
+ if (ret == -EAGAIN) {
/*
- * These codes are not errors, so we have to
- * return the LEB to lprops. But if the
- * 'ubifs_return_leb()' function fails, its
- * failure code is propagated to the caller
- * instead of the original '-EAGAIN' or
- * '-ENOSPC'.
+ * This is not an error, so we have to return the
+ * LEB to lprops. But if 'ubifs_return_leb()'
+ * fails, its failure code is propagated to the
+ * caller instead of the original '-EAGAIN'.
*/
err = ubifs_return_leb(c, lp.lnum);
if (err)
@@ -774,8 +814,8 @@ out_unlock:
out:
ubifs_assert(ret < 0);
ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
- ubifs_ro_mode(c, ret);
ubifs_wbuf_sync_nolock(wbuf);
+ ubifs_ro_mode(c, ret);
mutex_unlock(&wbuf->io_mutex);
ubifs_return_leb(c, lp.lnum);
return ret;
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index bcf5a16f30b..d82173182ee 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -61,8 +61,8 @@
*/
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
- if (!c->ro_media) {
- c->ro_media = 1;
+ if (!c->ro_error) {
+ c->ro_error = 1;
c->no_chk_data_crc = 0;
c->vfs_sb->s_flags |= MS_RDONLY;
ubifs_warn("switched to read-only mode, error %d", err);
@@ -356,11 +356,11 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
dbg_io("LEB %d:%d, %d bytes, jhead %s",
wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
- ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
ubifs_assert(!(wbuf->avail & 7));
ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
+ ubifs_assert(!c->ro_media && !c->ro_mount);
- if (c->ro_media)
+ if (c->ro_error)
return -EROFS;
ubifs_pad(c, wbuf->buf + wbuf->used, wbuf->avail);
@@ -440,11 +440,12 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
int err, i;
+ ubifs_assert(!c->ro_media && !c->ro_mount);
if (!c->need_wbuf_sync)
return 0;
c->need_wbuf_sync = 0;
- if (c->ro_media) {
+ if (c->ro_error) {
err = -EROFS;
goto out_timers;
}
@@ -519,6 +520,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
ubifs_assert(wbuf->avail > 0 && wbuf->avail <= c->min_io_size);
ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
+ ubifs_assert(!c->ro_media && !c->ro_mount);
if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
err = -ENOSPC;
@@ -527,7 +529,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
cancel_wbuf_timer_nolock(wbuf);
- if (c->ro_media)
+ if (c->ro_error)
return -EROFS;
if (aligned_len <= wbuf->avail) {
@@ -663,8 +665,9 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
buf_len);
ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
+ ubifs_assert(!c->ro_media && !c->ro_mount);
- if (c->ro_media)
+ if (c->ro_error)
return -EROFS;
ubifs_prepare_node(c, buf, len, 1);
@@ -815,7 +818,8 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
return 0;
out:
- ubifs_err("bad node at LEB %d:%d", lnum, offs);
+ ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
+ ubi_is_mapped(c->ubi, lnum));
dbg_dump_node(c, buf);
dbg_dump_stack();
return -EINVAL;
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index d321baeca68..914f1bd89e5 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -122,11 +122,12 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
* better to try to allocate space at the ends of eraseblocks. This is
* what the squeeze parameter does.
*/
+ ubifs_assert(!c->ro_media && !c->ro_mount);
squeeze = (jhead == BASEHD);
again:
mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
- if (c->ro_media) {
+ if (c->ro_error) {
err = -EROFS;
goto out_unlock;
}
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index 0f530c684f0..92a8491a8f8 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -306,6 +306,20 @@ static inline void trun_key_init(const struct ubifs_info *c,
}
/**
+ * invalid_key_init - initialize invalid node key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ *
+ * This is a helper function which marks a @key object as invalid.
+ */
+static inline void invalid_key_init(const struct ubifs_info *c,
+ union ubifs_key *key)
+{
+ key->u32[0] = 0xDEADBEAF;
+ key->u32[1] = UBIFS_INVALID_KEY;
+}
+
+/**
* key_type - get key type.
* @c: UBIFS file-system description object
* @key: key to get type of
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index c345e125f42..4d0cb124146 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -159,7 +159,7 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
jhead = &c->jheads[bud->jhead];
list_add_tail(&bud->list, &jhead->buds_list);
} else
- ubifs_assert(c->replaying && (c->vfs_sb->s_flags & MS_RDONLY));
+ ubifs_assert(c->replaying && c->ro_mount);
/*
* Note, although this is a new bud, we anyway account this space now,
@@ -223,8 +223,8 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
}
mutex_lock(&c->log_mutex);
-
- if (c->ro_media) {
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error) {
err = -EROFS;
goto out_unlock;
}
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index 0084a33c4c6..72775d35b99 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1363,6 +1363,7 @@ static int read_lsave(struct ubifs_info *c)
goto out;
for (i = 0; i < c->lsave_cnt; i++) {
int lnum = c->lsave[i];
+ struct ubifs_lprops *lprops;
/*
* Due to automatic resizing, the values in the lsave table
@@ -1370,7 +1371,11 @@ static int read_lsave(struct ubifs_info *c)
*/
if (lnum >= c->leb_cnt)
continue;
- ubifs_lpt_lookup(c, lnum);
+ lprops = ubifs_lpt_lookup(c, lnum);
+ if (IS_ERR(lprops)) {
+ err = PTR_ERR(lprops);
+ goto out;
+ }
}
out:
vfree(buf);
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index d12535b7fc7..5c90dec5db0 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -705,6 +705,9 @@ static int make_tree_dirty(struct ubifs_info *c)
struct ubifs_pnode *pnode;
pnode = pnode_lookup(c, 0);
+ if (IS_ERR(pnode))
+ return PTR_ERR(pnode);
+
while (pnode) {
do_make_pnode_dirty(c, pnode);
pnode = next_pnode_to_dirty(c, pnode);
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 28beaeedadc..21f47afdacf 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -361,7 +361,8 @@ int ubifs_write_master(struct ubifs_info *c)
{
int err, lnum, offs, len;
- if (c->ro_media)
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error)
return -EROFS;
lnum = UBIFS_MST_LNUM;
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index 4fa81d867e4..c3de04dc952 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -132,7 +132,8 @@ static inline int ubifs_leb_unmap(const struct ubifs_info *c, int lnum)
{
int err;
- if (c->ro_media)
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error)
return -EROFS;
err = ubi_leb_unmap(c->ubi, lnum);
if (err) {
@@ -159,7 +160,8 @@ static inline int ubifs_leb_write(const struct ubifs_info *c, int lnum,
{
int err;
- if (c->ro_media)
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error)
return -EROFS;
err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
if (err) {
@@ -186,7 +188,8 @@ static inline int ubifs_leb_change(const struct ubifs_info *c, int lnum,
{
int err;
- if (c->ro_media)
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error)
return -EROFS;
err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
if (err) {
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index daae9e1f538..77e9b874b6c 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -292,7 +292,7 @@ int ubifs_recover_master_node(struct ubifs_info *c)
memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);
- if ((c->vfs_sb->s_flags & MS_RDONLY)) {
+ if (c->ro_mount) {
/* Read-only mode. Keep a copy for switching to rw mode */
c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
if (!c->rcvrd_mst_node) {
@@ -469,7 +469,7 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
endpt = snod->offs + snod->len;
}
- if ((c->vfs_sb->s_flags & MS_RDONLY) && !c->remounting_rw) {
+ if (c->ro_mount && !c->remounting_rw) {
/* Add to recovery list */
struct ubifs_unclean_leb *ucleb;
@@ -772,7 +772,8 @@ out_free:
* @sbuf: LEB-sized buffer to use
*
* This function does a scan of a LEB, but caters for errors that might have
- * been caused by the unclean unmount from which we are attempting to recover.
+ * been caused by unclean reboots from which we are attempting to recover
+ * (assume that only the last log LEB can be corrupted by an unclean reboot).
*
* This function returns %0 on success and a negative error code on failure.
*/
@@ -883,7 +884,7 @@ int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf)
{
int err;
- ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY) || c->remounting_rw);
+ ubifs_assert(!c->ro_mount || c->remounting_rw);
dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
@@ -1461,7 +1462,7 @@ int ubifs_recover_size(struct ubifs_info *c)
}
}
if (e->exists && e->i_size < e->d_size) {
- if (!e->inode && (c->vfs_sb->s_flags & MS_RDONLY)) {
+ if (!e->inode && c->ro_mount) {
/* Fix the inode size and pin it in memory */
struct inode *inode;
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 5c2d6d759a3..eed0fcff8d7 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -627,8 +627,7 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
ubifs_assert(sleb->endpt - offs >= used);
ubifs_assert(sleb->endpt % c->min_io_size == 0);
- if (sleb->endpt + c->min_io_size <= c->leb_size &&
- !(c->vfs_sb->s_flags & MS_RDONLY))
+ if (sleb->endpt + c->min_io_size <= c->leb_size && !c->ro_mount)
err = ubifs_wbuf_seek_nolock(&c->jheads[jhead].wbuf, lnum,
sleb->endpt, UBI_SHORTTERM);
@@ -840,6 +839,11 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
if (IS_ERR(sleb)) {
if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
return PTR_ERR(sleb);
+ /*
+ * Note, the below function will recover this log LEB only if
+ * it is the last, because unclean reboots can possibly corrupt
+ * only the tail of the log.
+ */
sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
if (IS_ERR(sleb))
return PTR_ERR(sleb);
@@ -851,7 +855,6 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
}
node = sleb->buf;
-
snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
if (c->cs_sqnum == 0) {
/*
@@ -898,7 +901,6 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
}
list_for_each_entry(snod, &sleb->nodes, list) {
-
cond_resched();
if (snod->sqnum >= SQNUM_WATERMARK) {
@@ -1011,7 +1013,6 @@ out:
int ubifs_replay_journal(struct ubifs_info *c)
{
int err, i, lnum, offs, free;
- void *sbuf = NULL;
BUILD_BUG_ON(UBIFS_TRUN_KEY > 5);
@@ -1026,14 +1027,8 @@ int ubifs_replay_journal(struct ubifs_info *c)
return -EINVAL;
}
- sbuf = vmalloc(c->leb_size);
- if (!sbuf)
- return -ENOMEM;
-
dbg_mnt("start replaying the journal");
-
c->replaying = 1;
-
lnum = c->ltail_lnum = c->lhead_lnum;
offs = c->lhead_offs;
@@ -1046,7 +1041,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
lnum = UBIFS_LOG_LNUM;
offs = 0;
}
- err = replay_log_leb(c, lnum, offs, sbuf);
+ err = replay_log_leb(c, lnum, offs, c->sbuf);
if (err == 1)
/* We hit the end of the log */
break;
@@ -1079,7 +1074,6 @@ int ubifs_replay_journal(struct ubifs_info *c)
out:
destroy_replay_tree(c);
destroy_bud_list(c);
- vfree(sbuf);
c->replaying = 0;
return err;
}
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 96cb62c8a9d..bf31b4729e5 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -542,11 +542,8 @@ int ubifs_read_superblock(struct ubifs_info *c)
* due to the unavailability of time-travelling equipment.
*/
if (c->fmt_version > UBIFS_FORMAT_VERSION) {
- struct super_block *sb = c->vfs_sb;
- int mounting_ro = sb->s_flags & MS_RDONLY;
-
- ubifs_assert(!c->ro_media || mounting_ro);
- if (!mounting_ro ||
+ ubifs_assert(!c->ro_media || c->ro_mount);
+ if (!c->ro_mount ||
c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
ubifs_err("on-flash format version is w%d/r%d, but "
"software only supports up to version "
@@ -624,7 +621,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
c->old_leb_cnt = c->leb_cnt;
if (c->leb_cnt < c->vi.size && c->leb_cnt < c->max_leb_cnt) {
c->leb_cnt = min_t(int, c->max_leb_cnt, c->vi.size);
- if (c->vfs_sb->s_flags & MS_RDONLY)
+ if (c->ro_mount)
dbg_mnt("Auto resizing (ro) from %d LEBs to %d LEBs",
c->old_leb_cnt, c->leb_cnt);
else {
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index 96c52538419..3e1ee57dbea 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -197,7 +197,7 @@ int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
struct ubifs_ino_node *ino = buf;
struct ubifs_scan_node *snod;
- snod = kzalloc(sizeof(struct ubifs_scan_node), GFP_NOFS);
+ snod = kmalloc(sizeof(struct ubifs_scan_node), GFP_NOFS);
if (!snod)
return -ENOMEM;
@@ -212,13 +212,15 @@ int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
case UBIFS_DENT_NODE:
case UBIFS_XENT_NODE:
case UBIFS_DATA_NODE:
- case UBIFS_TRUN_NODE:
/*
* The key is in the same place in all keyed
* nodes.
*/
key_read(c, &ino->key, &snod->key);
break;
+ default:
+ invalid_key_init(c, &snod->key);
+ break;
}
list_add_tail(&snod->list, &sleb->nodes);
sleb->nodes_cnt += 1;
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 0b201114a5a..46961c00323 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -250,7 +250,7 @@ static int kick_a_thread(void)
dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt);
if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN ||
- c->ro_media) {
+ c->ro_mount || c->ro_error) {
mutex_unlock(&c->umount_mutex);
continue;
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index cd5900b85d3..9a47c9f0ad0 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1137,11 +1137,11 @@ static int check_free_space(struct ubifs_info *c)
*/
static int mount_ubifs(struct ubifs_info *c)
{
- struct super_block *sb = c->vfs_sb;
- int err, mounted_read_only = (sb->s_flags & MS_RDONLY);
+ int err;
long long x;
size_t sz;
+ c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
err = init_constants_early(c);
if (err)
return err;
@@ -1154,7 +1154,7 @@ static int mount_ubifs(struct ubifs_info *c)
if (err)
goto out_free;
- if (c->empty && (mounted_read_only || c->ro_media)) {
+ if (c->empty && (c->ro_mount || c->ro_media)) {
/*
* This UBI volume is empty, and read-only, or the file system
* is mounted read-only - we cannot format it.
@@ -1165,7 +1165,7 @@ static int mount_ubifs(struct ubifs_info *c)
goto out_free;
}
- if (c->ro_media && !mounted_read_only) {
+ if (c->ro_media && !c->ro_mount) {
ubifs_err("cannot mount read-write - read-only media");
err = -EROFS;
goto out_free;
@@ -1185,7 +1185,7 @@ static int mount_ubifs(struct ubifs_info *c)
if (!c->sbuf)
goto out_free;
- if (!mounted_read_only) {
+ if (!c->ro_mount) {
c->ileb_buf = vmalloc(c->leb_size);
if (!c->ileb_buf)
goto out_free;
@@ -1228,7 +1228,7 @@ static int mount_ubifs(struct ubifs_info *c)
}
sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);
- if (!mounted_read_only) {
+ if (!c->ro_mount) {
err = alloc_wbufs(c);
if (err)
goto out_cbuf;
@@ -1254,12 +1254,12 @@ static int mount_ubifs(struct ubifs_info *c)
if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
ubifs_msg("recovery needed");
c->need_recovery = 1;
- if (!mounted_read_only) {
+ if (!c->ro_mount) {
err = ubifs_recover_inl_heads(c, c->sbuf);
if (err)
goto out_master;
}
- } else if (!mounted_read_only) {
+ } else if (!c->ro_mount) {
/*
* Set the "dirty" flag so that if we reboot uncleanly we
* will notice this immediately on the next mount.
@@ -1270,7 +1270,7 @@ static int mount_ubifs(struct ubifs_info *c)
goto out_master;
}
- err = ubifs_lpt_init(c, 1, !mounted_read_only);
+ err = ubifs_lpt_init(c, 1, !c->ro_mount);
if (err)
goto out_lpt;
@@ -1285,11 +1285,11 @@ static int mount_ubifs(struct ubifs_info *c)
/* Calculate 'min_idx_lebs' after journal replay */
c->min_idx_lebs = ubifs_calc_min_idx_lebs(c);
- err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only);
+ err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount);
if (err)
goto out_orphans;
- if (!mounted_read_only) {
+ if (!c->ro_mount) {
int lnum;
err = check_free_space(c);
@@ -1351,7 +1351,7 @@ static int mount_ubifs(struct ubifs_info *c)
spin_unlock(&ubifs_infos_lock);
if (c->need_recovery) {
- if (mounted_read_only)
+ if (c->ro_mount)
ubifs_msg("recovery deferred");
else {
c->need_recovery = 0;
@@ -1378,7 +1378,7 @@ static int mount_ubifs(struct ubifs_info *c)
ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
c->vi.ubi_num, c->vi.vol_id, c->vi.name);
- if (mounted_read_only)
+ if (c->ro_mount)
ubifs_msg("mounted read-only");
x = (long long)c->main_lebs * c->leb_size;
ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d "
@@ -1640,7 +1640,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
}
dbg_gen("re-mounted read-write");
- c->vfs_sb->s_flags &= ~MS_RDONLY;
+ c->ro_mount = 0;
c->remounting_rw = 0;
c->always_chk_crc = 0;
err = dbg_check_space_info(c);
@@ -1676,7 +1676,7 @@ static void ubifs_remount_ro(struct ubifs_info *c)
int i, err;
ubifs_assert(!c->need_recovery);
- ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
+ ubifs_assert(!c->ro_mount);
mutex_lock(&c->umount_mutex);
if (c->bgt) {
@@ -1686,10 +1686,8 @@ static void ubifs_remount_ro(struct ubifs_info *c)
dbg_save_space_info(c);
- for (i = 0; i < c->jhead_cnt; i++) {
+ for (i = 0; i < c->jhead_cnt; i++)
ubifs_wbuf_sync(&c->jheads[i].wbuf);
- hrtimer_cancel(&c->jheads[i].wbuf.timer);
- }
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1704,6 +1702,7 @@ static void ubifs_remount_ro(struct ubifs_info *c)
vfree(c->ileb_buf);
c->ileb_buf = NULL;
ubifs_lpt_free(c, 1);
+ c->ro_mount = 1;
err = dbg_check_space_info(c);
if (err)
ubifs_ro_mode(c, err);
@@ -1735,7 +1734,7 @@ static void ubifs_put_super(struct super_block *sb)
* the mutex is locked.
*/
mutex_lock(&c->umount_mutex);
- if (!(c->vfs_sb->s_flags & MS_RDONLY)) {
+ if (!c->ro_mount) {
/*
* First of all kill the background thread to make sure it does
* not interfere with un-mounting and freeing resources.
@@ -1745,23 +1744,22 @@ static void ubifs_put_super(struct super_block *sb)
c->bgt = NULL;
}
- /* Synchronize write-buffers */
- if (c->jheads)
- for (i = 0; i < c->jhead_cnt; i++)
- ubifs_wbuf_sync(&c->jheads[i].wbuf);
-
/*
- * On fatal errors c->ro_media is set to 1, in which case we do
+ * On fatal errors c->ro_error is set to 1, in which case we do
* not write the master node.
*/
- if (!c->ro_media) {
+ if (!c->ro_error) {
+ int err;
+
+ /* Synchronize write-buffers */
+ for (i = 0; i < c->jhead_cnt; i++)
+ ubifs_wbuf_sync(&c->jheads[i].wbuf);
+
/*
* We are being cleanly unmounted which means the
* orphans were killed - indicate this in the master
* node. Also save the reserved GC LEB number.
*/
- int err;
-
c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
@@ -1774,6 +1772,10 @@ static void ubifs_put_super(struct super_block *sb)
*/
ubifs_err("failed to write master node, "
"error %d", err);
+ } else {
+ for (i = 0; i < c->jhead_cnt; i++)
+ /* Make sure write-buffer timers are canceled */
+ hrtimer_cancel(&c->jheads[i].wbuf.timer);
}
}
@@ -1797,17 +1799,21 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
return err;
}
- if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
+ if (c->ro_mount && !(*flags & MS_RDONLY)) {
+ if (c->ro_error) {
+ ubifs_msg("cannot re-mount R/W due to prior errors");
+ return -EROFS;
+ }
if (c->ro_media) {
- ubifs_msg("cannot re-mount due to prior errors");
+ ubifs_msg("cannot re-mount R/W - UBI volume is R/O");
return -EROFS;
}
err = ubifs_remount_rw(c);
if (err)
return err;
- } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
- if (c->ro_media) {
- ubifs_msg("cannot re-mount due to prior errors");
+ } else if (!c->ro_mount && (*flags & MS_RDONLY)) {
+ if (c->ro_error) {
+ ubifs_msg("cannot re-mount R/O due to prior errors");
return -EROFS;
}
ubifs_remount_ro(c);
@@ -2049,8 +2055,8 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
*/
ubi = open_ubi(name, UBI_READONLY);
if (IS_ERR(ubi)) {
- ubifs_err("cannot open \"%s\", error %d",
- name, (int)PTR_ERR(ubi));
+ dbg_err("cannot open \"%s\", error %d",
+ name, (int)PTR_ERR(ubi));
return PTR_ERR(ubi);
}
ubi_get_volume_info(ubi, &vi);
@@ -2064,9 +2070,11 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
}
if (sb->s_root) {
+ struct ubifs_info *c1 = sb->s_fs_info;
+
/* A new mount point for already mounted UBIFS */
dbg_gen("this ubi volume is already mounted");
- if ((flags ^ sb->s_flags) & MS_RDONLY) {
+ if (!!(flags & MS_RDONLY) != c1->ro_mount) {
err = -EBUSY;
goto out_deact;
}
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 2194915220e..ad9cf013362 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1177,6 +1177,7 @@ int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
unsigned long time = get_seconds();
dbg_tnc("search key %s", DBGKEY(key));
+ ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
znode = c->zroot.znode;
if (unlikely(!znode)) {
@@ -2966,7 +2967,7 @@ static struct ubifs_znode *right_znode(struct ubifs_info *c,
*
* This function searches an indexing node by its first key @key and its
* address @lnum:@offs. It looks up the indexing tree by pulling all indexing
- * nodes it traverses to TNC. This function is called fro indexing nodes which
+ * nodes it traverses to TNC. This function is called for indexing nodes which
* were found on the media by scanning, for example when garbage-collecting or
* when doing in-the-gaps commit. This means that the indexing node which is
* looked for does not have to have exactly the same leftmost key @key, because
@@ -2988,6 +2989,8 @@ static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
struct ubifs_znode *znode, *zn;
int n, nn;
+ ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
+
/*
* The arguments have probably been read off flash, so don't assume
* they are valid.
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 0c9876b396d..381d6b207a5 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -119,8 +119,12 @@
* in TNC. However, when replaying, it is handy to introduce fake "truncation"
* keys for truncation nodes because the code becomes simpler. So we define
* %UBIFS_TRUN_KEY type.
+ *
+ * But otherwise, outside the journal replay scope, the truncation keys are
+ * invalid.
*/
-#define UBIFS_TRUN_KEY UBIFS_KEY_TYPES_CNT
+#define UBIFS_TRUN_KEY UBIFS_KEY_TYPES_CNT
+#define UBIFS_INVALID_KEY UBIFS_KEY_TYPES_CNT
/*
* How much a directory entry/extended attribute entry adds to the parent/host
@@ -1028,6 +1032,8 @@ struct ubifs_debug_info;
* @max_leb_cnt: maximum count of logical eraseblocks
* @old_leb_cnt: count of logical eraseblocks before re-size
* @ro_media: the underlying UBI volume is read-only
+ * @ro_mount: the file-system was mounted as read-only
+ * @ro_error: UBIFS switched to R/O mode because an error happened
*
* @dirty_pg_cnt: number of dirty pages (not used)
* @dirty_zn_cnt: number of dirty znodes
@@ -1168,11 +1174,14 @@ struct ubifs_debug_info;
* @replay_sqnum: sequence number of node currently being replayed
* @need_recovery: file-system needs recovery
* @replaying: set to %1 during journal replay
- * @unclean_leb_list: LEBs to recover when mounting ro to rw
- * @rcvrd_mst_node: recovered master node to write when mounting ro to rw
+ * @unclean_leb_list: LEBs to recover when re-mounting R/O mounted FS to R/W
+ * mode
+ * @rcvrd_mst_node: recovered master node to write when re-mounting R/O mounted
+ * FS to R/W mode
* @size_tree: inode size information for recovery
- * @remounting_rw: set while remounting from ro to rw (sb flags have MS_RDONLY)
- * @always_chk_crc: always check CRCs (while mounting and remounting rw)
+ * @remounting_rw: set while re-mounting from R/O mode to R/W mode
+ * @always_chk_crc: always check CRCs (while mounting and remounting to R/W
+ * mode)
* @mount_opts: UBIFS-specific mount options
*
* @dbg: debugging-related information
@@ -1268,7 +1277,9 @@ struct ubifs_info {
int leb_cnt;
int max_leb_cnt;
int old_leb_cnt;
- int ro_media;
+ unsigned int ro_media:1;
+ unsigned int ro_mount:1;
+ unsigned int ro_error:1;
atomic_long_t dirty_pg_cnt;
atomic_long_t dirty_zn_cnt;
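
A short editorial recap of how the three read-only state bits introduced above split the old ro_media meaning (derived from the io.c, misc.h, master.c and super.c hunks in this series):

/*
 * Editorial summary, not part of the patch:
 *
 *   ro_media - the underlying UBI volume itself is read-only
 *   ro_mount - the file-system was mounted (or re-mounted) with MS_RDONLY
 *   ro_error - UBIFS dropped to read-only mode after an error
 *
 * Write paths now assert !ro_media && !ro_mount and return -EROFS only
 * when ro_error is set; re-mounting R/W is refused while either
 * ro_error or ro_media is set.
 */
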
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index 0e0e99bd6bc..f8def3c8ea4 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -1,5 +1,6 @@
config UDF_FS
tristate "UDF file system support"
+ depends on BKL # needs serious work to remove
select CRC_ITU_T
help
This is the new file system used on some CD-ROMs and DVDs. Say Y if
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 65412d84a45..76f3d6d97b4 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1880,6 +1880,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
struct kernel_lb_addr rootdir, fileset;
struct udf_sb_info *sbi;
+ lock_kernel();
+
uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
uopt.uid = -1;
uopt.gid = -1;
@@ -1888,8 +1890,10 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
uopt.dmode = UDF_INVALID_MODE;
sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
- if (!sbi)
+ if (!sbi) {
+ unlock_kernel();
return -ENOMEM;
+ }
sb->s_fs_info = sbi;
@@ -2035,6 +2039,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
goto error_out;
}
sb->s_maxbytes = MAX_LFS_FILESIZE;
+ unlock_kernel();
return 0;
error_out:
@@ -2055,6 +2060,7 @@ error_out:
kfree(sbi);
sb->s_fs_info = NULL;
+ unlock_kernel();
return -EINVAL;
}
diff --git a/fs/ufs/Kconfig b/fs/ufs/Kconfig
index e4f10a40768..30c8f223253 100644
--- a/fs/ufs/Kconfig
+++ b/fs/ufs/Kconfig
@@ -1,6 +1,7 @@
config UFS_FS
tristate "UFS file system support (read only)"
depends on BLOCK
+ depends on BKL # probably fixable
help
BSD and derivate versions of Unix (such as SunOS, FreeBSD, NetBSD,
OpenBSD and NeXTstep) use a file system called UFS. Some System V
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index d510c1b9181..6b9be90dae7 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -696,6 +696,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
unsigned maxsymlen;
int ret = -EINVAL;
+ lock_kernel();
+
uspi = NULL;
ubh = NULL;
flags = 0;
@@ -1163,6 +1165,7 @@ magic_found:
goto failed;
UFSD("EXIT\n");
+ unlock_kernel();
return 0;
dalloc_failed:
@@ -1174,10 +1177,12 @@ failed:
kfree(sbi);
sb->s_fs_info = NULL;
UFSD("EXIT (FAILED)\n");
+ unlock_kernel();
return ret;
failed_nomem:
UFSD("EXIT (NOMEM)\n");
+ unlock_kernel();
return -ENOMEM;
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 286e36e21da..ba5312802aa 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -188,8 +188,8 @@ _xfs_buf_initialize(
atomic_set(&bp->b_hold, 1);
init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_list);
- INIT_LIST_HEAD(&bp->b_hash_list);
- init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
+ RB_CLEAR_NODE(&bp->b_rbnode);
+ sema_init(&bp->b_sema, 0); /* held, no waiters */
XB_SET_OWNER(bp);
bp->b_target = target;
bp->b_file_offset = range_base;
@@ -262,8 +262,6 @@ xfs_buf_free(
{
trace_xfs_buf_free(bp, _RET_IP_);
- ASSERT(list_empty(&bp->b_hash_list));
-
if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
uint i;
@@ -422,8 +420,10 @@ _xfs_buf_find(
{
xfs_off_t range_base;
size_t range_length;
- xfs_bufhash_t *hash;
- xfs_buf_t *bp, *n;
+ struct xfs_perag *pag;
+ struct rb_node **rbp;
+ struct rb_node *parent;
+ xfs_buf_t *bp;
range_base = (ioff << BBSHIFT);
range_length = (isize << BBSHIFT);
@@ -432,14 +432,37 @@ _xfs_buf_find(
ASSERT(!(range_length < (1 << btp->bt_sshift)));
ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
- hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
-
- spin_lock(&hash->bh_lock);
-
- list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
- ASSERT(btp == bp->b_target);
- if (bp->b_file_offset == range_base &&
- bp->b_buffer_length == range_length) {
+ /* get tree root */
+ pag = xfs_perag_get(btp->bt_mount,
+ xfs_daddr_to_agno(btp->bt_mount, ioff));
+
+ /* walk tree */
+ spin_lock(&pag->pag_buf_lock);
+ rbp = &pag->pag_buf_tree.rb_node;
+ parent = NULL;
+ bp = NULL;
+ while (*rbp) {
+ parent = *rbp;
+ bp = rb_entry(parent, struct xfs_buf, b_rbnode);
+
+ if (range_base < bp->b_file_offset)
+ rbp = &(*rbp)->rb_left;
+ else if (range_base > bp->b_file_offset)
+ rbp = &(*rbp)->rb_right;
+ else {
+ /*
+ * found a block offset match. If the range doesn't
+ * match, the only way this is allowed is if the buffer
+ * in the cache is stale and the transaction that made
+ * it stale has not yet committed. i.e. we are
+ * reallocating a busy extent. Skip this buffer and
+ * continue searching to the right for an exact match.
+ */
+ if (bp->b_buffer_length != range_length) {
+ ASSERT(bp->b_flags & XBF_STALE);
+ rbp = &(*rbp)->rb_right;
+ continue;
+ }
atomic_inc(&bp->b_hold);
goto found;
}
@@ -449,17 +472,21 @@ _xfs_buf_find(
if (new_bp) {
_xfs_buf_initialize(new_bp, btp, range_base,
range_length, flags);
- new_bp->b_hash = hash;
- list_add(&new_bp->b_hash_list, &hash->bh_list);
+ rb_link_node(&new_bp->b_rbnode, parent, rbp);
+ rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
+ /* the buffer keeps the perag reference until it is freed */
+ new_bp->b_pag = pag;
+ spin_unlock(&pag->pag_buf_lock);
} else {
XFS_STATS_INC(xb_miss_locked);
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
}
-
- spin_unlock(&hash->bh_lock);
return new_bp;
found:
- spin_unlock(&hash->bh_lock);
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
/* Attempt to get the semaphore without sleeping,
* if this does not work then we need to drop the
@@ -625,8 +652,7 @@ void
xfs_buf_readahead(
xfs_buftarg_t *target,
xfs_off_t ioff,
- size_t isize,
- xfs_buf_flags_t flags)
+ size_t isize)
{
struct backing_dev_info *bdi;
@@ -634,8 +660,42 @@ xfs_buf_readahead(
if (bdi_read_congested(bdi))
return;
- flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
- xfs_buf_read(target, ioff, isize, flags);
+ xfs_buf_read(target, ioff, isize,
+ XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
+}
+
+/*
+ * Read an uncached buffer from disk. Allocates and returns a locked
+ * buffer containing the disk contents or nothing.
+ */
+struct xfs_buf *
+xfs_buf_read_uncached(
+ struct xfs_mount *mp,
+ struct xfs_buftarg *target,
+ xfs_daddr_t daddr,
+ size_t length,
+ int flags)
+{
+ xfs_buf_t *bp;
+ int error;
+
+ bp = xfs_buf_get_uncached(target, length, flags);
+ if (!bp)
+ return NULL;
+
+ /* set up the buffer for a read IO */
+ xfs_buf_lock(bp);
+ XFS_BUF_SET_ADDR(bp, daddr);
+ XFS_BUF_READ(bp);
+ XFS_BUF_BUSY(bp);
+
+ xfsbdstrat(mp, bp);
+ error = xfs_buf_iowait(bp);
+ if (error || bp->b_error) {
+ xfs_buf_relse(bp);
+ return NULL;
+ }
+ return bp;
}
xfs_buf_t *
@@ -707,9 +767,10 @@ xfs_buf_associate_memory(
}
xfs_buf_t *
-xfs_buf_get_noaddr(
+xfs_buf_get_uncached(
+ struct xfs_buftarg *target,
size_t len,
- xfs_buftarg_t *target)
+ int flags)
{
unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
int error, i;
@@ -725,7 +786,7 @@ xfs_buf_get_noaddr(
goto fail_free_buf;
for (i = 0; i < page_count; i++) {
- bp->b_pages[i] = alloc_page(GFP_KERNEL);
+ bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
if (!bp->b_pages[i])
goto fail_free_mem;
}
@@ -740,7 +801,7 @@ xfs_buf_get_noaddr(
xfs_buf_unlock(bp);
- trace_xfs_buf_get_noaddr(bp, _RET_IP_);
+ trace_xfs_buf_get_uncached(bp, _RET_IP_);
return bp;
fail_free_mem:
@@ -774,29 +835,30 @@ void
xfs_buf_rele(
xfs_buf_t *bp)
{
- xfs_bufhash_t *hash = bp->b_hash;
+ struct xfs_perag *pag = bp->b_pag;
trace_xfs_buf_rele(bp, _RET_IP_);
- if (unlikely(!hash)) {
+ if (!pag) {
ASSERT(!bp->b_relse);
+ ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
if (atomic_dec_and_test(&bp->b_hold))
xfs_buf_free(bp);
return;
}
+ ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
ASSERT(atomic_read(&bp->b_hold) > 0);
- if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
+ if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
if (bp->b_relse) {
atomic_inc(&bp->b_hold);
- spin_unlock(&hash->bh_lock);
- (*(bp->b_relse)) (bp);
- } else if (bp->b_flags & XBF_FS_MANAGED) {
- spin_unlock(&hash->bh_lock);
+ spin_unlock(&pag->pag_buf_lock);
+ bp->b_relse(bp);
} else {
ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
- list_del_init(&bp->b_hash_list);
- spin_unlock(&hash->bh_lock);
+ rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
xfs_buf_free(bp);
}
}
@@ -859,7 +921,7 @@ xfs_buf_lock(
trace_xfs_buf_lock(bp, _RET_IP_);
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
- xfs_log_force(bp->b_mount, 0);
+ xfs_log_force(bp->b_target->bt_mount, 0);
if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(bp->b_target->bt_mapping);
down(&bp->b_sema);
@@ -924,19 +986,7 @@ xfs_buf_iodone_work(
xfs_buf_t *bp =
container_of(work, xfs_buf_t, b_iodone_work);
- /*
- * We can get an EOPNOTSUPP to ordered writes. Here we clear the
- * ordered flag and reissue them. Because we can't tell the higher
- * layers directly that they should not issue ordered I/O anymore, they
- * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
- */
- if ((bp->b_error == EOPNOTSUPP) &&
- (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
- trace_xfs_buf_ordered_retry(bp, _RET_IP_);
- bp->b_flags &= ~XBF_ORDERED;
- bp->b_flags |= _XFS_BARRIER_FAILED;
- xfs_buf_iorequest(bp);
- } else if (bp->b_iodone)
+ if (bp->b_iodone)
(*(bp->b_iodone))(bp);
else if (bp->b_flags & XBF_ASYNC)
xfs_buf_relse(bp);
@@ -982,7 +1032,6 @@ xfs_bwrite(
{
int error;
- bp->b_mount = mp;
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
@@ -1003,8 +1052,6 @@ xfs_bdwrite(
{
trace_xfs_buf_bdwrite(bp, _RET_IP_);
- bp->b_mount = mp;
-
bp->b_flags &= ~XBF_READ;
bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
@@ -1013,7 +1060,7 @@ xfs_bdwrite(
/*
* Called when we want to stop a buffer from getting written or read.
- * We attach the EIO error, muck with its flags, and call biodone
+ * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
* so that the proper iodone callbacks get called.
*/
STATIC int
@@ -1030,21 +1077,21 @@ xfs_bioerror(
XFS_BUF_ERROR(bp, EIO);
/*
- * We're calling biodone, so delete XBF_DONE flag.
+ * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
*/
XFS_BUF_UNREAD(bp);
XFS_BUF_UNDELAYWRITE(bp);
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
- xfs_biodone(bp);
+ xfs_buf_ioend(bp, 0);
return EIO;
}
/*
* Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the biodone call.
+ * here ourselves, and avoiding the xfs_buf_ioend call.
* This is meant for userdata errors; metadata bufs come with
* iodone functions attached, so that we can track down errors.
*/
@@ -1093,7 +1140,7 @@ int
xfs_bdstrat_cb(
struct xfs_buf *bp)
{
- if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
+ if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
trace_xfs_bdstrat_shut(bp, _RET_IP_);
/*
* Metadata write that didn't get logged but
@@ -1195,7 +1242,7 @@ _xfs_buf_ioapply(
if (bp->b_flags & XBF_ORDERED) {
ASSERT(!(bp->b_flags & XBF_READ));
- rw = WRITE_BARRIER;
+ rw = WRITE_FLUSH_FUA;
} else if (bp->b_flags & XBF_LOG_BUFFER) {
ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
bp->b_flags &= ~_XBF_RUN_QUEUES;
@@ -1399,62 +1446,24 @@ xfs_buf_iomove(
*/
void
xfs_wait_buftarg(
- xfs_buftarg_t *btp)
-{
- xfs_buf_t *bp, *n;
- xfs_bufhash_t *hash;
- uint i;
-
- for (i = 0; i < (1 << btp->bt_hashshift); i++) {
- hash = &btp->bt_hash[i];
-again:
- spin_lock(&hash->bh_lock);
- list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
- ASSERT(btp == bp->b_target);
- if (!(bp->b_flags & XBF_FS_MANAGED)) {
- spin_unlock(&hash->bh_lock);
- /*
- * Catch superblock reference count leaks
- * immediately
- */
- BUG_ON(bp->b_bn == 0);
- delay(100);
- goto again;
- }
- }
- spin_unlock(&hash->bh_lock);
- }
-}
-
-/*
- * Allocate buffer hash table for a given target.
- * For devices containing metadata (i.e. not the log/realtime devices)
- * we need to allocate a much larger hash table.
- */
-STATIC void
-xfs_alloc_bufhash(
- xfs_buftarg_t *btp,
- int external)
+ struct xfs_buftarg *btp)
{
- unsigned int i;
+ struct xfs_perag *pag;
+ uint i;
- btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */
- btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
- sizeof(xfs_bufhash_t));
- for (i = 0; i < (1 << btp->bt_hashshift); i++) {
- spin_lock_init(&btp->bt_hash[i].bh_lock);
- INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
+ for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
+ pag = xfs_perag_get(btp->bt_mount, i);
+ spin_lock(&pag->pag_buf_lock);
+ while (rb_first(&pag->pag_buf_tree)) {
+ spin_unlock(&pag->pag_buf_lock);
+ delay(100);
+ spin_lock(&pag->pag_buf_lock);
+ }
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
}
}
-STATIC void
-xfs_free_bufhash(
- xfs_buftarg_t *btp)
-{
- kmem_free_large(btp->bt_hash);
- btp->bt_hash = NULL;
-}
-
/*
* buftarg list for delwrite queue processing
*/
@@ -1487,7 +1496,6 @@ xfs_free_buftarg(
xfs_flush_buftarg(btp, 1);
if (mp->m_flags & XFS_MOUNT_BARRIER)
xfs_blkdev_issue_flush(btp);
- xfs_free_bufhash(btp);
iput(btp->bt_mapping->host);
/* Unregister the buftarg first so that we don't get a
@@ -1609,6 +1617,7 @@ out_error:
xfs_buftarg_t *
xfs_alloc_buftarg(
+ struct xfs_mount *mp,
struct block_device *bdev,
int external,
const char *fsname)
@@ -1617,6 +1626,7 @@ xfs_alloc_buftarg(
btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+ btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
btp->bt_bdev = bdev;
if (xfs_setsize_buftarg_early(btp, bdev))
@@ -1625,7 +1635,6 @@ xfs_alloc_buftarg(
goto error;
if (xfs_alloc_delwrite_queue(btp, fsname))
goto error;
- xfs_alloc_bufhash(btp, external);
return btp;
error:
@@ -1916,7 +1925,7 @@ xfs_flush_buftarg(
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
- xfs_iowait(bp);
+ xfs_buf_iowait(bp);
xfs_buf_relse(bp);
}
}
@@ -1933,7 +1942,7 @@ xfs_buf_init(void)
goto out;
xfslogd_workqueue = alloc_workqueue("xfslogd",
- WQ_RESCUER | WQ_HIGHPRI, 1);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
if (!xfslogd_workqueue)
goto out_free_buf_zone;
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 2a05614f0b9..383a3f37cf9 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -51,7 +51,6 @@ typedef enum {
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
-#define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */
#define XBF_ORDERED (1 << 11)/* use ordered writes */
#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
@@ -86,14 +85,6 @@ typedef enum {
*/
#define _XBF_PAGE_LOCKED (1 << 22)
-/*
- * If we try a barrier write, but it fails we have to communicate
- * this to the upper layers. Unfortunately b_error gets overwritten
- * when the buffer is re-issued so we have to add another flag to
- * keep this information.
- */
-#define _XFS_BARRIER_FAILED (1 << 23)
-
typedef unsigned int xfs_buf_flags_t;
#define XFS_BUF_FLAGS \
@@ -104,7 +95,6 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_DONE, "DONE" }, \
{ XBF_DELWRI, "DELWRI" }, \
{ XBF_STALE, "STALE" }, \
- { XBF_FS_MANAGED, "FS_MANAGED" }, \
{ XBF_ORDERED, "ORDERED" }, \
{ XBF_READ_AHEAD, "READ_AHEAD" }, \
{ XBF_LOCK, "LOCK" }, /* should never be set */\
@@ -114,8 +104,7 @@ typedef unsigned int xfs_buf_flags_t;
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
- { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }, \
- { _XFS_BARRIER_FAILED, "BARRIER_FAILED" }
+ { _XBF_PAGE_LOCKED, "PAGE_LOCKED" }
typedef enum {
@@ -132,14 +121,11 @@ typedef struct xfs_buftarg {
dev_t bt_dev;
struct block_device *bt_bdev;
struct address_space *bt_mapping;
+ struct xfs_mount *bt_mount;
unsigned int bt_bsize;
unsigned int bt_sshift;
size_t bt_smask;
- /* per device buffer hash table */
- uint bt_hashshift;
- xfs_bufhash_t *bt_hash;
-
/* per device delwri queue */
struct task_struct *bt_task;
struct list_head bt_list;
@@ -167,34 +153,41 @@ typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
#define XB_PAGES 2
typedef struct xfs_buf {
+ /*
+ * The first cacheline holds all the fields needed for an uncontended cache
+ * hit to be fully processed. The semaphore straddles the cacheline
+ * boundary, but the counter and lock sit on the first cacheline,
+ * which is the only bit that is touched if we hit the semaphore
+ * fast-path on locking.
+ */
+ struct rb_node b_rbnode; /* rbtree node */
+ xfs_off_t b_file_offset; /* offset in file */
+ size_t b_buffer_length;/* size of buffer in bytes */
+ atomic_t b_hold; /* reference count */
+ xfs_buf_flags_t b_flags; /* status flags */
struct semaphore b_sema; /* semaphore for lockables */
- unsigned long b_queuetime; /* time buffer was queued */
- atomic_t b_pin_count; /* pin count */
+
wait_queue_head_t b_waiters; /* unpin waiters */
struct list_head b_list;
- xfs_buf_flags_t b_flags; /* status flags */
- struct list_head b_hash_list; /* hash table list */
- xfs_bufhash_t *b_hash; /* hash table list start */
+ struct xfs_perag *b_pag; /* contains rbtree root */
xfs_buftarg_t *b_target; /* buffer target (device) */
- atomic_t b_hold; /* reference count */
xfs_daddr_t b_bn; /* block number for I/O */
- xfs_off_t b_file_offset; /* offset in file */
- size_t b_buffer_length;/* size of buffer in bytes */
size_t b_count_desired;/* desired transfer size */
void *b_addr; /* virtual address of buffer */
struct work_struct b_iodone_work;
- atomic_t b_io_remaining; /* #outstanding I/O requests */
xfs_buf_iodone_t b_iodone; /* I/O completion function */
xfs_buf_relse_t b_relse; /* releasing function */
struct completion b_iowait; /* queue for I/O waiters */
void *b_fspriv;
void *b_fspriv2;
- struct xfs_mount *b_mount;
- unsigned short b_error; /* error code on I/O */
- unsigned int b_page_count; /* size of page array */
- unsigned int b_offset; /* page offset in first page */
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
+ unsigned long b_queuetime; /* time buffer was queued */
+ atomic_t b_pin_count; /* pin count */
+ atomic_t b_io_remaining; /* #outstanding I/O requests */
+ unsigned int b_page_count; /* size of page array */
+ unsigned int b_offset; /* page offset in first page */
+ unsigned short b_error; /* error code on I/O */
#ifdef XFS_BUF_LOCK_TRACKING
int b_last_holder;
#endif
@@ -213,11 +206,13 @@ extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t);
extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
-extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
+extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int);
extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
extern void xfs_buf_hold(xfs_buf_t *);
-extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
- xfs_buf_flags_t);
+extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t);
+struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp,
+ struct xfs_buftarg *target,
+ xfs_daddr_t daddr, size_t length, int flags);
/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
@@ -242,6 +237,8 @@ extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
+#define xfs_buf_zero(bp, off, len) \
+ xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
@@ -276,8 +273,6 @@ extern void xfs_buf_terminate(void);
XFS_BUF_DONE(bp); \
} while (0)
-#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
-
#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
@@ -356,25 +351,11 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
xfs_buf_rele(bp);
}
-#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
-
-#define xfs_biomove(bp, off, len, data, rw) \
- xfs_buf_iomove((bp), (off), (len), (data), \
- ((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ)
-
-#define xfs_biozero(bp, off, len) \
- xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
-
-#define xfs_iowait(bp) xfs_buf_iowait(bp)
-
-#define xfs_baread(target, rablkno, ralen) \
- xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
-
-
/*
* Handling of buftargs.
*/
-extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int, const char *);
+extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
+ struct block_device *, int, const char *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h
deleted file mode 100644
index 55bddf3b609..00000000000
--- a/fs/xfs/linux-2.6/xfs_cred.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_CRED_H__
-#define __XFS_CRED_H__
-
-#include <linux/capability.h>
-
-/*
- * Credentials
- */
-typedef const struct cred cred_t;
-
-#endif /* __XFS_CRED_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 1f279b012f9..ed88ed16811 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -32,10 +32,9 @@ xfs_tosspages(
xfs_off_t last,
int fiopt)
{
- struct address_space *mapping = VFS_I(ip)->i_mapping;
-
- if (mapping->nrpages)
- truncate_inode_pages(mapping, first);
+ /* can't toss partial tail pages, so mask them out */
+ last &= ~(PAGE_SIZE - 1);
+ truncate_inode_pages_range(VFS_I(ip)->i_mapping, first, last - 1);
}
int
@@ -50,12 +49,11 @@ xfs_flushinval_pages(
trace_xfs_pagecache_inval(ip, first, last);
- if (mapping->nrpages) {
- xfs_iflags_clear(ip, XFS_ITRUNCATED);
- ret = filemap_write_and_wait(mapping);
- if (!ret)
- truncate_inode_pages(mapping, first);
- }
+ xfs_iflags_clear(ip, XFS_ITRUNCATED);
+ ret = filemap_write_and_wait_range(mapping, first,
+ last == -1 ? LLONG_MAX : last);
+ if (!ret)
+ truncate_inode_pages_range(mapping, first, last);
return -ret;
}
@@ -71,10 +69,9 @@ xfs_flush_pages(
int ret = 0;
int ret2;
- if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- xfs_iflags_clear(ip, XFS_ITRUNCATED);
- ret = -filemap_fdatawrite(mapping);
- }
+ xfs_iflags_clear(ip, XFS_ITRUNCATED);
+ ret = -filemap_fdatawrite_range(mapping, first,
+ last == -1 ? LLONG_MAX : last);
if (flags & XBF_ASYNC)
return ret;
ret2 = xfs_wait_on_pages(ip, first, last);
@@ -91,7 +88,9 @@ xfs_wait_on_pages(
{
struct address_space *mapping = VFS_I(ip)->i_mapping;
- if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
- return -filemap_fdatawait(mapping);
+ if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
+ return -filemap_fdatawait_range(mapping, first,
+ last == -1 ? ip->i_size - 1 : last);
+ }
return 0;
}
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
index 2ae8b1ccb02..76e81cff70b 100644
--- a/fs/xfs/linux-2.6/xfs_globals.c
+++ b/fs/xfs/linux-2.6/xfs_globals.c
@@ -16,7 +16,6 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
-#include "xfs_cred.h"
#include "xfs_sysctl.h"
/*
diff --git a/fs/xfs/linux-2.6/xfs_globals.h b/fs/xfs/linux-2.6/xfs_globals.h
deleted file mode 100644
index 69f71caf061..00000000000
--- a/fs/xfs/linux-2.6/xfs_globals.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_GLOBALS_H__
-#define __XFS_GLOBALS_H__
-
-extern uint64_t xfs_panic_mask; /* set to cause more panics */
-
-#endif /* __XFS_GLOBALS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 3b9e626f7cd..2ea238f6d38 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -790,7 +790,7 @@ xfs_ioc_fsgetxattr(
xfs_ilock(ip, XFS_ILOCK_SHARED);
fa.fsx_xflags = xfs_ip2xflags(ip);
fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
- fa.fsx_projid = ip->i_d.di_projid;
+ fa.fsx_projid = xfs_get_projid(ip);
if (attr) {
if (ip->i_afp) {
@@ -909,10 +909,10 @@ xfs_ioctl_setattr(
return XFS_ERROR(EIO);
/*
- * Disallow 32bit project ids because on-disk structure
- * is 16bit only.
+ * Disallow 32bit project ids when projid32bit feature is not enabled.
*/
- if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1))
+ if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
+ !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
return XFS_ERROR(EINVAL);
/*
@@ -961,7 +961,7 @@ xfs_ioctl_setattr(
if (mask & FSX_PROJID) {
if (XFS_IS_QUOTA_RUNNING(mp) &&
XFS_IS_PQUOTA_ON(mp) &&
- ip->i_d.di_projid != fa->fsx_projid) {
+ xfs_get_projid(ip) != fa->fsx_projid) {
ASSERT(tp);
code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
capable(CAP_FOWNER) ?
@@ -1063,12 +1063,12 @@ xfs_ioctl_setattr(
* Change the ownerships and register quota modifications
* in the transaction.
*/
- if (ip->i_d.di_projid != fa->fsx_projid) {
+ if (xfs_get_projid(ip) != fa->fsx_projid) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
olddquot = xfs_qm_vop_chown(tp, ip,
&ip->i_gdquot, gdqp);
}
- ip->i_d.di_projid = fa->fsx_projid;
+ xfs_set_projid(ip, fa->fsx_projid);
/*
* We may have to rev the inode as well as
@@ -1088,8 +1088,8 @@ xfs_ioctl_setattr(
xfs_diflags_to_linux(ip);
}
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
XFS_STATS_INC(xs_ig_attrchg);
@@ -1301,7 +1301,8 @@ xfs_file_ioctl(
case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP64:
case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP64: {
+ case XFS_IOC_UNRESVSP64:
+ case XFS_IOC_ZERO_RANGE: {
xfs_flock64_t bf;
if (copy_from_user(&bf, arg, sizeof(bf)))
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 6c83f7f62dc..b3486dfa552 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -164,7 +164,8 @@ xfs_ioctl32_bstat_copyin(
get_user(bstat->bs_extsize, &bstat32->bs_extsize) ||
get_user(bstat->bs_extents, &bstat32->bs_extents) ||
get_user(bstat->bs_gen, &bstat32->bs_gen) ||
- get_user(bstat->bs_projid, &bstat32->bs_projid) ||
+ get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
+ get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) ||
get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) ||
get_user(bstat->bs_aextents, &bstat32->bs_aextents))
@@ -218,6 +219,7 @@ xfs_bulkstat_one_fmt_compat(
put_user(buffer->bs_extents, &p32->bs_extents) ||
put_user(buffer->bs_gen, &p32->bs_gen) ||
put_user(buffer->bs_projid, &p32->bs_projid) ||
+ put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) ||
put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
put_user(buffer->bs_aextents, &p32->bs_aextents))
@@ -574,6 +576,7 @@ xfs_file_compat_ioctl(
case XFS_IOC_FSGEOMETRY_V1:
case XFS_IOC_FSGROWFSDATA:
case XFS_IOC_FSGROWFSRT:
+ case XFS_IOC_ZERO_RANGE:
return xfs_file_ioctl(filp, cmd, p);
#else
case XFS_IOC_ALLOCSP_32:
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h
index 1024c4f8ba0..08b605792a9 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.h
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.h
@@ -65,8 +65,10 @@ typedef struct compat_xfs_bstat {
__s32 bs_extsize; /* extent size */
__s32 bs_extents; /* number of extents */
__u32 bs_gen; /* generation count */
- __u16 bs_projid; /* project id */
- unsigned char bs_pad[14]; /* pad space, unused */
+ __u16 bs_projid_lo; /* lower part of project id */
+#define bs_projid bs_projid_lo /* (previously just bs_projid) */
+ __u16 bs_projid_hi; /* high part of project id */
+ unsigned char bs_pad[12]; /* pad space, unused */
__u32 bs_dmevmask; /* DMIG event mask */
__u16 bs_dmstate; /* DMIG state info */
__u16 bs_aextents; /* attribute number of extents */
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index b1fc2a6bfe8..ec858e09d54 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -95,41 +95,6 @@ xfs_mark_inode_dirty(
}
/*
- * Change the requested timestamp in the given inode.
- * We don't lock across timestamp updates, and we don't log them but
- * we do record the fact that there is dirty information in core.
- */
-void
-xfs_ichgtime(
- xfs_inode_t *ip,
- int flags)
-{
- struct inode *inode = VFS_I(ip);
- timespec_t tv;
- int sync_it = 0;
-
- tv = current_fs_time(inode->i_sb);
-
- if ((flags & XFS_ICHGTIME_MOD) &&
- !timespec_equal(&inode->i_mtime, &tv)) {
- inode->i_mtime = tv;
- sync_it = 1;
- }
- if ((flags & XFS_ICHGTIME_CHG) &&
- !timespec_equal(&inode->i_ctime, &tv)) {
- inode->i_ctime = tv;
- sync_it = 1;
- }
-
- /*
- * Update complete - now make sure everyone knows that the inode
- * is dirty.
- */
- if (sync_it)
- xfs_mark_inode_dirty_sync(ip);
-}
-
-/*
* Hook in SELinux. This is not quite correct yet, what we really need
* here (as we do for default ACLs) is a mechanism by which creation of
* these attrs can be journalled at inode creation time (along with the
@@ -224,7 +189,7 @@ xfs_vn_mknod(
}
xfs_dentry_to_name(&name, dentry);
- error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip, NULL);
+ error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
if (unlikely(error))
goto out_free_acl;
@@ -397,7 +362,7 @@ xfs_vn_symlink(
(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
xfs_dentry_to_name(&name, dentry);
- error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip, NULL);
+ error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
if (unlikely(error))
goto out;
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 2fa0bd9ebc7..214ddd71ff7 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -71,6 +71,7 @@
#include <linux/random.h>
#include <linux/ctype.h>
#include <linux/writeback.h>
+#include <linux/capability.h>
#include <asm/page.h>
#include <asm/div64.h>
@@ -79,14 +80,12 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
-#include <xfs_cred.h>
#include <xfs_vnode.h>
#include <xfs_stats.h>
#include <xfs_sysctl.h>
#include <xfs_iops.h>
#include <xfs_aops.h>
#include <xfs_super.h>
-#include <xfs_globals.h>
#include <xfs_buf.h>
/*
@@ -144,7 +143,7 @@
#define SYNCHRONIZE() barrier()
#define __return_address __builtin_return_address(0)
-#define dfltprid 0
+#define XFS_PROJID_DEFAULT 0
#define MAXPATHLEN 1024
#define MIN(a,b) (min(a,b))
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index a4e07974955..ab31ce5aeaf 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -44,7 +44,6 @@
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
-#include "xfs_version.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
@@ -645,7 +644,7 @@ xfs_barrier_test(
XFS_BUF_ORDERED(sbp);
xfsbdstrat(mp, sbp);
- error = xfs_iowait(sbp);
+ error = xfs_buf_iowait(sbp);
/*
* Clear all the flags we set and possible error state in the
@@ -693,8 +692,7 @@ void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
- blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL,
- BLKDEV_IFL_WAIT);
+ blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
}
STATIC void
@@ -758,18 +756,20 @@ xfs_open_devices(
* Setup xfs_mount buffer target pointers
*/
error = ENOMEM;
- mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0, mp->m_fsname);
+ mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
if (!mp->m_ddev_targp)
goto out_close_rtdev;
if (rtdev) {
- mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1, mp->m_fsname);
+ mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
+ mp->m_fsname);
if (!mp->m_rtdev_targp)
goto out_free_ddev_targ;
}
if (logdev && logdev != ddev) {
- mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1, mp->m_fsname);
+ mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
+ mp->m_fsname);
if (!mp->m_logdev_targp)
goto out_free_rtdev_targ;
} else {
@@ -972,12 +972,7 @@ xfs_fs_inode_init_once(
/*
* Dirty the XFS inode when mark_inode_dirty_sync() is called so that
- * we catch unlogged VFS level updates to the inode. Care must be taken
- * here - the transaction code calls mark_inode_dirty_sync() to mark the
- * VFS inode dirty in a transaction and clears the i_update_core field;
- * it must clear the field after calling mark_inode_dirty_sync() to
- * correctly indicate that the dirty state has been propagated into the
- * inode log item.
+ * we catch unlogged VFS level updates to the inode.
*
* We need the barrier() to maintain correct ordering between unlogged
* updates and the transaction commit code that clears the i_update_core
@@ -1521,8 +1516,9 @@ xfs_fs_fill_super(
if (error)
goto out_free_fsname;
- if (xfs_icsb_init_counters(mp))
- mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
+ error = xfs_icsb_init_counters(mp);
+ if (error)
+ goto out_close_devices;
error = xfs_readsb(mp, flags);
if (error)
@@ -1583,6 +1579,7 @@ xfs_fs_fill_super(
xfs_freesb(mp);
out_destroy_counters:
xfs_icsb_destroy_counters(mp);
+ out_close_devices:
xfs_close_devices(mp);
out_free_fsname:
xfs_free_fsname(mp);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 1ef4a4d2d99..50a3266c999 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -62,6 +62,7 @@ extern void xfs_qm_exit(void);
# define XFS_DBG_STRING "no debug"
#endif
+#define XFS_VERSION_STRING "SGI XFS"
#define XFS_BUILD_OPTIONS XFS_ACL_STRING \
XFS_SECURITY_STRING \
XFS_REALTIME_STRING \
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 81976ffed7d..37d33254981 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -39,42 +39,39 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
+/*
+ * The inode lookup is done in batches to keep the amount of lock traffic and
+ * radix tree lookups to a minimum. The batch size is a trade-off between
+ * lookup reduction and stack usage. This is in the reclaim path, so we can't
+ * be too greedy.
+ */
+#define XFS_LOOKUP_BATCH 32
-STATIC xfs_inode_t *
-xfs_inode_ag_lookup(
- struct xfs_mount *mp,
- struct xfs_perag *pag,
- uint32_t *first_index,
- int tag)
+STATIC int
+xfs_inode_ag_walk_grab(
+ struct xfs_inode *ip)
{
- int nr_found;
- struct xfs_inode *ip;
+ struct inode *inode = VFS_I(ip);
- /*
- * use a gang lookup to find the next inode in the tree
- * as the tree is sparse and a gang lookup walks to find
- * the number of objects requested.
- */
- if (tag == XFS_ICI_NO_TAG) {
- nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
- (void **)&ip, *first_index, 1);
- } else {
- nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
- (void **)&ip, *first_index, 1, tag);
+ /* nothing to sync during shutdown */
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return EFSCORRUPTED;
+
+ /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+ if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ return ENOENT;
+
+ /* If we can't grab the inode, it must be on its way to reclaim. */
+ if (!igrab(inode))
+ return ENOENT;
+
+ if (is_bad_inode(inode)) {
+ IRELE(ip);
+ return ENOENT;
}
- if (!nr_found)
- return NULL;
- /*
- * Update the index for the next lookup. Catch overflows
- * into the next AG range which can occur if we have inodes
- * in the last block of the AG and we are currently
- * pointing to the last inode.
- */
- *first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
- if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- return NULL;
- return ip;
+ /* inode is valid */
+ return 0;
}
STATIC int
@@ -83,49 +80,75 @@ xfs_inode_ag_walk(
struct xfs_perag *pag,
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
- int flags,
- int tag,
- int exclusive,
- int *nr_to_scan)
+ int flags)
{
uint32_t first_index;
int last_error = 0;
int skipped;
+ int done;
+ int nr_found;
restart:
+ done = 0;
skipped = 0;
first_index = 0;
+ nr_found = 0;
do {
+ struct xfs_inode *batch[XFS_LOOKUP_BATCH];
int error = 0;
- xfs_inode_t *ip;
+ int i;
- if (exclusive)
- write_lock(&pag->pag_ici_lock);
- else
- read_lock(&pag->pag_ici_lock);
- ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
- if (!ip) {
- if (exclusive)
- write_unlock(&pag->pag_ici_lock);
- else
- read_unlock(&pag->pag_ici_lock);
+ read_lock(&pag->pag_ici_lock);
+ nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
+ (void **)batch, first_index,
+ XFS_LOOKUP_BATCH);
+ if (!nr_found) {
+ read_unlock(&pag->pag_ici_lock);
break;
}
- /* execute releases pag->pag_ici_lock */
- error = execute(ip, pag, flags);
- if (error == EAGAIN) {
- skipped++;
- continue;
+ /*
+ * Grab the inodes before we drop the lock. If we found
+ * nothing, nr_found == 0 and the loop will be skipped.
+ */
+ for (i = 0; i < nr_found; i++) {
+ struct xfs_inode *ip = batch[i];
+
+ if (done || xfs_inode_ag_walk_grab(ip))
+ batch[i] = NULL;
+
+ /*
+ * Update the index for the next lookup. Catch overflows
+ * into the next AG range which can occur if we have inodes
+ * in the last block of the AG and we are currently
+ * pointing to the last inode.
+ */
+ first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+ if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+ done = 1;
+ }
+
+ /* unlock now we've grabbed the inodes. */
+ read_unlock(&pag->pag_ici_lock);
+
+ for (i = 0; i < nr_found; i++) {
+ if (!batch[i])
+ continue;
+ error = execute(batch[i], pag, flags);
+ IRELE(batch[i]);
+ if (error == EAGAIN) {
+ skipped++;
+ continue;
+ }
+ if (error && last_error != EFSCORRUPTED)
+ last_error = error;
}
- if (error)
- last_error = error;
/* bail out if the filesystem is corrupted. */
if (error == EFSCORRUPTED)
break;
- } while ((*nr_to_scan)--);
+ } while (nr_found && !done);
if (skipped) {
delay(1);
@@ -134,110 +157,32 @@ restart:
return last_error;
}
-/*
- * Select the next per-ag structure to iterate during the walk. The reclaim
- * walk is optimised only to walk AGs with reclaimable inodes in them.
- */
-static struct xfs_perag *
-xfs_inode_ag_iter_next_pag(
- struct xfs_mount *mp,
- xfs_agnumber_t *first,
- int tag)
-{
- struct xfs_perag *pag = NULL;
-
- if (tag == XFS_ICI_RECLAIM_TAG) {
- int found;
- int ref;
-
- spin_lock(&mp->m_perag_lock);
- found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
- (void **)&pag, *first, 1, tag);
- if (found <= 0) {
- spin_unlock(&mp->m_perag_lock);
- return NULL;
- }
- *first = pag->pag_agno + 1;
- /* open coded pag reference increment */
- ref = atomic_inc_return(&pag->pag_ref);
- spin_unlock(&mp->m_perag_lock);
- trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
- } else {
- pag = xfs_perag_get(mp, *first);
- (*first)++;
- }
- return pag;
-}
-
int
xfs_inode_ag_iterator(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
- int flags,
- int tag,
- int exclusive,
- int *nr_to_scan)
+ int flags)
{
struct xfs_perag *pag;
int error = 0;
int last_error = 0;
xfs_agnumber_t ag;
- int nr;
- nr = nr_to_scan ? *nr_to_scan : INT_MAX;
ag = 0;
- while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
- error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
- exclusive, &nr);
+ while ((pag = xfs_perag_get(mp, ag))) {
+ ag = pag->pag_agno + 1;
+ error = xfs_inode_ag_walk(mp, pag, execute, flags);
xfs_perag_put(pag);
if (error) {
last_error = error;
if (error == EFSCORRUPTED)
break;
}
- if (nr <= 0)
- break;
}
- if (nr_to_scan)
- *nr_to_scan = nr;
return XFS_ERROR(last_error);
}
-/* must be called with pag_ici_lock held and releases it */
-int
-xfs_sync_inode_valid(
- struct xfs_inode *ip,
- struct xfs_perag *pag)
-{
- struct inode *inode = VFS_I(ip);
- int error = EFSCORRUPTED;
-
- /* nothing to sync during shutdown */
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- goto out_unlock;
-
- /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
- error = ENOENT;
- if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
- goto out_unlock;
-
- /* If we can't grab the inode, it must on it's way to reclaim. */
- if (!igrab(inode))
- goto out_unlock;
-
- if (is_bad_inode(inode)) {
- IRELE(ip);
- goto out_unlock;
- }
-
- /* inode is valid */
- error = 0;
-out_unlock:
- read_unlock(&pag->pag_ici_lock);
- return error;
-}
-
STATIC int
xfs_sync_inode_data(
struct xfs_inode *ip,
@@ -248,10 +193,6 @@ xfs_sync_inode_data(
struct address_space *mapping = inode->i_mapping;
int error = 0;
- error = xfs_sync_inode_valid(ip, pag);
- if (error)
- return error;
-
if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
goto out_wait;
@@ -268,7 +209,6 @@ xfs_sync_inode_data(
out_wait:
if (flags & SYNC_WAIT)
xfs_ioend_wait(ip);
- IRELE(ip);
return error;
}
@@ -280,10 +220,6 @@ xfs_sync_inode_attr(
{
int error = 0;
- error = xfs_sync_inode_valid(ip, pag);
- if (error)
- return error;
-
xfs_ilock(ip, XFS_ILOCK_SHARED);
if (xfs_inode_clean(ip))
goto out_unlock;
@@ -302,7 +238,6 @@ xfs_sync_inode_attr(
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
- IRELE(ip);
return error;
}
@@ -318,8 +253,7 @@ xfs_sync_data(
ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
- error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
- XFS_ICI_NO_TAG, 0, NULL);
+ error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
if (error)
return XFS_ERROR(error);
@@ -337,8 +271,7 @@ xfs_sync_attr(
{
ASSERT((flags & ~SYNC_WAIT) == 0);
- return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
- XFS_ICI_NO_TAG, 0, NULL);
+ return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}
STATIC int
@@ -698,6 +631,43 @@ __xfs_inode_clear_reclaim_tag(
}
/*
+ * Grab the inode for reclaim exclusively.
+ * Return 0 if we grabbed it, non-zero otherwise.
+ */
+STATIC int
+xfs_reclaim_inode_grab(
+ struct xfs_inode *ip,
+ int flags)
+{
+
+ /*
+ * Do some unlocked checks first to avoid unnecessary lock traffic.
+ * The first is a flush lock check, the second is an already-in-reclaim
+ * check. Only do these checks if we are not going to block on locks.
+ */
+ if ((flags & SYNC_TRYLOCK) &&
+ (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
+ return 1;
+ }
+
+ /*
+ * The radix tree lock here protects a thread in xfs_iget from racing
+ * with us starting reclaim on the inode. Once we have the
+ * XFS_IRECLAIM flag set it will not touch us.
+ */
+ spin_lock(&ip->i_flags_lock);
+ ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+ if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
+ /* ignore as it is already under reclaim */
+ spin_unlock(&ip->i_flags_lock);
+ return 1;
+ }
+ __xfs_iflags_set(ip, XFS_IRECLAIM);
+ spin_unlock(&ip->i_flags_lock);
+ return 0;
+}
+
+/*
* Inodes in different states need to be treated differently, and the return
* value of xfs_iflush is not sufficient to get this right. The following table
* lists the inode states and the reclaim actions necessary for non-blocking
@@ -755,23 +725,6 @@ xfs_reclaim_inode(
{
int error = 0;
- /*
- * The radix tree lock here protects a thread in xfs_iget from racing
- * with us starting reclaim on the inode. Once we have the
- * XFS_IRECLAIM flag set it will not touch us.
- */
- spin_lock(&ip->i_flags_lock);
- ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
- if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
- /* ignore as it is already under reclaim */
- spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
- return 0;
- }
- __xfs_iflags_set(ip, XFS_IRECLAIM);
- spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
-
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!xfs_iflock_nowait(ip)) {
if (!(sync_mode & SYNC_WAIT))
@@ -868,13 +821,126 @@ reclaim:
}
+/*
+ * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
+ * corrupted, we still want to try to reclaim all the inodes. If we don't,
+ * then a shutdown during the filesystem unmount reclaim walk will leak all
+ * the unreclaimed inodes.
+ */
+int
+xfs_reclaim_inodes_ag(
+ struct xfs_mount *mp,
+ int flags,
+ int *nr_to_scan)
+{
+ struct xfs_perag *pag;
+ int error = 0;
+ int last_error = 0;
+ xfs_agnumber_t ag;
+ int trylock = flags & SYNC_TRYLOCK;
+ int skipped;
+
+restart:
+ ag = 0;
+ skipped = 0;
+ while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
+ unsigned long first_index = 0;
+ int done = 0;
+ int nr_found = 0;
+
+ ag = pag->pag_agno + 1;
+
+ if (trylock) {
+ if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
+ skipped++;
+ continue;
+ }
+ first_index = pag->pag_ici_reclaim_cursor;
+ } else
+ mutex_lock(&pag->pag_ici_reclaim_lock);
+
+ do {
+ struct xfs_inode *batch[XFS_LOOKUP_BATCH];
+ int i;
+
+ write_lock(&pag->pag_ici_lock);
+ nr_found = radix_tree_gang_lookup_tag(
+ &pag->pag_ici_root,
+ (void **)batch, first_index,
+ XFS_LOOKUP_BATCH,
+ XFS_ICI_RECLAIM_TAG);
+ if (!nr_found) {
+ write_unlock(&pag->pag_ici_lock);
+ break;
+ }
+
+ /*
+ * Grab the inodes before we drop the lock. If we found
+ * nothing, nr_found == 0 and the loop will be skipped.
+ */
+ for (i = 0; i < nr_found; i++) {
+ struct xfs_inode *ip = batch[i];
+
+ if (done || xfs_reclaim_inode_grab(ip, flags))
+ batch[i] = NULL;
+
+ /*
+ * Update the index for the next lookup. Catch
+ * overflows into the next AG range which can
+ * occur if we have inodes in the last block of
+ * the AG and we are currently pointing to the
+ * last inode.
+ */
+ first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+ if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+ done = 1;
+ }
+
+ /* unlock now we've grabbed the inodes. */
+ write_unlock(&pag->pag_ici_lock);
+
+ for (i = 0; i < nr_found; i++) {
+ if (!batch[i])
+ continue;
+ error = xfs_reclaim_inode(batch[i], pag, flags);
+ if (error && last_error != EFSCORRUPTED)
+ last_error = error;
+ }
+
+ *nr_to_scan -= XFS_LOOKUP_BATCH;
+
+ } while (nr_found && !done && *nr_to_scan > 0);
+
+ if (trylock && !done)
+ pag->pag_ici_reclaim_cursor = first_index;
+ else
+ pag->pag_ici_reclaim_cursor = 0;
+ mutex_unlock(&pag->pag_ici_reclaim_lock);
+ xfs_perag_put(pag);
+ }
+
+ /*
+ * If we skipped any AG, and we still have scan count remaining, do
+ * another pass, this time using blocking reclaim semantics (i.e.
+ * waiting on the reclaim locks and ignoring the reclaim cursors). This
+ * ensures that when we have more reclaimers than AGs we block rather
+ * than spin trying to execute reclaim.
+ */
+ if (trylock && skipped && *nr_to_scan > 0) {
+ trylock = 0;
+ goto restart;
+ }
+ return XFS_ERROR(last_error);
+}
+
int
xfs_reclaim_inodes(
xfs_mount_t *mp,
int mode)
{
- return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
- XFS_ICI_RECLAIM_TAG, 1, NULL);
+ int nr_to_scan = INT_MAX;
+
+ return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}
/*
@@ -896,17 +962,16 @@ xfs_reclaim_inode_shrink(
if (!(gfp_mask & __GFP_FS))
return -1;
- xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
- XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
- /* if we don't exhaust the scan, don't bother coming back */
+ xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
+ /* terminate if we don't exhaust the scan */
if (nr_to_scan > 0)
return -1;
}
reclaimable = 0;
ag = 0;
- while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
- XFS_ICI_RECLAIM_TAG))) {
+ while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
+ ag = pag->pag_agno + 1;
reclaimable += pag->pag_ici_reclaimable;
xfs_perag_put(pag);
}
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index fe78726196f..32ba6628290 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -47,10 +47,10 @@ void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip);
void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
struct xfs_inode *ip);
-int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
+int xfs_sync_inode_grab(struct xfs_inode *ip);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
- int flags, int tag, int write_lock, int *nr_to_scan);
+ int flags);
void xfs_inode_shrinker_register(struct xfs_mount *mp);
void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index be5dffd282a..acef2e98c59 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -124,7 +124,7 @@ DEFINE_EVENT(xfs_perag_class, name, \
unsigned long caller_ip), \
TP_ARGS(mp, agno, refcount, caller_ip))
DEFINE_PERAG_REF_EVENT(xfs_perag_get);
-DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim);
+DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_put);
DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
@@ -325,13 +325,12 @@ DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_cond_lock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
-DEFINE_BUF_EVENT(xfs_buf_ordered_retry);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue);
DEFINE_BUF_EVENT(xfs_buf_delwri_split);
-DEFINE_BUF_EVENT(xfs_buf_get_noaddr);
+DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_bdstrat_shut);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
DEFINE_BUF_EVENT(xfs_buf_item_iodone);
diff --git a/fs/xfs/linux-2.6/xfs_version.h b/fs/xfs/linux-2.6/xfs_version.h
deleted file mode 100644
index f8d279d7563..00000000000
--- a/fs/xfs/linux-2.6/xfs_version.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2001-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_VERSION_H__
-#define __XFS_VERSION_H__
-
-/*
- * Dummy file that can contain a timestamp to put into the
- * XFS init string, to help users keep track of what they're
- * running
- */
-
-#define XFS_VERSION_STRING "SGI XFS"
-
-#endif /* __XFS_VERSION_H__ */
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index e1a2f6800e0..faf8e1a83a1 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -463,87 +463,68 @@ xfs_qm_dqtobp(
uint flags)
{
xfs_bmbt_irec_t map;
- int nmaps, error;
+ int nmaps = 1, error;
xfs_buf_t *bp;
- xfs_inode_t *quotip;
- xfs_mount_t *mp;
+ xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp);
+ xfs_mount_t *mp = dqp->q_mount;
xfs_disk_dquot_t *ddq;
- xfs_dqid_t id;
- boolean_t newdquot;
+ xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id);
xfs_trans_t *tp = (tpp ? *tpp : NULL);
- mp = dqp->q_mount;
- id = be32_to_cpu(dqp->q_core.d_id);
- nmaps = 1;
- newdquot = B_FALSE;
+ dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
- /*
- * If we don't know where the dquot lives, find out.
- */
- if (dqp->q_blkno == (xfs_daddr_t) 0) {
- /* We use the id as an index */
- dqp->q_fileoffset = (xfs_fileoff_t)id /
- mp->m_quotainfo->qi_dqperchunk;
- nmaps = 1;
- quotip = XFS_DQ_TO_QIP(dqp);
- xfs_ilock(quotip, XFS_ILOCK_SHARED);
+ xfs_ilock(quotip, XFS_ILOCK_SHARED);
+ if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
/*
- * Return if this type of quotas is turned off while we didn't
- * have an inode lock
+ * Return if this type of quotas is turned off while we
+ * didn't have the quota inode lock.
*/
- if (XFS_IS_THIS_QUOTA_OFF(dqp)) {
- xfs_iunlock(quotip, XFS_ILOCK_SHARED);
- return (ESRCH);
- }
+ xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+ return ESRCH;
+ }
+
+ /*
+ * Find the block map; no allocations yet
+ */
+ error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
+ XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
+ NULL, 0, &map, &nmaps, NULL);
+
+ xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+ if (error)
+ return error;
+
+ ASSERT(nmaps == 1);
+ ASSERT(map.br_blockcount == 1);
+
+ /*
+ * Offset of dquot in the (fixed sized) dquot chunk.
+ */
+ dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
+ sizeof(xfs_dqblk_t);
+
+ ASSERT(map.br_startblock != DELAYSTARTBLOCK);
+ if (map.br_startblock == HOLESTARTBLOCK) {
/*
- * Find the block map; no allocations yet
+ * We don't allocate unless we're asked to
*/
- error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
- XFS_DQUOT_CLUSTER_SIZE_FSB,
- XFS_BMAPI_METADATA,
- NULL, 0, &map, &nmaps, NULL);
+ if (!(flags & XFS_QMOPT_DQALLOC))
+ return ENOENT;
- xfs_iunlock(quotip, XFS_ILOCK_SHARED);
+ ASSERT(tp);
+ error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
+ dqp->q_fileoffset, &bp);
if (error)
- return (error);
- ASSERT(nmaps == 1);
- ASSERT(map.br_blockcount == 1);
+ return error;
+ tp = *tpp;
+ } else {
+ trace_xfs_dqtobp_read(dqp);
/*
- * offset of dquot in the (fixed sized) dquot chunk.
+ * store the blkno etc so that we don't have to do the
+ * mapping all the time
*/
- dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
- sizeof(xfs_dqblk_t);
- if (map.br_startblock == HOLESTARTBLOCK) {
- /*
- * We don't allocate unless we're asked to
- */
- if (!(flags & XFS_QMOPT_DQALLOC))
- return (ENOENT);
-
- ASSERT(tp);
- if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
- dqp->q_fileoffset, &bp)))
- return (error);
- tp = *tpp;
- newdquot = B_TRUE;
- } else {
- /*
- * store the blkno etc so that we don't have to do the
- * mapping all the time
- */
- dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
- }
- }
- ASSERT(dqp->q_blkno != DELAYSTARTBLOCK);
- ASSERT(dqp->q_blkno != HOLESTARTBLOCK);
-
- /*
- * Read in the buffer, unless we've just done the allocation
- * (in which case we already have the buf).
- */
- if (!newdquot) {
- trace_xfs_dqtobp_read(dqp);
+ dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
dqp->q_blkno,
@@ -552,13 +533,14 @@ xfs_qm_dqtobp(
if (error || !bp)
return XFS_ERROR(error);
}
+
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
/*
* calculate the location of the dquot inside the buffer.
*/
- ddq = (xfs_disk_dquot_t *)((char *)XFS_BUF_PTR(bp) + dqp->q_bufoffset);
+ ddq = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset);
/*
* A simple sanity check in case we got a corrupted dquot...
@@ -1176,18 +1158,18 @@ xfs_qm_dqflush(
xfs_dquot_t *dqp,
uint flags)
{
- xfs_mount_t *mp;
- xfs_buf_t *bp;
- xfs_disk_dquot_t *ddqp;
+ struct xfs_mount *mp = dqp->q_mount;
+ struct xfs_buf *bp;
+ struct xfs_disk_dquot *ddqp;
int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(!completion_done(&dqp->q_flush));
+
trace_xfs_dqflush(dqp);
/*
- * If not dirty, or it's pinned and we are not supposed to
- * block, nada.
+ * If not dirty, or it's pinned and we are not supposed to block, nada.
*/
if (!XFS_DQ_IS_DIRTY(dqp) ||
(!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
@@ -1201,40 +1183,46 @@ xfs_qm_dqflush(
* down forcibly. If that's the case we must not write this dquot
* to disk, because the log record didn't make it to disk!
*/
- if (XFS_FORCED_SHUTDOWN(dqp->q_mount)) {
- dqp->dq_flags &= ~(XFS_DQ_DIRTY);
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ dqp->dq_flags &= ~XFS_DQ_DIRTY;
xfs_dqfunlock(dqp);
return XFS_ERROR(EIO);
}
/*
* Get the buffer containing the on-disk dquot
- * We don't need a transaction envelope because we know that the
- * the ondisk-dquot has already been allocated for.
*/
- if ((error = xfs_qm_dqtobp(NULL, dqp, &ddqp, &bp, XFS_QMOPT_DOWARN))) {
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
+ mp->m_quotainfo->qi_dqchunklen, 0, &bp);
+ if (error) {
ASSERT(error != ENOENT);
- /*
- * Quotas could have gotten turned off (ESRCH)
- */
xfs_dqfunlock(dqp);
- return (error);
+ return error;
}
- if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id),
- 0, XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
- xfs_force_shutdown(dqp->q_mount, SHUTDOWN_CORRUPT_INCORE);
+ /*
+ * Calculate the location of the dquot inside the buffer.
+ */
+ ddqp = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset);
+
+ /*
+ * A simple sanity check in case we got a corrupted dquot..
+ */
+ if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
+ XFS_QMOPT_DOWARN, "dqflush (incore copy)")) {
+ xfs_buf_relse(bp);
+ xfs_dqfunlock(dqp);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
return XFS_ERROR(EIO);
}
/* This is the only portion of data that needs to persist */
- memcpy(ddqp, &(dqp->q_core), sizeof(xfs_disk_dquot_t));
+ memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
/*
* Clear the dirty field and remember the flush lsn for later use.
*/
- dqp->dq_flags &= ~(XFS_DQ_DIRTY);
- mp = dqp->q_mount;
+ dqp->dq_flags &= ~XFS_DQ_DIRTY;
xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
&dqp->q_logitem.qli_item.li_lsn);
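
The dqflush hunk above now reads the dquot buffer directly and locates the on-disk record by the byte offset cached in q_bufoffset, then copies only the in-core q_core over it. A minimal userspace model of that copy-out step is sketched below; the struct layout, field names and sizes are illustrative assumptions, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct disk_dquot { uint32_t d_id; uint64_t d_bcount; };	/* assumed layout */
struct incore_dquot { struct disk_dquot q_core; size_t q_bufoffset; };

static void flush_dquot(char *chunk_buf, const struct incore_dquot *dqp)
{
	/* same idea as ddqp = XFS_BUF_PTR(bp) + dqp->q_bufoffset */
	struct disk_dquot *ddqp =
		(struct disk_dquot *)(chunk_buf + dqp->q_bufoffset);

	/* only q_core needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(*ddqp));
}

int main(void)
{
	char chunk[4096] = { 0 };
	struct incore_dquot dq = { .q_core = { 7, 123 }, .q_bufoffset = 256 };
	struct disk_dquot out;

	flush_dquot(chunk, &dq);
	memcpy(&out, chunk + 256, sizeof(out));
	printf("id written at offset 256: %u\n", (unsigned)out.d_id);
	return 0;
}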
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 9a92407109a..f8e854b4fde 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -55,8 +55,6 @@ uint ndquot;
kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone;
-static cred_t xfs_zerocr;
-
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
@@ -837,7 +835,7 @@ xfs_qm_dqattach_locked(
xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
flags & XFS_QMOPT_DQALLOC,
ip->i_udquot, &ip->i_gdquot) :
- xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
+ xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
flags & XFS_QMOPT_DQALLOC,
ip->i_udquot, &ip->i_gdquot);
/*
@@ -1199,87 +1197,6 @@ xfs_qm_list_destroy(
mutex_destroy(&(list->qh_lock));
}
-
-/*
- * Stripped down version of dqattach. This doesn't attach, or even look at the
- * dquots attached to the inode. The rationale is that there won't be any
- * attached at the time this is called from quotacheck.
- */
-STATIC int
-xfs_qm_dqget_noattach(
- xfs_inode_t *ip,
- xfs_dquot_t **O_udqpp,
- xfs_dquot_t **O_gdqpp)
-{
- int error;
- xfs_mount_t *mp;
- xfs_dquot_t *udqp, *gdqp;
-
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- mp = ip->i_mount;
- udqp = NULL;
- gdqp = NULL;
-
- if (XFS_IS_UQUOTA_ON(mp)) {
- ASSERT(ip->i_udquot == NULL);
- /*
- * We want the dquot allocated if it doesn't exist.
- */
- if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
- XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
- &udqp))) {
- /*
- * Shouldn't be able to turn off quotas here.
- */
- ASSERT(error != ESRCH);
- ASSERT(error != ENOENT);
- return error;
- }
- ASSERT(udqp);
- }
-
- if (XFS_IS_OQUOTA_ON(mp)) {
- ASSERT(ip->i_gdquot == NULL);
- if (udqp)
- xfs_dqunlock(udqp);
- error = XFS_IS_GQUOTA_ON(mp) ?
- xfs_qm_dqget(mp, ip,
- ip->i_d.di_gid, XFS_DQ_GROUP,
- XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
- &gdqp) :
- xfs_qm_dqget(mp, ip,
- ip->i_d.di_projid, XFS_DQ_PROJ,
- XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
- &gdqp);
- if (error) {
- if (udqp)
- xfs_qm_dqrele(udqp);
- ASSERT(error != ESRCH);
- ASSERT(error != ENOENT);
- return error;
- }
- ASSERT(gdqp);
-
- /* Reacquire the locks in the right order */
- if (udqp) {
- if (! xfs_qm_dqlock_nowait(udqp)) {
- xfs_dqunlock(gdqp);
- xfs_dqlock(udqp);
- xfs_dqlock(gdqp);
- }
- }
- }
-
- *O_udqpp = udqp;
- *O_gdqpp = gdqp;
-
-#ifdef QUOTADEBUG
- if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
- if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
-#endif
- return 0;
-}
-
/*
* Create an inode and return with a reference already taken, but unlocked
* This is how we create quota inodes
@@ -1305,8 +1222,8 @@ xfs_qm_qino_alloc(
return error;
}
- if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
- &xfs_zerocr, 0, 1, ip, &committed))) {
+ error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
+ if (error) {
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT);
return error;
@@ -1516,7 +1433,7 @@ xfs_qm_dqiterate(
rablkcnt = map[i+1].br_blockcount;
rablkno = map[i+1].br_startblock;
while (rablkcnt--) {
- xfs_baread(mp->m_ddev_targp,
+ xfs_buf_readahead(mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp, rablkno),
mp->m_quotainfo->qi_dqchunklen);
rablkno++;
@@ -1546,18 +1463,34 @@ xfs_qm_dqiterate(
/*
* Called by dqusage_adjust in doing a quotacheck.
- * Given the inode, and a dquot (either USR or GRP, doesn't matter),
- * this updates its incore copy as well as the buffer copy. This is
- * so that once the quotacheck is done, we can just log all the buffers,
- * as opposed to logging numerous updates to individual dquots.
+ *
+ * Given the inode and a dquot id, this updates both the incore dquot as well
+ * as the buffer copy. This is so that once the quotacheck is done, we can
+ * just log all the buffers, as opposed to logging numerous updates to
+ * individual dquots.
*/
-STATIC void
+STATIC int
xfs_qm_quotacheck_dqadjust(
- xfs_dquot_t *dqp,
+ struct xfs_inode *ip,
+ xfs_dqid_t id,
+ uint type,
xfs_qcnt_t nblks,
xfs_qcnt_t rtblks)
{
- ASSERT(XFS_DQ_IS_LOCKED(dqp));
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dquot *dqp;
+ int error;
+
+ error = xfs_qm_dqget(mp, ip, id, type,
+ XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
+ if (error) {
+ /*
+ * Shouldn't be able to turn off quotas here.
+ */
+ ASSERT(error != ESRCH);
+ ASSERT(error != ENOENT);
+ return error;
+ }
trace_xfs_dqadjust(dqp);
@@ -1582,11 +1515,13 @@ xfs_qm_quotacheck_dqadjust(
* There are no timers for the default values set in the root dquot.
*/
if (dqp->q_core.d_id) {
- xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
- xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
+ xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
+ xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
}
dqp->dq_flags |= XFS_DQ_DIRTY;
+ xfs_qm_dqput(dqp);
+ return 0;
}
STATIC int
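
The rewritten xfs_qm_quotacheck_dqadjust() above takes an (id, type) pair, gets the dquot itself, adjusts the counts, marks it dirty and drops the reference, returning errors instead of relying on pre-attached dquots. The sketch below models that aggregate-by-id pattern in plain C; the toy table stands in for dqget with XFS_QMOPT_DQALLOC, and all names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

struct quota_rec {
	uint32_t id;
	uint64_t icount;	/* inodes charged to this id */
	uint64_t bcount;	/* blocks charged to this id */
	int	 dirty;
	int	 used;
};

static struct quota_rec table[64];	/* toy "dquot cache" */

static struct quota_rec *rec_get(uint32_t id)
{
	for (int i = 0; i < 64; i++)
		if (table[i].used && table[i].id == id)
			return &table[i];
	for (int i = 0; i < 64; i++)
		if (!table[i].used) {	/* models DQALLOC: create on miss */
			table[i].used = 1;
			table[i].id = id;
			return &table[i];
		}
	return NULL;			/* models dqget failure */
}

static int quotacheck_adjust(uint32_t id, uint64_t nblks)
{
	struct quota_rec *rec = rec_get(id);

	if (!rec)
		return -1;		/* propagate instead of asserting */
	rec->icount += 1;		/* one inode */
	rec->bcount += nblks;		/* and its blocks */
	rec->dirty = 1;			/* flush all dirty records in one pass later */
	return 0;
}

int main(void)
{
	quotacheck_adjust(500, 8);
	quotacheck_adjust(500, 16);
	printf("id 500: inodes=%llu blocks=%llu\n",
	       (unsigned long long)table[0].icount,
	       (unsigned long long)table[0].bcount);
	return 0;
}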
@@ -1629,8 +1564,7 @@ xfs_qm_dqusage_adjust(
int *res) /* result code value */
{
xfs_inode_t *ip;
- xfs_dquot_t *udqp, *gdqp;
- xfs_qcnt_t nblks, rtblks;
+ xfs_qcnt_t nblks, rtblks = 0;
int error;
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -1650,51 +1584,24 @@ xfs_qm_dqusage_adjust(
* the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time.
*/
- if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) {
+ error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
+ if (error) {
*res = BULKSTAT_RV_NOTHING;
return error;
}
- /*
- * Obtain the locked dquots. In case of an error (eg. allocation
- * fails for ENOSPC), we return the negative of the error number
- * to bulkstat, so that it can get propagated to quotacheck() and
- * making us disable quotas for the file system.
- */
- if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- IRELE(ip);
- *res = BULKSTAT_RV_GIVEUP;
- return error;
- }
+ ASSERT(ip->i_delayed_blks == 0);
- rtblks = 0;
- if (! XFS_IS_REALTIME_INODE(ip)) {
- nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
- } else {
+ if (XFS_IS_REALTIME_INODE(ip)) {
/*
* Walk thru the extent list and count the realtime blocks.
*/
- if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- IRELE(ip);
- if (udqp)
- xfs_qm_dqput(udqp);
- if (gdqp)
- xfs_qm_dqput(gdqp);
- *res = BULKSTAT_RV_GIVEUP;
- return error;
- }
- nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
+ error = xfs_qm_get_rtblks(ip, &rtblks);
+ if (error)
+ goto error0;
}
- ASSERT(ip->i_delayed_blks == 0);
- /*
- * We can't release the inode while holding its dquot locks.
- * The inode can go into inactive and might try to acquire the dquotlocks.
- * So, just unlock here and do a vn_rele at the end.
- */
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
/*
* Add the (disk blocks and inode) resources occupied by this
@@ -1709,26 +1616,36 @@ xfs_qm_dqusage_adjust(
* and quotaoffs don't race. (Quotachecks happen at mount time only).
*/
if (XFS_IS_UQUOTA_ON(mp)) {
- ASSERT(udqp);
- xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
- xfs_qm_dqput(udqp);
+ error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
+ XFS_DQ_USER, nblks, rtblks);
+ if (error)
+ goto error0;
}
- if (XFS_IS_OQUOTA_ON(mp)) {
- ASSERT(gdqp);
- xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
- xfs_qm_dqput(gdqp);
+
+ if (XFS_IS_GQUOTA_ON(mp)) {
+ error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
+ XFS_DQ_GROUP, nblks, rtblks);
+ if (error)
+ goto error0;
}
- /*
- * Now release the inode. This will send it to 'inactive', and
- * possibly even free blocks.
- */
- IRELE(ip);
- /*
- * Goto next inode.
- */
+ if (XFS_IS_PQUOTA_ON(mp)) {
+ error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
+ XFS_DQ_PROJ, nblks, rtblks);
+ if (error)
+ goto error0;
+ }
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ IRELE(ip);
*res = BULKSTAT_RV_DIDONE;
return 0;
+
+error0:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ IRELE(ip);
+ *res = BULKSTAT_RV_GIVEUP;
+ return error;
}
/*
@@ -2224,7 +2141,7 @@ xfs_qm_write_sb_changes(
/*
- * Given an inode, a uid and gid (from cred_t) make sure that we have
+ * Given an inode, a uid, gid and prid, make sure that we have
* allocated relevant dquot(s) on disk, and that we won't exceed inode
* quotas by creating this file.
* This also attaches dquot(s) to the given inode after locking it,
@@ -2332,7 +2249,7 @@ xfs_qm_vop_dqalloc(
xfs_dqunlock(gq);
}
} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
- if (ip->i_d.di_projid != prid) {
+ if (xfs_get_projid(ip) != prid) {
xfs_iunlock(ip, lockflags);
if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
XFS_DQ_PROJ,
@@ -2454,7 +2371,7 @@ xfs_qm_vop_chown_reserve(
}
if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
- ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))
+ xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
prjflags = XFS_QMOPT_ENOSPC;
if (prjflags ||
@@ -2558,7 +2475,7 @@ xfs_qm_vop_create_dqattach(
ip->i_gdquot = gdqp;
ASSERT(XFS_IS_OQUOTA_ON(mp));
ASSERT((XFS_IS_GQUOTA_ON(mp) ?
- ip->i_d.di_gid : ip->i_d.di_projid) ==
+ ip->i_d.di_gid : xfs_get_projid(ip)) ==
be32_to_cpu(gdqp->q_core.d_id));
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index bea02d786c5..45b5cb1788a 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -81,7 +81,7 @@ xfs_qm_statvfs(
xfs_mount_t *mp = ip->i_mount;
xfs_dquot_t *dqp;
- if (!xfs_qm_dqget(mp, NULL, ip->i_d.di_projid, XFS_DQ_PROJ, 0, &dqp)) {
+ if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) {
xfs_fill_statvfs_from_dquot(statp, &dqp->q_core);
xfs_qm_dqput(dqp);
}
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 45e5849df23..bdebc183223 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -276,7 +276,7 @@ xfs_qm_scall_trunc_qfile(
goto out_unlock;
}
- xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
out_unlock:
@@ -875,21 +875,14 @@ xfs_dqrele_inode(
struct xfs_perag *pag,
int flags)
{
- int error;
-
/* skip quota inodes */
if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
ASSERT(ip->i_udquot == NULL);
ASSERT(ip->i_gdquot == NULL);
- read_unlock(&pag->pag_ici_lock);
return 0;
}
- error = xfs_sync_inode_valid(ip, pag);
- if (error)
- return error;
-
xfs_ilock(ip, XFS_ILOCK_EXCL);
if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
xfs_qm_dqrele(ip->i_udquot);
@@ -900,8 +893,6 @@ xfs_dqrele_inode(
ip->i_gdquot = NULL;
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- IRELE(ip);
return 0;
}
@@ -918,8 +909,7 @@ xfs_qm_dqrele_all_inodes(
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
- XFS_ICI_NO_TAG, 0, NULL);
+ xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
}
/*------------------------------------------------------------------------*/
@@ -1175,7 +1165,7 @@ xfs_qm_internalqcheck_adjust(
}
xfs_qm_internalqcheck_get_dquots(mp,
(xfs_dqid_t) ip->i_d.di_uid,
- (xfs_dqid_t) ip->i_d.di_projid,
+ (xfs_dqid_t) xfs_get_projid(ip),
(xfs_dqid_t) ip->i_d.di_gid,
&ud, &gd);
if (XFS_IS_UQUOTA_ON(mp)) {
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 4917d4eed4e..63c7a1a6c02 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -230,6 +230,15 @@ typedef struct xfs_perag {
rwlock_t pag_ici_lock; /* incore inode lock */
struct radix_tree_root pag_ici_root; /* incore inode cache root */
int pag_ici_reclaimable; /* reclaimable inodes */
+ struct mutex pag_ici_reclaim_lock; /* serialisation point */
+ unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */
+
+ /* buffer cache index */
+ spinlock_t pag_buf_lock; /* lock for pag_buf_tree */
+ struct rb_root pag_buf_tree; /* ordered tree of active buffers */
+
+ /* for rcu-safe freeing */
+ struct rcu_head rcu_head;
#endif
int pagb_count; /* pagb slots in use */
} xfs_perag_t;
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index af168faccc7..112abc439ca 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -675,7 +675,7 @@ xfs_alloc_ag_vextent_near(
xfs_agblock_t gtbnoa; /* aligned ... */
xfs_extlen_t gtdiff; /* difference to right side entry */
xfs_extlen_t gtlen; /* length of right side entry */
- xfs_extlen_t gtlena; /* aligned ... */
+ xfs_extlen_t gtlena = 0; /* aligned ... */
xfs_agblock_t gtnew; /* useful start bno of right side */
int error; /* error code */
int i; /* result code, temporary */
@@ -684,7 +684,7 @@ xfs_alloc_ag_vextent_near(
xfs_agblock_t ltbnoa; /* aligned ... */
xfs_extlen_t ltdiff; /* difference to left side entry */
xfs_extlen_t ltlen; /* length of left side entry */
- xfs_extlen_t ltlena; /* aligned ... */
+ xfs_extlen_t ltlena = 0; /* aligned ... */
xfs_agblock_t ltnew; /* useful start bno of left side */
xfs_extlen_t rlen; /* length of returned extent */
#if defined(DEBUG) && defined(__KERNEL__)
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 97f7328967f..3916925e258 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -280,38 +280,6 @@ xfs_allocbt_key_diff(
return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
-STATIC int
-xfs_allocbt_kill_root(
- struct xfs_btree_cur *cur,
- struct xfs_buf *bp,
- int level,
- union xfs_btree_ptr *newroot)
-{
- int error;
-
- XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
- XFS_BTREE_STATS_INC(cur, killroot);
-
- /*
- * Update the root pointer, decreasing the level by 1 and then
- * free the old root.
- */
- xfs_allocbt_set_root(cur, newroot, -1);
- error = xfs_allocbt_free_block(cur, bp);
- if (error) {
- XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
- return error;
- }
-
- XFS_BTREE_STATS_INC(cur, free);
-
- xfs_btree_setbuf(cur, level, NULL);
- cur->bc_nlevels--;
-
- XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
- return 0;
-}
-
#ifdef DEBUG
STATIC int
xfs_allocbt_keys_inorder(
@@ -423,7 +391,6 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
- .kill_root = xfs_allocbt_kill_root,
.alloc_block = xfs_allocbt_alloc_block,
.free_block = xfs_allocbt_free_block,
.update_lastrec = xfs_allocbt_update_lastrec,
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index c2568242a90..c8637537881 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -355,16 +355,15 @@ xfs_attr_set_int(
if (mp->m_flags & XFS_MOUNT_WSYNC) {
xfs_trans_set_sync(args.trans);
}
+
+ if (!error && (flags & ATTR_KERNOTIME) == 0) {
+ xfs_trans_ichgtime(args.trans, dp,
+ XFS_ICHGTIME_CHG);
+ }
err2 = xfs_trans_commit(args.trans,
XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- /*
- * Hit the inode change time.
- */
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
- }
return(error == 0 ? err2 : error);
}
@@ -420,6 +419,9 @@ xfs_attr_set_int(
xfs_trans_set_sync(args.trans);
}
+ if ((flags & ATTR_KERNOTIME) == 0)
+ xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
+
/*
* Commit the last in the sequence of transactions.
*/
@@ -427,13 +429,6 @@ xfs_attr_set_int(
error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- /*
- * Hit the inode change time.
- */
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
- }
-
return(error);
out:
@@ -567,6 +562,9 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
xfs_trans_set_sync(args.trans);
}
+ if ((flags & ATTR_KERNOTIME) == 0)
+ xfs_trans_ichgtime(args.trans, dp, XFS_ICHGTIME_CHG);
+
/*
* Commit the last in the sequence of transactions.
*/
@@ -574,13 +572,6 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
error = xfs_trans_commit(args.trans, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- /*
- * Hit the inode change time.
- */
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_ichgtime(dp, XFS_ICHGTIME_CHG);
- }
-
return(error);
out:
@@ -1995,7 +1986,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
tmp = (valuelen < XFS_BUF_SIZE(bp))
? valuelen : XFS_BUF_SIZE(bp);
- xfs_biomove(bp, 0, tmp, dst, XBF_READ);
+ xfs_buf_iomove(bp, 0, tmp, dst, XBRW_READ);
xfs_buf_relse(bp);
dst += tmp;
valuelen -= tmp;
@@ -2125,9 +2116,9 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
XFS_BUF_SIZE(bp);
- xfs_biomove(bp, 0, tmp, src, XBF_WRITE);
+ xfs_buf_iomove(bp, 0, tmp, src, XBRW_WRITE);
if (tmp < XFS_BUF_SIZE(bp))
- xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
+ xfs_buf_zero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */
return (error);
}
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index f90dadd5a96..8abd12e32e1 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -614,7 +614,7 @@ xfs_bmap_add_extent(
nblks += cur->bc_private.b.allocated;
ASSERT(nblks <= da_old);
if (nblks < da_old)
- xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
+ xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
(int64_t)(da_old - nblks), rsvd);
}
/*
@@ -1079,7 +1079,8 @@ xfs_bmap_add_extent_delay_real(
diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
(cur ? cur->bc_private.b.allocated : 0));
if (diff > 0 &&
- xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
+ xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
+ -((int64_t)diff), rsvd)) {
/*
* Ick gross gag me with a spoon.
*/
@@ -1089,16 +1090,18 @@ xfs_bmap_add_extent_delay_real(
temp--;
diff--;
if (!diff ||
- !xfs_mod_incore_sb(ip->i_mount,
- XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
+ !xfs_icsb_modify_counters(ip->i_mount,
+ XFS_SBS_FDBLOCKS,
+ -((int64_t)diff), rsvd))
break;
}
if (temp2) {
temp2--;
diff--;
if (!diff ||
- !xfs_mod_incore_sb(ip->i_mount,
- XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
+ !xfs_icsb_modify_counters(ip->i_mount,
+ XFS_SBS_FDBLOCKS,
+ -((int64_t)diff), rsvd))
break;
}
}
@@ -1766,7 +1769,7 @@ xfs_bmap_add_extent_hole_delay(
}
if (oldlen != newlen) {
ASSERT(oldlen > newlen);
- xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
+ xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
(int64_t)(oldlen - newlen), rsvd);
/*
* Nothing to do for disk quota accounting here.
@@ -3111,9 +3114,10 @@ xfs_bmap_del_extent(
* Nothing to do for disk quota accounting here.
*/
ASSERT(da_old >= da_new);
- if (da_old > da_new)
- xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
- rsvd);
+ if (da_old > da_new) {
+ xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+ (int64_t)(da_old - da_new), rsvd);
+ }
done:
*logflagsp = flags;
return error;
@@ -4526,13 +4530,13 @@ xfs_bmapi(
-((int64_t)extsz), (flags &
XFS_BMAPI_RSVBLOCKS));
} else {
- error = xfs_mod_incore_sb(mp,
+ error = xfs_icsb_modify_counters(mp,
XFS_SBS_FDBLOCKS,
-((int64_t)alen), (flags &
XFS_BMAPI_RSVBLOCKS));
}
if (!error) {
- error = xfs_mod_incore_sb(mp,
+ error = xfs_icsb_modify_counters(mp,
XFS_SBS_FDBLOCKS,
-((int64_t)indlen), (flags &
XFS_BMAPI_RSVBLOCKS));
@@ -4542,7 +4546,7 @@ xfs_bmapi(
(int64_t)extsz, (flags &
XFS_BMAPI_RSVBLOCKS));
else if (error)
- xfs_mod_incore_sb(mp,
+ xfs_icsb_modify_counters(mp,
XFS_SBS_FDBLOCKS,
(int64_t)alen, (flags &
XFS_BMAPI_RSVBLOCKS));
@@ -4744,8 +4748,12 @@ xfs_bmapi(
* Check if writing previously allocated but
* unwritten extents.
*/
- if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
- ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
+ if (wr &&
+ ((mval->br_state == XFS_EXT_UNWRITTEN &&
+ ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) ||
+ (mval->br_state == XFS_EXT_NORM &&
+ ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_CONVERT)) ==
+ (XFS_BMAPI_PREALLOC|XFS_BMAPI_CONVERT))))) {
/*
* Modify (by adding) the state flag, if writing.
*/
@@ -4757,7 +4765,9 @@ xfs_bmapi(
*firstblock;
cur->bc_private.b.flist = flist;
}
- mval->br_state = XFS_EXT_NORM;
+ mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
+ ? XFS_EXT_NORM
+ : XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
firstblock, flist, &tmp_logflags,
whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
@@ -5200,7 +5210,7 @@ xfs_bunmapi(
ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_RTBLKS);
} else {
- xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
+ xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
(int64_t)del.br_blockcount, rsvd);
(void)xfs_trans_reserve_quota_nblks(NULL,
ip, -((long)del.br_blockcount), 0,
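
The xfs_bmapi() hunk above extends the conversion test so that, with PREALLOC|CONVERT set, a written (NORM) extent is flipped to unwritten, while the existing unwritten-to-written path is unchanged. A small self-contained sketch of that decision and state flip follows; the enum and flag values are stand-ins, not the kernel definitions.

#include <stdio.h>

enum ext_state { EXT_NORM, EXT_UNWRITTEN };

#define BMAPI_PREALLOC	0x1
#define BMAPI_DELAY	0x2
#define BMAPI_CONVERT	0x4

static int should_convert(int wr, enum ext_state st, int flags)
{
	if (!wr)
		return 0;
	if (st == EXT_UNWRITTEN && !(flags & (BMAPI_PREALLOC | BMAPI_DELAY)))
		return 1;	/* classic unwritten -> written conversion */
	if (st == EXT_NORM &&
	    (flags & (BMAPI_PREALLOC | BMAPI_CONVERT)) ==
	    (BMAPI_PREALLOC | BMAPI_CONVERT))
		return 1;	/* new case: written -> unwritten */
	return 0;
}

int main(void)
{
	enum ext_state st = EXT_NORM;
	int flags = BMAPI_PREALLOC | BMAPI_CONVERT;

	if (should_convert(1, st, flags))
		st = (st == EXT_UNWRITTEN) ? EXT_NORM : EXT_UNWRITTEN;
	printf("extent is now %s\n",
	       st == EXT_UNWRITTEN ? "unwritten" : "written");
	return 0;
}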
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index b13569a6179..71ec9b6ecdf 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -74,9 +74,12 @@ typedef struct xfs_bmap_free
#define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */
/* combine contig. space */
#define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */
-#define XFS_BMAPI_CONVERT 0x200 /* unwritten extent conversion - */
- /* need write cache flushing and no */
- /* additional allocation alignments */
+/*
+ * unwritten extent conversion - this needs write cache flushing and no additional
+ * allocation alignments. When specified with XFS_BMAPI_PREALLOC it converts
+ * from written to unwritten, otherwise convert from unwritten to written.
+ */
+#define XFS_BMAPI_CONVERT 0x200
#define XFS_BMAPI_FLAGS \
{ XFS_BMAPI_WRITE, "WRITE" }, \
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 829af92f0fb..04f9cca8da7 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -217,7 +217,7 @@ xfs_btree_del_cursor(
*/
for (i = 0; i < cur->bc_nlevels; i++) {
if (cur->bc_bufs[i])
- xfs_btree_setbuf(cur, i, NULL);
+ xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
else if (!error)
break;
}
@@ -656,7 +656,7 @@ xfs_btree_reada_bufl(
ASSERT(fsbno != NULLFSBLOCK);
d = XFS_FSB_TO_DADDR(mp, fsbno);
- xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
+ xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count);
}
/*
@@ -676,7 +676,7 @@ xfs_btree_reada_bufs(
ASSERT(agno != NULLAGNUMBER);
ASSERT(agbno != NULLAGBLOCK);
d = XFS_AGB_TO_DADDR(mp, agno, agbno);
- xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count);
+ xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count);
}
STATIC int
@@ -763,22 +763,19 @@ xfs_btree_readahead(
* Set the buffer for level "lev" in the cursor to bp, releasing
* any previous buffer.
*/
-void
+STATIC void
xfs_btree_setbuf(
xfs_btree_cur_t *cur, /* btree cursor */
int lev, /* level in btree */
xfs_buf_t *bp) /* new buffer to set */
{
struct xfs_btree_block *b; /* btree block */
- xfs_buf_t *obp; /* old buffer pointer */
- obp = cur->bc_bufs[lev];
- if (obp)
- xfs_trans_brelse(cur->bc_tp, obp);
+ if (cur->bc_bufs[lev])
+ xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
cur->bc_bufs[lev] = bp;
cur->bc_ra[lev] = 0;
- if (!bp)
- return;
+
b = XFS_BUF_TO_BLOCK(bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO)
@@ -3011,6 +3008,43 @@ out0:
return 0;
}
+/*
+ * Kill the current root node, and replace it with its only child node.
+ */
+STATIC int
+xfs_btree_kill_root(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp,
+ int level,
+ union xfs_btree_ptr *newroot)
+{
+ int error;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+ XFS_BTREE_STATS_INC(cur, killroot);
+
+ /*
+ * Update the root pointer, decreasing the level by 1 and then
+ * free the old root.
+ */
+ cur->bc_ops->set_root(cur, newroot, -1);
+
+ error = cur->bc_ops->free_block(cur, bp);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+ }
+
+ XFS_BTREE_STATS_INC(cur, free);
+
+ cur->bc_bufs[level] = NULL;
+ cur->bc_ra[level] = 0;
+ cur->bc_nlevels--;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ return 0;
+}
+
STATIC int
xfs_btree_dec_cursor(
struct xfs_btree_cur *cur,
@@ -3195,7 +3229,7 @@ xfs_btree_delrec(
* Make it the new root of the btree.
*/
pp = xfs_btree_ptr_addr(cur, 1, block);
- error = cur->bc_ops->kill_root(cur, bp, level, pp);
+ error = xfs_btree_kill_root(cur, bp, level, pp);
if (error)
goto error0;
} else if (level > 0) {
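
With the per-btree kill_root callbacks removed, the generic xfs_btree_kill_root() above only needs the existing set_root and free_block ops. The sketch below shows that ops-table shape in isolation; every type and name here is invented for illustration.

#include <stdio.h>

struct cursor;

struct btree_ops {
	void (*set_root)(struct cursor *cur, int newroot, int level_change);
	int  (*free_block)(struct cursor *cur, int blkno);
};

struct cursor {
	const struct btree_ops *ops;
	int nlevels;
	int root;
};

static int generic_kill_root(struct cursor *cur, int oldroot, int newroot)
{
	cur->ops->set_root(cur, newroot, -1);	/* drop one level */
	if (cur->ops->free_block(cur, oldroot))
		return -1;
	cur->nlevels--;
	return 0;
}

/* a concrete btree type only supplies the two primitives */
static void demo_set_root(struct cursor *cur, int newroot, int delta)
{
	cur->root = newroot;
	(void)delta;
}

static int demo_free_block(struct cursor *cur, int blkno)
{
	(void)cur; (void)blkno;
	return 0;
}

static const struct btree_ops demo_ops = {
	.set_root = demo_set_root,
	.free_block = demo_free_block,
};

int main(void)
{
	struct cursor cur = { .ops = &demo_ops, .nlevels = 2, .root = 10 };

	generic_kill_root(&cur, 10, 7);
	printf("root=%d levels=%d\n", cur.root, cur.nlevels);
	return 0;
}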
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 7fa07062bdd..82fafc66bd1 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -152,9 +152,7 @@ struct xfs_btree_ops {
/* update btree root pointer */
void (*set_root)(struct xfs_btree_cur *cur,
- union xfs_btree_ptr *nptr, int level_change);
- int (*kill_root)(struct xfs_btree_cur *cur, struct xfs_buf *bp,
- int level, union xfs_btree_ptr *newroot);
+ union xfs_btree_ptr *nptr, int level_change);
/* block allocation / freeing */
int (*alloc_block)(struct xfs_btree_cur *cur,
@@ -399,16 +397,6 @@ xfs_btree_reada_bufs(
xfs_agblock_t agbno, /* allocation group block number */
xfs_extlen_t count); /* count of filesystem blocks */
-/*
- * Set the buffer for level "lev" in the cursor to bp, releasing
- * any previous buffer.
- */
-void
-xfs_btree_setbuf(
- xfs_btree_cur_t *cur, /* btree cursor */
- int lev, /* level in btree */
- struct xfs_buf *bp); /* new buffer to set */
-
/*
* Common btree core entry points.
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1b09d7a280d..2686d0d54c5 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -692,8 +692,7 @@ xfs_buf_item_init(
* the first. If we do already have one, there is
* nothing to do here so return.
*/
- if (bp->b_mount != mp)
- bp->b_mount = mp;
+ ASSERT(bp->b_target->bt_mount == mp);
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
if (lip->li_type == XFS_LI_BUF) {
@@ -974,7 +973,7 @@ xfs_buf_iodone_callbacks(
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
- xfs_biodone(bp);
+ xfs_buf_ioend(bp, 0);
return;
}
@@ -1033,7 +1032,7 @@ xfs_buf_iodone_callbacks(
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
- xfs_biodone(bp);
+ xfs_buf_ioend(bp, 0);
}
/*
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 30fa0e206fb..1c00bedb317 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -2042,7 +2042,7 @@ xfs_da_do_buf(
mappedbno, nmapped, 0, &bp);
break;
case 3:
- xfs_baread(mp->m_ddev_targp, mappedbno, nmapped);
+ xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
error = 0;
bp = NULL;
break;
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index e5b153b2e6a..dffba9ba0db 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -49,8 +49,9 @@ typedef struct xfs_dinode {
__be32 di_uid; /* owner's user id */
__be32 di_gid; /* owner's group id */
__be32 di_nlink; /* number of links to file */
- __be16 di_projid; /* owner's project id */
- __u8 di_pad[8]; /* unused, zeroed space */
+ __be16 di_projid_lo; /* lower part of owner's project id */
+ __be16 di_projid_hi; /* higher part of owner's project id */
+ __u8 di_pad[6]; /* unused, zeroed space */
__be16 di_flushiter; /* incremented on flush */
xfs_timestamp_t di_atime; /* time last accessed */
xfs_timestamp_t di_mtime; /* time last modified */
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 504be8640e9..ae891223be9 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -961,7 +961,7 @@ xfs_dir2_leaf_getdents(
if (i > ra_current &&
map[ra_index].br_blockcount >=
mp->m_dirblkfsbs) {
- xfs_baread(mp->m_ddev_targp,
+ xfs_buf_readahead(mp->m_ddev_targp,
XFS_FSB_TO_DADDR(mp,
map[ra_index].br_startblock +
ra_offset),
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 87c2e9d0228..8f6fc1a9638 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -293,9 +293,11 @@ typedef struct xfs_bstat {
__s32 bs_extsize; /* extent size */
__s32 bs_extents; /* number of extents */
__u32 bs_gen; /* generation count */
- __u16 bs_projid; /* project id */
+ __u16 bs_projid_lo; /* lower part of project id */
+#define bs_projid bs_projid_lo /* (previously just bs_projid) */
__u16 bs_forkoff; /* inode fork offset in bytes */
- unsigned char bs_pad[12]; /* pad space, unused */
+ __u16 bs_projid_hi; /* higher part of project id */
+ unsigned char bs_pad[10]; /* pad space, unused */
__u32 bs_dmevmask; /* DMIG event mask */
__u16 bs_dmstate; /* DMIG state info */
__u16 bs_aextents; /* attribute number of extents */
@@ -448,6 +450,7 @@ typedef struct xfs_handle {
/* XFS_IOC_SETBIOSIZE ---- deprecated 46 */
/* XFS_IOC_GETBIOSIZE ---- deprecated 47 */
#define XFS_IOC_GETBMAPX _IOWR('X', 56, struct getbmap)
+#define XFS_IOC_ZERO_RANGE _IOW ('X', 57, struct xfs_flock64)
/*
* ioctl commands that replace IRIX syssgi()'s
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 43b1d569933..a7c116e814a 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -144,12 +144,11 @@ xfs_growfs_data_private(
if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
return error;
dpct = pct - mp->m_sb.sb_imax_pct;
- error = xfs_read_buf(mp, mp->m_ddev_targp,
- XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
- XFS_FSS_TO_BB(mp, 1), 0, &bp);
- if (error)
- return error;
- ASSERT(bp);
+ bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
+ XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
+ BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
+ if (!bp)
+ return EIO;
xfs_buf_relse(bp);
new = nb; /* use new as a temporary here */
@@ -597,7 +596,8 @@ out:
* the extra reserve blocks from the reserve.....
*/
int error;
- error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0);
+ error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+ fdblks_delta, 0);
if (error == ENOSPC)
goto retry;
}
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 5371d2dc360..0626a32c344 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -212,7 +212,7 @@ xfs_ialloc_inode_init(
* to log a whole cluster of inodes instead of all the
* individual transactions causing a lot of log traffic.
*/
- xfs_biozero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog);
+ xfs_buf_zero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog);
for (i = 0; i < ninodes; i++) {
int ioffset = i << mp->m_sb.sb_inodelog;
uint isize = sizeof(struct xfs_dinode);
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index d352862cefa..16921f55c54 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -183,38 +183,6 @@ xfs_inobt_key_diff(
cur->bc_rec.i.ir_startino;
}
-STATIC int
-xfs_inobt_kill_root(
- struct xfs_btree_cur *cur,
- struct xfs_buf *bp,
- int level,
- union xfs_btree_ptr *newroot)
-{
- int error;
-
- XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
- XFS_BTREE_STATS_INC(cur, killroot);
-
- /*
- * Update the root pointer, decreasing the level by 1 and then
- * free the old root.
- */
- xfs_inobt_set_root(cur, newroot, -1);
- error = xfs_inobt_free_block(cur, bp);
- if (error) {
- XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
- return error;
- }
-
- XFS_BTREE_STATS_INC(cur, free);
-
- cur->bc_bufs[level] = NULL;
- cur->bc_nlevels--;
-
- XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
- return 0;
-}
-
#ifdef DEBUG
STATIC int
xfs_inobt_keys_inorder(
@@ -309,7 +277,6 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_inobt_set_root,
- .kill_root = xfs_inobt_kill_root,
.alloc_block = xfs_inobt_alloc_block,
.free_block = xfs_inobt_free_block,
.get_minrecs = xfs_inobt_get_minrecs,
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index b1ecc6f97ad..0cdd26932d8 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -365,8 +365,8 @@ xfs_iget(
xfs_perag_t *pag;
xfs_agino_t agino;
- /* the radix tree exists only in inode capable AGs */
- if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
+ /* reject inode numbers outside existing AGs */
+ if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
return EINVAL;
/* get the perag structure and ensure that it's inode capable */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 34798f391c4..108c7a085f9 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -660,7 +660,8 @@ xfs_dinode_from_disk(
to->di_uid = be32_to_cpu(from->di_uid);
to->di_gid = be32_to_cpu(from->di_gid);
to->di_nlink = be32_to_cpu(from->di_nlink);
- to->di_projid = be16_to_cpu(from->di_projid);
+ to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
+ to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
to->di_flushiter = be16_to_cpu(from->di_flushiter);
to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
@@ -695,7 +696,8 @@ xfs_dinode_to_disk(
to->di_uid = cpu_to_be32(from->di_uid);
to->di_gid = cpu_to_be32(from->di_gid);
to->di_nlink = cpu_to_be32(from->di_nlink);
- to->di_projid = cpu_to_be16(from->di_projid);
+ to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
+ to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
to->di_flushiter = cpu_to_be16(from->di_flushiter);
to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
@@ -874,7 +876,7 @@ xfs_iread(
if (ip->i_d.di_version == 1) {
ip->i_d.di_nlink = ip->i_d.di_onlink;
ip->i_d.di_onlink = 0;
- ip->i_d.di_projid = 0;
+ xfs_set_projid(ip, 0);
}
ip->i_delayed_blks = 0;
@@ -982,8 +984,7 @@ xfs_ialloc(
mode_t mode,
xfs_nlink_t nlink,
xfs_dev_t rdev,
- cred_t *cr,
- xfs_prid_t prid,
+ prid_t prid,
int okalloc,
xfs_buf_t **ialloc_context,
boolean_t *call_again,
@@ -1027,7 +1028,7 @@ xfs_ialloc(
ASSERT(ip->i_d.di_nlink == nlink);
ip->i_d.di_uid = current_fsuid();
ip->i_d.di_gid = current_fsgid();
- ip->i_d.di_projid = prid;
+ xfs_set_projid(ip, prid);
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
/*
@@ -2725,7 +2726,7 @@ cluster_corrupt_out:
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_ERROR(bp,EIO);
- xfs_biodone(bp);
+ xfs_buf_ioend(bp, 0);
} else {
XFS_BUF_STALE(bp);
xfs_buf_relse(bp);
@@ -3008,7 +3009,7 @@ xfs_iflush_int(
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
memset(&(dip->di_pad[0]), 0,
sizeof(dip->di_pad));
- ASSERT(ip->i_d.di_projid == 0);
+ ASSERT(xfs_get_projid(ip) == 0);
}
}
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 0898c5417d1..fac52290de9 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -134,8 +134,9 @@ typedef struct xfs_icdinode {
__uint32_t di_uid; /* owner's user id */
__uint32_t di_gid; /* owner's group id */
__uint32_t di_nlink; /* number of links to file */
- __uint16_t di_projid; /* owner's project id */
- __uint8_t di_pad[8]; /* unused, zeroed space */
+ __uint16_t di_projid_lo; /* lower part of owner's project id */
+ __uint16_t di_projid_hi; /* higher part of owner's project id */
+ __uint8_t di_pad[6]; /* unused, zeroed space */
__uint16_t di_flushiter; /* incremented on flush */
xfs_ictimestamp_t di_atime; /* time last accessed */
xfs_ictimestamp_t di_mtime; /* time last modified */
@@ -212,7 +213,6 @@ typedef struct xfs_icdinode {
#ifdef __KERNEL__
struct bhv_desc;
-struct cred;
struct xfs_buf;
struct xfs_bmap_free;
struct xfs_bmbt_irec;
@@ -335,6 +335,25 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
}
/*
+ * Project quota id helpers (previously projid was 16-bit only,
+ * and using two 16-bit values to hold the new 32-bit projid was chosen
+ * to retain compatibility with "old" filesystems).
+ */
+static inline prid_t
+xfs_get_projid(struct xfs_inode *ip)
+{
+ return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo;
+}
+
+static inline void
+xfs_set_projid(struct xfs_inode *ip,
+ prid_t projid)
+{
+ ip->i_d.di_projid_hi = (__uint16_t) (projid >> 16);
+ ip->i_d.di_projid_lo = (__uint16_t) (projid & 0xffff);
+}
+
+/*
* Manage the i_flush queue embedded in the inode. This completion
* queue synchronizes processes attempting to flush the in-core
* inode back to disk.
@@ -456,8 +475,8 @@ void xfs_inode_free(struct xfs_inode *ip);
* xfs_inode.c prototypes.
*/
int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t,
- xfs_nlink_t, xfs_dev_t, cred_t *, xfs_prid_t,
- int, struct xfs_buf **, boolean_t *, xfs_inode_t **);
+ xfs_nlink_t, xfs_dev_t, prid_t, int,
+ struct xfs_buf **, boolean_t *, xfs_inode_t **);
uint xfs_ip2xflags(struct xfs_inode *);
uint xfs_dic2xflags(struct xfs_dinode *);
@@ -471,7 +490,6 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_iunpin_wait(xfs_inode_t *);
int xfs_iflush(xfs_inode_t *, uint);
-void xfs_ichgtime(xfs_inode_t *, int);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
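
The xfs_get_projid()/xfs_set_projid() helpers added above amount to packing a 32-bit project id into two 16-bit on-disk halves. A quick userspace round-trip check is sketched below; the struct and names are illustrative stand-ins for the kernel types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct icdinode { uint16_t di_projid_lo, di_projid_hi; };

static uint32_t get_projid(const struct icdinode *d)
{
	return (uint32_t)d->di_projid_hi << 16 | d->di_projid_lo;
}

static void set_projid(struct icdinode *d, uint32_t projid)
{
	d->di_projid_hi = (uint16_t)(projid >> 16);
	d->di_projid_lo = (uint16_t)(projid & 0xffff);
}

int main(void)
{
	struct icdinode d;

	set_projid(&d, 0x00123456);
	assert(get_projid(&d) == 0x00123456);	/* round trip is lossless */
	printf("lo=%#x hi=%#x\n", d.di_projid_lo, d.di_projid_hi);
	return 0;
}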
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index fe00777e279..c7ac020705d 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -223,15 +223,6 @@ xfs_inode_item_format(
nvecs = 1;
/*
- * Make sure the linux inode is dirty. We do this before
- * clearing i_update_core as the VFS will call back into
- * XFS here and set i_update_core, so we need to dirty the
- * inode first so that the ordering of i_update_core and
- * unlogged modifications still works as described below.
- */
- xfs_mark_inode_dirty_sync(ip);
-
- /*
* Clear i_update_core if the timestamps (or any other
* non-transactional modification) need flushing/logging
* and we're about to log them with the rest of the core.
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 7e3626e5925..dc1882adaf5 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -92,7 +92,8 @@ xfs_bulkstat_one_int(
* further change.
*/
buf->bs_nlink = dic->di_nlink;
- buf->bs_projid = dic->di_projid;
+ buf->bs_projid_lo = dic->di_projid_lo;
+ buf->bs_projid_hi = dic->di_projid_hi;
buf->bs_ino = ino;
buf->bs_mode = dic->di_mode;
buf->bs_uid = dic->di_uid;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 33f718f92a4..cee4ab9f8a9 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -917,19 +917,6 @@ xlog_iodone(xfs_buf_t *bp)
l = iclog->ic_log;
/*
- * If the _XFS_BARRIER_FAILED flag was set by a lower
- * layer, it means the underlying device no longer supports
- * barrier I/O. Warn loudly and turn off barriers.
- */
- if (bp->b_flags & _XFS_BARRIER_FAILED) {
- bp->b_flags &= ~_XFS_BARRIER_FAILED;
- l->l_mp->m_flags &= ~XFS_MOUNT_BARRIER;
- xfs_fs_cmn_err(CE_WARN, l->l_mp,
- "xlog_iodone: Barriers are no longer supported"
- " by device. Disabling barriers\n");
- }
-
- /*
* Race to shutdown the filesystem if we see an error.
*/
if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp,
@@ -1131,7 +1118,8 @@ xlog_alloc_log(xfs_mount_t *mp,
iclog->ic_prev = prev_iclog;
prev_iclog = iclog;
- bp = xfs_buf_get_noaddr(log->l_iclog_size, mp->m_logdev_targp);
+ bp = xfs_buf_get_uncached(mp->m_logdev_targp,
+ log->l_iclog_size, 0);
if (!bp)
goto out_free_iclog;
if (!XFS_BUF_CPSEMA(bp))
@@ -1309,7 +1297,7 @@ xlog_bdstrat(
if (iclog->ic_state & XLOG_STATE_IOERROR) {
XFS_BUF_ERROR(bp, EIO);
XFS_BUF_STALE(bp);
- xfs_biodone(bp);
+ xfs_buf_ioend(bp, 0);
/*
* It would seem logical to return EIO here, but we rely on
* the log state machine to propagate I/O errors instead of
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 7e206fc1fa3..23d6ceb5e97 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -146,102 +146,6 @@ xlog_cil_init_post_recovery(
}
/*
- * Insert the log item into the CIL and calculate the difference in space
- * consumed by the item. Add the space to the checkpoint ticket and calculate
- * if the change requires additional log metadata. If it does, take that space
- * as well. Remove the amount of space we addded to the checkpoint ticket from
- * the current transaction ticket so that the accounting works out correctly.
- *
- * If this is the first time the item is being placed into the CIL in this
- * context, pin it so it can't be written to disk until the CIL is flushed to
- * the iclog and the iclog written to disk.
- */
-static void
-xlog_cil_insert(
- struct log *log,
- struct xlog_ticket *ticket,
- struct xfs_log_item *item,
- struct xfs_log_vec *lv)
-{
- struct xfs_cil *cil = log->l_cilp;
- struct xfs_log_vec *old = lv->lv_item->li_lv;
- struct xfs_cil_ctx *ctx = cil->xc_ctx;
- int len;
- int diff_iovecs;
- int iclog_space;
-
- if (old) {
- /* existing lv on log item, space used is a delta */
- ASSERT(!list_empty(&item->li_cil));
- ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);
-
- len = lv->lv_buf_len - old->lv_buf_len;
- diff_iovecs = lv->lv_niovecs - old->lv_niovecs;
- kmem_free(old->lv_buf);
- kmem_free(old);
- } else {
- /* new lv, must pin the log item */
- ASSERT(!lv->lv_item->li_lv);
- ASSERT(list_empty(&item->li_cil));
-
- len = lv->lv_buf_len;
- diff_iovecs = lv->lv_niovecs;
- IOP_PIN(lv->lv_item);
-
- }
- len += diff_iovecs * sizeof(xlog_op_header_t);
-
- /* attach new log vector to log item */
- lv->lv_item->li_lv = lv;
-
- spin_lock(&cil->xc_cil_lock);
- list_move_tail(&item->li_cil, &cil->xc_cil);
- ctx->nvecs += diff_iovecs;
-
- /*
- * If this is the first time the item is being committed to the CIL,
- * store the sequence number on the log item so we can tell
- * in future commits whether this is the first checkpoint the item is
- * being committed into.
- */
- if (!item->li_seq)
- item->li_seq = ctx->sequence;
-
- /*
- * Now transfer enough transaction reservation to the context ticket
- * for the checkpoint. The context ticket is special - the unit
- * reservation has to grow as well as the current reservation as we
- * steal from tickets so we can correctly determine the space used
- * during the transaction commit.
- */
- if (ctx->ticket->t_curr_res == 0) {
- /* first commit in checkpoint, steal the header reservation */
- ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
- ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
- ticket->t_curr_res -= ctx->ticket->t_unit_res;
- }
-
- /* do we need space for more log record headers? */
- iclog_space = log->l_iclog_size - log->l_iclog_hsize;
- if (len > 0 && (ctx->space_used / iclog_space !=
- (ctx->space_used + len) / iclog_space)) {
- int hdrs;
-
- hdrs = (len + iclog_space - 1) / iclog_space;
- /* need to take into account split region headers, too */
- hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
- ctx->ticket->t_unit_res += hdrs;
- ctx->ticket->t_curr_res += hdrs;
- ticket->t_curr_res -= hdrs;
- ASSERT(ticket->t_curr_res >= len);
- }
- ticket->t_curr_res -= len;
- ctx->space_used += len;
-
- spin_unlock(&cil->xc_cil_lock);
-}
-
-/*
 * Format log items into flat buffers
*
* For delayed logging, we need to hold a formatted buffer containing all the
@@ -286,7 +190,7 @@ xlog_cil_format_items(
len += lv->lv_iovecp[index].i_len;
lv->lv_buf_len = len;
- lv->lv_buf = kmem_zalloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
+ lv->lv_buf = kmem_alloc(lv->lv_buf_len, KM_SLEEP|KM_NOFS);
ptr = lv->lv_buf;
for (index = 0; index < lv->lv_niovecs; index++) {
@@ -300,21 +204,136 @@ xlog_cil_format_items(
}
}
+/*
+ * Prepare the log item for insertion into the CIL. Calculate the difference in
+ * log space and vectors it will consume, and if it is a new item pin it as
+ * well.
+ */
+STATIC void
+xfs_cil_prepare_item(
+ struct log *log,
+ struct xfs_log_vec *lv,
+ int *len,
+ int *diff_iovecs)
+{
+ struct xfs_log_vec *old = lv->lv_item->li_lv;
+
+ if (old) {
+ /* existing lv on log item, space used is a delta */
+ ASSERT(!list_empty(&lv->lv_item->li_cil));
+ ASSERT(old->lv_buf && old->lv_buf_len && old->lv_niovecs);
+
+ *len += lv->lv_buf_len - old->lv_buf_len;
+ *diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
+ kmem_free(old->lv_buf);
+ kmem_free(old);
+ } else {
+ /* new lv, must pin the log item */
+ ASSERT(!lv->lv_item->li_lv);
+ ASSERT(list_empty(&lv->lv_item->li_cil));
+
+ *len += lv->lv_buf_len;
+ *diff_iovecs += lv->lv_niovecs;
+ IOP_PIN(lv->lv_item);
+
+ }
+
+ /* attach new log vector to log item */
+ lv->lv_item->li_lv = lv;
+
+ /*
+ * If this is the first time the item is being committed to the
+ * CIL, store the sequence number on the log item so we can
+ * tell in future commits whether this is the first checkpoint
+ * the item is being committed into.
+ */
+ if (!lv->lv_item->li_seq)
+ lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
+}
+
+/*
+ * Insert the log items into the CIL and calculate the difference in space
+ * consumed by the item. Add the space to the checkpoint ticket and calculate
+ * if the change requires additional log metadata. If it does, take that space
+ * as well. Remove the amount of space we added to the checkpoint ticket from
+ * the current transaction ticket so that the accounting works out correctly.
+ */
static void
xlog_cil_insert_items(
struct log *log,
struct xfs_log_vec *log_vector,
- struct xlog_ticket *ticket,
- xfs_lsn_t *start_lsn)
+ struct xlog_ticket *ticket)
{
- struct xfs_log_vec *lv;
-
- if (start_lsn)
- *start_lsn = log->l_cilp->xc_ctx->sequence;
+ struct xfs_cil *cil = log->l_cilp;
+ struct xfs_cil_ctx *ctx = cil->xc_ctx;
+ struct xfs_log_vec *lv;
+ int len = 0;
+ int diff_iovecs = 0;
+ int iclog_space;
ASSERT(log_vector);
+
+ /*
+ * Do all the accounting aggregation and switching of log vectors
+ * around in a loop separate from the insertion of items into the CIL.
+ * Then we can do a second loop to update the CIL within a single
+ * lock/unlock pair. This reduces the number of round trips on the CIL
+ * lock from O(nr_logvectors) to O(1) and greatly reduces the overall
+ * hold time for the transaction commit.
+ *
+ * If this is the first time the item is being placed into the CIL in
+ * this context, pin it so it can't be written to disk until the CIL is
+ * flushed to the iclog and the iclog written to disk.
+ *
+ * We can do this safely because the context can't checkpoint until we
+ * are done so it doesn't matter exactly how we update the CIL.
+ */
+ for (lv = log_vector; lv; lv = lv->lv_next)
+ xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);
+
+ /* account for space used by new iovec headers */
+ len += diff_iovecs * sizeof(xlog_op_header_t);
+
+ spin_lock(&cil->xc_cil_lock);
+
+ /* move the items to the tail of the CIL */
for (lv = log_vector; lv; lv = lv->lv_next)
- xlog_cil_insert(log, ticket, lv->lv_item, lv);
+ list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);
+
+ ctx->nvecs += diff_iovecs;
+
+ /*
+ * Now transfer enough transaction reservation to the context ticket
+ * for the checkpoint. The context ticket is special - the unit
+ * reservation has to grow as well as the current reservation as we
+ * steal from tickets so we can correctly determine the space used
+ * during the transaction commit.
+ */
+ if (ctx->ticket->t_curr_res == 0) {
+ /* first commit in checkpoint, steal the header reservation */
+ ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
+ ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
+ ticket->t_curr_res -= ctx->ticket->t_unit_res;
+ }
+
+ /* do we need space for more log record headers? */
+ iclog_space = log->l_iclog_size - log->l_iclog_hsize;
+ if (len > 0 && (ctx->space_used / iclog_space !=
+ (ctx->space_used + len) / iclog_space)) {
+ int hdrs;
+
+ hdrs = (len + iclog_space - 1) / iclog_space;
+ /* need to take into account split region headers, too */
+ hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
+ ctx->ticket->t_unit_res += hdrs;
+ ctx->ticket->t_curr_res += hdrs;
+ ticket->t_curr_res -= hdrs;
+ ASSERT(ticket->t_curr_res >= len);
+ }
+ ticket->t_curr_res -= len;
+ ctx->space_used += len;
+
+ spin_unlock(&cil->xc_cil_lock);
}
static void
@@ -638,7 +657,10 @@ xfs_log_commit_cil(
/* lock out background commit */
down_read(&log->l_cilp->xc_ctx_lock);
- xlog_cil_insert_items(log, log_vector, tp->t_ticket, commit_lsn);
+ if (commit_lsn)
+ *commit_lsn = log->l_cilp->xc_ctx->sequence;
+
+ xlog_cil_insert_items(log, log_vector, tp->t_ticket);
/* check we didn't blow the reservation */
if (tp->t_ticket->t_curr_res < 0)
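
The CIL rework above splits the work into an unlocked per-vector accounting pass followed by a single locked update of the shared context, so the CIL lock is taken once per commit rather than once per log vector. The pthread sketch below models only that two-phase locking shape; it is not the kernel code and the structures are invented.

#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	long space_used;
	long nvecs;
};

struct logvec { long buf_len; long niovecs; };

static void insert_items(struct ctx *ctx, const struct logvec *lv, int nr)
{
	long len = 0, diff_iovecs = 0;

	/* phase 1: aggregate per-vector deltas with no lock held */
	for (int i = 0; i < nr; i++) {
		len += lv[i].buf_len;
		diff_iovecs += lv[i].niovecs;
	}

	/* phase 2: one short critical section updates the shared state */
	pthread_mutex_lock(&ctx->lock);
	ctx->space_used += len;
	ctx->nvecs += diff_iovecs;
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
	struct logvec lv[3] = { {128, 2}, {64, 1}, {256, 4} };

	insert_items(&ctx, lv, 3);
	printf("space=%ld nvecs=%ld\n", ctx.space_used, ctx.nvecs);
	return 0;
}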
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 6f3f5fa37ac..966d3f97458 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -107,7 +107,8 @@ xlog_get_bp(
nbblks += log->l_sectBBsize;
nbblks = round_up(nbblks, log->l_sectBBsize);
- return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
+ return xfs_buf_get_uncached(log->l_mp->m_logdev_targp,
+ BBTOB(nbblks), 0);
}
STATIC void
@@ -167,7 +168,7 @@ xlog_bread_noalign(
XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
xfsbdstrat(log->l_mp, bp);
- error = xfs_iowait(bp);
+ error = xfs_buf_iowait(bp);
if (error)
xfs_ioerror_alert("xlog_bread", log->l_mp,
bp, XFS_BUF_ADDR(bp));
@@ -321,12 +322,13 @@ xlog_recover_iodone(
* this during recovery. One strike!
*/
xfs_ioerror_alert("xlog_recover_iodone",
- bp->b_mount, bp, XFS_BUF_ADDR(bp));
- xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
+ bp->b_target->bt_mount, bp,
+ XFS_BUF_ADDR(bp));
+ xfs_force_shutdown(bp->b_target->bt_mount,
+ SHUTDOWN_META_IO_ERROR);
}
- bp->b_mount = NULL;
XFS_BUF_CLR_IODONE_FUNC(bp);
- xfs_biodone(bp);
+ xfs_buf_ioend(bp, 0);
}
/*
@@ -2275,8 +2277,7 @@ xlog_recover_do_buffer_trans(
XFS_BUF_STALE(bp);
error = xfs_bwrite(mp, bp);
} else {
- ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
- bp->b_mount = mp;
+ ASSERT(bp->b_target->bt_mount == mp);
XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
xfs_bdwrite(mp, bp);
}
@@ -2540,8 +2541,7 @@ xlog_recover_do_inode_trans(
}
write_inode_buffer:
- ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
- bp->b_mount = mp;
+ ASSERT(bp->b_target->bt_mount == mp);
XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
xfs_bdwrite(mp, bp);
error:
@@ -2678,8 +2678,7 @@ xlog_recover_do_dquot_trans(
memcpy(ddq, recddq, item->ri_buf[1].i_len);
ASSERT(dq_f->qlf_size == 2);
- ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
- bp->b_mount = mp;
+ ASSERT(bp->b_target->bt_mount == mp);
XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
xfs_bdwrite(mp, bp);
@@ -3817,7 +3816,7 @@ xlog_do_recover(
XFS_BUF_READ(bp);
XFS_BUF_UNASYNC(bp);
xfsbdstrat(log->l_mp, bp);
- error = xfs_iowait(bp);
+ error = xfs_buf_iowait(bp);
if (error) {
xfs_ioerror_alert("xlog_do_recover",
log->l_mp, bp, XFS_BUF_ADDR(bp));
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index aeb9d72ebf6..b1498ab5a39 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -52,16 +52,11 @@ STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
int);
STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
int);
-STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
- int64_t, int);
STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
-
#else
#define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
-#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
-
#endif
static const struct {
@@ -199,6 +194,8 @@ xfs_uuid_unmount(
/*
* Reference counting access wrappers to the perag structures.
+ * Because we never free per-ag structures, the only thing we
+ * have to protect against is changes to the tree structure itself.
*/
struct xfs_perag *
xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
@@ -206,19 +203,43 @@ xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
struct xfs_perag *pag;
int ref = 0;
- spin_lock(&mp->m_perag_lock);
+ rcu_read_lock();
pag = radix_tree_lookup(&mp->m_perag_tree, agno);
if (pag) {
ASSERT(atomic_read(&pag->pag_ref) >= 0);
- /* catch leaks in the positive direction during testing */
- ASSERT(atomic_read(&pag->pag_ref) < 1000);
ref = atomic_inc_return(&pag->pag_ref);
}
- spin_unlock(&mp->m_perag_lock);
+ rcu_read_unlock();
trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
return pag;
}
+/*
+ * Search from @first to find the next perag with the given tag set.
+ */
+struct xfs_perag *
+xfs_perag_get_tag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t first,
+ int tag)
+{
+ struct xfs_perag *pag;
+ int found;
+ int ref;
+
+ rcu_read_lock();
+ found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+ (void **)&pag, first, 1, tag);
+ if (found <= 0) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ ref = atomic_inc_return(&pag->pag_ref);
+ rcu_read_unlock();
+ trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
+ return pag;
+}
+
void
xfs_perag_put(struct xfs_perag *pag)
{
@@ -229,10 +250,18 @@ xfs_perag_put(struct xfs_perag *pag)
trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
+STATIC void
+__xfs_free_perag(
+ struct rcu_head *head)
+{
+ struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
+
+ ASSERT(atomic_read(&pag->pag_ref) == 0);
+ kmem_free(pag);
+}
+
/*
- * Free up the resources associated with a mount structure. Assume that
- * the structure was initially zeroed, so we can tell which fields got
- * initialized.
+ * Free up the per-ag resources associated with the mount structure.
*/
STATIC void
xfs_free_perag(
@@ -244,10 +273,9 @@ xfs_free_perag(
for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
spin_lock(&mp->m_perag_lock);
pag = radix_tree_delete(&mp->m_perag_tree, agno);
- ASSERT(pag);
- ASSERT(atomic_read(&pag->pag_ref) == 0);
spin_unlock(&mp->m_perag_lock);
- kmem_free(pag);
+ ASSERT(pag);
+ call_rcu(&pag->rcu_head, __xfs_free_perag);
}
}
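
The lookup/free hunks above rely on the RCU pattern: readers find the perag under rcu_read_lock(), pin it with a reference before leaving the read-side section, and freeing is deferred via call_rcu() so no reader can still see the structure. The sketch below models only that shape in userspace; real RCU is replaced by a plain mutex purely to keep it self-contained, and all names are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct perag {
	int agno;
	atomic_int ref;
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;	/* stands in for rcu_read_lock() */
static struct perag *tree[4];					/* stands in for the radix tree */

static struct perag *perag_get(int agno)
{
	struct perag *pag;

	pthread_mutex_lock(&tree_lock);		/* rcu_read_lock() in the kernel */
	pag = tree[agno];
	if (pag)
		atomic_fetch_add(&pag->ref, 1);	/* pin before leaving the section */
	pthread_mutex_unlock(&tree_lock);	/* rcu_read_unlock() */
	return pag;
}

static void perag_put(struct perag *pag)
{
	atomic_fetch_sub(&pag->ref, 1);
	/* the kernel defers the actual free to call_rcu() at unmount time */
}

int main(void)
{
	tree[0] = calloc(1, sizeof(*tree[0]));
	tree[0]->agno = 0;

	struct perag *pag = perag_get(0);
	printf("got ag %d, ref now %d\n", pag->agno, atomic_load(&pag->ref));
	perag_put(pag);

	free(tree[0]);		/* "unmount": safe once ref has dropped */
	return 0;
}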
@@ -444,7 +472,10 @@ xfs_initialize_perag(
pag->pag_agno = index;
pag->pag_mount = mp;
rwlock_init(&pag->pag_ici_lock);
+ mutex_init(&pag->pag_ici_reclaim_lock);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
+ spin_lock_init(&pag->pag_buf_lock);
+ pag->pag_buf_tree = RB_ROOT;
if (radix_tree_preload(GFP_NOFS))
goto out_unwind;
@@ -639,7 +670,6 @@ int
xfs_readsb(xfs_mount_t *mp, int flags)
{
unsigned int sector_size;
- unsigned int extra_flags;
xfs_buf_t *bp;
int error;
@@ -652,28 +682,24 @@ xfs_readsb(xfs_mount_t *mp, int flags)
* access to the superblock.
*/
sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
- extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;
- bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
- extra_flags);
- if (!bp || XFS_BUF_ISERROR(bp)) {
- xfs_fs_mount_cmn_err(flags, "SB read failed");
- error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
- goto fail;
+reread:
+ bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
+ XFS_SB_DADDR, sector_size, 0);
+ if (!bp) {
+ xfs_fs_mount_cmn_err(flags, "SB buffer read failed");
+ return EIO;
}
- ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
/*
* Initialize the mount structure from the superblock.
* But first do some basic consistency checking.
*/
xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
-
error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
if (error) {
xfs_fs_mount_cmn_err(flags, "SB validate failed");
- goto fail;
+ goto release_buf;
}
/*
@@ -684,7 +710,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
"device supports only %u byte sectors (not %u)",
sector_size, mp->m_sb.sb_sectsize);
error = ENOSYS;
- goto fail;
+ goto release_buf;
}
/*
@@ -692,33 +718,20 @@ xfs_readsb(xfs_mount_t *mp, int flags)
* re-read the superblock so the buffer is correctly sized.
*/
if (sector_size < mp->m_sb.sb_sectsize) {
- XFS_BUF_UNMANAGE(bp);
xfs_buf_relse(bp);
sector_size = mp->m_sb.sb_sectsize;
- bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR,
- BTOBB(sector_size), extra_flags);
- if (!bp || XFS_BUF_ISERROR(bp)) {
- xfs_fs_mount_cmn_err(flags, "SB re-read failed");
- error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
- goto fail;
- }
- ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ goto reread;
}
/* Initialize per-cpu counters */
xfs_icsb_reinit_counters(mp);
mp->m_sb_bp = bp;
- xfs_buf_relse(bp);
- ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
+ xfs_buf_unlock(bp);
return 0;
- fail:
- if (bp) {
- XFS_BUF_UNMANAGE(bp);
- xfs_buf_relse(bp);
- }
+release_buf:
+ xfs_buf_relse(bp);
return error;
}
@@ -991,42 +1004,35 @@ xfs_check_sizes(xfs_mount_t *mp)
{
xfs_buf_t *bp;
xfs_daddr_t d;
- int error;
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
- cmn_err(CE_WARN, "XFS: size check 1 failed");
+ cmn_err(CE_WARN, "XFS: filesystem size mismatch detected");
return XFS_ERROR(EFBIG);
}
- error = xfs_read_buf(mp, mp->m_ddev_targp,
- d - XFS_FSS_TO_BB(mp, 1),
- XFS_FSS_TO_BB(mp, 1), 0, &bp);
- if (!error) {
- xfs_buf_relse(bp);
- } else {
- cmn_err(CE_WARN, "XFS: size check 2 failed");
- if (error == ENOSPC)
- error = XFS_ERROR(EFBIG);
- return error;
+ bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
+ d - XFS_FSS_TO_BB(mp, 1),
+ BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
+ if (!bp) {
+ cmn_err(CE_WARN, "XFS: last sector read failed");
+ return EIO;
}
+ xfs_buf_relse(bp);
if (mp->m_logdev_targp != mp->m_ddev_targp) {
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
- cmn_err(CE_WARN, "XFS: size check 3 failed");
+ cmn_err(CE_WARN, "XFS: log size mismatch detected");
return XFS_ERROR(EFBIG);
}
- error = xfs_read_buf(mp, mp->m_logdev_targp,
- d - XFS_FSB_TO_BB(mp, 1),
- XFS_FSB_TO_BB(mp, 1), 0, &bp);
- if (!error) {
- xfs_buf_relse(bp);
- } else {
- cmn_err(CE_WARN, "XFS: size check 3 failed");
- if (error == ENOSPC)
- error = XFS_ERROR(EFBIG);
- return error;
+ bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp,
+ d - XFS_FSB_TO_BB(mp, 1),
+ XFS_FSB_TO_B(mp, 1), 0);
+ if (!bp) {
+ cmn_err(CE_WARN, "XFS: log device read failed");
+ return EIO;
}
+ xfs_buf_relse(bp);
}
return 0;
}
@@ -1601,7 +1607,7 @@ xfs_unmountfs_writesb(xfs_mount_t *mp)
XFS_BUF_UNASYNC(sbp);
ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
xfsbdstrat(mp, sbp);
- error = xfs_iowait(sbp);
+ error = xfs_buf_iowait(sbp);
if (error)
xfs_ioerror_alert("xfs_unmountfs_writesb",
mp, sbp, XFS_BUF_ADDR(sbp));
@@ -1832,135 +1838,72 @@ xfs_mod_incore_sb_unlocked(
*/
int
xfs_mod_incore_sb(
- xfs_mount_t *mp,
- xfs_sb_field_t field,
- int64_t delta,
- int rsvd)
+ struct xfs_mount *mp,
+ xfs_sb_field_t field,
+ int64_t delta,
+ int rsvd)
{
- int status;
+ int status;
- /* check for per-cpu counters */
- switch (field) {
#ifdef HAVE_PERCPU_SB
- case XFS_SBS_ICOUNT:
- case XFS_SBS_IFREE:
- case XFS_SBS_FDBLOCKS:
- if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
- status = xfs_icsb_modify_counters(mp, field,
- delta, rsvd);
- break;
- }
- /* FALLTHROUGH */
+ ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
#endif
- default:
- spin_lock(&mp->m_sb_lock);
- status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
- spin_unlock(&mp->m_sb_lock);
- break;
- }
+ spin_lock(&mp->m_sb_lock);
+ status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
+ spin_unlock(&mp->m_sb_lock);
return status;
}
/*
- * xfs_mod_incore_sb_batch() is used to change more than one field
- * in the in-core superblock structure at a time. This modification
- * is protected by a lock internal to this module. The fields and
- * changes to those fields are specified in the array of xfs_mod_sb
- * structures passed in.
+ * Change more than one field in the in-core superblock structure at a time.
*
- * Either all of the specified deltas will be applied or none of
- * them will. If any modified field dips below 0, then all modifications
- * will be backed out and EINVAL will be returned.
+ * The fields and changes to those fields are specified in the array of
+ * xfs_mod_sb structures passed in. Either all of the specified deltas
+ * will be applied or none of them will. If any modified field dips below 0,
+ * then all modifications will be backed out and EINVAL will be returned.
+ *
+ * Note that this function may not be used for the superblock values that
+ * are tracked with the in-memory per-cpu counters - a direct call to
+ * xfs_icsb_modify_counters is required for these.
*/
int
-xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
+xfs_mod_incore_sb_batch(
+ struct xfs_mount *mp,
+ xfs_mod_sb_t *msb,
+ uint nmsb,
+ int rsvd)
{
- int status=0;
- xfs_mod_sb_t *msbp;
+ xfs_mod_sb_t *msbp = &msb[0];
+ int error = 0;
/*
- * Loop through the array of mod structures and apply each
- * individually. If any fail, then back out all those
- * which have already been applied. Do all of this within
- * the scope of the m_sb_lock so that all of the changes will
- * be atomic.
+ * Loop through the array of mod structures and apply each individually.
+ * If any fail, then back out all those which have already been applied.
+ * Do all of this within the scope of the m_sb_lock so that all of the
+ * changes will be atomic.
*/
spin_lock(&mp->m_sb_lock);
- msbp = &msb[0];
for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
- /*
- * Apply the delta at index n. If it fails, break
- * from the loop so we'll fall into the undo loop
- * below.
- */
- switch (msbp->msb_field) {
-#ifdef HAVE_PERCPU_SB
- case XFS_SBS_ICOUNT:
- case XFS_SBS_IFREE:
- case XFS_SBS_FDBLOCKS:
- if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
- spin_unlock(&mp->m_sb_lock);
- status = xfs_icsb_modify_counters(mp,
- msbp->msb_field,
- msbp->msb_delta, rsvd);
- spin_lock(&mp->m_sb_lock);
- break;
- }
- /* FALLTHROUGH */
-#endif
- default:
- status = xfs_mod_incore_sb_unlocked(mp,
- msbp->msb_field,
- msbp->msb_delta, rsvd);
- break;
- }
+ ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
+ msbp->msb_field > XFS_SBS_FDBLOCKS);
- if (status != 0) {
- break;
- }
+ error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
+ msbp->msb_delta, rsvd);
+ if (error)
+ goto unwind;
}
+ spin_unlock(&mp->m_sb_lock);
+ return 0;
- /*
- * If we didn't complete the loop above, then back out
- * any changes made to the superblock. If you add code
- * between the loop above and here, make sure that you
- * preserve the value of status. Loop back until
- * we step below the beginning of the array. Make sure
- * we don't touch anything back there.
- */
- if (status != 0) {
- msbp--;
- while (msbp >= msb) {
- switch (msbp->msb_field) {
-#ifdef HAVE_PERCPU_SB
- case XFS_SBS_ICOUNT:
- case XFS_SBS_IFREE:
- case XFS_SBS_FDBLOCKS:
- if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
- spin_unlock(&mp->m_sb_lock);
- status = xfs_icsb_modify_counters(mp,
- msbp->msb_field,
- -(msbp->msb_delta),
- rsvd);
- spin_lock(&mp->m_sb_lock);
- break;
- }
- /* FALLTHROUGH */
-#endif
- default:
- status = xfs_mod_incore_sb_unlocked(mp,
- msbp->msb_field,
- -(msbp->msb_delta),
- rsvd);
- break;
- }
- ASSERT(status == 0);
- msbp--;
- }
+unwind:
+ while (--msbp >= msb) {
+ error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
+ -msbp->msb_delta, rsvd);
+ ASSERT(error == 0);
}
spin_unlock(&mp->m_sb_lock);
- return status;
+ return error;
}
/*
@@ -1998,18 +1941,13 @@ xfs_getsb(
*/
void
xfs_freesb(
- xfs_mount_t *mp)
+ struct xfs_mount *mp)
{
- xfs_buf_t *bp;
+ struct xfs_buf *bp = mp->m_sb_bp;
- /*
- * Use xfs_getsb() so that the buffer will be locked
- * when we call xfs_buf_relse().
- */
- bp = xfs_getsb(mp, 0);
- XFS_BUF_UNMANAGE(bp);
- xfs_buf_relse(bp);
+ xfs_buf_lock(bp);
mp->m_sb_bp = NULL;
+ xfs_buf_relse(bp);
}
/*
@@ -2496,7 +2434,7 @@ xfs_icsb_balance_counter(
spin_unlock(&mp->m_sb_lock);
}
-STATIC int
+int
xfs_icsb_modify_counters(
xfs_mount_t *mp,
xfs_sb_field_t field,
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 622da2179a5..5861b498074 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -53,7 +53,6 @@ typedef struct xfs_trans_reservations {
#include "xfs_sync.h"
-struct cred;
struct log;
struct xfs_mount_args;
struct xfs_inode;
@@ -91,6 +90,8 @@ extern void xfs_icsb_reinit_counters(struct xfs_mount *);
extern void xfs_icsb_destroy_counters(struct xfs_mount *);
extern void xfs_icsb_sync_counters(struct xfs_mount *, int);
extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
+extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t,
+ int64_t, int);
#else
#define xfs_icsb_init_counters(mp) (0)
@@ -98,6 +99,8 @@ extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
#define xfs_icsb_reinit_counters(mp) do { } while (0)
#define xfs_icsb_sync_counters(mp, flags) do { } while (0)
#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
+#define xfs_icsb_modify_counters(mp, field, delta, rsvd) \
+ xfs_mod_incore_sb(mp, field, delta, rsvd)
#endif
typedef struct xfs_mount {
@@ -232,8 +235,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_DIRSYNC (1ULL << 21) /* synchronous directory ops */
#define XFS_MOUNT_COMPAT_IOSIZE (1ULL << 22) /* don't report large preferred
* I/O size in stat() */
-#define XFS_MOUNT_NO_PERCPU_SB (1ULL << 23) /* don't use per-cpu superblock
- counters */
#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams
allocator */
#define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */
@@ -327,6 +328,8 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
* perag get/put wrappers for ref counting
*/
struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
+struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
+ int tag);
void xfs_perag_put(struct xfs_perag *pag);
/*
diff --git a/fs/xfs/xfs_refcache.h b/fs/xfs/xfs_refcache.h
deleted file mode 100644
index 2dec79edb51..00000000000
--- a/fs/xfs/xfs_refcache.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_REFCACHE_H__
-#define __XFS_REFCACHE_H__
-
-#ifdef HAVE_REFCACHE
-/*
- * Maximum size (in inodes) for the NFS reference cache
- */
-#define XFS_REFCACHE_SIZE_MAX 512
-
-struct xfs_inode;
-struct xfs_mount;
-
-extern void xfs_refcache_insert(struct xfs_inode *);
-extern void xfs_refcache_purge_ip(struct xfs_inode *);
-extern void xfs_refcache_purge_mp(struct xfs_mount *);
-extern void xfs_refcache_purge_some(struct xfs_mount *);
-extern void xfs_refcache_resize(int);
-extern void xfs_refcache_destroy(void);
-
-extern void xfs_refcache_iunlock(struct xfs_inode *, uint);
-
-#else
-
-#define xfs_refcache_insert(ip) do { } while (0)
-#define xfs_refcache_purge_ip(ip) do { } while (0)
-#define xfs_refcache_purge_mp(mp) do { } while (0)
-#define xfs_refcache_purge_some(mp) do { } while (0)
-#define xfs_refcache_resize(size) do { } while (0)
-#define xfs_refcache_destroy() do { } while (0)
-
-#define xfs_refcache_iunlock(ip, flags) xfs_iunlock(ip, flags)
-
-#endif
-
-#endif /* __XFS_REFCACHE_H__ */
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 8fca957200d..d2af0a8381a 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -183,7 +183,7 @@ xfs_rename(
* tree quota mechanism would be circumvented.
*/
if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
- (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) {
+ (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
error = XFS_ERROR(EXDEV);
goto error_return;
}
@@ -211,7 +211,9 @@ xfs_rename(
goto error_return;
if (error)
goto abort_return;
- xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+ xfs_trans_ichgtime(tp, target_dp,
+ XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
if (new_parent && src_is_directory) {
error = xfs_bumplink(tp, target_dp);
@@ -249,7 +251,9 @@ xfs_rename(
&first_block, &free_list, spaceres);
if (error)
goto abort_return;
- xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+ xfs_trans_ichgtime(tp, target_dp,
+ XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
/*
* Decrement the link count on the target since the target
@@ -292,7 +296,7 @@ xfs_rename(
* inode isn't really being changed, but old unix file systems did
* it and some incremental backup programs won't work without it.
*/
- xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
/*
* Adjust the link count on src_dp. This is necessary when
@@ -315,7 +319,7 @@ xfs_rename(
if (error)
goto abort_return;
- xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
if (new_parent)
xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 891260fea11..12a19138531 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -39,6 +39,7 @@
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_trace.h"
+#include "xfs_buf.h"
/*
@@ -1883,13 +1884,13 @@ xfs_growfs_rt(
/*
* Read in the last block of the device, make sure it exists.
*/
- error = xfs_read_buf(mp, mp->m_rtdev_targp,
- XFS_FSB_TO_BB(mp, nrblocks - 1),
- XFS_FSB_TO_BB(mp, 1), 0, &bp);
- if (error)
- return error;
- ASSERT(bp);
+ bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp,
+ XFS_FSB_TO_BB(mp, nrblocks - 1),
+ XFS_FSB_TO_B(mp, 1), 0);
+ if (!bp)
+ return EIO;
xfs_buf_relse(bp);
+
/*
* Calculate new parameters. These are the final values to be reached.
*/
@@ -2215,7 +2216,6 @@ xfs_rtmount_init(
{
xfs_buf_t *bp; /* buffer for last block of subvolume */
xfs_daddr_t d; /* address of last block of subvolume */
- int error; /* error return value */
xfs_sb_t *sbp; /* filesystem superblock copy in mount */
sbp = &mp->m_sb;
@@ -2242,15 +2242,12 @@ xfs_rtmount_init(
(unsigned long long) mp->m_sb.sb_rblocks);
return XFS_ERROR(EFBIG);
}
- error = xfs_read_buf(mp, mp->m_rtdev_targp,
- d - XFS_FSB_TO_BB(mp, 1),
- XFS_FSB_TO_BB(mp, 1), 0, &bp);
- if (error) {
- cmn_err(CE_WARN,
- "XFS: realtime mount -- xfs_read_buf failed, returned %d", error);
- if (error == ENOSPC)
- return XFS_ERROR(EFBIG);
- return error;
+ bp = xfs_buf_read_uncached(mp, mp->m_rtdev_targp,
+ d - XFS_FSB_TO_BB(mp, 1),
+ XFS_FSB_TO_B(mp, 1), 0);
+ if (!bp) {
+ cmn_err(CE_WARN, "XFS: realtime device size check failed");
+ return EIO;
}
xfs_buf_relse(bp);
return 0;
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index 1b017c65749..1eb2ba58681 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -80,10 +80,12 @@ struct xfs_mount;
#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004
#define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */
#define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */
+#define XFS_SB_VERSION2_PROJID32BIT 0x00000080 /* 32 bit project id */
#define XFS_SB_VERSION2_OKREALFBITS \
(XFS_SB_VERSION2_LAZYSBCOUNTBIT | \
- XFS_SB_VERSION2_ATTR2BIT)
+ XFS_SB_VERSION2_ATTR2BIT | \
+ XFS_SB_VERSION2_PROJID32BIT)
#define XFS_SB_VERSION2_OKSASHFBITS \
(0)
#define XFS_SB_VERSION2_OKREALBITS \
@@ -495,6 +497,12 @@ static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp)
sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
}
+static inline int xfs_sb_version_hasprojid32bit(xfs_sb_t *sbp)
+{
+ return xfs_sb_version_hasmorebits(sbp) &&
+ (sbp->sb_features2 & XFS_SB_VERSION2_PROJID32BIT);
+}
+
/*
* end of superblock version macros
*/
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 1c47edaea0d..f6d956b7711 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -696,7 +696,7 @@ xfs_trans_reserve(
* fail if the count would go below zero.
*/
if (blocks > 0) {
- error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
+ error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
-((int64_t)blocks), rsvd);
if (error != 0) {
current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
@@ -767,7 +767,7 @@ undo_log:
undo_blocks:
if (blocks > 0) {
- (void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
+ xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
(int64_t)blocks, rsvd);
tp->t_blk_res = 0;
}
@@ -1009,7 +1009,7 @@ void
xfs_trans_unreserve_and_mod_sb(
xfs_trans_t *tp)
{
- xfs_mod_sb_t msb[14]; /* If you add cases, add entries */
+ xfs_mod_sb_t msb[9]; /* If you add cases, add entries */
xfs_mod_sb_t *msbp;
xfs_mount_t *mp = tp->t_mountp;
/* REFERENCED */
@@ -1017,55 +1017,61 @@ xfs_trans_unreserve_and_mod_sb(
int rsvd;
int64_t blkdelta = 0;
int64_t rtxdelta = 0;
+ int64_t idelta = 0;
+ int64_t ifreedelta = 0;
msbp = msb;
rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
- /* calculate free blocks delta */
+ /* calculate deltas */
if (tp->t_blk_res > 0)
blkdelta = tp->t_blk_res;
-
if ((tp->t_fdblocks_delta != 0) &&
(xfs_sb_version_haslazysbcount(&mp->m_sb) ||
(tp->t_flags & XFS_TRANS_SB_DIRTY)))
blkdelta += tp->t_fdblocks_delta;
- if (blkdelta != 0) {
- msbp->msb_field = XFS_SBS_FDBLOCKS;
- msbp->msb_delta = blkdelta;
- msbp++;
- }
-
- /* calculate free realtime extents delta */
if (tp->t_rtx_res > 0)
rtxdelta = tp->t_rtx_res;
-
if ((tp->t_frextents_delta != 0) &&
(tp->t_flags & XFS_TRANS_SB_DIRTY))
rtxdelta += tp->t_frextents_delta;
+ if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
+ (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
+ idelta = tp->t_icount_delta;
+ ifreedelta = tp->t_ifree_delta;
+ }
+
+ /* apply the per-cpu counters */
+ if (blkdelta) {
+ error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
+ blkdelta, rsvd);
+ if (error)
+ goto out;
+ }
+
+ if (idelta) {
+ error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT,
+ idelta, rsvd);
+ if (error)
+ goto out_undo_fdblocks;
+ }
+
+ if (ifreedelta) {
+ error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE,
+ ifreedelta, rsvd);
+ if (error)
+ goto out_undo_icount;
+ }
+
+ /* apply remaining deltas */
if (rtxdelta != 0) {
msbp->msb_field = XFS_SBS_FREXTENTS;
msbp->msb_delta = rtxdelta;
msbp++;
}
- /* apply remaining deltas */
-
- if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
- (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
- if (tp->t_icount_delta != 0) {
- msbp->msb_field = XFS_SBS_ICOUNT;
- msbp->msb_delta = tp->t_icount_delta;
- msbp++;
- }
- if (tp->t_ifree_delta != 0) {
- msbp->msb_field = XFS_SBS_IFREE;
- msbp->msb_delta = tp->t_ifree_delta;
- msbp++;
- }
- }
-
if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
if (tp->t_dblocks_delta != 0) {
msbp->msb_field = XFS_SBS_DBLOCKS;
@@ -1115,8 +1121,24 @@ xfs_trans_unreserve_and_mod_sb(
if (msbp > msb) {
error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
(uint)(msbp - msb), rsvd);
- ASSERT(error == 0);
+ if (error)
+ goto out_undo_ifreecount;
}
+
+ return;
+
+out_undo_ifreecount:
+ if (ifreedelta)
+ xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd);
+out_undo_icount:
+ if (idelta)
+ xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd);
+out_undo_fdblocks:
+ if (blkdelta)
+ xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
+out:
+ ASSERT(error == 0);
+ return;
}
/*
@@ -1389,15 +1411,12 @@ xfs_trans_item_committed(
*/
STATIC void
xfs_trans_committed(
- struct xfs_trans *tp,
+ void *arg,
int abortflag)
{
+ struct xfs_trans *tp = arg;
struct xfs_log_item_desc *lidp, *next;
- /* Call the transaction's completion callback if there is one. */
- if (tp->t_callback != NULL)
- tp->t_callback(tp, tp->t_callarg);
-
list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
xfs_trans_free_item_desc(lidp);
@@ -1525,7 +1544,7 @@ xfs_trans_commit_iclog(
* running in simulation mode (the log is explicitly turned
* off).
*/
- tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed;
+ tp->t_logcb.cb_func = xfs_trans_committed;
tp->t_logcb.cb_arg = tp;
/*
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index c13c0f97b49..246286b77a8 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -399,8 +399,6 @@ typedef struct xfs_trans {
* transaction. */
struct xfs_mount *t_mountp; /* ptr to fs mount struct */
struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */
- xfs_trans_callback_t t_callback; /* transaction callback */
- void *t_callarg; /* callback arg */
unsigned int t_flags; /* misc flags */
int64_t t_icount_delta; /* superblock icount change */
int64_t t_ifree_delta; /* superblock ifree change */
@@ -473,6 +471,7 @@ void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
xfs_ino_t , uint, uint, struct xfs_inode **);
+void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 90af025e683..c47918c302a 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -336,7 +336,7 @@ xfs_trans_read_buf(
ASSERT(!XFS_BUF_ISASYNC(bp));
XFS_BUF_READ(bp);
xfsbdstrat(tp->t_mountp, bp);
- error = xfs_iowait(bp);
+ error = xfs_buf_iowait(bp);
if (error) {
xfs_ioerror_alert("xfs_trans_read_buf", mp,
bp, blkno);
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index cdc53a1050c..ccb34532768 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -118,6 +118,36 @@ xfs_trans_ijoin_ref(
}
/*
+ * Transactional inode timestamp update. Requires the inode to be locked and
+ * joined to the transaction supplied. Relies on the transaction subsystem to
+ * track dirty state and update/writeback the inode accordingly.
+ */
+void
+xfs_trans_ichgtime(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ int flags)
+{
+ struct inode *inode = VFS_I(ip);
+ timespec_t tv;
+
+ ASSERT(tp);
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(ip->i_transp == tp);
+
+ tv = current_fs_time(inode->i_sb);
+
+ if ((flags & XFS_ICHGTIME_MOD) &&
+ !timespec_equal(&inode->i_mtime, &tv)) {
+ inode->i_mtime = tv;
+ }
+ if ((flags & XFS_ICHGTIME_CHG) &&
+ !timespec_equal(&inode->i_ctime, &tv)) {
+ inode->i_ctime = tv;
+ }
+}
+
+/*
* This is called to mark the fields indicated in fieldmask as needing
* to be logged when the transaction is committed. The inode must
* already be associated with the given transaction.
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index 320775295e3..26d1867d815 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -73,8 +73,6 @@ typedef __int32_t xfs_tid_t; /* transaction identifier */
typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */
typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */
-typedef __uint16_t xfs_prid_t; /* prid_t truncated to 16bits in XFS */
-
typedef __uint32_t xlog_tid_t; /* transaction ID type */
/*
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index b7d5769d2df..8b32d1a4c5a 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -56,7 +56,6 @@ xfs_dir_ialloc(
mode_t mode,
xfs_nlink_t nlink,
xfs_dev_t rdev,
- cred_t *credp,
prid_t prid, /* project id */
int okalloc, /* ok to allocate new space */
xfs_inode_t **ipp, /* pointer to inode; it will be
@@ -93,7 +92,7 @@ xfs_dir_ialloc(
* transaction commit so that no other process can steal
* the inode(s) that we've just allocated.
*/
- code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid, okalloc,
+ code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
&ialloc_context, &call_again, &ip);
/*
@@ -197,7 +196,7 @@ xfs_dir_ialloc(
* other allocations in this allocation group,
* this call should always succeed.
*/
- code = xfs_ialloc(tp, dp, mode, nlink, rdev, credp, prid,
+ code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
okalloc, &ialloc_context, &call_again, &ip);
/*
@@ -235,7 +234,7 @@ xfs_droplink(
{
int error;
- xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
ASSERT (ip->i_d.di_nlink > 0);
ip->i_d.di_nlink--;
@@ -299,7 +298,7 @@ xfs_bumplink(
{
if (ip->i_d.di_nlink >= XFS_MAXLINK)
return XFS_ERROR(EMLINK);
- xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
ASSERT(ip->i_d.di_nlink > 0);
ip->i_d.di_nlink++;
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index f55b9678264..456fca31493 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -19,8 +19,7 @@
#define __XFS_UTILS_H__
extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
- xfs_dev_t, cred_t *, prid_t, int,
- xfs_inode_t **, int *);
+ xfs_dev_t, prid_t, int, xfs_inode_t **, int *);
extern int xfs_droplink(xfs_trans_t *, xfs_inode_t *);
extern int xfs_bumplink(xfs_trans_t *, xfs_inode_t *);
extern void xfs_bump_ino_vers2(xfs_trans_t *, xfs_inode_t *);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 4c7c7bfb2b2..8e4a63c4151 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -114,7 +114,7 @@ xfs_setattr(
*/
ASSERT(udqp == NULL);
ASSERT(gdqp == NULL);
- code = xfs_qm_vop_dqalloc(ip, uid, gid, ip->i_d.di_projid,
+ code = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
qflags, &udqp, &gdqp);
if (code)
return code;
@@ -184,8 +184,11 @@ xfs_setattr(
ip->i_size == 0 && ip->i_d.di_nextents == 0) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
lock_flags &= ~XFS_ILOCK_EXCL;
- if (mask & ATTR_CTIME)
- xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ if (mask & ATTR_CTIME) {
+ inode->i_mtime = inode->i_ctime =
+ current_fs_time(inode->i_sb);
+ xfs_mark_inode_dirty_sync(ip);
+ }
code = 0;
goto error_return;
}
@@ -1253,8 +1256,7 @@ xfs_create(
struct xfs_name *name,
mode_t mode,
xfs_dev_t rdev,
- xfs_inode_t **ipp,
- cred_t *credp)
+ xfs_inode_t **ipp)
{
int is_dir = S_ISDIR(mode);
struct xfs_mount *mp = dp->i_mount;
@@ -1266,7 +1268,7 @@ xfs_create(
boolean_t unlock_dp_on_error = B_FALSE;
uint cancel_flags;
int committed;
- xfs_prid_t prid;
+ prid_t prid;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *gdqp = NULL;
uint resblks;
@@ -1279,9 +1281,9 @@ xfs_create(
return XFS_ERROR(EIO);
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
- prid = dp->i_d.di_projid;
+ prid = xfs_get_projid(dp);
else
- prid = dfltprid;
+ prid = XFS_PROJID_DEFAULT;
/*
* Make sure that we have allocated dquot(s) on disk.
@@ -1360,7 +1362,7 @@ xfs_create(
* entry pointing to them, but a directory also the "." entry
* pointing to itself.
*/
- error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, credp,
+ error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
prid, resblks > 0, &ip, &committed);
if (error) {
if (error == ENOSPC)
@@ -1391,7 +1393,7 @@ xfs_create(
ASSERT(error != ENOSPC);
goto out_trans_abort;
}
- xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
if (is_dir) {
@@ -1742,7 +1744,7 @@ xfs_remove(
ASSERT(error != ENOENT);
goto out_bmap_cancel;
}
- xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
if (is_dir) {
/*
@@ -1880,7 +1882,7 @@ xfs_link(
* the tree quota mechanism could be circumvented.
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
- (tdp->i_d.di_projid != sip->i_d.di_projid))) {
+ (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
error = XFS_ERROR(EXDEV);
goto error_return;
}
@@ -1895,7 +1897,7 @@ xfs_link(
&first_block, &free_list, resblks);
if (error)
goto abort_return;
- xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
error = xfs_bumplink(tp, sip);
@@ -1933,8 +1935,7 @@ xfs_symlink(
struct xfs_name *link_name,
const char *target_path,
mode_t mode,
- xfs_inode_t **ipp,
- cred_t *credp)
+ xfs_inode_t **ipp)
{
xfs_mount_t *mp = dp->i_mount;
xfs_trans_t *tp;
@@ -1955,7 +1956,7 @@ xfs_symlink(
int byte_cnt;
int n;
xfs_buf_t *bp;
- xfs_prid_t prid;
+ prid_t prid;
struct xfs_dquot *udqp, *gdqp;
uint resblks;
@@ -1978,9 +1979,9 @@ xfs_symlink(
udqp = gdqp = NULL;
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
- prid = dp->i_d.di_projid;
+ prid = xfs_get_projid(dp);
else
- prid = (xfs_prid_t)dfltprid;
+ prid = XFS_PROJID_DEFAULT;
/*
* Make sure that we have allocated dquot(s) on disk.
@@ -2046,8 +2047,8 @@ xfs_symlink(
/*
* Allocate an inode for the symlink.
*/
- error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT),
- 1, 0, credp, prid, resblks > 0, &ip, NULL);
+ error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
+ prid, resblks > 0, &ip, NULL);
if (error) {
if (error == ENOSPC)
goto error_return;
@@ -2129,7 +2130,7 @@ xfs_symlink(
&first_block, &free_list, resblks);
if (error)
goto error1;
- xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
/*
@@ -2272,7 +2273,7 @@ xfs_alloc_file_space(
count = len;
imapp = &imaps[0];
nimaps = 1;
- bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
+ bmapi_flag = XFS_BMAPI_WRITE | alloc_type;
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
allocatesize_fsb = XFS_B_TO_FSB(mp, count);
@@ -2431,9 +2432,9 @@ xfs_zero_remaining_bytes(
if (endoff > ip->i_size)
endoff = ip->i_size;
- bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize,
- XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp);
+ bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
+ mp->m_rtdev_targp : mp->m_ddev_targp,
+ mp->m_sb.sb_blocksize, XBF_DONT_BLOCK);
if (!bp)
return XFS_ERROR(ENOMEM);
@@ -2459,7 +2460,7 @@ xfs_zero_remaining_bytes(
XFS_BUF_READ(bp);
XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
xfsbdstrat(mp, bp);
- error = xfs_iowait(bp);
+ error = xfs_buf_iowait(bp);
if (error) {
xfs_ioerror_alert("xfs_zero_remaining_bytes(read)",
mp, bp, XFS_BUF_ADDR(bp));
@@ -2472,7 +2473,7 @@ xfs_zero_remaining_bytes(
XFS_BUF_UNREAD(bp);
XFS_BUF_WRITE(bp);
xfsbdstrat(mp, bp);
- error = xfs_iowait(bp);
+ error = xfs_buf_iowait(bp);
if (error) {
xfs_ioerror_alert("xfs_zero_remaining_bytes(write)",
mp, bp, XFS_BUF_ADDR(bp));
@@ -2711,6 +2712,7 @@ xfs_change_file_space(
xfs_off_t llen;
xfs_trans_t *tp;
struct iattr iattr;
+ int prealloc_type;
if (!S_ISREG(ip->i_d.di_mode))
return XFS_ERROR(EINVAL);
@@ -2753,12 +2755,17 @@ xfs_change_file_space(
* size to be changed.
*/
setprealloc = clrprealloc = 0;
+ prealloc_type = XFS_BMAPI_PREALLOC;
switch (cmd) {
+ case XFS_IOC_ZERO_RANGE:
+ prealloc_type |= XFS_BMAPI_CONVERT;
+ xfs_tosspages(ip, startoffset, startoffset + bf->l_len, 0);
+ /* FALLTHRU */
case XFS_IOC_RESVSP:
case XFS_IOC_RESVSP64:
error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
- 1, attr_flags);
+ prealloc_type, attr_flags);
if (error)
return error;
setprealloc = 1;
@@ -2827,7 +2834,7 @@ xfs_change_file_space(
if (ip->i_d.di_mode & S_IXGRP)
ip->i_d.di_mode &= ~S_ISGID;
- xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
}
if (setprealloc)
ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index d8dfa8d0dad..f6702927eee 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -2,7 +2,6 @@
#define _XFS_VNODEOPS_H 1
struct attrlist_cursor_kern;
-struct cred;
struct file;
struct iattr;
struct inode;
@@ -26,7 +25,7 @@ int xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode **ipp, struct xfs_name *ci_name);
int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode,
- xfs_dev_t rdev, struct xfs_inode **ipp, cred_t *credp);
+ xfs_dev_t rdev, struct xfs_inode **ipp);
int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
struct xfs_inode *ip);
int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
@@ -34,8 +33,7 @@ int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize,
xfs_off_t *offset, filldir_t filldir);
int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
- const char *target_path, mode_t mode, struct xfs_inode **ipp,
- cred_t *credp);
+ const char *target_path, mode_t mode, struct xfs_inode **ipp);
int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state);
int xfs_change_file_space(struct xfs_inode *ip, int cmd,
xfs_flock64_t *bf, xfs_off_t offset, int attr_flags);