author    J. Bruce Fields <bfields@redhat.com>  2010-08-26 13:22:27 -0400
committer J. Bruce Fields <bfields@redhat.com>  2010-08-26 13:22:27 -0400
commit    f632265d0ffb5acf331252d98c64939849d96bb2 (patch)
tree      31187d9a726bf1ca6ca12e26ad8e7c609eaf4d8b /fs
parent    7d94784293096c0a46897acdb83be5abd9278ece (diff)
parent    da5cabf80e2433131bf0ed8993abc0f7ea618c73 (diff)
Merge commit 'v2.6.36-rc1' into HEAD
Diffstat (limited to 'fs')
-rw-r--r-- fs/9p/Makefile | 4
-rw-r--r-- fs/9p/fid.c | 111
-rw-r--r-- fs/9p/v9fs.c | 3
-rw-r--r-- fs/9p/v9fs.h | 1
-rw-r--r-- fs/9p/v9fs_vfs.h | 3
-rw-r--r-- fs/9p/vfs_dir.c | 136
-rw-r--r-- fs/9p/vfs_file.c | 26
-rw-r--r-- fs/9p/vfs_inode.c | 785
-rw-r--r-- fs/9p/vfs_super.c | 54
-rw-r--r-- fs/9p/xattr.c | 160
-rw-r--r-- fs/9p/xattr.h | 27
-rw-r--r-- fs/9p/xattr_user.c | 80
-rw-r--r-- fs/Kconfig | 2
-rw-r--r-- fs/adfs/inode.c | 16
-rw-r--r-- fs/affs/affs.h | 3
-rw-r--r-- fs/affs/file.c | 11
-rw-r--r-- fs/affs/inode.c | 38
-rw-r--r-- fs/affs/super.c | 32
-rw-r--r-- fs/afs/Kconfig | 1
-rw-r--r-- fs/afs/cell.c | 96
-rw-r--r-- fs/afs/dir.c | 47
-rw-r--r-- fs/afs/inode.c | 91
-rw-r--r-- fs/afs/internal.h | 25
-rw-r--r-- fs/afs/main.c | 9
-rw-r--r-- fs/afs/mntpt.c | 78
-rw-r--r-- fs/afs/proc.c | 2
-rw-r--r-- fs/afs/rxrpc.c | 1
-rw-r--r-- fs/afs/server.c | 5
-rw-r--r-- fs/afs/super.c | 22
-rw-r--r-- fs/afs/write.c | 1
-rw-r--r-- fs/aio.c | 21
-rw-r--r-- fs/attr.c | 88
-rw-r--r-- fs/autofs/root.c | 67
-rw-r--r-- fs/autofs4/root.c | 52
-rw-r--r-- fs/bad_inode.c | 7
-rw-r--r-- fs/befs/linuxvfs.c | 2
-rw-r--r-- fs/bfs/bfs.h | 1
-rw-r--r-- fs/bfs/file.c | 17
-rw-r--r-- fs/bfs/inode.c | 116
-rw-r--r-- fs/binfmt_elf_fdpic.c | 26
-rw-r--r-- fs/binfmt_flat.c | 27
-rw-r--r-- fs/binfmt_misc.c | 5
-rw-r--r-- fs/bio.c | 5
-rw-r--r-- fs/block_dev.c | 113
-rw-r--r-- fs/btrfs/acl.c | 8
-rw-r--r-- fs/btrfs/ctree.c | 129
-rw-r--r-- fs/btrfs/ctree.h | 4
-rw-r--r-- fs/btrfs/disk-io.c | 19
-rw-r--r-- fs/btrfs/extent-tree.c | 3
-rw-r--r-- fs/btrfs/extent_io.c | 2
-rw-r--r-- fs/btrfs/file.c | 12
-rw-r--r-- fs/btrfs/inode.c | 44
-rw-r--r-- fs/btrfs/ioctl.c | 24
-rw-r--r-- fs/btrfs/relocation.c | 7
-rw-r--r-- fs/btrfs/root-tree.c | 3
-rw-r--r-- fs/btrfs/super.c | 8
-rw-r--r-- fs/btrfs/volumes.c | 18
-rw-r--r-- fs/buffer.c | 180
-rw-r--r-- fs/cachefiles/bind.c | 2
-rw-r--r-- fs/cachefiles/daemon.c | 38
-rw-r--r-- fs/cachefiles/internal.h | 13
-rw-r--r-- fs/cachefiles/namei.c | 13
-rw-r--r-- fs/cachefiles/rdwr.c | 4
-rw-r--r-- fs/ceph/Kconfig | 2
-rw-r--r-- fs/ceph/Makefile | 2
-rw-r--r-- fs/ceph/addr.c | 16
-rw-r--r-- fs/ceph/armor.c | 6
-rw-r--r-- fs/ceph/auth.c | 6
-rw-r--r-- fs/ceph/auth_x.c | 11
-rw-r--r-- fs/ceph/buffer.c | 16
-rw-r--r-- fs/ceph/caps.c | 426
-rw-r--r-- fs/ceph/ceph_frag.h | 4
-rw-r--r-- fs/ceph/ceph_fs.c | 50
-rw-r--r-- fs/ceph/ceph_fs.h | 87
-rw-r--r-- fs/ceph/ceph_hash.h | 4
-rw-r--r-- fs/ceph/ceph_strings.c | 3
-rw-r--r-- fs/ceph/crush/crush.h | 4
-rw-r--r-- fs/ceph/crush/hash.h | 4
-rw-r--r-- fs/ceph/crush/mapper.c | 41
-rw-r--r-- fs/ceph/crush/mapper.h | 4
-rw-r--r-- fs/ceph/crypto.c | 27
-rw-r--r-- fs/ceph/crypto.h | 4
-rw-r--r-- fs/ceph/debugfs.c | 23
-rw-r--r-- fs/ceph/decode.h | 6
-rw-r--r-- fs/ceph/dir.c | 23
-rw-r--r-- fs/ceph/file.c | 34
-rw-r--r-- fs/ceph/inode.c | 32
-rw-r--r-- fs/ceph/ioctl.c | 24
-rw-r--r-- fs/ceph/ioctl.h | 2
-rw-r--r-- fs/ceph/locks.c | 256
-rw-r--r-- fs/ceph/mds_client.c | 306
-rw-r--r-- fs/ceph/mds_client.h | 33
-rw-r--r-- fs/ceph/mdsmap.c | 6
-rw-r--r-- fs/ceph/mdsmap.h | 8
-rw-r--r-- fs/ceph/messenger.c | 96
-rw-r--r-- fs/ceph/mon_client.c | 181
-rw-r--r-- fs/ceph/mon_client.h | 5
-rw-r--r-- fs/ceph/msgr.h | 4
-rw-r--r-- fs/ceph/osd_client.c | 18
-rw-r--r-- fs/ceph/osdmap.c | 63
-rw-r--r-- fs/ceph/rados.h | 13
-rw-r--r-- fs/ceph/super.c | 92
-rw-r--r-- fs/ceph/super.h | 40
-rw-r--r-- fs/ceph/xattr.c | 2
-rw-r--r-- fs/char_dev.c | 1
-rw-r--r-- fs/cifs/Kconfig | 27
-rw-r--r-- fs/cifs/Makefile | 2
-rw-r--r-- fs/cifs/README | 15
-rw-r--r-- fs/cifs/cache.c | 331
-rw-r--r-- fs/cifs/cifs_debug.c | 25
-rw-r--r-- fs/cifs/cifs_dfs_ref.c | 33
-rw-r--r-- fs/cifs/cifs_fs_sb.h | 1
-rw-r--r-- fs/cifs/cifs_spnego.c | 7
-rw-r--r-- fs/cifs/cifsfs.c | 50
-rw-r--r-- fs/cifs/cifsfs.h | 2
-rw-r--r-- fs/cifs/cifsglob.h | 48
-rw-r--r-- fs/cifs/cifsproto.h | 5
-rw-r--r-- fs/cifs/connect.c | 181
-rw-r--r-- fs/cifs/dir.c | 84
-rw-r--r-- fs/cifs/dns_resolve.c | 166
-rw-r--r-- fs/cifs/dns_resolve.h | 2
-rw-r--r-- fs/cifs/file.c | 203
-rw-r--r-- fs/cifs/fscache.c | 236
-rw-r--r-- fs/cifs/fscache.h | 136
-rw-r--r-- fs/cifs/inode.c | 153
-rw-r--r-- fs/cifs/ioctl.c | 3
-rw-r--r-- fs/cifs/misc.c | 20
-rw-r--r-- fs/cifs/netmisc.c | 63
-rw-r--r-- fs/cifs/readdir.c | 5
-rw-r--r-- fs/cifs/sess.c | 10
-rw-r--r-- fs/cifs/smberr.h | 1
-rw-r--r-- fs/coda/inode.c | 8
-rw-r--r-- fs/coda/psdev.c | 12
-rw-r--r-- fs/coda/upcall.c | 12
-rw-r--r-- fs/compat.c | 52
-rw-r--r-- fs/compat_ioctl.c | 51
-rw-r--r-- fs/configfs/inode.c | 9
-rw-r--r-- fs/cramfs/inode.c | 88
-rw-r--r-- fs/dcache.c | 227
-rw-r--r-- fs/direct-io.c | 100
-rw-r--r-- fs/dlm/lowcomms.c | 2
-rw-r--r-- fs/dlm/netlink.c | 15
-rw-r--r-- fs/drop_caches.c | 2
-rw-r--r-- fs/ecryptfs/crypto.c | 2
-rw-r--r-- fs/ecryptfs/file.c | 60
-rw-r--r-- fs/ecryptfs/inode.c | 112
-rw-r--r-- fs/ecryptfs/messaging.c | 19
-rw-r--r-- fs/ecryptfs/super.c | 14
-rw-r--r-- fs/exec.c | 12
-rw-r--r-- fs/exofs/exofs.h | 3
-rw-r--r-- fs/exofs/file.c | 30
-rw-r--r-- fs/exofs/inode.c | 136
-rw-r--r-- fs/exofs/ios.c | 46
-rw-r--r-- fs/exofs/super.c | 3
-rw-r--r-- fs/ext2/acl.c | 1
-rw-r--r-- fs/ext2/balloc.c | 11
-rw-r--r-- fs/ext2/dir.c | 23
-rw-r--r-- fs/ext2/ext2.h | 5
-rw-r--r-- fs/ext2/ialloc.c | 13
-rw-r--r-- fs/ext2/inode.c | 89
-rw-r--r-- fs/ext2/super.c | 14
-rw-r--r-- fs/ext2/xattr.c | 25
-rw-r--r-- fs/ext3/Kconfig | 1
-rw-r--r-- fs/ext3/acl.c | 1
-rw-r--r-- fs/ext3/ialloc.c | 12
-rw-r--r-- fs/ext3/inode.c | 143
-rw-r--r-- fs/ext3/namei.c | 3
-rw-r--r-- fs/ext3/resize.c | 2
-rw-r--r-- fs/ext3/super.c | 31
-rw-r--r-- fs/ext3/xattr.c | 12
-rw-r--r-- fs/ext4/acl.c | 1
-rw-r--r-- fs/ext4/balloc.c | 6
-rw-r--r-- fs/ext4/block_validity.c | 8
-rw-r--r-- fs/ext4/dir.c | 23
-rw-r--r-- fs/ext4/ext4.h | 155
-rw-r--r-- fs/ext4/ext4_jbd2.c | 71
-rw-r--r-- fs/ext4/ext4_jbd2.h | 56
-rw-r--r-- fs/ext4/extents.c | 20
-rw-r--r-- fs/ext4/file.c | 5
-rw-r--r-- fs/ext4/ialloc.c | 6
-rw-r--r-- fs/ext4/inode.c | 298
-rw-r--r-- fs/ext4/mballoc.c | 155
-rw-r--r-- fs/ext4/migrate.c | 2
-rw-r--r-- fs/ext4/move_extent.c | 13
-rw-r--r-- fs/ext4/namei.c | 40
-rw-r--r-- fs/ext4/resize.c | 8
-rw-r--r-- fs/ext4/super.c | 346
-rw-r--r-- fs/ext4/xattr.c | 15
-rw-r--r-- fs/fat/fat.h | 1
-rw-r--r-- fs/fat/file.c | 49
-rw-r--r-- fs/fat/inode.c | 26
-rw-r--r-- fs/fcntl.c | 28
-rw-r--r-- fs/file.c | 60
-rw-r--r-- fs/file_table.c | 19
-rw-r--r-- fs/freevxfs/vxfs_extern.h | 2
-rw-r--r-- fs/freevxfs/vxfs_inode.c | 8
-rw-r--r-- fs/freevxfs/vxfs_super.c | 4
-rw-r--r-- fs/fs-writeback.c | 744
-rw-r--r-- fs/fs_struct.c | 7
-rw-r--r-- fs/fscache/Kconfig | 1
-rw-r--r-- fs/fscache/internal.h | 22
-rw-r--r-- fs/fscache/main.c | 106
-rw-r--r-- fs/fscache/object-list.c | 11
-rw-r--r-- fs/fscache/object.c | 106
-rw-r--r-- fs/fscache/operation.c | 67
-rw-r--r-- fs/fscache/page.c | 72
-rw-r--r-- fs/fuse/dev.c | 229
-rw-r--r-- fs/fuse/dir.c | 19
-rw-r--r-- fs/fuse/file.c | 2
-rw-r--r-- fs/fuse/fuse_i.h | 3
-rw-r--r-- fs/fuse/inode.c | 6
-rw-r--r-- fs/gfs2/Kconfig | 1
-rw-r--r-- fs/gfs2/aops.c | 19
-rw-r--r-- fs/gfs2/bmap.c | 18
-rw-r--r-- fs/gfs2/bmap.h | 2
-rw-r--r-- fs/gfs2/dir.c | 44
-rw-r--r-- fs/gfs2/file.c | 4
-rw-r--r-- fs/gfs2/glock.c | 97
-rw-r--r-- fs/gfs2/incore.h | 4
-rw-r--r-- fs/gfs2/inode.c | 39
-rw-r--r-- fs/gfs2/log.c | 4
-rw-r--r-- fs/gfs2/main.c | 14
-rw-r--r-- fs/gfs2/meta_io.c | 8
-rw-r--r-- fs/gfs2/ops_fstype.c | 37
-rw-r--r-- fs/gfs2/ops_inode.c | 18
-rw-r--r-- fs/gfs2/quota.c | 35
-rw-r--r-- fs/gfs2/quota.h | 2
-rw-r--r-- fs/gfs2/recovery.c | 54
-rw-r--r-- fs/gfs2/recovery.h | 6
-rw-r--r-- fs/gfs2/super.c | 52
-rw-r--r-- fs/gfs2/sys.c | 60
-rw-r--r-- fs/gfs2/xattr.c | 24
-rw-r--r-- fs/hfs/hfs_fs.h | 2
-rw-r--r-- fs/hfs/inode.c | 70
-rw-r--r-- fs/hfs/super.c | 2
-rw-r--r-- fs/hfsplus/hfsplus_fs.h | 1
-rw-r--r-- fs/hfsplus/inode.c | 77
-rw-r--r-- fs/hfsplus/super.c | 10
-rw-r--r-- fs/hostfs/hostfs.h | 22
-rw-r--r-- fs/hostfs/hostfs_kern.c | 517
-rw-r--r-- fs/hostfs/hostfs_user.c | 112
-rw-r--r-- fs/hpfs/file.c | 11
-rw-r--r-- fs/hpfs/hpfs_fn.h | 2
-rw-r--r-- fs/hpfs/inode.c | 24
-rw-r--r-- fs/hpfs/super.c | 2
-rw-r--r-- fs/hppfs/hppfs.c | 8
-rw-r--r-- fs/hugetlbfs/inode.c | 41
-rw-r--r-- fs/inode.c | 187
-rw-r--r-- fs/ioctl.c | 18
-rw-r--r-- fs/isofs/inode.c | 7
-rw-r--r-- fs/jbd/journal.c | 7
-rw-r--r-- fs/jbd/recovery.c | 11
-rw-r--r-- fs/jbd2/checkpoint.c | 18
-rw-r--r-- fs/jbd2/commit.c | 50
-rw-r--r-- fs/jbd2/journal.c | 136
-rw-r--r-- fs/jbd2/recovery.c | 10
-rw-r--r-- fs/jbd2/transaction.c | 260
-rw-r--r-- fs/jffs2/acl.c | 3
-rw-r--r-- fs/jffs2/background.c | 1
-rw-r--r-- fs/jffs2/build.c | 1
-rw-r--r-- fs/jffs2/compr.c | 5
-rw-r--r-- fs/jffs2/compr.h | 1
-rw-r--r-- fs/jffs2/compr_lzo.c | 1
-rw-r--r-- fs/jffs2/compr_rtime.c | 1
-rw-r--r-- fs/jffs2/compr_rubin.c | 1
-rw-r--r-- fs/jffs2/compr_zlib.c | 1
-rw-r--r-- fs/jffs2/debug.c | 1
-rw-r--r-- fs/jffs2/debug.h | 1
-rw-r--r-- fs/jffs2/dir.c | 124
-rw-r--r-- fs/jffs2/erase.c | 1
-rw-r--r-- fs/jffs2/file.c | 1
-rw-r--r-- fs/jffs2/fs.c | 18
-rw-r--r-- fs/jffs2/gc.c | 1
-rw-r--r-- fs/jffs2/ioctl.c | 1
-rw-r--r-- fs/jffs2/jffs2_fs_i.h | 1
-rw-r--r-- fs/jffs2/jffs2_fs_sb.h | 1
-rw-r--r-- fs/jffs2/nodelist.h | 1
-rw-r--r-- fs/jffs2/os-linux.h | 2
-rw-r--r-- fs/jffs2/super.c | 2
-rw-r--r-- fs/jffs2/xattr.c | 4
-rw-r--r-- fs/jfs/file.c | 14
-rw-r--r-- fs/jfs/inode.c | 63
-rw-r--r-- fs/jfs/jfs_inode.h | 2
-rw-r--r-- fs/jfs/super.c | 8
-rw-r--r-- fs/jfs/xattr.c | 87
-rw-r--r-- fs/libfs.c | 73
-rw-r--r-- fs/logfs/dir.c | 7
-rw-r--r-- fs/logfs/file.c | 24
-rw-r--r-- fs/logfs/inode.c | 51
-rw-r--r-- fs/logfs/journal.c | 2
-rw-r--r-- fs/logfs/logfs.h | 7
-rw-r--r-- fs/logfs/readwrite.c | 62
-rw-r--r-- fs/logfs/segment.c | 1
-rw-r--r-- fs/logfs/super.c | 23
-rw-r--r-- fs/mbcache.c | 173
-rw-r--r-- fs/minix/bitmap.c | 6
-rw-r--r-- fs/minix/dir.c | 25
-rw-r--r-- fs/minix/file.c | 22
-rw-r--r-- fs/minix/inode.c | 35
-rw-r--r-- fs/minix/minix.h | 4
-rw-r--r-- fs/namei.c | 23
-rw-r--r-- fs/namespace.c | 13
-rw-r--r-- fs/ncpfs/inode.c | 40
-rw-r--r-- fs/ncpfs/ioctl.c | 1
-rw-r--r-- fs/nfs/Kconfig | 27
-rw-r--r-- fs/nfs/callback.c | 11
-rw-r--r-- fs/nfs/callback_proc.c | 19
-rw-r--r-- fs/nfs/client.c | 143
-rw-r--r-- fs/nfs/delegation.c | 16
-rw-r--r-- fs/nfs/delegation.h | 4
-rw-r--r-- fs/nfs/dir.c | 13
-rw-r--r-- fs/nfs/direct.c | 29
-rw-r--r-- fs/nfs/dns_resolve.c | 24
-rw-r--r-- fs/nfs/dns_resolve.h | 12
-rw-r--r-- fs/nfs/file.c | 64
-rw-r--r-- fs/nfs/getroot.c | 2
-rw-r--r-- fs/nfs/inode.c | 87
-rw-r--r-- fs/nfs/internal.h | 14
-rw-r--r-- fs/nfs/nfs2xdr.c | 7
-rw-r--r-- fs/nfs/nfs3xdr.c | 8
-rw-r--r-- fs/nfs/nfs4_fs.h | 57
-rw-r--r-- fs/nfs/nfs4proc.c | 474
-rw-r--r-- fs/nfs/nfs4renewd.c | 4
-rw-r--r-- fs/nfs/nfs4state.c | 82
-rw-r--r-- fs/nfs/nfs4xdr.c | 111
-rw-r--r-- fs/nfs/nfsroot.c | 2
-rw-r--r-- fs/nfs/pagelist.c | 8
-rw-r--r-- fs/nfs/read.c | 3
-rw-r--r-- fs/nfs/super.c | 30
-rw-r--r-- fs/nfs/unlink.c | 2
-rw-r--r-- fs/nfs/write.c | 39
-rw-r--r-- fs/nfsd/nfs4state.c | 2
-rw-r--r-- fs/nfsd/nfs4xdr.c | 6
-rw-r--r-- fs/nfsd/vfs.c | 17
-rw-r--r-- fs/nilfs2/bmap.c | 6
-rw-r--r-- fs/nilfs2/bmap.h | 16
-rw-r--r-- fs/nilfs2/bmap_union.h | 42
-rw-r--r-- fs/nilfs2/btnode.c | 23
-rw-r--r-- fs/nilfs2/btnode.h | 4
-rw-r--r-- fs/nilfs2/btree.c | 914
-rw-r--r-- fs/nilfs2/btree.h | 14
-rw-r--r-- fs/nilfs2/dir.c | 58
-rw-r--r-- fs/nilfs2/direct.c | 96
-rw-r--r-- fs/nilfs2/direct.h | 11
-rw-r--r-- fs/nilfs2/gcdat.c | 2
-rw-r--r-- fs/nilfs2/gcinode.c | 17
-rw-r--r-- fs/nilfs2/inode.c | 78
-rw-r--r-- fs/nilfs2/mdt.c | 1
-rw-r--r-- fs/nilfs2/nilfs.h | 24
-rw-r--r-- fs/nilfs2/page.c | 5
-rw-r--r-- fs/nilfs2/page.h | 2
-rw-r--r-- fs/nilfs2/recovery.c | 359
-rw-r--r-- fs/nilfs2/segbuf.c | 2
-rw-r--r-- fs/nilfs2/segbuf.h | 26
-rw-r--r-- fs/nilfs2/segment.c | 19
-rw-r--r-- fs/nilfs2/segment.h | 12
-rw-r--r-- fs/nilfs2/super.c | 361
-rw-r--r-- fs/nilfs2/the_nilfs.c | 161
-rw-r--r-- fs/nilfs2/the_nilfs.h | 23
-rw-r--r-- fs/notify/Kconfig | 1
-rw-r--r-- fs/notify/Makefile | 4
-rw-r--r-- fs/notify/dnotify/dnotify.c | 213
-rw-r--r-- fs/notify/fanotify/Kconfig | 26
-rw-r--r-- fs/notify/fanotify/Makefile | 1
-rw-r--r-- fs/notify/fanotify/fanotify.c | 212
-rw-r--r-- fs/notify/fanotify/fanotify_user.c | 760
-rw-r--r-- fs/notify/fsnotify.c | 201
-rw-r--r-- fs/notify/fsnotify.h | 27
-rw-r--r-- fs/notify/group.c | 182
-rw-r--r-- fs/notify/inode_mark.c | 337
-rw-r--r-- fs/notify/inotify/Kconfig | 15
-rw-r--r-- fs/notify/inotify/Makefile | 1
-rw-r--r-- fs/notify/inotify/inotify.c | 873
-rw-r--r-- fs/notify/inotify/inotify.h | 7
-rw-r--r-- fs/notify/inotify/inotify_fsnotify.c | 151
-rw-r--r-- fs/notify/inotify/inotify_user.c | 369
-rw-r--r-- fs/notify/mark.c | 371
-rw-r--r-- fs/notify/notification.c | 209
-rw-r--r-- fs/notify/vfsmount_mark.c | 187
-rw-r--r-- fs/ntfs/inode.c | 10
-rw-r--r-- fs/ntfs/inode.h | 2
-rw-r--r-- fs/ntfs/super.c | 2
-rw-r--r-- fs/ocfs2/acl.c | 33
-rw-r--r-- fs/ocfs2/aops.c | 110
-rw-r--r-- fs/ocfs2/cluster/tcp.c | 17
-rw-r--r-- fs/ocfs2/dlm/dlmdebug.c | 6
-rw-r--r-- fs/ocfs2/dlm/dlmdomain.c | 3
-rw-r--r-- fs/ocfs2/dlm/dlmmaster.c | 31
-rw-r--r-- fs/ocfs2/dlm/dlmrecovery.c | 24
-rw-r--r-- fs/ocfs2/dlm/dlmthread.c | 114
-rw-r--r-- fs/ocfs2/dlmfs/dlmfs.c | 18
-rw-r--r-- fs/ocfs2/dlmglue.c | 4
-rw-r--r-- fs/ocfs2/file.c | 331
-rw-r--r-- fs/ocfs2/file.h | 6
-rw-r--r-- fs/ocfs2/inode.c | 29
-rw-r--r-- fs/ocfs2/inode.h | 5
-rw-r--r-- fs/ocfs2/journal.c | 34
-rw-r--r-- fs/ocfs2/localalloc.c | 7
-rw-r--r-- fs/ocfs2/quota_global.c | 2
-rw-r--r-- fs/ocfs2/quota_local.c | 4
-rw-r--r-- fs/ocfs2/refcounttree.c | 32
-rw-r--r-- fs/ocfs2/reservations.c | 1
-rw-r--r-- fs/ocfs2/suballoc.c | 2
-rw-r--r-- fs/ocfs2/super.c | 5
-rw-r--r-- fs/ocfs2/xattr.c | 200
-rw-r--r-- fs/omfs/dir.c | 22
-rw-r--r-- fs/omfs/file.c | 46
-rw-r--r-- fs/omfs/inode.c | 53
-rw-r--r-- fs/omfs/omfs.h | 1
-rw-r--r-- fs/omfs/omfs_fs.h | 1
-rw-r--r-- fs/open.c | 18
-rw-r--r-- fs/partitions/acorn.c | 35
-rw-r--r-- fs/partitions/amiga.c | 20
-rw-r--r-- fs/partitions/atari.c | 12
-rw-r--r-- fs/partitions/check.c | 23
-rw-r--r-- fs/partitions/check.h | 6
-rw-r--r-- fs/partitions/efi.c | 2
-rw-r--r-- fs/partitions/ibm.c | 31
-rw-r--r-- fs/partitions/karma.c | 2
-rw-r--r-- fs/partitions/ldm.c | 4
-rw-r--r-- fs/partitions/mac.c | 4
-rw-r--r-- fs/partitions/msdos.c | 67
-rw-r--r-- fs/partitions/osf.c | 2
-rw-r--r-- fs/partitions/sgi.c | 2
-rw-r--r-- fs/partitions/sun.c | 2
-rw-r--r-- fs/partitions/sysv68.c | 9
-rw-r--r-- fs/partitions/ultrix.c | 2
-rw-r--r-- fs/pipe.c | 93
-rw-r--r-- fs/proc/Makefile | 2
-rw-r--r-- fs/proc/array.c | 2
-rw-r--r-- fs/proc/base.c | 144
-rw-r--r-- fs/proc/generic.c | 18
-rw-r--r-- fs/proc/inode.c | 23
-rw-r--r-- fs/proc/proc_devtree.c | 3
-rw-r--r-- fs/proc/proc_sysctl.c | 15
-rw-r--r-- fs/proc/task_mmu.c | 8
-rw-r--r-- fs/proc/task_nommu.c | 20
-rw-r--r-- fs/qnx4/inode.c | 11
-rw-r--r-- fs/quota/dquot.c | 83
-rw-r--r-- fs/quota/quota_tree.c | 85
-rw-r--r-- fs/quota/quota_tree.h | 6
-rw-r--r-- fs/quota/quota_v1.c | 3
-rw-r--r-- fs/quota/quota_v2.c | 11
-rw-r--r-- fs/ramfs/file-nommu.c | 7
-rw-r--r-- fs/read_write.c | 8
-rw-r--r-- fs/readdir.c | 8
-rw-r--r-- fs/reiserfs/file.c | 50
-rw-r--r-- fs/reiserfs/inode.c | 136
-rw-r--r-- fs/reiserfs/journal.c | 1
-rw-r--r-- fs/reiserfs/super.c | 10
-rw-r--r-- fs/signalfd.c | 2
-rw-r--r-- fs/smbfs/inode.c | 12
-rw-r--r-- fs/splice.c | 25
-rw-r--r-- fs/squashfs/Kconfig | 25
-rw-r--r-- fs/squashfs/Makefile | 4
-rw-r--r-- fs/squashfs/decompressor.c | 6
-rw-r--r-- fs/squashfs/lzo_wrapper.c | 136
-rw-r--r-- fs/squashfs/squashfs.h | 3
-rw-r--r-- fs/squashfs/squashfs_fs.h | 20
-rw-r--r-- fs/squashfs/xattr.c | 4
-rw-r--r-- fs/squashfs/xattr.h | 2
-rw-r--r-- fs/stat.c | 29
-rw-r--r-- fs/statfs.c | 95
-rw-r--r-- fs/super.c | 45
-rw-r--r-- fs/sync.c | 27
-rw-r--r-- fs/sysfs/file.c | 3
-rw-r--r-- fs/sysfs/inode.c | 14
-rw-r--r-- fs/sysfs/mount.c | 2
-rw-r--r-- fs/sysfs/symlink.c | 26
-rw-r--r-- fs/sysfs/sysfs.h | 2
-rw-r--r-- fs/sysv/dir.c | 21
-rw-r--r-- fs/sysv/file.c | 22
-rw-r--r-- fs/sysv/ialloc.c | 7
-rw-r--r-- fs/sysv/inode.c | 19
-rw-r--r-- fs/sysv/itree.c | 19
-rw-r--r-- fs/sysv/super.c | 74
-rw-r--r-- fs/sysv/sysv.h | 4
-rw-r--r-- fs/ubifs/budget.c | 2
-rw-r--r-- fs/ubifs/file.c | 23
-rw-r--r-- fs/ubifs/lpt.c | 14
-rw-r--r-- fs/ubifs/lpt_commit.c | 2
-rw-r--r-- fs/ubifs/recovery.c | 23
-rw-r--r-- fs/ubifs/shrinker.c | 2
-rw-r--r-- fs/ubifs/super.c | 16
-rw-r--r-- fs/ubifs/ubifs.h | 4
-rw-r--r-- fs/udf/file.c | 23
-rw-r--r-- fs/udf/ialloc.c | 2
-rw-r--r-- fs/udf/inode.c | 61
-rw-r--r-- fs/udf/super.c | 5
-rw-r--r-- fs/udf/udfdecl.h | 3
-rw-r--r-- fs/ufs/dir.c | 13
-rw-r--r-- fs/ufs/ialloc.c | 2
-rw-r--r-- fs/ufs/inode.c | 63
-rw-r--r-- fs/ufs/super.c | 2
-rw-r--r-- fs/ufs/truncate.c | 16
-rw-r--r-- fs/ufs/ufs.h | 2
-rw-r--r-- fs/ufs/util.h | 4
-rw-r--r-- fs/utimes.c | 7
-rw-r--r-- fs/xfs/Makefile | 4
-rw-r--r-- fs/xfs/linux-2.6/xfs_acl.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_aops.c | 638
-rw-r--r-- fs/xfs/linux-2.6/xfs_aops.h | 4
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.c | 67
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.h | 119
-rw-r--r-- fs/xfs/linux-2.6/xfs_dmapi_priv.h | 28
-rw-r--r-- fs/xfs/linux-2.6/xfs_export.c | 15
-rw-r--r-- fs/xfs/linux-2.6/xfs_file.c | 104
-rw-r--r-- fs/xfs/linux-2.6/xfs_fs_subr.c | 4
-rw-r--r-- fs/xfs/linux-2.6/xfs_fs_subr.h | 25
-rw-r--r-- fs/xfs/linux-2.6/xfs_ioctl.c | 34
-rw-r--r-- fs/xfs/linux-2.6/xfs_ioctl32.c | 21
-rw-r--r-- fs/xfs/linux-2.6/xfs_iops.c | 46
-rw-r--r-- fs/xfs/linux-2.6/xfs_linux.h | 3
-rw-r--r-- fs/xfs/linux-2.6/xfs_quotaops.c | 12
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.c | 179
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.h | 7
-rw-r--r-- fs/xfs/linux-2.6/xfs_sync.c | 188
-rw-r--r-- fs/xfs/linux-2.6/xfs_sync.h | 5
-rw-r--r-- fs/xfs/linux-2.6/xfs_trace.c | 5
-rw-r--r-- fs/xfs/linux-2.6/xfs_trace.h | 473
-rw-r--r-- fs/xfs/quota/xfs_dquot.c | 114
-rw-r--r-- fs/xfs/quota/xfs_dquot_item.c | 301
-rw-r--r-- fs/xfs/quota/xfs_qm.c | 44
-rw-r--r-- fs/xfs/quota/xfs_qm_bhv.c | 10
-rw-r--r-- fs/xfs/quota/xfs_qm_stats.c | 10
-rw-r--r-- fs/xfs/quota/xfs_qm_syscalls.c | 144
-rw-r--r-- fs/xfs/quota/xfs_trans_dquot.c | 35
-rw-r--r-- fs/xfs/support/debug.c | 1
-rw-r--r-- fs/xfs/xfs_ag.h | 1
-rw-r--r-- fs/xfs/xfs_alloc.c | 15
-rw-r--r-- fs/xfs/xfs_alloc.h | 20
-rw-r--r-- fs/xfs/xfs_alloc_btree.c | 5
-rw-r--r-- fs/xfs/xfs_attr.c | 91
-rw-r--r-- fs/xfs/xfs_attr_leaf.c | 5
-rw-r--r-- fs/xfs/xfs_bmap.c | 327
-rw-r--r-- fs/xfs/xfs_bmap.h | 37
-rw-r--r-- fs/xfs/xfs_bmap_btree.c | 5
-rw-r--r-- fs/xfs/xfs_btree.c | 5
-rw-r--r-- fs/xfs/xfs_buf_item.c | 228
-rw-r--r-- fs/xfs/xfs_buf_item.h | 2
-rw-r--r-- fs/xfs/xfs_da_btree.c | 20
-rw-r--r-- fs/xfs/xfs_dfrag.c | 21
-rw-r--r-- fs/xfs/xfs_dir2.c | 11
-rw-r--r-- fs/xfs/xfs_dir2_block.c | 8
-rw-r--r-- fs/xfs/xfs_dir2_data.c | 2
-rw-r--r-- fs/xfs/xfs_dir2_leaf.c | 4
-rw-r--r-- fs/xfs/xfs_dir2_node.c | 2
-rw-r--r-- fs/xfs/xfs_dir2_sf.c | 2
-rw-r--r-- fs/xfs/xfs_dmapi.h | 170
-rw-r--r-- fs/xfs/xfs_dmops.c | 55
-rw-r--r-- fs/xfs/xfs_error.c | 4
-rw-r--r-- fs/xfs/xfs_extfree_item.c | 278
-rw-r--r-- fs/xfs/xfs_filestream.c | 84
-rw-r--r-- fs/xfs/xfs_filestream.h | 82
-rw-r--r-- fs/xfs/xfs_fsops.c | 7
-rw-r--r-- fs/xfs/xfs_ialloc.c | 146
-rw-r--r-- fs/xfs/xfs_ialloc_btree.c | 4
-rw-r--r-- fs/xfs/xfs_iget.c | 147
-rw-r--r-- fs/xfs/xfs_inode.c | 207
-rw-r--r-- fs/xfs/xfs_inode.h | 10
-rw-r--r-- fs/xfs/xfs_inode_item.c | 273
-rw-r--r-- fs/xfs/xfs_inode_item.h | 12
-rw-r--r-- fs/xfs/xfs_iomap.c | 76
-rw-r--r-- fs/xfs/xfs_iomap.h | 22
-rw-r--r-- fs/xfs/xfs_itable.c | 293
-rw-r--r-- fs/xfs/xfs_itable.h | 17
-rw-r--r-- fs/xfs/xfs_log.c | 16
-rw-r--r-- fs/xfs/xfs_log.h | 11
-rw-r--r-- fs/xfs/xfs_log_cil.c | 4
-rw-r--r-- fs/xfs/xfs_log_recover.c | 55
-rw-r--r-- fs/xfs/xfs_mount.c | 73
-rw-r--r-- fs/xfs/xfs_mount.h | 71
-rw-r--r-- fs/xfs/xfs_rename.c | 63
-rw-r--r-- fs/xfs/xfs_rtalloc.c | 17
-rw-r--r-- fs/xfs/xfs_rtalloc.h | 11
-rw-r--r-- fs/xfs/xfs_rw.c | 15
-rw-r--r-- fs/xfs/xfs_trans.c | 657
-rw-r--r-- fs/xfs/xfs_trans.h | 528
-rw-r--r-- fs/xfs/xfs_trans_ail.c | 1
-rw-r--r-- fs/xfs/xfs_trans_buf.c | 75
-rw-r--r-- fs/xfs/xfs_trans_extfree.c | 23
-rw-r--r-- fs/xfs/xfs_trans_inode.c | 76
-rw-r--r-- fs/xfs/xfs_trans_item.c | 441
-rw-r--r-- fs/xfs/xfs_trans_priv.h | 18
-rw-r--r-- fs/xfs/xfs_utils.c | 87
-rw-r--r-- fs/xfs/xfs_utils.h | 1
-rw-r--r-- fs/xfs/xfs_vnodeops.c | 333
587 files changed, 18087 insertions, 14699 deletions
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index 1a940ec7af6..91fba025fcb 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_9P_FS) := 9p.o
vfs_dir.o \
vfs_dentry.o \
v9fs.o \
- fid.o
+ fid.o \
+ xattr.o \
+ xattr_user.o
9p-$(CONFIG_9P_FSCACHE) += cache.o
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 7317b39b281..35856368906 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -97,6 +97,34 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any)
return ret;
}
+/*
+ * We need to hold v9ses->rename_sem as long as we hold references
+ * to the returned path array, since the array elements contain
+ * pointers to the dentry names.
+ */
+static int build_path_from_dentry(struct v9fs_session_info *v9ses,
+ struct dentry *dentry, char ***names)
+{
+ int n = 0, i;
+ char **wnames;
+ struct dentry *ds;
+
+ for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent)
+ n++;
+
+ wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL);
+ if (!wnames)
+ goto err_out;
+
+ for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent)
+ wnames[i] = (char *)ds->d_name.name;
+
+ *names = wnames;
+ return n;
+err_out:
+ return -ENOMEM;
+}
+
/**
* v9fs_fid_lookup - lookup for a fid, try to walk if not found
* @dentry: dentry to look for fid in
@@ -112,7 +140,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
int i, n, l, clone, any, access;
u32 uid;
struct p9_fid *fid, *old_fid = NULL;
- struct dentry *d, *ds;
+ struct dentry *ds;
struct v9fs_session_info *v9ses;
char **wnames, *uname;
@@ -139,49 +167,62 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
fid = v9fs_fid_find(dentry, uid, any);
if (fid)
return fid;
-
+ /*
+ * We don't have a matching fid. To do a TWALK we need the
+ * parent fid, and we need to prevent a rename while we
+ * look at the parent.
+ */
+ down_read(&v9ses->rename_sem);
ds = dentry->d_parent;
fid = v9fs_fid_find(ds, uid, any);
- if (!fid) { /* walk from the root */
- n = 0;
- for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent)
- n++;
+ if (fid) {
+ /* Found the parent fid; do a lookup with that */
+ fid = p9_client_walk(fid, 1, (char **)&dentry->d_name.name, 1);
+ goto fid_out;
+ }
+ up_read(&v9ses->rename_sem);
- fid = v9fs_fid_find(ds, uid, any);
- if (!fid) { /* the user is not attached to the fs yet */
- if (access == V9FS_ACCESS_SINGLE)
- return ERR_PTR(-EPERM);
+ /* start from the root and try to do a lookup */
+ fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any);
+ if (!fid) {
+ /* the user is not attached to the fs yet */
+ if (access == V9FS_ACCESS_SINGLE)
+ return ERR_PTR(-EPERM);
- if (v9fs_proto_dotu(v9ses))
+ if (v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses))
uname = NULL;
- else
- uname = v9ses->uname;
+ else
+ uname = v9ses->uname;
- fid = p9_client_attach(v9ses->clnt, NULL, uname, uid,
- v9ses->aname);
-
- if (IS_ERR(fid))
- return fid;
-
- v9fs_fid_add(ds, fid);
- }
- } else /* walk from the parent */
- n = 1;
+ fid = p9_client_attach(v9ses->clnt, NULL, uname, uid,
+ v9ses->aname);
+ if (IS_ERR(fid))
+ return fid;
- if (ds == dentry)
+ v9fs_fid_add(dentry->d_sb->s_root, fid);
+ }
+ /* If we are root ourself just return that */
+ if (dentry->d_sb->s_root == dentry)
return fid;
-
- wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL);
- if (!wnames)
- return ERR_PTR(-ENOMEM);
-
- for (d = dentry, i = (n-1); i >= 0; i--, d = d->d_parent)
- wnames[i] = (char *) d->d_name.name;
-
+ /*
+ * Do a multipath walk with the attached root.
+ * While walking the parent we need to make sure
+ * no parallel rename is happening
+ */
+ down_read(&v9ses->rename_sem);
+ n = build_path_from_dentry(v9ses, dentry, &wnames);
+ if (n < 0) {
+ fid = ERR_PTR(n);
+ goto err_out;
+ }
clone = 1;
i = 0;
while (i < n) {
l = min(n - i, P9_MAXWELEM);
+ /*
+ * We need to hold the rename lock when doing a multipath
+ * walk to ensure none of the path components change
+ */
fid = p9_client_walk(fid, l, &wnames[i], clone);
if (IS_ERR(fid)) {
if (old_fid) {
@@ -193,15 +234,17 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
p9_client_clunk(old_fid);
}
kfree(wnames);
- return fid;
+ goto err_out;
}
old_fid = fid;
i += l;
clone = 0;
}
-
kfree(wnames);
+fid_out:
v9fs_fid_add(dentry, fid);
+err_out:
+ up_read(&v9ses->rename_sem);
return fid;
}
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index f8b86e92cd6..38dc0e06759 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -237,6 +237,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
__putname(v9ses->uname);
return ERR_PTR(-ENOMEM);
}
+ init_rwsem(&v9ses->rename_sem);
rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY);
if (rc) {
@@ -278,7 +279,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ;
/* for legacy mode, fall back to V9FS_ACCESS_ANY */
- if (!v9fs_proto_dotu(v9ses) &&
+ if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) &&
((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
v9ses->flags &= ~V9FS_ACCESS_MASK;
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index bec4d0bcb45..4c963c9fc41 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -104,6 +104,7 @@ struct v9fs_session_info {
struct p9_client *clnt; /* 9p client */
struct list_head slist; /* list of sessions registered with v9fs */
struct backing_dev_info bdi;
+ struct rw_semaphore rename_sem;
};
struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 32ef4009d03..88418c419ea 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -52,9 +52,10 @@ void v9fs_destroy_inode(struct inode *inode);
#endif
struct inode *v9fs_get_inode(struct super_block *sb, int mode);
-void v9fs_clear_inode(struct inode *inode);
+void v9fs_evict_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
+void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
int v9fs_dir_release(struct inode *inode, struct file *filp);
int v9fs_file_open(struct inode *inode, struct file *file);
void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index d61e3b28ce3..16c8a2a98c1 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -87,29 +87,19 @@ static void p9stat_init(struct p9_wstat *stbuf)
}
/**
- * v9fs_dir_readdir - read a directory
+ * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir
* @filp: opened file structure
- * @dirent: directory structure ???
- * @filldir: function to populate directory structure ???
+ * @buflen: Length in bytes of buffer to allocate
*
*/
-static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
+static int v9fs_alloc_rdir_buf(struct file *filp, int buflen)
{
- int over;
- struct p9_wstat st;
- int err = 0;
- struct p9_fid *fid;
- int buflen;
- int reclen = 0;
struct p9_rdir *rdir;
+ struct p9_fid *fid;
+ int err = 0;
- P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
fid = filp->private_data;
-
- buflen = fid->clnt->msize - P9_IOHDRSZ;
-
- /* allocate rdir on demand */
if (!fid->rdir) {
rdir = kmalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL);
@@ -128,6 +118,36 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
spin_unlock(&filp->f_dentry->d_lock);
kfree(rdir);
}
+exit:
+ return err;
+}
+
+/**
+ * v9fs_dir_readdir - read a directory
+ * @filp: opened file structure
+ * @dirent: directory structure ???
+ * @filldir: function to populate directory structure ???
+ *
+ */
+
+static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+ int over;
+ struct p9_wstat st;
+ int err = 0;
+ struct p9_fid *fid;
+ int buflen;
+ int reclen = 0;
+ struct p9_rdir *rdir;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
+ fid = filp->private_data;
+
+ buflen = fid->clnt->msize - P9_IOHDRSZ;
+
+ err = v9fs_alloc_rdir_buf(filp, buflen);
+ if (err)
+ goto exit;
rdir = (struct p9_rdir *) fid->rdir;
err = mutex_lock_interruptible(&rdir->mutex);
@@ -146,7 +166,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
while (rdir->head < rdir->tail) {
p9stat_init(&st);
err = p9stat_read(rdir->buf + rdir->head,
- buflen - rdir->head, &st,
+ rdir->tail - rdir->head, &st,
fid->clnt->proto_version);
if (err) {
P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
@@ -176,6 +196,88 @@ exit:
return err;
}
+/**
+ * v9fs_dir_readdir_dotl - read a directory
+ * @filp: opened file structure
+ * @dirent: buffer to fill dirent structures
+ * @filldir: function to populate dirent structures
+ *
+ */
+static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent,
+ filldir_t filldir)
+{
+ int over;
+ int err = 0;
+ struct p9_fid *fid;
+ int buflen;
+ struct p9_rdir *rdir;
+ struct p9_dirent curdirent;
+ u64 oldoffset = 0;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name);
+ fid = filp->private_data;
+
+ buflen = fid->clnt->msize - P9_READDIRHDRSZ;
+
+ err = v9fs_alloc_rdir_buf(filp, buflen);
+ if (err)
+ goto exit;
+ rdir = (struct p9_rdir *) fid->rdir;
+
+ err = mutex_lock_interruptible(&rdir->mutex);
+ if (err)
+ return err;
+
+ while (err == 0) {
+ if (rdir->tail == rdir->head) {
+ err = p9_client_readdir(fid, rdir->buf, buflen,
+ filp->f_pos);
+ if (err <= 0)
+ goto unlock_and_exit;
+
+ rdir->head = 0;
+ rdir->tail = err;
+ }
+
+ while (rdir->head < rdir->tail) {
+
+ err = p9dirent_read(rdir->buf + rdir->head,
+ buflen - rdir->head, &curdirent,
+ fid->clnt->proto_version);
+ if (err < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS, "returned %d\n", err);
+ err = -EIO;
+ goto unlock_and_exit;
+ }
+
+ /* d_off in the dirent structure tracks the offset into
+ * the next dirent in the dir. However, filldir()
+ * expects the offset into the current dirent. Hence,
+ * while calling filldir, send the offset from the
+ * previous dirent structure.
+ */
+ over = filldir(dirent, curdirent.d_name,
+ strlen(curdirent.d_name),
+ oldoffset, v9fs_qid2ino(&curdirent.qid),
+ curdirent.d_type);
+ oldoffset = curdirent.d_off;
+
+ if (over) {
+ err = 0;
+ goto unlock_and_exit;
+ }
+
+ filp->f_pos = curdirent.d_off;
+ rdir->head += err;
+ }
+ }
+
+unlock_and_exit:
+ mutex_unlock(&rdir->mutex);
+exit:
+ return err;
+}
+
/**
* v9fs_dir_release - close a directory
@@ -207,7 +309,7 @@ const struct file_operations v9fs_dir_operations = {
const struct file_operations v9fs_dir_operations_dotl = {
.read = generic_read_dir,
.llseek = generic_file_llseek,
- .readdir = v9fs_dir_readdir,
+ .readdir = v9fs_dir_readdir_dotl,
.open = v9fs_file_open,
.release = v9fs_dir_release,
};
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 2bedc6c94fc..e97c92bd6f1 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -59,9 +59,13 @@ int v9fs_file_open(struct inode *inode, struct file *file)
struct p9_fid *fid;
int omode;
- P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p \n", inode, file);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
v9ses = v9fs_inode2v9ses(inode);
- omode = v9fs_uflags2omode(file->f_flags, v9fs_proto_dotu(v9ses));
+ if (v9fs_proto_dotl(v9ses))
+ omode = file->f_flags;
+ else
+ omode = v9fs_uflags2omode(file->f_flags,
+ v9fs_proto_dotu(v9ses));
fid = file->private_data;
if (!fid) {
fid = v9fs_fid_clone(file->f_path.dentry);
@@ -73,11 +77,12 @@ int v9fs_file_open(struct inode *inode, struct file *file)
p9_client_clunk(fid);
return err;
}
- if (omode & P9_OTRUNC) {
+ if (file->f_flags & O_TRUNC) {
i_size_write(inode, 0);
inode->i_blocks = 0;
}
- if ((file->f_flags & O_APPEND) && (!v9fs_proto_dotu(v9ses)))
+ if ((file->f_flags & O_APPEND) &&
+ (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
generic_file_llseek(file, 0, SEEK_END);
}
@@ -139,7 +144,7 @@ ssize_t
v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
u64 offset)
{
- int n, total;
+ int n, total, size;
struct p9_fid *fid = filp->private_data;
P9_DPRINTK(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", fid->fid,
@@ -147,6 +152,7 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
n = 0;
total = 0;
+ size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
do {
n = p9_client_read(fid, data, udata, offset, count);
if (n <= 0)
@@ -160,7 +166,7 @@ v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count,
offset += n;
count -= n;
total += n;
- } while (count > 0 && n == (fid->clnt->msize - P9_IOHDRSZ));
+ } while (count > 0 && n == size);
if (n < 0)
total = n;
@@ -183,11 +189,13 @@ v9fs_file_read(struct file *filp, char __user *udata, size_t count,
{
int ret;
struct p9_fid *fid;
+ size_t size;
P9_DPRINTK(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
fid = filp->private_data;
- if (count > (fid->clnt->msize - P9_IOHDRSZ))
+ size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
+ if (count > size)
ret = v9fs_file_readn(filp, NULL, udata, count, *offset);
else
ret = p9_client_read(fid, NULL, udata, *offset, count);
@@ -224,9 +232,7 @@ v9fs_file_write(struct file *filp, const char __user * data,
fid = filp->private_data;
clnt = fid->clnt;
- rsize = fid->iounit;
- if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
- rsize = clnt->msize - P9_IOHDRSZ;
+ rsize = fid->iounit ? fid->iounit : clnt->msize - P9_IOHDRSZ;
do {
if (count < rsize)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 4331b3b5ee1..c7c23eab944 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -35,6 +35,7 @@
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -42,6 +43,7 @@
#include "v9fs_vfs.h"
#include "fid.h"
#include "cache.h"
+#include "xattr.h"
static const struct inode_operations v9fs_dir_inode_operations;
static const struct inode_operations v9fs_dir_inode_operations_dotu;
@@ -236,6 +238,41 @@ void v9fs_destroy_inode(struct inode *inode)
#endif
/**
+ * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
+ * new file system object. This checks the S_ISGID bit to determine the owning
+ * group of the new file system object.
+ */
+
+static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
+{
+ BUG_ON(dir_inode == NULL);
+
+ if (dir_inode->i_mode & S_ISGID) {
+ /* set_gid bit is set. */
+ return dir_inode->i_gid;
+ }
+ return current_fsgid();
+}
+
+/**
+ * v9fs_dentry_from_dir_inode - helper function to get the dentry from
+ * dir inode.
+ *
+ */
+
+static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
+{
+ struct dentry *dentry;
+
+ spin_lock(&dcache_lock);
+ /* Directory should have only one entry. */
+ BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
+ dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+ spin_unlock(&dcache_lock);
+ return dentry;
+}
+
+/**
* v9fs_get_inode - helper function to setup an inode
* @sb: superblock
* @mode: mode to setup inode with
@@ -267,7 +304,13 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
case S_IFBLK:
case S_IFCHR:
case S_IFSOCK:
- if (!v9fs_proto_dotu(v9ses)) {
+ if (v9fs_proto_dotl(v9ses)) {
+ inode->i_op = &v9fs_file_inode_operations_dotl;
+ inode->i_fop = &v9fs_file_operations_dotl;
+ } else if (v9fs_proto_dotu(v9ses)) {
+ inode->i_op = &v9fs_file_inode_operations;
+ inode->i_fop = &v9fs_file_operations;
+ } else {
P9_DPRINTK(P9_DEBUG_ERROR,
"special files without extended mode\n");
err = -EINVAL;
@@ -387,8 +430,10 @@ error:
* @inode: inode to release
*
*/
-void v9fs_clear_inode(struct inode *inode)
+void v9fs_evict_inode(struct inode *inode)
{
+ truncate_inode_pages(inode->i_mapping, 0);
+ end_writeback(inode);
filemap_fdatawrite(inode->i_mapping);
#ifdef CONFIG_9P_FSCACHE
@@ -396,23 +441,14 @@ void v9fs_clear_inode(struct inode *inode)
#endif
}
-/**
- * v9fs_inode_from_fid - populate an inode by issuing a attribute request
- * @v9ses: session information
- * @fid: fid to issue attribute request for
- * @sb: superblock on which to create inode
- *
- */
-
static struct inode *
-v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid,
struct super_block *sb)
{
int err, umode;
- struct inode *ret;
+ struct inode *ret = NULL;
struct p9_wstat *st;
- ret = NULL;
st = p9_client_stat(fid);
if (IS_ERR(st))
return ERR_CAST(st);
@@ -433,15 +469,62 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
#endif
p9stat_free(st);
kfree(st);
-
return ret;
-
error:
p9stat_free(st);
kfree(st);
return ERR_PTR(err);
}
+static struct inode *
+v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+ struct super_block *sb)
+{
+ struct inode *ret = NULL;
+ int err;
+ struct p9_stat_dotl *st;
+
+ st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+ if (IS_ERR(st))
+ return ERR_CAST(st);
+
+ ret = v9fs_get_inode(sb, st->st_mode);
+ if (IS_ERR(ret)) {
+ err = PTR_ERR(ret);
+ goto error;
+ }
+
+ v9fs_stat2inode_dotl(st, ret);
+ ret->i_ino = v9fs_qid2ino(&st->qid);
+#ifdef CONFIG_9P_FSCACHE
+ v9fs_vcookie_set_qid(ret, &st->qid);
+ v9fs_cache_inode_get_cookie(ret);
+#endif
+ kfree(st);
+ return ret;
+error:
+ kfree(st);
+ return ERR_PTR(err);
+}
+
+/**
+ * v9fs_inode_from_fid - Helper routine to populate an inode by
+ * issuing a attribute request
+ * @v9ses: session information
+ * @fid: fid to issue attribute request for
+ * @sb: superblock on which to create inode
+ *
+ */
+static inline struct inode *
+v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+ struct super_block *sb)
+{
+ if (v9fs_proto_dotl(v9ses))
+ return v9fs_inode_dotl(v9ses, fid, sb);
+ else
+ return v9fs_inode(v9ses, fid, sb);
+}
+
/**
* v9fs_remove - helper function to remove files and directories
* @dir: directory inode that is being deleted
@@ -563,6 +646,118 @@ error:
}
/**
+ * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
+ * @dir: directory inode in which the file is being created
+ * @dentry: dentry of the file being created
+ * @mode: create permissions
+ * @nd: path information
+ *
+ */
+
+static int
+v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
+{
+ int err = 0;
+ char *name = NULL;
+ gid_t gid;
+ int flags;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid = NULL;
+ struct p9_fid *dfid, *ofid;
+ struct file *filp;
+ struct p9_qid qid;
+ struct inode *inode;
+
+ v9ses = v9fs_inode2v9ses(dir);
+ if (nd && nd->flags & LOOKUP_OPEN)
+ flags = nd->intent.open.flags - 1;
+ else
+ flags = O_RDWR;
+
+ name = (char *) dentry->d_name.name;
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
+ "mode:0x%x\n", name, flags, mode);
+
+ dfid = v9fs_fid_lookup(dentry->d_parent);
+ if (IS_ERR(dfid)) {
+ err = PTR_ERR(dfid);
+ P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ return err;
+ }
+
+ /* clone a fid to use for creation */
+ ofid = p9_client_walk(dfid, 0, NULL, 1);
+ if (IS_ERR(ofid)) {
+ err = PTR_ERR(ofid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+ return err;
+ }
+
+ gid = v9fs_get_fsgid_for_create(dir);
+ err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+ if (err < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "p9_client_open_dotl failed in creat %d\n",
+ err);
+ goto error;
+ }
+
+ /* No need to populate the inode if we are not opening the file AND
+ * not in cached mode.
+ */
+ if (!v9ses->cache && !(nd && nd->flags & LOOKUP_OPEN)) {
+ /* Not in cached mode. No need to populate inode with stat */
+ dentry->d_op = &v9fs_dentry_operations;
+ p9_client_clunk(ofid);
+ d_instantiate(dentry, NULL);
+ return 0;
+ }
+
+ /* Now walk from the parent so we can get an unopened fid. */
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+ fid = NULL;
+ goto error;
+ }
+
+ /* instantiate inode and assign the unopened fid to dentry */
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
+ goto error;
+ }
+ dentry->d_op = &v9fs_cached_dentry_operations;
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+
+ /* if we are opening a file, assign the open fid to the file */
+ if (nd && nd->flags & LOOKUP_OPEN) {
+ filp = lookup_instantiate_filp(nd, dentry, v9fs_open_created);
+ if (IS_ERR(filp)) {
+ p9_client_clunk(ofid);
+ return PTR_ERR(filp);
+ }
+ filp->private_data = ofid;
+ } else
+ p9_client_clunk(ofid);
+
+ return 0;
+
+error:
+ if (ofid)
+ p9_client_clunk(ofid);
+ if (fid)
+ p9_client_clunk(fid);
+ return err;
+}
+
+/**
* v9fs_vfs_create - VFS hook to create files
* @dir: directory inode in which the file is being created
* @dentry: dentry of the file being created
@@ -652,6 +847,83 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return err;
}
+
+/**
+ * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the directory being created
+ * @mode: mode for new directory
+ *
+ */
+
+static int v9fs_vfs_mkdir_dotl(struct inode *dir, struct dentry *dentry,
+ int mode)
+{
+ int err;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid = NULL, *dfid = NULL;
+ gid_t gid;
+ char *name;
+ struct inode *inode;
+ struct p9_qid qid;
+ struct dentry *dir_dentry;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+ err = 0;
+ v9ses = v9fs_inode2v9ses(dir);
+
+ mode |= S_IFDIR;
+ dir_dentry = v9fs_dentry_from_dir_inode(dir);
+ dfid = v9fs_fid_lookup(dir_dentry);
+ if (IS_ERR(dfid)) {
+ err = PTR_ERR(dfid);
+ P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ dfid = NULL;
+ goto error;
+ }
+
+ gid = v9fs_get_fsgid_for_create(dir);
+ if (gid < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_fsgid_for_create failed\n");
+ goto error;
+ }
+
+ name = (char *) dentry->d_name.name;
+ err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
+ if (err < 0)
+ goto error;
+
+ /* instantiate inode and assign the unopened fid to the dentry */
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
+
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
+ }
+ dentry->d_op = &v9fs_cached_dentry_operations;
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+ fid = NULL;
+ }
+error:
+ if (fid)
+ p9_client_clunk(fid);
+ return err;
+}
+
/**
* v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode
* @dir: inode that is being walked from
@@ -678,6 +950,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
sb = dir->i_sb;
v9ses = v9fs_inode2v9ses(dir);
+ /* We can walk d_parent because we hold the dir->i_mutex */
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid))
return ERR_CAST(dfid);
@@ -785,27 +1058,33 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto clunk_olddir;
}
+ down_write(&v9ses->rename_sem);
if (v9fs_proto_dotl(v9ses)) {
retval = p9_client_rename(oldfid, newdirfid,
(char *) new_dentry->d_name.name);
if (retval != -ENOSYS)
goto clunk_newdir;
}
+ if (old_dentry->d_parent != new_dentry->d_parent) {
+ /*
+ * 9P .u can only handle file rename in the same directory
+ */
- /* 9P can only handle file rename in the same directory */
- if (memcmp(&olddirfid->qid, &newdirfid->qid, sizeof(newdirfid->qid))) {
P9_DPRINTK(P9_DEBUG_ERROR,
"old dir and new dir are different\n");
retval = -EXDEV;
goto clunk_newdir;
}
-
v9fs_blank_wstat(&wstat);
wstat.muid = v9ses->uname;
wstat.name = (char *) new_dentry->d_name.name;
retval = p9_client_wstat(oldfid, &wstat);
clunk_newdir:
+ if (!retval)
+ /* successful rename */
+ d_move(old_dentry, new_dentry);
+ up_write(&v9ses->rename_sem);
p9_client_clunk(newdirfid);
clunk_olddir:
@@ -853,6 +1132,42 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
+static int
+v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ int err;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid;
+ struct p9_stat_dotl *st;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+ err = -EPERM;
+ v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
+ return simple_getattr(mnt, dentry, stat);
+
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ /* Ask for all the fields in stat structure. Server will return
+ * whatever it supports
+ */
+
+ st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
+ if (IS_ERR(st))
+ return PTR_ERR(st);
+
+ v9fs_stat2inode_dotl(st, dentry->d_inode);
+ generic_fillattr(dentry->d_inode, stat);
+ /* Change block size to what the server returned */
+ stat->blksize = st->st_blksize;
+
+ kfree(st);
+ return 0;
+}
+
/**
* v9fs_vfs_setattr - set file metadata
* @dentry: file whose metadata to set
@@ -896,10 +1211,71 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
}
retval = p9_client_wstat(fid, &wstat);
- if (retval >= 0)
- retval = inode_setattr(dentry->d_inode, iattr);
+ if (retval < 0)
+ return retval;
+
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(dentry->d_inode)) {
+ retval = vmtruncate(dentry->d_inode, iattr->ia_size);
+ if (retval)
+ return retval;
+ }
- return retval;
+ setattr_copy(dentry->d_inode, iattr);
+ mark_inode_dirty(dentry->d_inode);
+ return 0;
+}
+
+/**
+ * v9fs_vfs_setattr_dotl - set file metadata
+ * @dentry: file whose metadata to set
+ * @iattr: metadata assignment structure
+ *
+ */
+
+static int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
+{
+ int retval;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid;
+ struct p9_iattr_dotl p9attr;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "\n");
+
+ retval = inode_change_ok(dentry->d_inode, iattr);
+ if (retval)
+ return retval;
+
+ p9attr.valid = iattr->ia_valid;
+ p9attr.mode = iattr->ia_mode;
+ p9attr.uid = iattr->ia_uid;
+ p9attr.gid = iattr->ia_gid;
+ p9attr.size = iattr->ia_size;
+ p9attr.atime_sec = iattr->ia_atime.tv_sec;
+ p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+ p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+ p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+
+ retval = -EPERM;
+ v9ses = v9fs_inode2v9ses(dentry->d_inode);
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ retval = p9_client_setattr(fid, &p9attr);
+ if (retval < 0)
+ return retval;
+
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(dentry->d_inode)) {
+ retval = vmtruncate(dentry->d_inode, iattr->ia_size);
+ if (retval)
+ return retval;
+ }
+
+ setattr_copy(dentry->d_inode, iattr);
+ mark_inode_dirty(dentry->d_inode);
+ return 0;
}
/**
@@ -980,6 +1356,77 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
}
/**
+ * v9fs_stat2inode_dotl - populate an inode structure with stat info
+ * @stat: stat structure
+ * @inode: inode to populate
+ *
+ */
+
+void
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+{
+
+ if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
+ inode->i_atime.tv_sec = stat->st_atime_sec;
+ inode->i_atime.tv_nsec = stat->st_atime_nsec;
+ inode->i_mtime.tv_sec = stat->st_mtime_sec;
+ inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ inode->i_ctime.tv_sec = stat->st_ctime_sec;
+ inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+ inode->i_uid = stat->st_uid;
+ inode->i_gid = stat->st_gid;
+ inode->i_nlink = stat->st_nlink;
+ inode->i_mode = stat->st_mode;
+ inode->i_rdev = new_decode_dev(stat->st_rdev);
+
+ if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+
+ i_size_write(inode, stat->st_size);
+ inode->i_blocks = stat->st_blocks;
+ } else {
+ if (stat->st_result_mask & P9_STATS_ATIME) {
+ inode->i_atime.tv_sec = stat->st_atime_sec;
+ inode->i_atime.tv_nsec = stat->st_atime_nsec;
+ }
+ if (stat->st_result_mask & P9_STATS_MTIME) {
+ inode->i_mtime.tv_sec = stat->st_mtime_sec;
+ inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ }
+ if (stat->st_result_mask & P9_STATS_CTIME) {
+ inode->i_ctime.tv_sec = stat->st_ctime_sec;
+ inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+ }
+ if (stat->st_result_mask & P9_STATS_UID)
+ inode->i_uid = stat->st_uid;
+ if (stat->st_result_mask & P9_STATS_GID)
+ inode->i_gid = stat->st_gid;
+ if (stat->st_result_mask & P9_STATS_NLINK)
+ inode->i_nlink = stat->st_nlink;
+ if (stat->st_result_mask & P9_STATS_MODE) {
+ inode->i_mode = stat->st_mode;
+ if ((S_ISBLK(inode->i_mode)) ||
+ (S_ISCHR(inode->i_mode)))
+ init_special_inode(inode, inode->i_mode,
+ inode->i_rdev);
+ }
+ if (stat->st_result_mask & P9_STATS_RDEV)
+ inode->i_rdev = new_decode_dev(stat->st_rdev);
+ if (stat->st_result_mask & P9_STATS_SIZE)
+ i_size_write(inode, stat->st_size);
+ if (stat->st_result_mask & P9_STATS_BLOCKS)
+ inode->i_blocks = stat->st_blocks;
+ }
+ if (stat->st_result_mask & P9_STATS_GEN)
+ inode->i_generation = stat->st_gen;
+
+ /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
+ * because the inode structure does not have fields for them.
+ */
+}
+
+/**
* v9fs_qid2ino - convert qid into inode number
* @qid: qid to hash
*
@@ -1022,7 +1469,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
if (IS_ERR(fid))
return PTR_ERR(fid);
- if (!v9fs_proto_dotu(v9ses))
+ if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses))
return -EBADF;
st = p9_client_stat(fid);
@@ -1128,6 +1575,99 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
}
/**
+ * v9fs_vfs_symlink_dotl - helper function to create symlinks
+ * @dir: directory inode containing symlink
+ * @dentry: dentry for symlink
+ * @symname: symlink data
+ *
+ * See Also: 9P2000.L RFC for more information
+ *
+ */
+
+static int
+v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
+ const char *symname)
+{
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *dfid;
+ struct p9_fid *fid = NULL;
+ struct inode *inode;
+ struct p9_qid qid;
+ char *name;
+ int err;
+ gid_t gid;
+
+ name = (char *) dentry->d_name.name;
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
+ dir->i_ino, name, symname);
+ v9ses = v9fs_inode2v9ses(dir);
+
+ dfid = v9fs_fid_lookup(dentry->d_parent);
+ if (IS_ERR(dfid)) {
+ err = PTR_ERR(dfid);
+ P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ return err;
+ }
+
+ gid = v9fs_get_fsgid_for_create(dir);
+
+ if (gid < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_egid failed %d\n", gid);
+ goto error;
+ }
+
+ /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
+ err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
+
+ if (err < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
+ goto error;
+ }
+
+ if (v9ses->cache) {
+ /* Now walk from the parent so we can get an unopened fid. */
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
+
+ /* instantiate inode and assign the unopened fid to dentry */
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
+ }
+ dentry->d_op = &v9fs_cached_dentry_operations;
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+ fid = NULL;
+ } else {
+ /* Not in cached mode. No need to populate inode with stat */
+ inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+ dentry->d_op = &v9fs_dentry_operations;
+ d_instantiate(dentry, inode);
+ }
+
+error:
+ if (fid)
+ p9_client_clunk(fid);
+
+ return err;
+}
+
+/**
* v9fs_vfs_symlink - helper function to create symlinks
* @dir: directory inode containing symlink
* @dentry: dentry for symlink
@@ -1186,6 +1726,76 @@ clunk_fid:
}
/**
+ * v9fs_vfs_link_dotl - create a hardlink for dotl
+ * @old_dentry: dentry for file to link to
+ * @dir: inode destination for new link
+ * @dentry: dentry for link
+ *
+ */
+
+static int
+v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ int err;
+ struct p9_fid *dfid, *oldfid;
+ char *name;
+ struct v9fs_session_info *v9ses;
+ struct dentry *dir_dentry;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
+ dir->i_ino, old_dentry->d_name.name,
+ dentry->d_name.name);
+
+ v9ses = v9fs_inode2v9ses(dir);
+ dir_dentry = v9fs_dentry_from_dir_inode(dir);
+ dfid = v9fs_fid_lookup(dir_dentry);
+ if (IS_ERR(dfid))
+ return PTR_ERR(dfid);
+
+ oldfid = v9fs_fid_lookup(old_dentry);
+ if (IS_ERR(oldfid))
+ return PTR_ERR(oldfid);
+
+ name = (char *) dentry->d_name.name;
+
+ err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
+
+ if (err < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
+ return err;
+ }
+
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ /* Get the latest stat info from server. */
+ struct p9_fid *fid;
+ struct p9_stat_dotl *st;
+
+ fid = v9fs_fid_lookup(old_dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+ if (IS_ERR(st))
+ return PTR_ERR(st);
+
+ v9fs_stat2inode_dotl(st, old_dentry->d_inode);
+
+ kfree(st);
+ } else {
+ /* Caching disabled. No need to get up-to-date stat info.
+ * This dentry will be released immediately. So, just i_count++
+ */
+ atomic_inc(&old_dentry->d_inode->i_count);
+ }
+
+ dentry->d_op = old_dentry->d_op;
+ d_instantiate(dentry, old_dentry->d_inode);
+
+ return err;
+}
+
+/**
* v9fs_vfs_mknod - create a special file
* @dir: inode destination for new link
* @dentry: dentry for file
@@ -1230,6 +1840,100 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
return retval;
}
+/**
+ * v9fs_vfs_mknod_dotl - create a special file
+ * @dir: inode destination for new link
+ * @dentry: dentry for file
+ * @mode: mode for creation
+ * @rdev: device associated with special file
+ *
+ */
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+{
+ int err;
+ char *name;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid = NULL, *dfid = NULL;
+ struct inode *inode;
+ gid_t gid;
+ struct p9_qid qid;
+ struct dentry *dir_dentry;
+
+ P9_DPRINTK(P9_DEBUG_VFS,
+ " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
+ dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev));
+
+ if (!new_valid_dev(rdev))
+ return -EINVAL;
+
+ v9ses = v9fs_inode2v9ses(dir);
+ dir_dentry = v9fs_dentry_from_dir_inode(dir);
+ dfid = v9fs_fid_lookup(dir_dentry);
+ if (IS_ERR(dfid)) {
+ err = PTR_ERR(dfid);
+ P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+ dfid = NULL;
+ goto error;
+ }
+
+ gid = v9fs_get_fsgid_for_create(dir);
+ if (gid < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS, "v9fs_get_fsgid_for_create failed\n");
+ goto error;
+ }
+
+ name = (char *) dentry->d_name.name;
+
+ err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
+ if (err < 0)
+ goto error;
+
+ /* instantiate inode and assign the unopened fid to the dentry */
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ fid = p9_client_walk(dfid, 1, &name, 1);
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+ err);
+ fid = NULL;
+ goto error;
+ }
+
+ inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+ err);
+ goto error;
+ }
+ dentry->d_op = &v9fs_cached_dentry_operations;
+ d_instantiate(dentry, inode);
+ err = v9fs_fid_add(dentry, fid);
+ if (err < 0)
+ goto error;
+ fid = NULL;
+ } else {
+ /*
+ * Not in cached mode. No need to populate inode with stat.
+ * the socket syscall returns an fd, so we need to instantiate
+ */
+ inode = v9fs_get_inode(dir->i_sb, mode);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+ dentry->d_op = &v9fs_dentry_operations;
+ d_instantiate(dentry, inode);
+ }
+
+error:
+ if (fid)
+ p9_client_clunk(fid);
+ return err;
+}
+
static const struct inode_operations v9fs_dir_inode_operations_dotu = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,
@@ -1238,24 +1942,29 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = {
.unlink = v9fs_vfs_unlink,
.mkdir = v9fs_vfs_mkdir,
.rmdir = v9fs_vfs_rmdir,
- .mknod = v9fs_vfs_mknod,
+ .mknod = v9fs_vfs_mknod_dotl,
.rename = v9fs_vfs_rename,
.getattr = v9fs_vfs_getattr,
.setattr = v9fs_vfs_setattr,
};
static const struct inode_operations v9fs_dir_inode_operations_dotl = {
- .create = v9fs_vfs_create,
+ .create = v9fs_vfs_create_dotl,
.lookup = v9fs_vfs_lookup,
- .symlink = v9fs_vfs_symlink,
- .link = v9fs_vfs_link,
+ .link = v9fs_vfs_link_dotl,
+ .symlink = v9fs_vfs_symlink_dotl,
.unlink = v9fs_vfs_unlink,
- .mkdir = v9fs_vfs_mkdir,
+ .mkdir = v9fs_vfs_mkdir_dotl,
.rmdir = v9fs_vfs_rmdir,
- .mknod = v9fs_vfs_mknod,
+ .mknod = v9fs_vfs_mknod_dotl,
.rename = v9fs_vfs_rename,
- .getattr = v9fs_vfs_getattr,
- .setattr = v9fs_vfs_setattr,
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
+
};
static const struct inode_operations v9fs_dir_inode_operations = {
@@ -1276,8 +1985,12 @@ static const struct inode_operations v9fs_file_inode_operations = {
};
static const struct inode_operations v9fs_file_inode_operations_dotl = {
- .getattr = v9fs_vfs_getattr,
- .setattr = v9fs_vfs_setattr,
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
};
static const struct inode_operations v9fs_symlink_inode_operations = {
@@ -1292,6 +2005,10 @@ static const struct inode_operations v9fs_symlink_inode_operations_dotl = {
.readlink = generic_readlink,
.follow_link = v9fs_vfs_follow_link,
.put_link = v9fs_vfs_put_link,
- .getattr = v9fs_vfs_getattr,
- .setattr = v9fs_vfs_setattr,
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
};
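
The dotl operation tables above route xattr calls through the VFS generics
(generic_setxattr() and friends), which select a handler from sb->s_xattr by
name prefix and invoke it with the prefix stripped. A minimal userspace model
of that dispatch — illustrative names only, not the kernel's actual code:

	/* Simplified model of prefix-based xattr handler dispatch. */
	#include <stdio.h>
	#include <string.h>

	struct xattr_handler {
		const char *prefix;
		int (*set)(const char *suffix, const void *value, size_t size);
	};

	static int user_set(const char *suffix, const void *value, size_t size)
	{
		printf("user.%s <- %zu bytes\n", suffix, size);
		return 0;
	}

	static const struct xattr_handler user_handler = { "user.", user_set };
	static const struct xattr_handler *handlers[] = { &user_handler, NULL };

	static int dispatch_set(const char *name, const void *value, size_t size)
	{
		const struct xattr_handler **h;

		for (h = handlers; *h; h++) {
			size_t plen = strlen((*h)->prefix);
			if (strncmp(name, (*h)->prefix, plen) == 0)
				return (*h)->set(name + plen, value, size);
		}
		return -1;	/* -EOPNOTSUPP in the kernel */
	}

	int main(void)
	{
		return dispatch_set("user.comment", "hi", 2);
	}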
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index be74d020436..f9311077de6 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -45,6 +45,7 @@
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
+#include "xattr.h"
static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl;
@@ -77,9 +78,10 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_blocksize_bits = fls(v9ses->maxdata - 1);
sb->s_blocksize = 1 << sb->s_blocksize_bits;
sb->s_magic = V9FS_MAGIC;
- if (v9fs_proto_dotl(v9ses))
+ if (v9fs_proto_dotl(v9ses)) {
sb->s_op = &v9fs_super_ops_dotl;
- else
+ sb->s_xattr = v9fs_xattr_handlers;
+ } else
sb->s_op = &v9fs_super_ops;
sb->s_bdi = &v9ses->bdi;
@@ -107,7 +109,6 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
struct inode *inode = NULL;
struct dentry *root = NULL;
struct v9fs_session_info *v9ses = NULL;
- struct p9_wstat *st = NULL;
int mode = S_IRWXUGO | S_ISVTX;
struct p9_fid *fid;
int retval = 0;
@@ -124,16 +125,10 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
goto close_session;
}
- st = p9_client_stat(fid);
- if (IS_ERR(st)) {
- retval = PTR_ERR(st);
- goto clunk_fid;
- }
-
sb = sget(fs_type, NULL, v9fs_set_super, v9ses);
if (IS_ERR(sb)) {
retval = PTR_ERR(sb);
- goto free_stat;
+ goto clunk_fid;
}
v9fs_fill_super(sb, v9ses, flags, data);
@@ -151,22 +146,38 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags,
}
sb->s_root = root;
- root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode(st, root->d_inode, sb);
+ if (v9fs_proto_dotl(v9ses)) {
+ struct p9_stat_dotl *st = NULL;
+ st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+ if (IS_ERR(st)) {
+ retval = PTR_ERR(st);
+ goto clunk_fid;
+ }
+
+ v9fs_stat2inode_dotl(st, root->d_inode);
+ kfree(st);
+ } else {
+ struct p9_wstat *st = NULL;
+ st = p9_client_stat(fid);
+ if (IS_ERR(st)) {
+ retval = PTR_ERR(st);
+ goto clunk_fid;
+ }
+
+ root->d_inode->i_ino = v9fs_qid2ino(&st->qid);
+ v9fs_stat2inode(st, root->d_inode, sb);
+
+ p9stat_free(st);
+ kfree(st);
+ }
v9fs_fid_add(root, fid);
- p9stat_free(st);
- kfree(st);
P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
simple_set_mnt(mnt, sb);
return 0;
-free_stat:
- p9stat_free(st);
- kfree(st);
-
clunk_fid:
p9_client_clunk(fid);
@@ -176,8 +187,6 @@ close_session:
return retval;
release_sb:
- p9stat_free(st);
- kfree(st);
deactivate_locked_super(sb);
return retval;
}
@@ -257,7 +266,7 @@ static const struct super_operations v9fs_super_ops = {
.destroy_inode = v9fs_destroy_inode,
#endif
.statfs = simple_statfs,
- .clear_inode = v9fs_clear_inode,
+ .evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
};
@@ -268,7 +277,7 @@ static const struct super_operations v9fs_super_ops_dotl = {
.destroy_inode = v9fs_destroy_inode,
#endif
.statfs = v9fs_statfs,
- .clear_inode = v9fs_clear_inode,
+ .evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
};
@@ -278,4 +287,5 @@ struct file_system_type v9fs_fs_type = {
.get_sb = v9fs_get_sb,
.kill_sb = v9fs_kill_super,
.owner = THIS_MODULE,
+ .fs_flags = FS_RENAME_DOES_D_MOVE,
};
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
new file mode 100644
index 00000000000..f88e5c2dc87
--- /dev/null
+++ b/fs/9p/xattr.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+
+#include "fid.h"
+#include "xattr.h"
+
+/*
+ * v9fs_xattr_get()
+ *
+ * Copy an extended attribute into the buffer provided, or compute
+ * the required buffer size. Pass a NULL buffer to request only the
+ * required size.
+ *
+ * Returns a negative error number on failure, or the number of bytes
+ * used / required on success.
+ */
+ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t buffer_size)
+{
+ ssize_t retval;
+ int msize, read_count;
+ u64 offset = 0, attr_size;
+ struct p9_fid *fid, *attr_fid;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu\n",
+ __func__, name, buffer_size);
+
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
+
+ attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
+ if (IS_ERR(attr_fid)) {
+ retval = PTR_ERR(attr_fid);
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "p9_client_attrwalk failed %zd\n", retval);
+ attr_fid = NULL;
+ goto error;
+ }
+ if (!buffer_size) {
+ /* request to get the attr_size */
+ retval = attr_size;
+ goto error;
+ }
+ if (attr_size > buffer_size) {
+ retval = -ERANGE;
+ goto error;
+ }
+ msize = attr_fid->clnt->msize;
+ while (attr_size) {
+ if (attr_size > (msize - P9_IOHDRSZ))
+ read_count = msize - P9_IOHDRSZ;
+ else
+ read_count = attr_size;
+ read_count = p9_client_read(attr_fid, ((char *)buffer)+offset,
+ NULL, offset, read_count);
+ if (read_count < 0) {
+ /* error in xattr read */
+ retval = read_count;
+ goto error;
+ }
+ offset += read_count;
+ attr_size -= read_count;
+ }
+ /* Total read xattr bytes */
+ retval = offset;
+error:
+ if (attr_fid)
+ p9_client_clunk(attr_fid);
+ return retval;
+
+}
+
+/*
+ * v9fs_xattr_set()
+ *
+ * Create, replace or remove an extended attribute for this inode. Buffer
+ * is NULL to remove an existing extended attribute, and non-NULL to
+ * either replace an existing extended attribute, or create a new extended
+ * attribute. The flags XATTR_REPLACE and XATTR_CREATE
+ * specify that an extended attribute must exist and must not exist
+ * prior to the call, respectively.
+ *
+ * Returns 0, or a negative error number on failure.
+ */
+int v9fs_xattr_set(struct dentry *dentry, const char *name,
+ const void *value, size_t value_len, int flags)
+{
+ u64 offset = 0;
+ int retval, msize, write_count;
+ struct p9_fid *fid = NULL;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "%s: name = %s value_len = %zu flags = %d\n",
+ __func__, name, value_len, flags);
+
+ fid = v9fs_fid_clone(dentry);
+ if (IS_ERR(fid)) {
+ retval = PTR_ERR(fid);
+ fid = NULL;
+ goto error;
+ }
+ /*
+ * On success fid points to xattr
+ */
+ retval = p9_client_xattrcreate(fid, name, value_len, flags);
+ if (retval < 0) {
+ P9_DPRINTK(P9_DEBUG_VFS,
+ "p9_client_xattrcreate failed %d\n", retval);
+ goto error;
+ }
+	msize = fid->clnt->msize;
+ while (value_len) {
+ if (value_len > (msize - P9_IOHDRSZ))
+ write_count = msize - P9_IOHDRSZ;
+ else
+ write_count = value_len;
+ write_count = p9_client_write(fid, ((char *)value)+offset,
+ NULL, offset, write_count);
+ if (write_count < 0) {
+ /* error in xattr write */
+ retval = write_count;
+ goto error;
+ }
+ offset += write_count;
+ value_len -= write_count;
+ }
+	/* Total written xattr bytes */
+ retval = offset;
+error:
+ if (fid)
+ retval = p9_client_clunk(fid);
+ return retval;
+}
+
+ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+{
+ return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
+}
+
+const struct xattr_handler *v9fs_xattr_handlers[] = {
+ &v9fs_xattr_user_handler,
+ NULL
+};
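
v9fs_xattr_get() treats a zero buffer_size as a size probe and caps each read
at msize - P9_IOHDRSZ. From userspace this surfaces as the usual two-call
getxattr(2) pattern; a small example, assuming a 9p mount at /mnt/9p (the path
and attribute name are placeholders):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/xattr.h>

	int main(void)
	{
		/* first call: NULL buffer asks only for the size */
		ssize_t size = getxattr("/mnt/9p/file", "user.comment", NULL, 0);
		char *buf;

		if (size < 0) {
			perror("getxattr size probe");
			return 1;
		}
		buf = malloc(size);
		if (!buf)
			return 1;
		/* second call: fetch the value into the sized buffer */
		size = getxattr("/mnt/9p/file", "user.comment", buf, size);
		if (size < 0)
			perror("getxattr");	/* -ERANGE if the attr grew meanwhile */
		else
			printf("%.*s\n", (int)size, buf);
		free(buf);
		return 0;
	}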
diff --git a/fs/9p/xattr.h b/fs/9p/xattr.h
new file mode 100644
index 00000000000..9ddf672ae5c
--- /dev/null
+++ b/fs/9p/xattr.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#ifndef FS_9P_XATTR_H
+#define FS_9P_XATTR_H
+
+#include <linux/xattr.h>
+
+extern const struct xattr_handler *v9fs_xattr_handlers[];
+extern struct xattr_handler v9fs_xattr_user_handler;
+
+extern ssize_t v9fs_xattr_get(struct dentry *, const char *,
+ void *, size_t);
+extern int v9fs_xattr_set(struct dentry *, const char *,
+ const void *, size_t, int);
+extern ssize_t v9fs_listxattr(struct dentry *, char *, size_t);
+#endif /* FS_9P_XATTR_H */
diff --git a/fs/9p/xattr_user.c b/fs/9p/xattr_user.c
new file mode 100644
index 00000000000..d0b701b7208
--- /dev/null
+++ b/fs/9p/xattr_user.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "xattr.h"
+
+static int v9fs_xattr_user_get(struct dentry *dentry, const char *name,
+ void *buffer, size_t size, int type)
+{
+ int retval;
+ char *full_name;
+ size_t name_len;
+ size_t prefix_len = XATTR_USER_PREFIX_LEN;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ if (strcmp(name, "") == 0)
+ return -EINVAL;
+
+ name_len = strlen(name);
+	full_name = kmalloc(prefix_len + name_len + 1, GFP_KERNEL);
+ if (!full_name)
+ return -ENOMEM;
+ memcpy(full_name, XATTR_USER_PREFIX, prefix_len);
+ memcpy(full_name+prefix_len, name, name_len);
+ full_name[prefix_len + name_len] = '\0';
+
+ retval = v9fs_xattr_get(dentry, full_name, buffer, size);
+ kfree(full_name);
+ return retval;
+}
+
+static int v9fs_xattr_user_set(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags, int type)
+{
+ int retval;
+ char *full_name;
+ size_t name_len;
+ size_t prefix_len = XATTR_USER_PREFIX_LEN;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ if (strcmp(name, "") == 0)
+ return -EINVAL;
+
+ name_len = strlen(name);
+	full_name = kmalloc(prefix_len + name_len + 1, GFP_KERNEL);
+ if (!full_name)
+ return -ENOMEM;
+ memcpy(full_name, XATTR_USER_PREFIX, prefix_len);
+ memcpy(full_name + prefix_len, name, name_len);
+ full_name[prefix_len + name_len] = '\0';
+
+ retval = v9fs_xattr_set(dentry, full_name, value, size, flags);
+ kfree(full_name);
+ return retval;
+}
+
+struct xattr_handler v9fs_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .get = v9fs_xattr_user_get,
+ .set = v9fs_xattr_user_set,
+};
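
The get and set handlers above build the "user."-prefixed name with identical
boilerplate; a hypothetical helper (not part of this patch) that both could
call might look like:

	/* Hypothetical: build "user.<name>" once for both handlers. */
	static char *v9fs_xattr_full_name(const char *name)
	{
		size_t name_len = strlen(name);
		char *full_name = kmalloc(XATTR_USER_PREFIX_LEN + name_len + 1,
					  GFP_KERNEL);

		if (!full_name)
			return NULL;
		memcpy(full_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
		/* name_len + 1 copies the terminating NUL as well */
		memcpy(full_name + XATTR_USER_PREFIX_LEN, name, name_len + 1);
		return full_name;
	}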
diff --git a/fs/Kconfig b/fs/Kconfig
index 5f85b594761..3d185308ec8 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -64,7 +64,7 @@ source "fs/autofs4/Kconfig"
source "fs/fuse/Kconfig"
config CUSE
- tristate "Character device in Userpace support"
+ tristate "Character device in Userspace support"
depends on FUSE_FS
help
This FUSE extension allows character devices to be
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 6f850b06ab6..65794b8fe79 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -50,10 +50,19 @@ static int adfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
+ int ret;
+
*pagep = NULL;
- return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
adfs_get_block,
&ADFS_I(mapping->host)->mmu_private);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
@@ -324,10 +333,7 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr)
/* XXX: this is missing some actual on-disk truncation.. */
if (ia_valid & ATTR_SIZE)
- error = simple_setsize(inode, attr->ia_size);
-
- if (error)
- goto out;
+ truncate_setsize(inode, attr->ia_size);
if (ia_valid & ATTR_MTIME) {
inode->i_mtime = attr->ia_mtime;
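
The adfs change above — repeated for affs and bfs later in this diff — adds
the now-required cleanup when ->write_begin() fails: blocks that get_block()
may have instantiated past EOF must be trimmed back to i_size. Sketched as a
hypothetical helper (write_begin_cleanup is illustrative, not a kernel API):

	static void write_begin_cleanup(struct address_space *mapping,
					loff_t pos, unsigned len, int ret)
	{
		if (unlikely(ret)) {
			loff_t isize = mapping->host->i_size;

			/* trim any blocks allocated beyond the old EOF */
			if (pos + len > isize)
				vmtruncate(mapping->host, isize);
		}
	}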
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index f05b6155ccc..a8cbdeb3402 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -171,8 +171,7 @@ extern int affs_rename(struct inode *old_dir, struct dentry *old_dentry,
extern unsigned long affs_parent_ino(struct inode *dir);
extern struct inode *affs_new_inode(struct inode *dir);
extern int affs_notify_change(struct dentry *dentry, struct iattr *attr);
-extern void affs_delete_inode(struct inode *inode);
-extern void affs_clear_inode(struct inode *inode);
+extern void affs_evict_inode(struct inode *inode);
extern struct inode *affs_iget(struct super_block *sb,
unsigned long ino);
extern int affs_write_inode(struct inode *inode,
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 322710c3eed..c4a9875bd1a 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -406,10 +406,19 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
+ int ret;
+
*pagep = NULL;
- return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
affs_get_block,
&AFFS_I(mapping->host)->mmu_private);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index f4b2a4ee4f9..3a0fdec175b 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -235,31 +235,36 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr)
goto out;
}
- error = inode_setattr(inode, attr);
- if (!error && (attr->ia_valid & ATTR_MODE))
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
+ if (attr->ia_valid & ATTR_MODE)
mode_to_prot(inode);
out:
return error;
}
void
-affs_delete_inode(struct inode *inode)
-{
- pr_debug("AFFS: delete_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
- truncate_inode_pages(&inode->i_data, 0);
- inode->i_size = 0;
- affs_truncate(inode);
- clear_inode(inode);
- affs_free_block(inode->i_sb, inode->i_ino);
-}
-
-void
-affs_clear_inode(struct inode *inode)
+affs_evict_inode(struct inode *inode)
{
unsigned long cache_page;
+ pr_debug("AFFS: evict_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
+ truncate_inode_pages(&inode->i_data, 0);
- pr_debug("AFFS: clear_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
+ if (!inode->i_nlink) {
+ inode->i_size = 0;
+ affs_truncate(inode);
+ }
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
affs_free_prealloc(inode);
cache_page = (unsigned long)AFFS_I(inode)->i_lc;
if (cache_page) {
@@ -271,6 +276,9 @@ affs_clear_inode(struct inode *inode)
affs_brelse(AFFS_I(inode)->i_ext_bh);
AFFS_I(inode)->i_ext_last = ~1;
AFFS_I(inode)->i_ext_bh = NULL;
+
+ if (!inode->i_nlink)
+ affs_free_block(inode->i_sb, inode->i_ino);
}
struct inode *
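
affs illustrates the tree-wide conversion in this merge: ->evict_inode()
absorbs both the old ->delete_inode() (final unlink) and ->clear_inode()
(teardown) paths, discriminating on i_nlink itself. A skeleton of the new
hook, with example_* names standing in for per-filesystem logic:

	static void example_evict_inode(struct inode *inode)
	{
		truncate_inode_pages(&inode->i_data, 0);	/* always */
		if (!inode->i_nlink)
			example_truncate_on_disk(inode);	/* old delete path */
		invalidate_inode_buffers(inode);
		end_writeback(inode);		/* detach from writeback lists */
		example_release_private(inode);	/* old clear path */
		if (!inode->i_nlink)
			example_free_on_disk(inode);
	}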
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 16a3e4765f6..33c4e7eef47 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -26,7 +26,7 @@ static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int affs_remount (struct super_block *sb, int *flags, char *data);
static void
-affs_commit_super(struct super_block *sb, int clean)
+affs_commit_super(struct super_block *sb, int wait, int clean)
{
struct affs_sb_info *sbi = AFFS_SB(sb);
struct buffer_head *bh = sbi->s_root_bh;
@@ -36,6 +36,8 @@ affs_commit_super(struct super_block *sb, int clean)
secs_to_datestamp(get_seconds(), &tail->disk_change);
affs_fix_checksum(sb, bh);
mark_buffer_dirty(bh);
+ if (wait)
+ sync_dirty_buffer(bh);
}
static void
@@ -46,8 +48,8 @@ affs_put_super(struct super_block *sb)
lock_kernel();
- if (!(sb->s_flags & MS_RDONLY))
- affs_commit_super(sb, 1);
+ if (!(sb->s_flags & MS_RDONLY) && sb->s_dirt)
+ affs_commit_super(sb, 1, 1);
kfree(sbi->s_prefix);
affs_free_bitmap(sb);
@@ -61,27 +63,20 @@ affs_put_super(struct super_block *sb)
static void
affs_write_super(struct super_block *sb)
{
- int clean = 2;
-
lock_super(sb);
- if (!(sb->s_flags & MS_RDONLY)) {
- // if (sbi->s_bitmap[i].bm_bh) {
- // if (buffer_dirty(sbi->s_bitmap[i].bm_bh)) {
- // clean = 0;
- affs_commit_super(sb, clean);
- sb->s_dirt = !clean; /* redo until bitmap synced */
- } else
- sb->s_dirt = 0;
+ if (!(sb->s_flags & MS_RDONLY))
+ affs_commit_super(sb, 1, 2);
+ sb->s_dirt = 0;
unlock_super(sb);
- pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean);
+ pr_debug("AFFS: write_super() at %lu, clean=2\n", get_seconds());
}
static int
affs_sync_fs(struct super_block *sb, int wait)
{
lock_super(sb);
- affs_commit_super(sb, 2);
+ affs_commit_super(sb, wait, 2);
sb->s_dirt = 0;
unlock_super(sb);
return 0;
@@ -140,8 +135,7 @@ static const struct super_operations affs_sops = {
.alloc_inode = affs_alloc_inode,
.destroy_inode = affs_destroy_inode,
.write_inode = affs_write_inode,
- .delete_inode = affs_delete_inode,
- .clear_inode = affs_clear_inode,
+ .evict_inode = affs_evict_inode,
.put_super = affs_put_super,
.write_super = affs_write_super,
.sync_fs = affs_sync_fs,
@@ -554,9 +548,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
return 0;
}
if (*flags & MS_RDONLY) {
- sb->s_dirt = 1;
- while (sb->s_dirt)
- affs_write_super(sb);
+ affs_write_super(sb);
affs_free_bitmap(sb);
} else
res = affs_init_bitmap(sb, flags);
diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig
index 5c4e61d3c77..8f975f25b48 100644
--- a/fs/afs/Kconfig
+++ b/fs/afs/Kconfig
@@ -2,6 +2,7 @@ config AFS_FS
tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
depends on INET && EXPERIMENTAL
select AF_RXRPC
+ select DNS_RESOLVER
help
If you say Y here, you will get an experimental Andrew File System
driver. It currently only supports unsecured read-only AFS access.
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index e19c13f059e..0d5eeadf612 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
+#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <keys/rxrpc-type.h>
#include "internal.h"
@@ -30,21 +31,24 @@ static struct afs_cell *afs_cell_root;
* allocate a cell record and fill in its name, VL server address list and
* allocate an anonymous key
*/
-static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
+static struct afs_cell *afs_cell_alloc(const char *name, unsigned namelen,
+ char *vllist)
{
struct afs_cell *cell;
struct key *key;
- size_t namelen;
char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next;
+ char *dvllist = NULL, *_vllist = NULL;
+ char delimiter = ':';
int ret;
- _enter("%s,%s", name, vllist);
+ _enter("%*.*s,%s", namelen, namelen, name ?: "", vllist);
BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */
- namelen = strlen(name);
- if (namelen > AFS_MAXCELLNAME)
+ if (namelen > AFS_MAXCELLNAME) {
+ _leave(" = -ENAMETOOLONG");
return ERR_PTR(-ENAMETOOLONG);
+ }
/* allocate and initialise a cell record */
cell = kzalloc(sizeof(struct afs_cell) + namelen + 1, GFP_KERNEL);
@@ -64,15 +68,35 @@ static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
INIT_LIST_HEAD(&cell->vl_list);
spin_lock_init(&cell->vl_lock);
+	/* if no valid IP address list was supplied, try a DNS query */
+ if (!vllist || strlen(vllist) < 7) {
+ ret = dns_query("afsdb", name, namelen, "ipv4", &dvllist, NULL);
+ if (ret < 0) {
+ if (ret == -ENODATA || ret == -EAGAIN || ret == -ENOKEY)
+ /* translate these errors into something
+ * userspace might understand */
+ ret = -EDESTADDRREQ;
+ _leave(" = %d", ret);
+ return ERR_PTR(ret);
+ }
+ _vllist = dvllist;
+
+ /* change the delimiter for user-space reply */
+ delimiter = ',';
+
+ } else {
+ _vllist = vllist;
+ }
+
/* fill in the VL server list from the rest of the string */
do {
unsigned a, b, c, d;
- next = strchr(vllist, ':');
+ next = strchr(_vllist, delimiter);
if (next)
*next++ = 0;
- if (sscanf(vllist, "%u.%u.%u.%u", &a, &b, &c, &d) != 4)
+ if (sscanf(_vllist, "%u.%u.%u.%u", &a, &b, &c, &d) != 4)
goto bad_address;
if (a > 255 || b > 255 || c > 255 || d > 255)
@@ -81,7 +105,7 @@ static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
cell->vl_addrs[cell->vl_naddrs++].s_addr =
htonl((a << 24) | (b << 16) | (c << 8) | d);
- } while (cell->vl_naddrs < AFS_CELL_MAX_ADDRS && (vllist = next));
+ } while (cell->vl_naddrs < AFS_CELL_MAX_ADDRS && (_vllist = next));
/* create a key to represent an anonymous user */
memcpy(keyname, "afs@", 4);
@@ -110,32 +134,36 @@ bad_address:
ret = -EINVAL;
error:
key_put(cell->anonymous_key);
+ kfree(dvllist);
kfree(cell);
_leave(" = %d", ret);
return ERR_PTR(ret);
}
/*
- * create a cell record
- * - "name" is the name of the cell
- * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ * afs_cell_create() - create a cell record
+ * @name: the name of the cell.
+ * @namesz: the strlen of the cell name.
+ * @vllist: a colon separated list of IP addresses in "a.b.c.d" format.
+ * @retref: T to return the cell reference when the cell already exists.
*/
-struct afs_cell *afs_cell_create(const char *name, char *vllist)
+struct afs_cell *afs_cell_create(const char *name, unsigned namesz,
+ char *vllist, bool retref)
{
struct afs_cell *cell;
int ret;
- _enter("%s,%s", name, vllist);
+ _enter("%*.*s,%s", namesz, namesz, name ?: "", vllist);
down_write(&afs_cells_sem);
read_lock(&afs_cells_lock);
list_for_each_entry(cell, &afs_cells, link) {
- if (strcasecmp(cell->name, name) == 0)
+ if (strncasecmp(cell->name, name, namesz) == 0)
goto duplicate_name;
}
read_unlock(&afs_cells_lock);
- cell = afs_cell_alloc(name, vllist);
+ cell = afs_cell_alloc(name, namesz, vllist);
if (IS_ERR(cell)) {
_leave(" = %ld", PTR_ERR(cell));
up_write(&afs_cells_sem);
@@ -175,8 +203,18 @@ error:
return ERR_PTR(ret);
duplicate_name:
+ if (retref && !IS_ERR(cell))
+ afs_get_cell(cell);
+
read_unlock(&afs_cells_lock);
up_write(&afs_cells_sem);
+
+ if (retref) {
+ _leave(" = %p", cell);
+ return cell;
+ }
+
+ _leave(" = -EEXIST");
return ERR_PTR(-EEXIST);
}
@@ -201,15 +239,13 @@ int afs_cell_init(char *rootcell)
}
cp = strchr(rootcell, ':');
- if (!cp) {
- printk(KERN_ERR "kAFS: no VL server IP addresses specified\n");
- _leave(" = -EINVAL");
- return -EINVAL;
- }
+ if (!cp)
+ _debug("kAFS: no VL server IP addresses specified");
+ else
+ *cp++ = 0;
/* allocate a cell record for the root cell */
- *cp++ = 0;
- new_root = afs_cell_create(rootcell, cp);
+ new_root = afs_cell_create(rootcell, strlen(rootcell), cp, false);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
return PTR_ERR(new_root);
@@ -229,11 +265,12 @@ int afs_cell_init(char *rootcell)
/*
* lookup a cell record
*/
-struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
+struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz,
+ bool dns_cell)
{
struct afs_cell *cell;
- _enter("\"%*.*s\",", namesz, namesz, name ? name : "");
+ _enter("\"%*.*s\",", namesz, namesz, name ?: "");
down_read(&afs_cells_sem);
read_lock(&afs_cells_lock);
@@ -247,6 +284,8 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
}
}
cell = ERR_PTR(-ENOENT);
+ if (dns_cell)
+ goto create_cell;
found:
;
} else {
@@ -269,6 +308,15 @@ struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
up_read(&afs_cells_sem);
_leave(" = %p", cell);
return cell;
+
+create_cell:
+ read_unlock(&afs_cells_lock);
+ up_read(&afs_cells_sem);
+
+ cell = afs_cell_create(name, namesz, NULL, true);
+
+ _leave(" = %p", cell);
+ return cell;
}
#if 0
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index b42d5cc1d6d..0d38c09bd55 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -477,6 +477,40 @@ static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
}
/*
+ * Try to auto-mount the mountpoint with a pseudo directory, if the
+ * autocell option is set.
+ */
+static struct inode *afs_try_auto_mntpt(
+ int ret, struct dentry *dentry, struct inode *dir, struct key *key,
+ struct afs_fid *fid)
+{
+ const char *devname = dentry->d_name.name;
+ struct afs_vnode *vnode = AFS_FS_I(dir);
+ struct inode *inode;
+
+ _enter("%d, %p{%s}, {%x:%u}, %p",
+ ret, dentry, devname, vnode->fid.vid, vnode->fid.vnode, key);
+
+ if (ret != -ENOENT ||
+ !test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
+ goto out;
+
+ inode = afs_iget_autocell(dir, devname, strlen(devname), key);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ *fid = AFS_FS_I(inode)->fid;
+ _leave("= %p", inode);
+ return inode;
+
+out:
+ _leave("= %d", ret);
+ return ERR_PTR(ret);
+}
+
+/*
* look up an entry in a directory
*/
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
@@ -520,6 +554,13 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
ret = afs_do_lookup(dir, dentry, &fid, key);
if (ret < 0) {
+ inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid);
+ if (!IS_ERR(inode)) {
+ key_put(key);
+ goto success;
+ }
+
+ ret = PTR_ERR(inode);
key_put(key);
if (ret == -ENOENT) {
d_add(dentry, NULL);
@@ -539,6 +580,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_CAST(inode);
}
+success:
dentry->d_op = &afs_fs_dentry_operations;
d_add(dentry, inode);
@@ -696,8 +738,9 @@ static int afs_d_delete(struct dentry *dentry)
goto zap;
if (dentry->d_inode &&
- test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags))
- goto zap;
+ (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags) ||
+ test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(dentry->d_inode)->flags)))
+ goto zap;
_leave(" = 0 [keep]");
return 0;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index d00b312e311..0747339011c 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -19,6 +19,8 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
#include "internal.h"
struct afs_iget_data {
@@ -102,6 +104,16 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
}
/*
+ * iget5() comparator for inode created by autocell operations
+ *
+ * These pseudo inodes don't match anything.
+ */
+static int afs_iget5_autocell_test(struct inode *inode, void *opaque)
+{
+ return 0;
+}
+
+/*
* iget5() inode initialiser
*/
static int afs_iget5_set(struct inode *inode, void *opaque)
@@ -118,6 +130,67 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
}
/*
+ * inode retrieval for autocell
+ */
+struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
+ int namesz, struct key *key)
+{
+ struct afs_iget_data data;
+ struct afs_super_info *as;
+ struct afs_vnode *vnode;
+ struct super_block *sb;
+ struct inode *inode;
+ static atomic_t afs_autocell_ino;
+
+ _enter("{%x:%u},%*.*s,",
+ AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
+ namesz, namesz, dev_name ?: "");
+
+ sb = dir->i_sb;
+ as = sb->s_fs_info;
+ data.volume = as->volume;
+ data.fid.vid = as->volume->vid;
+ data.fid.unique = 0;
+ data.fid.vnode = 0;
+
+ inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
+ afs_iget5_autocell_test, afs_iget5_set,
+ &data);
+ if (!inode) {
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ _debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }",
+ inode, inode->i_ino, data.fid.vid, data.fid.vnode,
+ data.fid.unique);
+
+ vnode = AFS_FS_I(inode);
+
+ /* there shouldn't be an existing inode */
+ BUG_ON(!(inode->i_state & I_NEW));
+
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ inode->i_op = &afs_autocell_inode_operations;
+ inode->i_nlink = 2;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_ctime.tv_sec = get_seconds();
+ inode->i_ctime.tv_nsec = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime;
+ inode->i_blocks = 0;
+ inode->i_version = 0;
+ inode->i_generation = 0;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+ inode->i_flags |= S_NOATIME;
+ unlock_new_inode(inode);
+ _leave(" = %p", inode);
+ return inode;
+}
+
+/*
* inode retrieval
*/
struct inode *afs_iget(struct super_block *sb, struct key *key,
@@ -314,9 +387,22 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
}
/*
+ * discard an AFS inode
+ */
+int afs_drop_inode(struct inode *inode)
+{
+ _enter("");
+
+ if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags))
+ return generic_delete_inode(inode);
+ else
+ return generic_drop_inode(inode);
+}
+
+/*
* clear an AFS inode
*/
-void afs_clear_inode(struct inode *inode)
+void afs_evict_inode(struct inode *inode)
{
struct afs_permits *permits;
struct afs_vnode *vnode;
@@ -335,6 +421,9 @@ void afs_clear_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+
afs_give_up_callback(vnode);
if (vnode->server) {
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5f679b77ce2..cca8eef736f 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -42,6 +42,7 @@ typedef enum {
struct afs_mount_params {
bool rwpath; /* T if the parent should be considered R/W */
bool force; /* T to force cell type */
+	bool autocell;		/* T if the autocell mount option was given */
afs_voltype_t type; /* type of volume requested */
int volnamesz; /* size of volume name */
const char *volname; /* name of volume to mount */
@@ -358,6 +359,8 @@ struct afs_vnode {
#define AFS_VNODE_READLOCKED 7 /* set if vnode is read-locked on the server */
#define AFS_VNODE_WRITELOCKED 8 /* set if vnode is write-locked on the server */
#define AFS_VNODE_UNLOCKING 9 /* set if vnode is being unlocked on the server */
+#define AFS_VNODE_AUTOCELL 10 /* set if Vnode is an auto mount point */
+#define AFS_VNODE_PSEUDODIR 11 /* set if Vnode is a pseudo directory */
long acl_order; /* ACL check count (callback break count) */
@@ -468,8 +471,8 @@ extern struct list_head afs_proc_cells;
#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
extern int afs_cell_init(char *);
-extern struct afs_cell *afs_cell_create(const char *, char *);
-extern struct afs_cell *afs_cell_lookup(const char *, unsigned);
+extern struct afs_cell *afs_cell_create(const char *, unsigned, char *, bool);
+extern struct afs_cell *afs_cell_lookup(const char *, unsigned, bool);
extern struct afs_cell *afs_grab_cell(struct afs_cell *);
extern void afs_put_cell(struct afs_cell *);
extern void afs_cell_purge(void);
@@ -558,6 +561,8 @@ extern int afs_fs_release_lock(struct afs_server *, struct key *,
/*
* inode.c
*/
+extern struct inode *afs_iget_autocell(struct inode *, const char *, int,
+ struct key *);
extern struct inode *afs_iget(struct super_block *, struct key *,
struct afs_fid *, struct afs_file_status *,
struct afs_callback *);
@@ -565,7 +570,8 @@ extern void afs_zap_data(struct afs_vnode *);
extern int afs_validate(struct afs_vnode *, struct key *);
extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int afs_setattr(struct dentry *, struct iattr *);
-extern void afs_clear_inode(struct inode *);
+extern void afs_evict_inode(struct inode *);
+extern int afs_drop_inode(struct inode *);
/*
* main.c
@@ -581,6 +587,7 @@ extern int afs_abort_to_error(u32);
* mntpt.c
*/
extern const struct inode_operations afs_mntpt_inode_operations;
+extern const struct inode_operations afs_autocell_inode_operations;
extern const struct file_operations afs_mntpt_file_operations;
extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
@@ -752,12 +759,6 @@ extern unsigned afs_debug;
#define dbgprintk(FMT,...) \
printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
-/* make sure we maintain the format strings, even when debugging is disabled */
-static inline __attribute__((format(printf,1,2)))
-void _dbprintk(const char *fmt, ...)
-{
-}
-
#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
@@ -792,9 +793,9 @@ do { \
} while (0)
#else
-#define _enter(FMT,...) _dbprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
-#define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
-#define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__)
+#define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
+#define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
+#define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__)
#endif
/*
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 66d54d348c5..cfd1cbe25b2 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -111,6 +111,8 @@ static int __init afs_init(void)
/* initialise the callback update process */
ret = afs_callback_update_init();
+ if (ret < 0)
+ goto error_callback_update_init;
/* create the RxRPC transport */
ret = afs_open_socket();
@@ -127,15 +129,16 @@ static int __init afs_init(void)
error_fs:
afs_close_socket();
error_open_socket:
+ afs_callback_update_kill();
+error_callback_update_init:
+ afs_vlocation_purge();
error_vl_update_init:
+ afs_cell_purge();
error_cell_init:
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
error_cache:
#endif
- afs_callback_update_kill();
- afs_vlocation_purge();
- afs_cell_purge();
afs_proc_cleanup();
rcu_barrier();
printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index a9e23039ea3..6d552686c49 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -38,6 +38,11 @@ const struct inode_operations afs_mntpt_inode_operations = {
.getattr = afs_getattr,
};
+const struct inode_operations afs_autocell_inode_operations = {
+ .follow_link = afs_mntpt_follow_link,
+ .getattr = afs_getattr,
+};
+
static LIST_HEAD(afs_vfsmounts);
static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
@@ -136,20 +141,16 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
{
struct afs_super_info *super;
struct vfsmount *mnt;
+ struct afs_vnode *vnode;
struct page *page;
- size_t size;
- char *buf, *devname, *options;
+ char *devname, *options;
+ bool rwpath = false;
int ret;
_enter("{%s}", mntpt->d_name.name);
BUG_ON(!mntpt->d_inode);
- ret = -EINVAL;
- size = mntpt->d_inode->i_size;
- if (size > PAGE_SIZE - 1)
- goto error_no_devname;
-
ret = -ENOMEM;
devname = (char *) get_zeroed_page(GFP_KERNEL);
if (!devname)
@@ -159,28 +160,59 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
if (!options)
goto error_no_options;
- /* read the contents of the AFS special symlink */
- page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- goto error_no_page;
+ vnode = AFS_FS_I(mntpt->d_inode);
+ if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
+ /* if the directory is a pseudo directory, use the d_name */
+ static const char afs_root_cell[] = ":root.cell.";
+ unsigned size = mntpt->d_name.len;
+
+ ret = -ENOENT;
+ if (size < 2 || size > AFS_MAXCELLNAME)
+ goto error_no_page;
+
+ if (mntpt->d_name.name[0] == '.') {
+ devname[0] = '#';
+ memcpy(devname + 1, mntpt->d_name.name, size - 1);
+ memcpy(devname + size, afs_root_cell,
+ sizeof(afs_root_cell));
+ rwpath = true;
+ } else {
+ devname[0] = '%';
+ memcpy(devname + 1, mntpt->d_name.name, size);
+ memcpy(devname + size + 1, afs_root_cell,
+ sizeof(afs_root_cell));
+ }
+ } else {
+ /* read the contents of the AFS special symlink */
+ loff_t size = i_size_read(mntpt->d_inode);
+ char *buf;
+
+ ret = -EINVAL;
+ if (size > PAGE_SIZE - 1)
+ goto error_no_page;
+
+ page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto error_no_page;
+ }
+
+ ret = -EIO;
+ if (PageError(page))
+ goto error;
+
+ buf = kmap_atomic(page, KM_USER0);
+ memcpy(devname, buf, size);
+ kunmap_atomic(buf, KM_USER0);
+ page_cache_release(page);
+ page = NULL;
}
- ret = -EIO;
- if (PageError(page))
- goto error;
-
- buf = kmap_atomic(page, KM_USER0);
- memcpy(devname, buf, size);
- kunmap_atomic(buf, KM_USER0);
- page_cache_release(page);
- page = NULL;
-
/* work out what options we want */
super = AFS_FS_S(mntpt->d_sb);
memcpy(options, "cell=", 5);
strcpy(options + 5, super->volume->cell->name);
- if (super->volume->type == AFSVL_RWVOL)
+ if (super->volume->type == AFSVL_RWVOL || rwpath)
strcat(options, ",rwpath");
/* try and do the mount */
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 852739d262a..096b23f821a 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -294,7 +294,7 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
if (strcmp(kbuf, "add") == 0) {
struct afs_cell *cell;
- cell = afs_cell_create(name, args);
+ cell = afs_cell_create(name, strlen(name), args, false);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
goto done;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 67cf810e0fd..654d8fdbf01 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -100,6 +100,7 @@ int afs_open_socket(void)
ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
if (ret < 0) {
sock_release(socket);
+ destroy_workqueue(afs_async_calls);
_leave(" = %d [bind]", ret);
return ret;
}
diff --git a/fs/afs/server.c b/fs/afs/server.c
index f4909951667..9fdc7fe3a7b 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -91,9 +91,10 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
memcpy(&server->addr, addr, sizeof(struct in_addr));
server->addr.s_addr = addr->s_addr;
+ _leave(" = %p{%d}", server, atomic_read(&server->usage));
+ } else {
+ _leave(" = NULL [nomem]");
}
-
- _leave(" = %p{%d}", server, atomic_read(&server->usage));
return server;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index e932e5a3a0c..77e1e5a6115 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
@@ -48,8 +49,9 @@ struct file_system_type afs_fs_type = {
static const struct super_operations afs_super_ops = {
.statfs = afs_statfs,
.alloc_inode = afs_alloc_inode,
+ .drop_inode = afs_drop_inode,
.destroy_inode = afs_destroy_inode,
- .clear_inode = afs_clear_inode,
+ .evict_inode = afs_evict_inode,
.put_super = afs_put_super,
.show_options = generic_show_options,
};
@@ -62,12 +64,14 @@ enum {
afs_opt_cell,
afs_opt_rwpath,
afs_opt_vol,
+ afs_opt_autocell,
};
static const match_table_t afs_options_list = {
{ afs_opt_cell, "cell=%s" },
{ afs_opt_rwpath, "rwpath" },
{ afs_opt_vol, "vol=%s" },
+ { afs_opt_autocell, "autocell" },
{ afs_no_opt, NULL },
};
@@ -151,7 +155,8 @@ static int afs_parse_options(struct afs_mount_params *params,
switch (token) {
case afs_opt_cell:
cell = afs_cell_lookup(args[0].from,
- args[0].to - args[0].from);
+ args[0].to - args[0].from,
+ false);
if (IS_ERR(cell))
return PTR_ERR(cell);
afs_put_cell(params->cell);
@@ -166,6 +171,10 @@ static int afs_parse_options(struct afs_mount_params *params,
*devname = args[0].from;
break;
+ case afs_opt_autocell:
+ params->autocell = 1;
+ break;
+
default:
printk(KERN_ERR "kAFS:"
" Unknown or invalid mount option: '%s'\n", p);
@@ -252,10 +261,10 @@ static int afs_parse_device_name(struct afs_mount_params *params,
/* lookup the cell record */
if (cellname || !params->cell) {
- cell = afs_cell_lookup(cellname, cellnamesz);
+ cell = afs_cell_lookup(cellname, cellnamesz, true);
if (IS_ERR(cell)) {
- printk(KERN_ERR "kAFS: unable to lookup cell '%s'\n",
- cellname ?: "");
+ printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
+ cellnamesz, cellnamesz, cellname ?: "");
return PTR_ERR(cell);
}
afs_put_cell(params->cell);
@@ -321,6 +330,9 @@ static int afs_fill_super(struct super_block *sb, void *data)
if (IS_ERR(inode))
goto error_inode;
+ if (params->autocell)
+ set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
+
ret = -ENOMEM;
root = d_alloc_root(inode);
if (!root)
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3dab9e9948d..722743b152d 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -680,7 +680,6 @@ int afs_writeback_all(struct afs_vnode *vnode)
{
struct address_space *mapping = vnode->vfs_inode.i_mapping;
struct writeback_control wbc = {
- .bdi = mapping->backing_dev_info,
.sync_mode = WB_SYNC_ALL,
.nr_to_write = LONG_MAX,
.range_cyclic = 1,
diff --git a/fs/aio.c b/fs/aio.c
index 1ccf25cef1f..3006b5bc33d 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1277,7 +1277,7 @@ out:
/* sys_io_destroy:
* Destroy the aio_context specified. May cancel any outstanding
* AIOs and block on completion. Will fail with -ENOSYS if not
- * implemented. May fail with -EFAULT if the context pointed to
+ * implemented. May fail with -EINVAL if the context pointed to
* is invalid.
*/
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
@@ -1795,15 +1795,16 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
/* io_getevents:
* Attempts to read at least min_nr events and up to nr events from
- * the completion queue for the aio_context specified by ctx_id. May
- * fail with -EINVAL if ctx_id is invalid, if min_nr is out of range,
- * if nr is out of range, if when is out of range. May fail with
- * -EFAULT if any of the memory specified to is invalid. May return
- * 0 or < min_nr if no events are available and the timeout specified
- * by when has elapsed, where when == NULL specifies an infinite
- * timeout. Note that the timeout pointed to by when is relative and
- * will be updated if not NULL and the operation blocks. Will fail
- * with -ENOSYS if not implemented.
+ * the completion queue for the aio_context specified by ctx_id. If
+ * it succeeds, the number of read events is returned. May fail with
+ * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
+ * out of range, if timeout is out of range. May fail with -EFAULT
+ * if any of the memory specified is invalid. May return 0 or
+ * < min_nr if the timeout specified by timeout has elapsed
+ * before sufficient events are available, where timeout == NULL
+ * specifies an infinite timeout. Note that the timeout pointed to by
+ * timeout is relative and will be updated if not NULL and the
+ * operation blocks. Will fail with -ENOSYS if not implemented.
*/
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
long, min_nr,
diff --git a/fs/attr.c b/fs/attr.c
index b4fa3b0aa59..7ca41811afa 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -14,35 +14,53 @@
#include <linux/fcntl.h>
#include <linux/security.h>
-/* Taken over from the old code... */
-
-/* POSIX UID/GID verification for setting inode attributes. */
+/**
+ * inode_change_ok - check if attribute changes to an inode are allowed
+ * @inode: inode to check
+ * @attr: attributes to change
+ *
+ * Check if we are allowed to change the attributes contained in @attr
+ * in the given inode. This includes the normal unix access permission
+ * checks, as well as checks for rlimits and others.
+ *
+ * Should be called as the first thing in ->setattr implementations,
+ * possibly after taking additional locks.
+ */
int inode_change_ok(const struct inode *inode, struct iattr *attr)
{
- int retval = -EPERM;
unsigned int ia_valid = attr->ia_valid;
+ /*
+ * First check size constraints. These can't be overriden using
+ * ATTR_FORCE.
+ */
+ if (ia_valid & ATTR_SIZE) {
+ int error = inode_newsize_ok(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
/* If force is set do it anyway. */
if (ia_valid & ATTR_FORCE)
- goto fine;
+ return 0;
/* Make sure a caller can chown. */
if ((ia_valid & ATTR_UID) &&
(current_fsuid() != inode->i_uid ||
attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN))
- goto error;
+ return -EPERM;
/* Make sure caller can chgrp. */
if ((ia_valid & ATTR_GID) &&
(current_fsuid() != inode->i_uid ||
(!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) &&
!capable(CAP_CHOWN))
- goto error;
+ return -EPERM;
/* Make sure a caller can chmod. */
if (ia_valid & ATTR_MODE) {
if (!is_owner_or_cap(inode))
- goto error;
+ return -EPERM;
/* Also check the setgid bit! */
if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
inode->i_gid) && !capable(CAP_FSETID))
@@ -52,12 +70,10 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
/* Check for setting the inode time. */
if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) {
if (!is_owner_or_cap(inode))
- goto error;
+ return -EPERM;
}
-fine:
- retval = 0;
-error:
- return retval;
+
+ return 0;
}
EXPORT_SYMBOL(inode_change_ok);
@@ -105,21 +121,21 @@ out_big:
EXPORT_SYMBOL(inode_newsize_ok);
/**
- * generic_setattr - copy simple metadata updates into the generic inode
+ * setattr_copy - copy simple metadata updates into the generic inode
* @inode: the inode to be updated
* @attr: the new attributes
*
- * generic_setattr must be called with i_mutex held.
+ * setattr_copy must be called with i_mutex held.
*
- * generic_setattr updates the inode's metadata with that specified
+ * setattr_copy updates the inode's metadata with that specified
* in attr. Noticably missing is inode size update, which is more complex
- * as it requires pagecache updates. See simple_setsize.
+ * as it requires pagecache updates.
*
* The inode is not marked as dirty after this operation. The rationale is
* that for "simple" filesystems, the struct inode is the inode storage.
* The caller is free to mark the inode dirty afterwards if needed.
*/
-void generic_setattr(struct inode *inode, const struct iattr *attr)
+void setattr_copy(struct inode *inode, const struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
@@ -144,32 +160,7 @@ void generic_setattr(struct inode *inode, const struct iattr *attr)
inode->i_mode = mode;
}
}
-EXPORT_SYMBOL(generic_setattr);
-
-/*
- * note this function is deprecated, the new truncate sequence should be
- * used instead -- see eg. simple_setsize, generic_setattr.
- */
-int inode_setattr(struct inode *inode, const struct iattr *attr)
-{
- unsigned int ia_valid = attr->ia_valid;
-
- if (ia_valid & ATTR_SIZE &&
- attr->ia_size != i_size_read(inode)) {
- int error;
-
- error = vmtruncate(inode, attr->ia_size);
- if (error)
- return error;
- }
-
- generic_setattr(inode, attr);
-
- mark_inode_dirty(inode);
-
- return 0;
-}
-EXPORT_SYMBOL(inode_setattr);
+EXPORT_SYMBOL(setattr_copy);
int notify_change(struct dentry * dentry, struct iattr * attr)
{
@@ -237,13 +228,10 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
if (ia_valid & ATTR_SIZE)
down_write(&dentry->d_inode->i_alloc_sem);
- if (inode->i_op && inode->i_op->setattr) {
+ if (inode->i_op->setattr)
error = inode->i_op->setattr(dentry, attr);
- } else {
- error = inode_change_ok(inode, attr);
- if (!error)
- error = inode_setattr(inode, attr);
- }
+ else
+ error = simple_setattr(dentry, attr);
if (ia_valid & ATTR_SIZE)
up_write(&dentry->d_inode->i_alloc_sem);
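
With inode_setattr() removed, filesystems either call the helpers directly or
fall back to simple_setattr() via notify_change() as above. A sketch of the
resulting ->setattr sequence for a simple filesystem (example_setattr is
illustrative; it mirrors the affs conversion earlier in this diff):

	static int example_setattr(struct dentry *dentry, struct iattr *attr)
	{
		struct inode *inode = dentry->d_inode;
		int error;

		error = inode_change_ok(inode, attr);	/* perms + size checks */
		if (error)
			return error;

		if ((attr->ia_valid & ATTR_SIZE) &&
		    attr->ia_size != i_size_read(inode)) {
			error = vmtruncate(inode, attr->ia_size);
			if (error)
				return error;
		}

		setattr_copy(inode, attr);	/* uid/gid/mode/times */
		mark_inode_dirty(inode);
		return 0;
	}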
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 9a0520b5066..11b1ea786d0 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/param.h>
#include <linux/time.h>
+#include <linux/compat.h>
#include <linux/smp_lock.h>
#include "autofs_i.h"
@@ -25,13 +26,17 @@ static int autofs_root_symlink(struct inode *,struct dentry *,const char *);
static int autofs_root_unlink(struct inode *,struct dentry *);
static int autofs_root_rmdir(struct inode *,struct dentry *);
static int autofs_root_mkdir(struct inode *,struct dentry *,int);
-static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long);
+static long autofs_root_ioctl(struct file *,unsigned int,unsigned long);
+static long autofs_root_compat_ioctl(struct file *,unsigned int,unsigned long);
const struct file_operations autofs_root_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.readdir = autofs_root_readdir,
- .ioctl = autofs_root_ioctl,
+ .unlocked_ioctl = autofs_root_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = autofs_root_compat_ioctl,
+#endif
};
const struct inode_operations autofs_root_inode_operations = {
@@ -492,6 +497,25 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode)
}
/* Get/set timeout ioctl() operation */
+#ifdef CONFIG_COMPAT
+static inline int autofs_compat_get_set_timeout(struct autofs_sb_info *sbi,
+ unsigned int __user *p)
+{
+ unsigned long ntimeout;
+
+ if (get_user(ntimeout, p) ||
+ put_user(sbi->exp_timeout / HZ, p))
+ return -EFAULT;
+
+ if (ntimeout > UINT_MAX/HZ)
+ sbi->exp_timeout = 0;
+ else
+ sbi->exp_timeout = ntimeout * HZ;
+
+ return 0;
+}
+#endif
+
static inline int autofs_get_set_timeout(struct autofs_sb_info *sbi,
unsigned long __user *p)
{
@@ -546,7 +570,7 @@ static inline int autofs_expire_run(struct super_block *sb,
* ioctl()'s on the root directory is the chief method for the daemon to
* generate kernel reactions
*/
-static int autofs_root_ioctl(struct inode *inode, struct file *filp,
+static int autofs_do_root_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb);
@@ -571,6 +595,10 @@ static int autofs_root_ioctl(struct inode *inode, struct file *filp,
return 0;
case AUTOFS_IOC_PROTOVER: /* Get protocol version */
return autofs_get_protover(argp);
+#ifdef CONFIG_COMPAT
+ case AUTOFS_IOC_SETTIMEOUT32:
+ return autofs_compat_get_set_timeout(sbi, argp);
+#endif
case AUTOFS_IOC_SETTIMEOUT:
return autofs_get_set_timeout(sbi, argp);
case AUTOFS_IOC_EXPIRE:
@@ -579,4 +607,37 @@ static int autofs_root_ioctl(struct inode *inode, struct file *filp,
default:
return -ENOSYS;
}
+
+}
+
+static long autofs_root_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ lock_kernel();
+ ret = autofs_do_root_ioctl(filp->f_path.dentry->d_inode,
+ filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long autofs_root_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int ret;
+
+ lock_kernel();
+ if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
+ ret = autofs_do_root_ioctl(inode, filp, cmd, arg);
+ else
+ ret = autofs_do_root_ioctl(inode, filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ unlock_kernel();
+
+ return ret;
}
+#endif
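
The compat path splits on the command because AUTOFS_IOC_READY and
AUTOFS_IOC_FAIL carry the wait-queue token in arg rather than a user pointer,
so only the remaining commands need compat_ptr() widening. The dispatch rule,
reduced to a sketch (do_ioctl is a placeholder):

	static long example_compat_ioctl(struct file *filp, unsigned int cmd,
					 unsigned long arg)
	{
		if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
			return do_ioctl(filp, cmd, arg);	/* token, not a pointer */
		return do_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
	}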
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index db4117ed780..cb1bd38dc08 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -18,7 +18,9 @@
#include <linux/slab.h>
#include <linux/param.h>
#include <linux/time.h>
+#include <linux/compat.h>
#include <linux/smp_lock.h>
+
#include "autofs_i.h"
static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
@@ -26,6 +28,7 @@ static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
static int autofs4_dir_mkdir(struct inode *,struct dentry *,int);
static long autofs4_root_ioctl(struct file *,unsigned int,unsigned long);
+static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long);
static int autofs4_dir_open(struct inode *inode, struct file *file);
static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
static void *autofs4_follow_link(struct dentry *, struct nameidata *);
@@ -40,6 +43,9 @@ const struct file_operations autofs4_root_operations = {
.readdir = dcache_readdir,
.llseek = dcache_dir_lseek,
.unlocked_ioctl = autofs4_root_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = autofs4_root_compat_ioctl,
+#endif
};
const struct file_operations autofs4_dir_operations = {
@@ -198,8 +204,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
}
/* Initialize expiry counter after successful mount */
- if (ino)
- ino->last_used = jiffies;
+ ino->last_used = jiffies;
spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_PENDING;
@@ -840,6 +845,26 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
}
/* Get/set timeout ioctl() operation */
+#ifdef CONFIG_COMPAT
+static inline int autofs4_compat_get_set_timeout(struct autofs_sb_info *sbi,
+ compat_ulong_t __user *p)
+{
+ int rv;
+ unsigned long ntimeout;
+
+ if ((rv = get_user(ntimeout, p)) ||
+ (rv = put_user(sbi->exp_timeout/HZ, p)))
+ return rv;
+
+ if (ntimeout > UINT_MAX/HZ)
+ sbi->exp_timeout = 0;
+ else
+ sbi->exp_timeout = ntimeout * HZ;
+
+ return 0;
+}
+#endif
+
static inline int autofs4_get_set_timeout(struct autofs_sb_info *sbi,
unsigned long __user *p)
{
@@ -933,6 +958,10 @@ static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
return autofs4_get_protosubver(sbi, p);
case AUTOFS_IOC_SETTIMEOUT:
return autofs4_get_set_timeout(sbi, p);
+#ifdef CONFIG_COMPAT
+ case AUTOFS_IOC_SETTIMEOUT32:
+ return autofs4_compat_get_set_timeout(sbi, p);
+#endif
case AUTOFS_IOC_ASKUMOUNT:
return autofs4_ask_umount(filp->f_path.mnt, p);
@@ -961,3 +990,22 @@ static long autofs4_root_ioctl(struct file *filp,
return ret;
}
+
+#ifdef CONFIG_COMPAT
+static long autofs4_root_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ int ret;
+
+ lock_kernel();
+ if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
+ ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
+ else
+ ret = autofs4_root_ioctl_unlocked(inode, filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ unlock_kernel();
+
+ return ret;
+}
+#endif
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 52e59bf4aa5..f024d8aadde 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -55,12 +55,6 @@ static unsigned int bad_file_poll(struct file *filp, poll_table *wait)
return POLLERR;
}
-static int bad_file_ioctl (struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- return -EIO;
-}
-
static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
@@ -159,7 +153,6 @@ static const struct file_operations bad_file_ops =
.aio_write = bad_file_aio_write,
.readdir = bad_file_readdir,
.poll = bad_file_poll,
- .ioctl = bad_file_ioctl,
.unlocked_ioctl = bad_file_unlocked_ioctl,
.compat_ioctl = bad_file_compat_ioctl,
.mmap = bad_file_mmap,
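
bad_inode can drop its .ioctl stub because the unlocked variant fully covers
it. For reference, the two prototypes from this era's include/linux/fs.h (the
older hook was also called under the BKL):

	int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);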
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 34ddda888e6..dc39d282488 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -436,7 +436,7 @@ befs_init_inodecache(void)
init_once);
if (befs_inode_cachep == NULL) {
printk(KERN_ERR "befs_init_inodecache: "
- "Couldn't initalize inode slabcache\n");
+ "Couldn't initialize inode slabcache\n");
return -ENOMEM;
}
diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h
index 7109e451abf..f7f87e233dd 100644
--- a/fs/bfs/bfs.h
+++ b/fs/bfs/bfs.h
@@ -17,7 +17,6 @@ struct bfs_sb_info {
unsigned long si_lf_eblk;
unsigned long si_lasti;
unsigned long *si_imap;
- struct buffer_head *si_sbh; /* buffer header w/superblock */
struct mutex bfs_lock;
};
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index 88b9a3ff44e..eb67edd0f8e 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -70,7 +70,6 @@ static int bfs_get_block(struct inode *inode, sector_t block,
struct super_block *sb = inode->i_sb;
struct bfs_sb_info *info = BFS_SB(sb);
struct bfs_inode_info *bi = BFS_I(inode);
- struct buffer_head *sbh = info->si_sbh;
phys = bi->i_sblock + block;
if (!create) {
@@ -112,7 +111,6 @@ static int bfs_get_block(struct inode *inode, sector_t block,
info->si_freeb -= phys - bi->i_eblock;
info->si_lf_eblk = bi->i_eblock = phys;
mark_inode_dirty(inode);
- mark_buffer_dirty(sbh);
err = 0;
goto out;
}
@@ -147,7 +145,6 @@ static int bfs_get_block(struct inode *inode, sector_t block,
*/
info->si_freeb -= bi->i_eblock - bi->i_sblock + 1 - inode->i_blocks;
mark_inode_dirty(inode);
- mark_buffer_dirty(sbh);
map_bh(bh_result, sb, phys);
out:
mutex_unlock(&info->bfs_lock);
@@ -168,9 +165,17 @@ static int bfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- *pagep = NULL;
- return block_write_begin(file, mapping, pos, len, flags,
- pagep, fsdata, bfs_get_block);
+ int ret;
+
+ ret = block_write_begin(mapping, pos, len, flags, pagep,
+ bfs_get_block);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index f22a7d3dc36..c4daf0f5fc0 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -31,7 +31,6 @@ MODULE_LICENSE("GPL");
#define dprintf(x...)
#endif
-static void bfs_write_super(struct super_block *s);
void dump_imap(const char *prefix, struct super_block *s);
struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
@@ -99,6 +98,24 @@ error:
return ERR_PTR(-EIO);
}
+static struct bfs_inode *find_inode(struct super_block *sb, u16 ino, struct buffer_head **p)
+{
+ if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(sb)->si_lasti)) {
+ printf("Bad inode number %s:%08x\n", sb->s_id, ino);
+ return ERR_PTR(-EIO);
+ }
+
+ ino -= BFS_ROOT_INO;
+
+ *p = sb_bread(sb, 1 + ino / BFS_INODES_PER_BLOCK);
+ if (!*p) {
+ printf("Unable to read inode %s:%08x\n", sb->s_id, ino);
+ return ERR_PTR(-EIO);
+ }
+
+ return (struct bfs_inode *)(*p)->b_data + ino % BFS_INODES_PER_BLOCK;
+}
+
static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct bfs_sb_info *info = BFS_SB(inode->i_sb);
@@ -106,28 +123,15 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
unsigned long i_sblock;
struct bfs_inode *di;
struct buffer_head *bh;
- int block, off;
int err = 0;
dprintf("ino=%08x\n", ino);
- if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
- printf("Bad inode number %s:%08x\n", inode->i_sb->s_id, ino);
- return -EIO;
- }
+ di = find_inode(inode->i_sb, ino, &bh);
+ if (IS_ERR(di))
+ return PTR_ERR(di);
mutex_lock(&info->bfs_lock);
- block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
- bh = sb_bread(inode->i_sb, block);
- if (!bh) {
- printf("Unable to read inode %s:%08x\n",
- inode->i_sb->s_id, ino);
- mutex_unlock(&info->bfs_lock);
- return -EIO;
- }
-
- off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
- di = (struct bfs_inode *)bh->b_data + off;
if (ino == BFS_ROOT_INO)
di->i_vtype = cpu_to_le32(BFS_VDIR);
@@ -158,12 +162,11 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
-static void bfs_delete_inode(struct inode *inode)
+static void bfs_evict_inode(struct inode *inode)
{
unsigned long ino = inode->i_ino;
struct bfs_inode *di;
struct buffer_head *bh;
- int block, off;
struct super_block *s = inode->i_sb;
struct bfs_sb_info *info = BFS_SB(s);
struct bfs_inode_info *bi = BFS_I(inode);
@@ -171,28 +174,19 @@ static void bfs_delete_inode(struct inode *inode)
dprintf("ino=%08lx\n", ino);
truncate_inode_pages(&inode->i_data, 0);
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
- if ((ino < BFS_ROOT_INO) || (ino > info->si_lasti)) {
- printf("invalid ino=%08lx\n", ino);
+ if (inode->i_nlink)
return;
- }
-
- inode->i_size = 0;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
- mutex_lock(&info->bfs_lock);
- mark_inode_dirty(inode);
- block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
- bh = sb_bread(s, block);
- if (!bh) {
- printf("Unable to read inode %s:%08lx\n",
- inode->i_sb->s_id, ino);
- mutex_unlock(&info->bfs_lock);
+ di = find_inode(s, inode->i_ino, &bh);
+ if (IS_ERR(di))
return;
- }
- off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
- di = (struct bfs_inode *)bh->b_data + off;
- memset((void *)di, 0, sizeof(struct bfs_inode));
+
+ mutex_lock(&info->bfs_lock);
+ /* clear on-disk inode */
+ memset(di, 0, sizeof(struct bfs_inode));
mark_buffer_dirty(bh);
brelse(bh);
@@ -209,32 +203,9 @@ static void bfs_delete_inode(struct inode *inode)
* "last block of the last file" even if there is no
* real file there, saves us 1 gap.
*/
- if (info->si_lf_eblk == bi->i_eblock) {
+ if (info->si_lf_eblk == bi->i_eblock)
info->si_lf_eblk = bi->i_sblock - 1;
- mark_buffer_dirty(info->si_sbh);
- }
mutex_unlock(&info->bfs_lock);
- clear_inode(inode);
-}
-
-static int bfs_sync_fs(struct super_block *sb, int wait)
-{
- struct bfs_sb_info *info = BFS_SB(sb);
-
- mutex_lock(&info->bfs_lock);
- mark_buffer_dirty(info->si_sbh);
- sb->s_dirt = 0;
- mutex_unlock(&info->bfs_lock);
-
- return 0;
-}
-
-static void bfs_write_super(struct super_block *sb)
-{
- if (!(sb->s_flags & MS_RDONLY))
- bfs_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
}
static void bfs_put_super(struct super_block *s)
@@ -246,10 +217,6 @@ static void bfs_put_super(struct super_block *s)
lock_kernel();
- if (s->s_dirt)
- bfs_write_super(s);
-
- brelse(info->si_sbh);
mutex_destroy(&info->bfs_lock);
kfree(info->si_imap);
kfree(info);
@@ -319,10 +286,8 @@ static const struct super_operations bfs_sops = {
.alloc_inode = bfs_alloc_inode,
.destroy_inode = bfs_destroy_inode,
.write_inode = bfs_write_inode,
- .delete_inode = bfs_delete_inode,
+ .evict_inode = bfs_evict_inode,
.put_super = bfs_put_super,
- .write_super = bfs_write_super,
- .sync_fs = bfs_sync_fs,
.statfs = bfs_statfs,
};
@@ -349,7 +314,7 @@ void dump_imap(const char *prefix, struct super_block *s)
static int bfs_fill_super(struct super_block *s, void *data, int silent)
{
- struct buffer_head *bh;
+ struct buffer_head *bh, *sbh;
struct bfs_super_block *bfs_sb;
struct inode *inode;
unsigned i, imap_len;
@@ -365,10 +330,10 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
sb_set_blocksize(s, BFS_BSIZE);
- info->si_sbh = sb_bread(s, 0);
- if (!info->si_sbh)
+ sbh = sb_bread(s, 0);
+ if (!sbh)
goto out;
- bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data;
+ bfs_sb = (struct bfs_super_block *)sbh->b_data;
if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
if (!silent)
printf("No BFS filesystem on %s (magic=%08x)\n",
@@ -472,10 +437,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
info->si_lf_eblk = eblock;
}
brelse(bh);
- if (!(s->s_flags & MS_RDONLY)) {
- mark_buffer_dirty(info->si_sbh);
- s->s_dirt = 1;
- }
+ brelse(sbh);
dump_imap("read_super", s);
return 0;
@@ -485,7 +447,7 @@ out3:
out2:
kfree(info->si_imap);
out1:
- brelse(info->si_sbh);
+ brelse(sbh);
out:
mutex_destroy(&info->bfs_lock);
kfree(info);
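
The new find_inode() helper centralizes the block/offset arithmetic that bfs_write_inode() and bfs_evict_inode() previously duplicated. A standalone worked example of that arithmetic, assuming BFS's 512-byte blocks and 64-byte on-disk inodes (8 per block):

#include <stdio.h>

#define BFS_ROOT_INO            2
#define BFS_INODES_PER_BLOCK    8       /* 512-byte block / 64-byte inode */

int main(void)
{
        unsigned int ino = 13;                  /* example inode number */
        unsigned int rel = ino - BFS_ROOT_INO;  /* 11 */

        /* the inode table starts in block 1, after the superblock */
        printf("ino %u -> block %u, slot %u\n", ino,
               1 + rel / BFS_INODES_PER_BLOCK,  /* block 2 */
               rel % BFS_INODES_PER_BLOCK);     /* slot 3 */
        return 0;
}
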
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 2c5f9a0e5d7..63039ed9576 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -990,10 +990,9 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
/* clear any space allocated but not loaded */
if (phdr->p_filesz < phdr->p_memsz) {
- ret = clear_user((void *) (seg->addr + phdr->p_filesz),
- phdr->p_memsz - phdr->p_filesz);
- if (ret)
- return ret;
+ if (clear_user((void *) (seg->addr + phdr->p_filesz),
+ phdr->p_memsz - phdr->p_filesz))
+ return -EFAULT;
}
if (mm) {
@@ -1027,7 +1026,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
struct elf32_fdpic_loadseg *seg;
struct elf32_phdr *phdr;
unsigned long load_addr, delta_vaddr;
- int loop, dvset, ret;
+ int loop, dvset;
load_addr = params->load_addr;
delta_vaddr = 0;
@@ -1127,9 +1126,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
* PT_LOAD */
if (prot & PROT_WRITE && disp > 0) {
kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp);
- ret = clear_user((void __user *) maddr, disp);
- if (ret)
- return ret;
+ if (clear_user((void __user *) maddr, disp))
+ return -EFAULT;
maddr += disp;
}
@@ -1164,19 +1162,17 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
if (prot & PROT_WRITE && excess1 > 0) {
kdebug("clear[%d] ad=%lx sz=%lx",
loop, maddr + phdr->p_filesz, excess1);
- ret = clear_user((void __user *) maddr + phdr->p_filesz,
- excess1);
- if (ret)
- return ret;
+ if (clear_user((void __user *) maddr + phdr->p_filesz,
+ excess1))
+ return -EFAULT;
}
#else
if (excess > 0) {
kdebug("clear[%d] ad=%lx sz=%lx",
loop, maddr + phdr->p_filesz, excess);
- ret = clear_user((void *) maddr + phdr->p_filesz, excess);
- if (ret)
- return ret;
+ if (clear_user((void *) maddr + phdr->p_filesz, excess))
+ return -EFAULT;
}
#endif
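
The binfmt_elf_fdpic hunks above fix a subtle error-path bug: clear_user() returns the number of bytes it failed to clear, not a negative errno, so returning its result verbatim hands a positive count back to the caller. A minimal sketch of the corrected idiom:

#include <linux/uaccess.h>

/* clear_user() reports how many bytes it could NOT zero; convert
 * that count into a proper errno instead of returning it raw. */
static int zero_user_range(void __user *p, unsigned long len)
{
        if (clear_user(p, len))
                return -EFAULT;
        return 0;
}
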
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 49566c1687d..811384bec8d 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -56,16 +56,19 @@
#endif
/*
- * User data (stack, data section and bss) needs to be aligned
- * for the same reasons as SLAB memory is, and to the same amount.
- * Avoid duplicating architecture specific code by using the same
- * macro as with SLAB allocation:
+ * User data (data section and bss) needs to be aligned.
+ * We pick 0x20 here because it is the max value elf2flt has always
+ * used in producing FLAT files, and because it seems to be large
+ * enough to make all the gcc alignment related tests happy.
*/
-#ifdef ARCH_SLAB_MINALIGN
-#define FLAT_DATA_ALIGN (ARCH_SLAB_MINALIGN)
-#else
-#define FLAT_DATA_ALIGN (sizeof(void *))
-#endif
+#define FLAT_DATA_ALIGN (0x20)
+
+/*
+ * User data (stack) also needs to be aligned.
+ * Here we can be a bit looser than the data sections since this
+ * needs to only meet arch ABI requirements.
+ */
+#define FLAT_STACK_ALIGN max_t(unsigned long, sizeof(void *), ARCH_SLAB_MINALIGN)
#define RELOC_FAILED 0xff00ff01 /* Relocation incorrect somewhere */
#define UNLOADED_LIB 0x7ff000ff /* Placeholder for unused library */
@@ -129,7 +132,7 @@ static unsigned long create_flat_tables(
sp = (unsigned long *)p;
sp -= (envc + argc + 2) + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
- sp = (unsigned long *) ((unsigned long)sp & -FLAT_DATA_ALIGN);
+ sp = (unsigned long *) ((unsigned long)sp & -FLAT_STACK_ALIGN);
argv = sp + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
envp = argv + (argc + 1);
@@ -589,7 +592,7 @@ static int load_flat_file(struct linux_binprm * bprm,
if (IS_ERR_VALUE(result)) {
printk("Unable to read data+bss, errno %d\n", (int)-result);
do_munmap(current->mm, textpos, text_len);
- do_munmap(current->mm, realdatastart, data_len + extra);
+ do_munmap(current->mm, realdatastart, len);
ret = result;
goto err;
}
@@ -876,7 +879,7 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs)
stack_len = TOP_OF_ARGS - bprm->p; /* the strings */
stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */
stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
- stack_len += FLAT_DATA_ALIGN - 1; /* reserve for upcoming alignment */
+ stack_len += FLAT_STACK_ALIGN - 1; /* reserve for upcoming alignment */
res = load_flat_file(bprm, &libinfo, 0, &stack_len);
if (IS_ERR_VALUE(res))
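
Both alignment macros are applied with the `addr & -ALIGN` idiom seen in create_flat_tables(); for a power-of-two ALIGN, -ALIGN equals ~(ALIGN - 1), so the AND rounds the address down. A small worked example (the address is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long sp = 0xbefff3c4UL;        /* arbitrary stack pointer */
        unsigned long align = 0x20;             /* FLAT_DATA_ALIGN */

        /* for powers of two, -align == ~(align - 1), so the AND
         * rounds the address down to the alignment boundary */
        printf("%#lx -> %#lx\n", sp, sp & -align);  /* 0xbefff3c0 */
        return 0;
}
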
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index c4e83537ead..9e60fd20171 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -502,8 +502,9 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
return inode;
}
-static void bm_clear_inode(struct inode *inode)
+static void bm_evict_inode(struct inode *inode)
{
+ end_writeback(inode);
kfree(inode->i_private);
}
@@ -685,7 +686,7 @@ static const struct file_operations bm_status_operations = {
static const struct super_operations s_ops = {
.statfs = simple_statfs,
- .clear_inode = bm_clear_inode,
+ .evict_inode = bm_evict_inode,
};
static int bm_fill_super(struct super_block * sb, void * data, int silent)
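
bm_evict_inode() is the simplest instance of the tree-wide ->clear_inode/->delete_inode to ->evict_inode conversion in this merge. A hedged skeleton of the general shape these conversions follow (example_* names are placeholders):

#include <linux/fs.h>
#include <linux/mm.h>

/* Skeleton of a 2.6.36-era ->evict_inode. Every implementation must
 * end writeback itself, and filesystems with page cache truncate it
 * first. */
static void example_evict_inode(struct inode *inode)
{
        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);           /* replaces clear_inode() */

        if (inode->i_nlink)
                return;                 /* plain eviction, not deletion */

        /* ... release on-disk resources of the deleted inode ... */
}
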
diff --git a/fs/bio.c b/fs/bio.c
index e7bf6ca64dc..8abb2dfb2e7 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -843,7 +843,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (!bio)
goto out_bmd;
- bio->bi_rw |= (!write_to_vm << BIO_RW);
+ if (!write_to_vm)
+ bio->bi_rw |= REQ_WRITE;
ret = 0;
@@ -1024,7 +1025,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
* set data direction, and check if mapped pages need bouncing
*/
if (!write_to_vm)
- bio->bi_rw |= (1 << BIO_RW);
+ bio->bi_rw |= REQ_WRITE;
bio->bi_bdev = bdev;
bio->bi_flags |= (1 << BIO_USER_MAPPED);
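
These two hunks are part of a series-wide change: the BIO_RW_* bit-shift tests are replaced by direct REQ_* flag tests on bi_rw. A sketch of the new idiom, assuming the 2.6.36 blk_types.h definitions:

#include <linux/blk_types.h>

/* New idiom: test request flags directly on bi_rw instead of
 * shifting BIO_RW_* bit numbers into place. */
static inline bool example_bio_is_sync_write(struct bio *bio)
{
        return (bio->bi_rw & REQ_WRITE) && (bio->bi_rw & REQ_SYNC);
}
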
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7346c96308a..50e8c8582fa 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -172,9 +172,8 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- return blockdev_direct_IO_no_locking_newtrunc(rw, iocb, inode,
- I_BDEV(inode), iov, offset, nr_segs,
- blkdev_get_blocks, NULL);
+ return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
+ nr_segs, blkdev_get_blocks, NULL, NULL, 0);
}
int __sync_blockdev(struct block_device *bdev, int wait)
@@ -309,9 +308,8 @@ static int blkdev_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- *pagep = NULL;
- return block_write_begin_newtrunc(file, mapping, pos, len, flags,
- pagep, fsdata, blkdev_get_block);
+ return block_write_begin(mapping, pos, len, flags, pagep,
+ blkdev_get_block);
}
static int blkdev_write_end(struct file *file, struct address_space *mapping,
@@ -428,10 +426,13 @@ static inline void __bd_forget(struct inode *inode)
inode->i_mapping = &inode->i_data;
}
-static void bdev_clear_inode(struct inode *inode)
+static void bdev_evict_inode(struct inode *inode)
{
struct block_device *bdev = &BDEV_I(inode)->bdev;
struct list_head *p;
+ truncate_inode_pages(&inode->i_data, 0);
+ invalidate_inode_buffers(inode); /* is it needed here? */
+ end_writeback(inode);
spin_lock(&bdev_lock);
while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
__bd_forget(list_entry(p, struct inode, i_devices));
@@ -445,7 +446,7 @@ static const struct super_operations bdev_sops = {
.alloc_inode = bdev_alloc_inode,
.destroy_inode = bdev_destroy_inode,
.drop_inode = generic_delete_inode,
- .clear_inode = bdev_clear_inode,
+ .evict_inode = bdev_evict_inode,
};
static int bd_get_sb(struct file_system_type *fs_type,
@@ -681,8 +682,8 @@ retry:
if (!bd_may_claim(bdev, whole, holder))
return -EBUSY;
- /* if someone else is claiming, wait for it to finish */
- if (whole->bd_claiming && whole->bd_claiming != holder) {
+ /* if claiming is already in progress, wait for it to finish */
+ if (whole->bd_claiming) {
wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
DEFINE_WAIT(wait);
@@ -706,8 +707,13 @@ retry:
* @bdev is about to be opened exclusively. Check @bdev can be opened
* exclusively and mark that an exclusive open is in progress. Each
* successful call to this function must be matched with a call to
- * either bd_claim() or bd_abort_claiming(). If this function
- * succeeds, the matching bd_claim() is guaranteed to succeed.
+ * either bd_finish_claiming() or bd_abort_claiming() (which do not
+ * fail).
+ *
+ * This function is used to gain exclusive access to the block device
+ * without actually causing other exclusive open attempts to fail. It
+ * should be used when the open sequence itself requires exclusive
+ * access but may subsequently fail.
*
* CONTEXT:
* Might sleep.
@@ -734,6 +740,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
return ERR_PTR(-ENXIO);
whole = bdget_disk(disk, 0);
+ module_put(disk->fops->owner);
put_disk(disk);
if (!whole)
return ERR_PTR(-ENOMEM);
@@ -782,15 +789,46 @@ static void bd_abort_claiming(struct block_device *whole, void *holder)
__bd_abort_claiming(whole, holder); /* releases bdev_lock */
}
+/* increment holders when we have a legitimate claim. requires bdev_lock */
+static void __bd_claim(struct block_device *bdev, struct block_device *whole,
+ void *holder)
+{
+ /* note that for a whole device bd_holders
+ * will be incremented twice, and bd_holder will
+ * be set to bd_claim before being set to holder
+ */
+ whole->bd_holders++;
+ whole->bd_holder = bd_claim;
+ bdev->bd_holders++;
+ bdev->bd_holder = holder;
+}
+
+/**
+ * bd_finish_claiming - finish claiming a block device
+ * @bdev: block device of interest (passed to bd_start_claiming())
+ * @whole: whole block device returned by bd_start_claiming()
+ * @holder: holder trying to claim @bdev
+ *
+ * Finish the block device claim started by bd_start_claiming().
+ *
+ * CONTEXT:
+ * Grabs and releases bdev_lock.
+ */
+static void bd_finish_claiming(struct block_device *bdev,
+ struct block_device *whole, void *holder)
+{
+ spin_lock(&bdev_lock);
+ BUG_ON(!bd_may_claim(bdev, whole, holder));
+ __bd_claim(bdev, whole, holder);
+ __bd_abort_claiming(whole, holder); /* not actually an abort */
+}
+
/**
* bd_claim - claim a block device
* @bdev: block device to claim
* @holder: holder trying to claim @bdev
*
- * Try to claim @bdev which must have been opened successfully. This
- * function may be called with or without preceding
- * blk_start_claiming(). In the former case, this function is always
- * successful and terminates the claiming block.
+ * Try to claim @bdev which must have been opened successfully.
*
* CONTEXT:
* Might sleep.
@@ -806,23 +844,10 @@ int bd_claim(struct block_device *bdev, void *holder)
might_sleep();
spin_lock(&bdev_lock);
-
res = bd_prepare_to_claim(bdev, whole, holder);
- if (res == 0) {
- /* note that for a whole device bd_holders
- * will be incremented twice, and bd_holder will
- * be set to bd_claim before being set to holder
- */
- whole->bd_holders++;
- whole->bd_holder = bd_claim;
- bdev->bd_holders++;
- bdev->bd_holder = holder;
- }
-
- if (whole->bd_claiming)
- __bd_abort_claiming(whole, holder); /* releases bdev_lock */
- else
- spin_unlock(&bdev_lock);
+ if (res == 0)
+ __bd_claim(bdev, whole, holder);
+ spin_unlock(&bdev_lock);
return res;
}
@@ -1315,19 +1340,20 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
/*
* hooks: /n/, see "layering violations".
*/
- ret = devcgroup_inode_permission(bdev->bd_inode, perm);
- if (ret != 0) {
- bdput(bdev);
- return ret;
+ if (!for_part) {
+ ret = devcgroup_inode_permission(bdev->bd_inode, perm);
+ if (ret != 0) {
+ bdput(bdev);
+ return ret;
+ }
}
- lock_kernel();
restart:
ret = -ENXIO;
disk = get_gendisk(bdev->bd_dev, &partno);
if (!disk)
- goto out_unlock_kernel;
+ goto out;
mutex_lock_nested(&bdev->bd_mutex, for_part);
if (!bdev->bd_openers) {
@@ -1407,7 +1433,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (for_part)
bdev->bd_part_count++;
mutex_unlock(&bdev->bd_mutex);
- unlock_kernel();
return 0;
out_clear:
@@ -1420,9 +1445,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_contains = NULL;
out_unlock_bdev:
mutex_unlock(&bdev->bd_mutex);
- out_unlock_kernel:
- unlock_kernel();
-
+ out:
if (disk)
module_put(disk->fops->owner);
put_disk(disk);
@@ -1476,7 +1499,7 @@ static int blkdev_open(struct inode * inode, struct file * filp)
if (whole) {
if (res == 0)
- BUG_ON(bd_claim(bdev, filp) != 0);
+ bd_finish_claiming(bdev, whole, filp);
else
bd_abort_claiming(whole, filp);
}
@@ -1491,7 +1514,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
struct block_device *victim = NULL;
mutex_lock_nested(&bdev->bd_mutex, for_part);
- lock_kernel();
if (for_part)
bdev->bd_part_count--;
@@ -1516,7 +1538,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
victim = bdev->bd_contains;
bdev->bd_contains = NULL;
}
- unlock_kernel();
mutex_unlock(&bdev->bd_mutex);
bdput(bdev);
if (victim)
@@ -1712,7 +1733,7 @@ struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *h
if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
goto out_blkdev_put;
- BUG_ON(bd_claim(bdev, holder) != 0);
+ bd_finish_claiming(bdev, whole, holder);
return bdev;
out_blkdev_put:
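
Taken together, the block_dev.c changes establish a three-step claiming protocol: bd_start_claiming() reserves, then either bd_finish_claiming() or bd_abort_claiming() resolves the reservation, and neither resolution can fail. An illustrative sketch modeled on the blkdev_open() hunk (note these helpers are file-local to fs/block_dev.c, so this is conceptual only):

/* Reserve the claim, attempt the open, then commit or abort; the
 * commit path cannot fail once bd_start_claiming() has succeeded. */
static int example_open_claimed(struct block_device *bdev, fmode_t mode,
                                void *holder)
{
        struct block_device *whole;
        int res;

        whole = bd_start_claiming(bdev, holder);
        if (IS_ERR(whole))
                return PTR_ERR(whole);

        res = blkdev_get(bdev, mode);           /* may fail */
        if (res == 0)
                bd_finish_claiming(bdev, whole, holder);
        else
                bd_abort_claiming(whole, holder);
        return res;
}
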
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 8d432cd9d58..2222d161c7b 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -60,6 +60,8 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
size = __btrfs_getxattr(inode, name, value, size);
if (size > 0) {
acl = posix_acl_from_xattr(value, size);
+ if (IS_ERR(acl))
+ return acl;
set_cached_acl(inode, type, acl);
}
kfree(value);
@@ -160,6 +162,12 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
int ret;
struct posix_acl *acl = NULL;
+ if (!is_owner_or_cap(dentry->d_inode))
+ return -EPERM;
+
+ if (!IS_POSIXACL(dentry->d_inode))
+ return -EOPNOTSUPP;
+
if (value) {
acl = posix_acl_from_xattr(value, size);
if (acl == NULL) {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0d1d966b0fe..c3df14ce2cc 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2304,12 +2304,17 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root,
return ret;
}
+/*
+ * min slot controls the lowest index we're willing to push to the
+ * right. We'll push up to and including min_slot, but no lower
+ */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
int data_size, int empty,
struct extent_buffer *right,
- int free_space, u32 left_nritems)
+ int free_space, u32 left_nritems,
+ u32 min_slot)
{
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *upper = path->nodes[1];
@@ -2327,7 +2332,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
if (empty)
nr = 0;
else
- nr = 1;
+ nr = max_t(u32, 1, min_slot);
if (path->slots[0] >= left_nritems)
push_space += data_size;
@@ -2469,10 +2474,14 @@ out_unlock:
*
* returns 1 if the push failed because the other node didn't have enough
* room, 0 if everything worked out and < 0 if there were major errors.
+ *
+ * this will push starting from min_slot to the end of the leaf. It won't
+ * push any slot lower than min_slot
*/
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, struct btrfs_path *path, int data_size,
- int empty)
+ *root, struct btrfs_path *path,
+ int min_data_size, int data_size,
+ int empty, u32 min_slot)
{
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *right;
@@ -2514,8 +2523,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
if (left_nritems == 0)
goto out_unlock;
- return __push_leaf_right(trans, root, path, data_size, empty,
- right, free_space, left_nritems);
+ return __push_leaf_right(trans, root, path, min_data_size, empty,
+ right, free_space, left_nritems, min_slot);
out_unlock:
btrfs_tree_unlock(right);
free_extent_buffer(right);
@@ -2525,12 +2534,17 @@ out_unlock:
/*
* push some data in the path leaf to the left, trying to free up at
* least data_size bytes. returns zero if the push worked, nonzero otherwise
+ *
+ * max_slot can put a limit on how far into the leaf we'll push items. The
+ * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
+ * items
*/
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int data_size,
int empty, struct extent_buffer *left,
- int free_space, int right_nritems)
+ int free_space, u32 right_nritems,
+ u32 max_slot)
{
struct btrfs_disk_key disk_key;
struct extent_buffer *right = path->nodes[0];
@@ -2549,9 +2563,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
slot = path->slots[1];
if (empty)
- nr = right_nritems;
+ nr = min(right_nritems, max_slot);
else
- nr = right_nritems - 1;
+ nr = min(right_nritems - 1, max_slot);
for (i = 0; i < nr; i++) {
item = btrfs_item_nr(right, i);
@@ -2712,10 +2726,14 @@ out:
/*
* push some data in the path leaf to the left, trying to free up at
* least data_size bytes. returns zero if the push worked, nonzero otherwise
+ *
+ * max_slot can put a limit on how far into the leaf we'll push items. The
+ * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
+ * items
*/
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
- *root, struct btrfs_path *path, int data_size,
- int empty)
+ *root, struct btrfs_path *path, int min_data_size,
+ int data_size, int empty, u32 max_slot)
{
struct extent_buffer *right = path->nodes[0];
struct extent_buffer *left;
@@ -2761,8 +2779,9 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
goto out;
}
- return __push_leaf_left(trans, root, path, data_size,
- empty, left, free_space, right_nritems);
+ return __push_leaf_left(trans, root, path, min_data_size,
+ empty, left, free_space, right_nritems,
+ max_slot);
out:
btrfs_tree_unlock(left);
free_extent_buffer(left);
@@ -2855,6 +2874,64 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
}
/*
+ * double splits happen when we need to insert a big item in the middle
+ * of a leaf. A double split can leave us with 3 mostly empty leaves:
+ * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
+ * A B C
+ *
+ * We avoid this by trying to push the items on either side of our target
+ * into the adjacent leaves. If all goes well we can avoid the double split
+ * completely.
+ */
+static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ int data_size)
+{
+ int ret;
+ int progress = 0;
+ int slot;
+ u32 nritems;
+
+ slot = path->slots[0];
+
+ /*
+ * try to push all the items after our slot into the
+ * right leaf
+ */
+ ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ progress++;
+
+ nritems = btrfs_header_nritems(path->nodes[0]);
+ /*
+ * our goal is to get our slot at the start or end of a leaf. If
+ * we've done so we're done
+ */
+ if (path->slots[0] == 0 || path->slots[0] == nritems)
+ return 0;
+
+ if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+ return 0;
+
+ /* try to push all the items before our slot into the next leaf */
+ slot = path->slots[0];
+ ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ progress++;
+
+ if (progress)
+ return 0;
+ return 1;
+}
+
+/*
* split the path's leaf in two, making sure there is at least data_size
* available for the resulting leaf level of the path.
*
@@ -2876,6 +2953,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
int wret;
int split;
int num_doubles = 0;
+ int tried_avoid_double = 0;
l = path->nodes[0];
slot = path->slots[0];
@@ -2884,12 +2962,14 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
return -EOVERFLOW;
/* first try to make some room by pushing left and right */
- if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
- wret = push_leaf_right(trans, root, path, data_size, 0);
+ if (data_size) {
+ wret = push_leaf_right(trans, root, path, data_size,
+ data_size, 0, 0);
if (wret < 0)
return wret;
if (wret) {
- wret = push_leaf_left(trans, root, path, data_size, 0);
+ wret = push_leaf_left(trans, root, path, data_size,
+ data_size, 0, (u32)-1);
if (wret < 0)
return wret;
}
@@ -2923,6 +3003,8 @@ again:
if (mid != nritems &&
leaf_space_used(l, mid, nritems - mid) +
data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+ if (data_size && !tried_avoid_double)
+ goto push_for_double;
split = 2;
}
}
@@ -2939,6 +3021,8 @@ again:
if (mid != nritems &&
leaf_space_used(l, mid, nritems - mid) +
data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+ if (data_size && !tried_avoid_double)
+ goto push_for_double;
split = 2 ;
}
}
@@ -3019,6 +3103,13 @@ again:
}
return ret;
+
+push_for_double:
+ push_for_double_split(trans, root, path, data_size);
+ tried_avoid_double = 1;
+ if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+ return 0;
+ goto again;
}
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
@@ -3915,13 +4006,15 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
extent_buffer_get(leaf);
btrfs_set_path_blocking(path);
- wret = push_leaf_left(trans, root, path, 1, 1);
+ wret = push_leaf_left(trans, root, path, 1, 1,
+ 1, (u32)-1);
if (wret < 0 && wret != -ENOSPC)
ret = wret;
if (path->nodes[0] == leaf &&
btrfs_header_nritems(leaf)) {
- wret = push_leaf_right(trans, root, path, 1, 1);
+ wret = push_leaf_right(trans, root, path, 1,
+ 1, 1, 0);
if (wret < 0 && wret != -ENOSPC)
ret = wret;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 29c20092847..eaf286abad1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2389,13 +2389,13 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
pgoff_t offset, pgoff_t last_index);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
-void btrfs_delete_inode(struct inode *inode);
+void btrfs_evict_inode(struct inode *inode);
void btrfs_put_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
void btrfs_dirty_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
-void btrfs_drop_inode(struct inode *inode);
+int btrfs_drop_inode(struct inode *inode);
int btrfs_init_cachep(void);
void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f3b287c22ca..64f10082f04 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -480,7 +480,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
end_io_wq->work.func = end_workqueue_fn;
end_io_wq->work.flags = 0;
- if (bio->bi_rw & (1 << BIO_RW)) {
+ if (bio->bi_rw & REQ_WRITE) {
if (end_io_wq->metadata)
btrfs_queue_worker(&fs_info->endio_meta_write_workers,
&end_io_wq->work);
@@ -604,7 +604,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
atomic_inc(&fs_info->nr_async_submits);
- if (rw & (1 << BIO_RW_SYNCIO))
+ if (rw & REQ_SYNC)
btrfs_set_work_high_prio(&async->work);
btrfs_queue_worker(&fs_info->workers, &async->work);
@@ -668,7 +668,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
bio, 1);
BUG_ON(ret);
- if (!(rw & (1 << BIO_RW))) {
+ if (!(rw & REQ_WRITE)) {
/*
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
@@ -1427,7 +1427,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
* ram and up to date before trying to verify things. For
* blocksize <= pagesize, it is basically a noop
*/
- if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
+ if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
!bio_ready_for_csum(bio)) {
btrfs_queue_worker(&fs_info->endio_meta_workers,
&end_io_wq->work);
@@ -1941,8 +1941,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
btrfs_level_size(tree_root,
btrfs_super_log_root_level(disk_super));
- log_tree_root = kzalloc(sizeof(struct btrfs_root),
- GFP_NOFS);
+ log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+ if (!log_tree_root) {
+ err = -ENOMEM;
+ goto fail_trans_kthread;
+ }
__setup_root(nodesize, leafsize, sectorsize, stripesize,
log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
@@ -1982,6 +1985,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
if (!fs_info->fs_root)
goto fail_trans_kthread;
+ if (IS_ERR(fs_info->fs_root)) {
+ err = PTR_ERR(fs_info->fs_root);
+ goto fail_trans_kthread;
+ }
if (!(sb->s_flags & MS_RDONLY)) {
down_read(&fs_info->cleanup_work_sem);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b9080d71991..32d094002a5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4360,7 +4360,8 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
block_rsv = get_block_rsv(trans, root);
cache = btrfs_lookup_block_group(root->fs_info, buf->start);
- BUG_ON(block_rsv->space_info != cache->space_info);
+ if (block_rsv->space_info != cache->space_info)
+ goto out;
if (btrfs_header_generation(buf) == trans->transid) {
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a4080c21ec5..d74e6af9b53 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2594,7 +2594,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
struct writeback_control wbc_writepages = {
- .bdi = wbc->bdi,
.sync_mode = wbc->sync_mode,
.older_than_this = NULL,
.nr_to_write = 64,
@@ -2628,7 +2627,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
.sync_io = mode == WB_SYNC_ALL,
};
struct writeback_control wbc_writepages = {
- .bdi = inode->i_mapping->backing_dev_info,
.sync_mode = mode,
.older_than_this = NULL,
.nr_to_write = nr_pages * 2,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 787b50a16a1..e354c33df08 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1140,7 +1140,7 @@ int btrfs_sync_file(struct file *file, int datasync)
/*
* ok we haven't committed the transaction yet, lets do a commit
*/
- if (file && file->private_data)
+ if (file->private_data)
btrfs_ioctl_trans_end(file);
trans = btrfs_start_transaction(root, 0);
@@ -1190,14 +1190,22 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
- vma->vm_ops = &btrfs_file_vm_ops;
+ struct address_space *mapping = filp->f_mapping;
+
+ if (!mapping->a_ops->readpage)
+ return -ENOEXEC;
+
file_accessed(filp);
+ vma->vm_ops = &btrfs_file_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+
return 0;
}
const struct file_operations btrfs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
+ .write = do_sync_write,
.aio_read = generic_file_aio_read,
.splice_read = generic_file_splice_read,
.aio_write = btrfs_file_aio_write,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fa6ccc1bfe2..c03864406af 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1429,7 +1429,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
BUG_ON(ret);
- if (!(rw & (1 << BIO_RW))) {
+ if (!(rw & REQ_WRITE)) {
if (bio_flags & EXTENT_BIO_COMPRESSED) {
return btrfs_submit_compressed_read(inode, bio,
mirror_num, bio_flags);
@@ -1841,7 +1841,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
bio->bi_size = 0;
bio_add_page(bio, page, failrec->len, start - page_offset(page));
- if (failed_bio->bi_rw & (1 << BIO_RW))
+ if (failed_bio->bi_rw & REQ_WRITE)
rw = WRITE;
else
rw = READ;
@@ -2673,7 +2673,7 @@ static int check_path_shared(struct btrfs_root *root,
struct extent_buffer *eb;
int level;
int ret;
- u64 refs;
+ u64 refs = 1;
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
if (!path->nodes[level])
@@ -2938,7 +2938,6 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, dir);
BUG_ON(ret);
- dir->i_sb->s_dirt = 1;
btrfs_free_path(path);
return 0;
@@ -3656,17 +3655,19 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
}
- attr->ia_valid &= ~ATTR_SIZE;
- if (attr->ia_valid)
- err = inode_setattr(inode, attr);
+ if (attr->ia_valid) {
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
+ if (attr->ia_valid & ATTR_MODE)
+ err = btrfs_acl_chmod(inode);
+ }
- if (!err && ((attr->ia_valid & ATTR_MODE)))
- err = btrfs_acl_chmod(inode);
return err;
}
-void btrfs_delete_inode(struct inode *inode)
+void btrfs_evict_inode(struct inode *inode)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3674,10 +3675,14 @@ void btrfs_delete_inode(struct inode *inode)
int ret;
truncate_inode_pages(&inode->i_data, 0);
+ if (inode->i_nlink && btrfs_root_refs(&root->root_item) != 0)
+ goto no_delete;
+
if (is_bad_inode(inode)) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
}
+ /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (root->fs_info->log_root_recovering) {
@@ -3727,7 +3732,7 @@ void btrfs_delete_inode(struct inode *inode)
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
no_delete:
- clear_inode(inode);
+ end_writeback(inode);
return;
}
@@ -3858,7 +3863,7 @@ again:
p = &parent->rb_right;
else {
WARN_ON(!(entry->vfs_inode.i_state &
- (I_WILL_FREE | I_FREEING | I_CLEAR)));
+ (I_WILL_FREE | I_FREEING)));
rb_erase(parent, &root->inode_tree);
RB_CLEAR_NODE(parent);
spin_unlock(&root->inode_lock);
@@ -3937,7 +3942,7 @@ again:
if (atomic_read(&inode->i_count) > 1)
d_prune_aliases(inode);
/*
- * btrfs_drop_inode will remove it from
+ * btrfs_drop_inode will have it removed from
* the inode cache when its usage count
* hits zero.
*/
@@ -5642,7 +5647,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
struct bio_vec *bvec = bio->bi_io_vec;
u64 start;
int skip_sum;
- int write = rw & (1 << BIO_RW);
+ int write = rw & REQ_WRITE;
int ret = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -6331,13 +6336,14 @@ free:
kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
-void btrfs_drop_inode(struct inode *inode)
+int btrfs_drop_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
- generic_delete_inode(inode);
+
+ if (btrfs_root_refs(&root->root_item) == 0)
+ return 1;
else
- generic_drop_inode(inode);
+ return generic_drop_inode(inode);
}
static void init_once(void *foo)
@@ -6884,7 +6890,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
if (em->block_start == EXTENT_MAP_HOLE ||
(cur_offset >= inode->i_size &&
!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
- ret = btrfs_prealloc_file_range(inode, 0, cur_offset,
+ ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
last_byte - cur_offset,
1 << inode->i_blkbits,
offset + len,
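
The ->drop_inode change above reflects a new VFS contract in this merge: the hook now returns int, where nonzero tells the VFS to evict the inode immediately rather than keep it cached. A hedged sketch of an implementation under that contract (example_sb_going_away() is a hypothetical predicate standing in for btrfs's root-refs test):

#include <linux/fs.h>

/* Nonzero forces immediate eviction even when i_nlink is still
 * positive; otherwise defer to the default policy. */
static int example_drop_inode(struct inode *inode)
{
        if (example_sb_going_away(inode->i_sb))
                return 1;                       /* evict now */
        return generic_drop_inode(inode);       /* default policy */
}
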
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4cdb98cf26d..9254b3d58db 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1280,7 +1280,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- goto out;
+ goto out_up_write;
}
trans->block_rsv = &root->fs_info->global_block_rsv;
@@ -1458,7 +1458,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
*/
/* the destination must be opened for writing */
- if (!(file->f_mode & FMODE_WRITE))
+ if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND))
return -EINVAL;
ret = mnt_want_write(file->f_path.mnt);
@@ -1511,7 +1511,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
/* determine range to clone */
ret = -EINVAL;
- if (off >= src->i_size || off + len > src->i_size)
+ if (off + len > src->i_size || off + len < off)
goto out_unlock;
if (len == 0)
olen = len = src->i_size - off;
@@ -1578,6 +1578,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
u64 disko = 0, diskl = 0;
u64 datao = 0, datal = 0;
u8 comp;
+ u64 endoff;
size = btrfs_item_size_nr(leaf, slot);
read_extent_buffer(leaf, buf,
@@ -1712,9 +1713,18 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_release_path(root, path);
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- if (new_key.offset + datal > inode->i_size)
- btrfs_i_size_write(inode,
- new_key.offset + datal);
+
+ /*
+ * we round up to the block size at eof when
+ * determining which extents to clone above,
+ * but shouldn't round up the file size
+ */
+ endoff = new_key.offset + datal;
+ if (endoff > off+olen)
+ endoff = off+olen;
+ if (endoff > inode->i_size)
+ btrfs_i_size_write(inode, endoff);
+
BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
@@ -1845,7 +1855,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
dir_id, "default", 7, 1);
- if (!di) {
+ if (IS_ERR_OR_NULL(di)) {
btrfs_free_path(path);
btrfs_end_transaction(trans, root);
printk(KERN_ERR "Umm, you don't have the default dir item, "
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 05d41e56923..b37d723b9d4 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -784,16 +784,17 @@ again:
struct btrfs_extent_ref_v0 *ref0;
ref0 = btrfs_item_ptr(eb, path1->slots[0],
struct btrfs_extent_ref_v0);
- root = find_tree_root(rc, eb, ref0);
- if (!root->ref_cows)
- cur->cowonly = 1;
if (key.objectid == key.offset) {
+ root = find_tree_root(rc, eb, ref0);
if (root && !should_ignore_root(root))
cur->root = root;
else
list_add(&cur->list, &useless);
break;
}
+ if (is_cowonly_root(btrfs_ref_root_v0(eb,
+ ref0)))
+ cur->cowonly = 1;
}
#else
BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index b91ccd97264..2d958be761c 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -330,7 +330,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
{
struct btrfs_path *path;
int ret;
- u32 refs;
struct btrfs_root_item *ri;
struct extent_buffer *leaf;
@@ -344,8 +343,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
leaf = path->nodes[0];
ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);
- refs = btrfs_disk_root_refs(leaf, ri);
- BUG_ON(refs != 0);
ret = btrfs_del_item(trans, root, path);
out:
btrfs_free_path(path);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d34b2dfc962..1776dbd8dc9 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -360,6 +360,8 @@ static struct dentry *get_default_root(struct super_block *sb,
*/
dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
+ if (IS_ERR(di))
+ return ERR_CAST(di);
if (!di) {
/*
* Ok the default dir item isn't there. This is weird since
@@ -390,8 +392,8 @@ setup_root:
location.offset = 0;
inode = btrfs_iget(sb, &location, new_root, &new);
- if (!inode)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
/*
* If we're just mounting the root most subvol put the inode and return
@@ -795,7 +797,7 @@ static int btrfs_unfreeze(struct super_block *sb)
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
- .delete_inode = btrfs_delete_inode,
+ .evict_inode = btrfs_evict_inode,
.put_super = btrfs_put_super,
.sync_fs = btrfs_sync_fs,
.show_options = btrfs_show_options,
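
Both super.c and ioctl.c now guard btrfs_lookup_dir_item(), which can return either NULL (item absent) or an ERR_PTR() (lookup failure); the two cases need distinct handling, and collapsing them loses information. A small sketch of the defensive pattern, assuming the signature used in the hunks above:

/* NULL means the item is absent, ERR_PTR() means the lookup itself
 * failed -- check both, as the hunks above now do. */
static struct btrfs_dir_item *
example_lookup_default(struct btrfs_root *root, struct btrfs_path *path,
                       u64 dir_id)
{
        struct btrfs_dir_item *di;

        di = btrfs_lookup_dir_item(NULL, root, path, dir_id,
                                   "default", 7, 0);
        if (IS_ERR(di))
                return di;                      /* real lookup error */
        if (!di)
                return ERR_PTR(-ENOENT);        /* item simply missing */
        return di;
}
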
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d6e3af8be95..dd318ff280b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -258,7 +258,7 @@ loop_lock:
BUG_ON(atomic_read(&cur->bi_cnt) == 0);
- if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
+ if (cur->bi_rw & REQ_SYNC)
num_sync_run++;
submit_bio(cur->bi_rw, cur);
@@ -2651,7 +2651,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
int max_errors = 0;
struct btrfs_multi_bio *multi = NULL;
- if (multi_ret && !(rw & (1 << BIO_RW)))
+ if (multi_ret && !(rw & REQ_WRITE))
stripes_allocated = 1;
again:
if (multi_ret) {
@@ -2687,7 +2687,7 @@ again:
mirror_num = 0;
/* if our multi bio struct is too small, back off and try again */
- if (rw & (1 << BIO_RW)) {
+ if (rw & REQ_WRITE) {
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_DUP)) {
stripes_required = map->num_stripes;
@@ -2697,7 +2697,7 @@ again:
max_errors = 1;
}
}
- if (multi_ret && (rw & (1 << BIO_RW)) &&
+ if (multi_ret && (rw & REQ_WRITE) &&
stripes_allocated < stripes_required) {
stripes_allocated = map->num_stripes;
free_extent_map(em);
@@ -2733,7 +2733,7 @@ again:
num_stripes = 1;
stripe_index = 0;
if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (unplug_page || (rw & (1 << BIO_RW)))
+ if (unplug_page || (rw & REQ_WRITE))
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -2744,7 +2744,7 @@ again:
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (rw & (1 << BIO_RW))
+ if (rw & REQ_WRITE)
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -2755,7 +2755,7 @@ again:
stripe_index = do_div(stripe_nr, factor);
stripe_index *= map->sub_stripes;
- if (unplug_page || (rw & (1 << BIO_RW)))
+ if (unplug_page || (rw & REQ_WRITE))
num_stripes = map->sub_stripes;
else if (mirror_num)
stripe_index += mirror_num - 1;
@@ -2945,7 +2945,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
struct btrfs_pending_bios *pending_bios;
/* don't bother with additional async steps for reads, right now */
- if (!(rw & (1 << BIO_RW))) {
+ if (!(rw & REQ_WRITE)) {
bio_get(bio);
submit_bio(rw, bio);
bio_put(bio);
@@ -2964,7 +2964,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
bio->bi_rw |= rw;
spin_lock(&device->io_lock);
- if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
+ if (bio->bi_rw & REQ_SYNC)
pending_bios = &device->pending_sync_bios;
else
pending_bios = &device->pending_bios;
diff --git a/fs/buffer.c b/fs/buffer.c
index d54812b198e..50efa339e05 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1833,9 +1833,10 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
}
EXPORT_SYMBOL(page_zero_new_buffers);
-static int __block_prepare_write(struct inode *inode, struct page *page,
- unsigned from, unsigned to, get_block_t *get_block)
+int block_prepare_write(struct page *page, unsigned from, unsigned to,
+ get_block_t *get_block)
{
+ struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
@@ -1908,10 +1909,13 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
- if (unlikely(err))
+ if (unlikely(err)) {
page_zero_new_buffers(page, from, to);
+ ClearPageUptodate(page);
+ }
return err;
}
+EXPORT_SYMBOL(block_prepare_write);
static int __block_commit_write(struct inode *inode, struct page *page,
unsigned from, unsigned to)
@@ -1948,90 +1952,41 @@ static int __block_commit_write(struct inode *inode, struct page *page,
return 0;
}
-/*
- * Filesystems implementing the new truncate sequence should use the
- * _newtrunc postfix variant which won't incorrectly call vmtruncate.
- * The filesystem needs to handle block truncation upon failure.
- */
-int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block)
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block)
{
- struct inode *inode = mapping->host;
- int status = 0;
- struct page *page;
- pgoff_t index;
- unsigned start, end;
- int ownpage = 0;
-
- index = pos >> PAGE_CACHE_SHIFT;
- start = pos & (PAGE_CACHE_SIZE - 1);
- end = start + len;
-
- page = *pagep;
- if (page == NULL) {
- ownpage = 1;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page) {
- status = -ENOMEM;
- goto out;
- }
- *pagep = page;
- } else
- BUG_ON(!PageLocked(page));
-
- status = __block_prepare_write(inode, page, start, end, get_block);
- if (unlikely(status)) {
- ClearPageUptodate(page);
+ unsigned start = pos & (PAGE_CACHE_SIZE - 1);
- if (ownpage) {
- unlock_page(page);
- page_cache_release(page);
- *pagep = NULL;
- }
- }
-
-out:
- return status;
+ return block_prepare_write(page, start, start + len, get_block);
}
-EXPORT_SYMBOL(block_write_begin_newtrunc);
+EXPORT_SYMBOL(__block_write_begin);
/*
* block_write_begin takes care of the basic task of block allocation and
* bringing partial write blocks uptodate first.
*
- * If *pagep is not NULL, then block_write_begin uses the locked page
- * at *pagep rather than allocating its own. In this case, the page will
- * not be unlocked or deallocated on failure.
+ * The filesystem needs to handle block truncation upon failure.
*/
-int block_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block)
+int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
+ unsigned flags, struct page **pagep, get_block_t *get_block)
{
- int ret;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ struct page *page;
+ int status;
- ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
- pagep, fsdata, get_block);
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
- /*
- * prepare_write() may have instantiated a few blocks
- * outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
- *
- * Filesystems which pass down their own page also cannot
- * call into vmtruncate here because it would lead to lock
- * inversion problems (*pagep is locked). This is a further
- * example of where the old truncate sequence is inadequate.
- */
- if (unlikely(ret) && *pagep == NULL) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
+ status = __block_write_begin(page, pos, len, get_block);
+ if (unlikely(status)) {
+ unlock_page(page);
+ page_cache_release(page);
+ page = NULL;
}
- return ret;
+ *pagep = page;
+ return status;
}
EXPORT_SYMBOL(block_write_begin);
@@ -2351,7 +2306,7 @@ out:
* For moronic filesystems that do not allow holes in files.
* We may have to extend the file.
*/
-int cont_write_begin_newtrunc(struct file *file, struct address_space *mapping,
+int cont_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
get_block_t *get_block, loff_t *bytes)
@@ -2363,7 +2318,7 @@ int cont_write_begin_newtrunc(struct file *file, struct address_space *mapping,
err = cont_expand_zero(file, mapping, pos, bytes);
if (err)
- goto out;
+ return err;
zerofrom = *bytes & ~PAGE_CACHE_MASK;
if (pos+len > *bytes && zerofrom & (blocksize-1)) {
@@ -2371,44 +2326,10 @@ int cont_write_begin_newtrunc(struct file *file, struct address_space *mapping,
(*bytes)++;
}
- *pagep = NULL;
- err = block_write_begin_newtrunc(file, mapping, pos, len,
- flags, pagep, fsdata, get_block);
-out:
- return err;
-}
-EXPORT_SYMBOL(cont_write_begin_newtrunc);
-
-int cont_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block, loff_t *bytes)
-{
- int ret;
-
- ret = cont_write_begin_newtrunc(file, mapping, pos, len, flags,
- pagep, fsdata, get_block, bytes);
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
-
- return ret;
+ return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
-int block_prepare_write(struct page *page, unsigned from, unsigned to,
- get_block_t *get_block)
-{
- struct inode *inode = page->mapping->host;
- int err = __block_prepare_write(inode, page, from, to, get_block);
- if (err)
- ClearPageUptodate(page);
- return err;
-}
-EXPORT_SYMBOL(block_prepare_write);
-
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
struct inode *inode = page->mapping->host;
@@ -2510,11 +2431,11 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
}
/*
- * Filesystems implementing the new truncate sequence should use the
- * _newtrunc postfix variant which won't incorrectly call vmtruncate.
+ * On entry, the page is fully not uptodate.
+ * On exit the page is fully uptodate in the areas outside (from,to)
* The filesystem needs to handle block truncation upon failure.
*/
-int nobh_write_begin_newtrunc(struct file *file, struct address_space *mapping,
+int nobh_write_begin(struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
get_block_t *get_block)
@@ -2547,8 +2468,8 @@ int nobh_write_begin_newtrunc(struct file *file, struct address_space *mapping,
unlock_page(page);
page_cache_release(page);
*pagep = NULL;
- return block_write_begin_newtrunc(file, mapping, pos, len,
- flags, pagep, fsdata, get_block);
+ return block_write_begin(mapping, pos, len, flags, pagep,
+ get_block);
}
if (PageMappedToDisk(page))
@@ -2654,35 +2575,6 @@ out_release:
return ret;
}
-EXPORT_SYMBOL(nobh_write_begin_newtrunc);
-
-/*
- * On entry, the page is fully not uptodate.
- * On exit the page is fully uptodate in the areas outside (from,to)
- */
-int nobh_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block)
-{
- int ret;
-
- ret = nobh_write_begin_newtrunc(file, mapping, pos, len, flags,
- pagep, fsdata, get_block);
-
- /*
- * prepare_write() may have instantiated a few blocks
- * outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
- */
- if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
- }
-
- return ret;
-}
EXPORT_SYMBOL(nobh_write_begin);
int nobh_write_end(struct file *file, struct address_space *mapping,
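
The buffer.c rework removes the *_newtrunc variants and makes the plain helpers follow the new truncate sequence, shifting responsibility for trimming blocks instantiated past i_size onto the caller. A sketch of a ->write_begin under the new contract, mirroring the bfs conversion earlier in this diff (example_get_block is hypothetical):

#include <linux/buffer_head.h>
#include <linux/mm.h>

/* The helper no longer truncates for you, so trim any blocks
 * instantiated past i_size when it fails. */
static int example_write_begin(struct file *file,
                               struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                example_get_block);
        if (unlikely(ret)) {
                loff_t isize = mapping->host->i_size;
                if (pos + len > isize)
                        vmtruncate(mapping->host, isize);
        }
        return ret;
}
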
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index 2906077ac79..a2603e7c0bb 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -146,7 +146,7 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
goto error_unsupported;
/* get the cache size and blocksize */
- ret = vfs_statfs(root, &stats);
+ ret = vfs_statfs(&path, &stats);
if (ret < 0)
goto error_unsupported;
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index c2413561ea7..727caedcdd9 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -552,8 +552,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
*/
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
{
- struct fs_struct *fs;
- struct dentry *dir;
+ struct path path;
const struct cred *saved_cred;
int ret;
@@ -573,24 +572,21 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
}
/* extract the directory dentry from the cwd */
- fs = current->fs;
- read_lock(&fs->lock);
- dir = dget(fs->pwd.dentry);
- read_unlock(&fs->lock);
+ get_fs_pwd(current->fs, &path);
- if (!S_ISDIR(dir->d_inode->i_mode))
+ if (!S_ISDIR(path.dentry->d_inode->i_mode))
goto notdir;
cachefiles_begin_secure(cache, &saved_cred);
- ret = cachefiles_cull(cache, dir, args);
+ ret = cachefiles_cull(cache, path.dentry, args);
cachefiles_end_secure(cache, saved_cred);
- dput(dir);
+ path_put(&path);
_leave(" = %d", ret);
return ret;
notdir:
- dput(dir);
+ path_put(&path);
kerror("cull command requires dirfd to be a directory");
return -ENOTDIR;
@@ -628,8 +624,7 @@ inval:
*/
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
{
- struct fs_struct *fs;
- struct dentry *dir;
+ struct path path;
const struct cred *saved_cred;
int ret;
@@ -649,24 +644,21 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
}
/* extract the directory dentry from the cwd */
- fs = current->fs;
- read_lock(&fs->lock);
- dir = dget(fs->pwd.dentry);
- read_unlock(&fs->lock);
+ get_fs_pwd(current->fs, &path);
- if (!S_ISDIR(dir->d_inode->i_mode))
+ if (!S_ISDIR(path.dentry->d_inode->i_mode))
goto notdir;
cachefiles_begin_secure(cache, &saved_cred);
- ret = cachefiles_check_in_use(cache, dir, args);
+ ret = cachefiles_check_in_use(cache, path.dentry, args);
cachefiles_end_secure(cache, saved_cred);
- dput(dir);
+ path_put(&path);
//_leave(" = %d", ret);
return ret;
notdir:
- dput(dir);
+ path_put(&path);
kerror("inuse command requires dirfd to be a directory");
return -ENOTDIR;
@@ -683,6 +675,10 @@ int cachefiles_has_space(struct cachefiles_cache *cache,
unsigned fnr, unsigned bnr)
{
struct kstatfs stats;
+ struct path path = {
+ .mnt = cache->mnt,
+ .dentry = cache->mnt->mnt_root,
+ };
int ret;
//_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
@@ -697,7 +693,7 @@ int cachefiles_has_space(struct cachefiles_cache *cache,
/* find out how many pages of blockdev are available */
memset(&stats, 0, sizeof(stats));
- ret = vfs_statfs(cache->mnt->mnt_root, &stats);
+ ret = vfs_statfs(&path, &stats);
if (ret < 0) {
if (ret == -EIO)
cachefiles_io_error(cache, "statfs failed");
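
Two API shifts drive the daemon.c changes: get_fs_pwd() safely snapshots the caller's cwd as a struct path, and vfs_statfs() now takes a struct path rather than a bare dentry. A sketch of the new vfs_statfs() calling convention:

#include <linux/mount.h>
#include <linux/path.h>
#include <linux/statfs.h>

/* vfs_statfs() now wants a struct path (vfsmount + dentry), matching
 * the cachefiles_has_space() hunk above. */
static int example_cache_statfs(struct vfsmount *mnt, struct kstatfs *st)
{
        struct path path = {
                .mnt    = mnt,
                .dentry = mnt->mnt_root,
        };

        return vfs_statfs(&path, st);
}
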
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index a8cd821226d..bd6bc1bde2d 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -267,13 +267,6 @@ do { \
#define dbgprintk(FMT, ...) \
printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
-/* make sure we maintain the format strings, even when debugging is disabled */
-static inline void _dbprintk(const char *fmt, ...)
- __attribute__((format(printf, 1, 2)));
-static inline void _dbprintk(const char *fmt, ...)
-{
-}
-
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
@@ -304,9 +297,9 @@ do { \
} while (0)
#else
-#define _enter(FMT, ...) _dbprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define _leave(FMT, ...) _dbprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define _debug(FMT, ...) _dbprintk(FMT, ##__VA_ARGS__)
+#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
#if 1 /* defined(__KDEBUGALL) */
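
The deleted _dbprintk() existed only so that disabled debug statements still had their format strings type-checked; the kernel-wide no_printk() provides the same guarantee, so the private copy can go. The idiom is roughly:

	/* sketch of the no_printk() idiom: emits nothing, but keeps
	 * printf-style format checking when debugging is compiled out */
	static inline __attribute__((format(printf, 1, 2)))
	int no_printk(const char *fmt, ...)
	{
		return 0;
	}
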
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index f4a7840bf42..42c7fafc8bf 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -37,9 +37,9 @@ void __cachefiles_printk_object(struct cachefiles_object *object,
printk(KERN_ERR "%sobject: OBJ%x\n",
prefix, object->fscache.debug_id);
- printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
+ printk(KERN_ERR "%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
prefix, fscache_object_states[object->fscache.state],
- object->fscache.flags, object->fscache.work.flags,
+ object->fscache.flags, work_busy(&object->fscache.work),
object->fscache.events,
object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
@@ -212,7 +212,7 @@ wait_for_old_object:
/* if the object we're waiting for is queued for processing,
* then just put ourselves on the queue behind it */
- if (slow_work_is_queued(&xobject->fscache.work)) {
+ if (work_pending(&xobject->fscache.work)) {
_debug("queue OBJ%x behind OBJ%x immediately",
object->fscache.debug_id,
xobject->fscache.debug_id);
@@ -220,8 +220,7 @@ wait_for_old_object:
}
/* otherwise we sleep until either the object we're waiting for
- * is done, or the slow-work facility wants the thread back to
- * do other work */
+ * is done, or the fscache_object is congested */
wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
init_wait(&wait);
requeue = false;
@@ -229,8 +228,8 @@ wait_for_old_object:
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
break;
- requeue = slow_work_sleep_till_thread_needed(
- &object->fscache.work, &timeout);
+
+ requeue = fscache_object_sleep_till_congested(&timeout);
} while (timeout > 0 && !requeue);
finish_wait(wq, &wait);
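
With slow-work gone, work_pending() substitutes for slow_work_is_queued() and the sleep helper changes, but the wait above keeps the standard prepare_to_wait()/finish_wait() shape; only the "give up and requeue" test differs. For reference, the canonical form of such a loop, with a plain schedule_timeout() standing in for fscache_object_sleep_till_congested():

	init_wait(&wait);
	do {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (condition)			/* here: the ACTIVE bit cleared */
			break;
		timeout = schedule_timeout(timeout);
	} while (timeout > 0);
	finish_wait(wq, &wait);
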
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 0f0d41fbb03..0e3c0924cc3 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -422,7 +422,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
- op->op.flags |= FSCACHE_OP_FAST;
+ op->op.flags |= FSCACHE_OP_ASYNC;
op->op.processor = cachefiles_read_copier;
pagevec_init(&pagevec, 0);
@@ -729,7 +729,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
pagevec_init(&pagevec, 0);
op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
- op->op.flags |= FSCACHE_OP_FAST;
+ op->op.flags |= FSCACHE_OP_ASYNC;
op->op.processor = cachefiles_read_copier;
INIT_LIST_HEAD(&backpages);
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 04b8280582a..bc87b9c1d27 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -2,7 +2,7 @@ config CEPH_FS
tristate "Ceph distributed file system (EXPERIMENTAL)"
depends on INET && EXPERIMENTAL
select LIBCRC32C
- select CONFIG_CRYPTO_AES
+ select CRYPTO_AES
help
Choose Y or M here to include support for mounting the
experimental Ceph distributed file system. Ceph is an extremely
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 6a660e610be..278e1172600 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -6,7 +6,7 @@ ifneq ($(KERNELRELEASE),)
obj-$(CONFIG_CEPH_FS) += ceph.o
-ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \
+ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
export.o caps.o snap.o xattr.o \
messenger.o msgpool.o buffer.o pagelist.o \
mds_client.o mdsmap.o \
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index d9c60b84949..5598a0d0229 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -309,7 +309,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
zero_user_segment(page, s, PAGE_CACHE_SIZE);
}
- if (add_to_page_cache_lru(page, mapping, page->index, GFP_NOFS)) {
+ if (add_to_page_cache_lru(page, mapping, page->index,
+ GFP_NOFS)) {
page_cache_release(page);
dout("readpages %p add_to_page_cache failed %p\n",
inode, page);
@@ -552,7 +553,7 @@ static void writepages_finish(struct ceph_osd_request *req,
* page truncation thread, possibly losing some data that
* raced its way in
*/
- if ((issued & CEPH_CAP_FILE_CACHE) == 0)
+ if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
generic_error_remove_page(inode->i_mapping, page);
unlock_page(page);
@@ -797,9 +798,12 @@ get_more_pages:
dout("%p will write page %p idx %lu\n",
inode, page, page->index);
- writeback_stat = atomic_long_inc_return(&client->writeback_count);
- if (writeback_stat > CONGESTION_ON_THRESH(client->mount_args->congestion_kb)) {
- set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC);
+ writeback_stat =
+ atomic_long_inc_return(&client->writeback_count);
+ if (writeback_stat > CONGESTION_ON_THRESH(
+ client->mount_args->congestion_kb)) {
+ set_bdi_congested(&client->backing_dev_info,
+ BLK_RW_ASYNC);
}
set_page_writeback(page);
@@ -1036,7 +1040,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
*pagep = page;
dout("write_begin file %p inode %p page %p %d~%d\n", file,
- inode, page, (int)pos, (int)len);
+ inode, page, (int)pos, (int)len);
r = ceph_update_writeable_page(file, pos, len, page);
} while (r == -EAGAIN);
diff --git a/fs/ceph/armor.c b/fs/ceph/armor.c
index 67b2c030924..eb2a666b0be 100644
--- a/fs/ceph/armor.c
+++ b/fs/ceph/armor.c
@@ -1,11 +1,15 @@
#include <linux/errno.h>
+int ceph_armor(char *dst, const char *src, const char *end);
+int ceph_unarmor(char *dst, const char *src, const char *end);
+
/*
* base64 encode/decode.
*/
-const char *pem_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+static const char *pem_key =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static int encode_bits(int c)
{
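
Beyond the sparse cleanups here (a static pem_key, local prototypes), the table is the standard base64 alphabet. For reference, a self-contained sketch of how one 3-byte group maps onto four alphabet characters:

	#include <stdio.h>

	static const char *pem_key =
		"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

	/* sketch: encode one 3-byte group into 4 base64 characters */
	static void encode_group(const unsigned char s[3], char d[4])
	{
		d[0] = pem_key[s[0] >> 2];
		d[1] = pem_key[((s[0] & 3) << 4) | (s[1] >> 4)];
		d[2] = pem_key[((s[1] & 15) << 2) | (s[2] >> 6)];
		d[3] = pem_key[s[2] & 63];
	}

	int main(void)
	{
		char out[5] = { 0 };

		encode_group((const unsigned char *)"Man", out);
		printf("%s\n", out);	/* prints "TWFu" */
		return 0;
	}
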
diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c
index 89490beaf53..6d2e3060062 100644
--- a/fs/ceph/auth.c
+++ b/fs/ceph/auth.c
@@ -20,7 +20,7 @@ static u32 supported_protocols[] = {
CEPH_AUTH_CEPHX
};
-int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol)
+static int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol)
{
switch (protocol) {
case CEPH_AUTH_NONE:
@@ -133,8 +133,8 @@ bad:
return -ERANGE;
}
-int ceph_build_auth_request(struct ceph_auth_client *ac,
- void *msg_buf, size_t msg_len)
+static int ceph_build_auth_request(struct ceph_auth_client *ac,
+ void *msg_buf, size_t msg_len)
{
struct ceph_mon_request_header *monhdr = msg_buf;
void *p = monhdr + 1;
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index 83d4d2785ff..582e0b2caf8 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -87,8 +87,8 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret,
/*
* get existing (or insert new) ticket handler
*/
-struct ceph_x_ticket_handler *get_ticket_handler(struct ceph_auth_client *ac,
- int service)
+static struct ceph_x_ticket_handler *
+get_ticket_handler(struct ceph_auth_client *ac, int service)
{
struct ceph_x_ticket_handler *th;
struct ceph_x_info *xi = ac->private;
@@ -429,7 +429,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
auth->struct_v = 1;
auth->key = 0;
for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++)
- auth->key ^= *u;
+ auth->key ^= *(__le64 *)u;
dout(" server_challenge %llx client_challenge %llx key %llx\n",
xi->server_challenge, le64_to_cpu(auth->client_challenge),
le64_to_cpu(auth->key));
@@ -493,7 +493,7 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
return -EAGAIN;
}
- op = le32_to_cpu(head->op);
+ op = le16_to_cpu(head->op);
result = le32_to_cpu(head->result);
dout("handle_reply op %d result %d\n", op, result);
switch (op) {
@@ -613,6 +613,9 @@ static void ceph_x_destroy(struct ceph_auth_client *ac)
remove_ticket_handler(ac, th);
}
+ if (xi->auth_authorizer.buf)
+ ceph_buffer_put(xi->auth_authorizer.buf);
+
kfree(ac->private);
ac->private = NULL;
}
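
Two of the hunks above are endianness fixes rather than cleanups: head->op is a __le16, so decoding it with le32_to_cpu() read two bytes of adjacent data, and the key derivation now xors explicitly little-endian words so the folded key is identical regardless of host byte order. The fold in isolation, using the names from the hunk above:

	/* sketch: fold a buffer into a 64-bit key by xor of __le64 words;
	 * the same input bytes yield the same wire-order key on any host */
	__le64 key = 0;
	__le64 *u;

	for (u = (__le64 *)tmp_enc; u + 1 <= (__le64 *)(tmp_enc + ret); u++)
		key ^= *u;
	auth->key = key;
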
diff --git a/fs/ceph/buffer.c b/fs/ceph/buffer.c
index c67535d70aa..cd39f17021d 100644
--- a/fs/ceph/buffer.c
+++ b/fs/ceph/buffer.c
@@ -47,22 +47,6 @@ void ceph_buffer_release(struct kref *kref)
kfree(b);
}
-int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp)
-{
- b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
- if (b->vec.iov_base) {
- b->is_vmalloc = false;
- } else {
- b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
- b->is_vmalloc = true;
- }
- if (!b->vec.iov_base)
- return -ENOMEM;
- b->alloc_len = len;
- b->vec.iov_len = len;
- return 0;
-}
-
int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
{
size_t len;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ae3e3a30644..7bf182b0397 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -113,58 +113,41 @@ const char *ceph_cap_string(int caps)
return cap_str[i];
}
-/*
- * Cap reservations
- *
- * Maintain a global pool of preallocated struct ceph_caps, referenced
- * by struct ceph_caps_reservations. This ensures that we preallocate
- * memory needed to successfully process an MDS response. (If an MDS
- * sends us cap information and we fail to process it, we will have
- * problems due to the client and MDS being out of sync.)
- *
- * Reservations are 'owned' by a ceph_cap_reservation context.
- */
-static spinlock_t caps_list_lock;
-static struct list_head caps_list; /* unused (reserved or unreserved) */
-static int caps_total_count; /* total caps allocated */
-static int caps_use_count; /* in use */
-static int caps_reserve_count; /* unused, reserved */
-static int caps_avail_count; /* unused, unreserved */
-static int caps_min_count; /* keep at least this many (unreserved) */
-
-void __init ceph_caps_init(void)
+void ceph_caps_init(struct ceph_mds_client *mdsc)
{
- INIT_LIST_HEAD(&caps_list);
- spin_lock_init(&caps_list_lock);
+ INIT_LIST_HEAD(&mdsc->caps_list);
+ spin_lock_init(&mdsc->caps_list_lock);
}
-void ceph_caps_finalize(void)
+void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
struct ceph_cap *cap;
- spin_lock(&caps_list_lock);
- while (!list_empty(&caps_list)) {
- cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
+ spin_lock(&mdsc->caps_list_lock);
+ while (!list_empty(&mdsc->caps_list)) {
+ cap = list_first_entry(&mdsc->caps_list,
+ struct ceph_cap, caps_item);
list_del(&cap->caps_item);
kmem_cache_free(ceph_cap_cachep, cap);
}
- caps_total_count = 0;
- caps_avail_count = 0;
- caps_use_count = 0;
- caps_reserve_count = 0;
- caps_min_count = 0;
- spin_unlock(&caps_list_lock);
+ mdsc->caps_total_count = 0;
+ mdsc->caps_avail_count = 0;
+ mdsc->caps_use_count = 0;
+ mdsc->caps_reserve_count = 0;
+ mdsc->caps_min_count = 0;
+ spin_unlock(&mdsc->caps_list_lock);
}
-void ceph_adjust_min_caps(int delta)
+void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
- spin_lock(&caps_list_lock);
- caps_min_count += delta;
- BUG_ON(caps_min_count < 0);
- spin_unlock(&caps_list_lock);
+ spin_lock(&mdsc->caps_list_lock);
+ mdsc->caps_min_count += delta;
+ BUG_ON(mdsc->caps_min_count < 0);
+ spin_unlock(&mdsc->caps_list_lock);
}
-int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
+int ceph_reserve_caps(struct ceph_mds_client *mdsc,
+ struct ceph_cap_reservation *ctx, int need)
{
int i;
struct ceph_cap *cap;
@@ -176,16 +159,17 @@ int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
dout("reserve caps ctx=%p need=%d\n", ctx, need);
/* first reserve any caps that are already allocated */
- spin_lock(&caps_list_lock);
- if (caps_avail_count >= need)
+ spin_lock(&mdsc->caps_list_lock);
+ if (mdsc->caps_avail_count >= need)
have = need;
else
- have = caps_avail_count;
- caps_avail_count -= have;
- caps_reserve_count += have;
- BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
- caps_avail_count);
- spin_unlock(&caps_list_lock);
+ have = mdsc->caps_avail_count;
+ mdsc->caps_avail_count -= have;
+ mdsc->caps_reserve_count += have;
+ BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
+ mdsc->caps_reserve_count +
+ mdsc->caps_avail_count);
+ spin_unlock(&mdsc->caps_list_lock);
for (i = have; i < need; i++) {
cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
@@ -198,19 +182,20 @@ int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
}
BUG_ON(have + alloc != need);
- spin_lock(&caps_list_lock);
- caps_total_count += alloc;
- caps_reserve_count += alloc;
- list_splice(&newcaps, &caps_list);
+ spin_lock(&mdsc->caps_list_lock);
+ mdsc->caps_total_count += alloc;
+ mdsc->caps_reserve_count += alloc;
+ list_splice(&newcaps, &mdsc->caps_list);
- BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
- caps_avail_count);
- spin_unlock(&caps_list_lock);
+ BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
+ mdsc->caps_reserve_count +
+ mdsc->caps_avail_count);
+ spin_unlock(&mdsc->caps_list_lock);
ctx->count = need;
dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
- ctx, caps_total_count, caps_use_count, caps_reserve_count,
- caps_avail_count);
+ ctx, mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
return 0;
out_alloc_count:
@@ -220,92 +205,104 @@ out_alloc_count:
return ret;
}
-int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
+int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
+ struct ceph_cap_reservation *ctx)
{
dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
if (ctx->count) {
- spin_lock(&caps_list_lock);
- BUG_ON(caps_reserve_count < ctx->count);
- caps_reserve_count -= ctx->count;
- caps_avail_count += ctx->count;
+ spin_lock(&mdsc->caps_list_lock);
+ BUG_ON(mdsc->caps_reserve_count < ctx->count);
+ mdsc->caps_reserve_count -= ctx->count;
+ mdsc->caps_avail_count += ctx->count;
ctx->count = 0;
dout("unreserve caps %d = %d used + %d resv + %d avail\n",
- caps_total_count, caps_use_count, caps_reserve_count,
- caps_avail_count);
- BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
- caps_avail_count);
- spin_unlock(&caps_list_lock);
+ mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
+ mdsc->caps_reserve_count +
+ mdsc->caps_avail_count);
+ spin_unlock(&mdsc->caps_list_lock);
}
return 0;
}
-static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
+static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
+ struct ceph_cap_reservation *ctx)
{
struct ceph_cap *cap = NULL;
/* temporary, until we do something about cap import/export */
- if (!ctx)
- return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
+ if (!ctx) {
+ cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
+ if (cap) {
+ mdsc->caps_use_count++;
+ mdsc->caps_total_count++;
+ }
+ return cap;
+ }
- spin_lock(&caps_list_lock);
+ spin_lock(&mdsc->caps_list_lock);
dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
- ctx, ctx->count, caps_total_count, caps_use_count,
- caps_reserve_count, caps_avail_count);
+ ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
BUG_ON(!ctx->count);
- BUG_ON(ctx->count > caps_reserve_count);
- BUG_ON(list_empty(&caps_list));
+ BUG_ON(ctx->count > mdsc->caps_reserve_count);
+ BUG_ON(list_empty(&mdsc->caps_list));
ctx->count--;
- caps_reserve_count--;
- caps_use_count++;
+ mdsc->caps_reserve_count--;
+ mdsc->caps_use_count++;
- cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
+ cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
list_del(&cap->caps_item);
- BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
- caps_avail_count);
- spin_unlock(&caps_list_lock);
+ BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
+ mdsc->caps_reserve_count + mdsc->caps_avail_count);
+ spin_unlock(&mdsc->caps_list_lock);
return cap;
}
-void ceph_put_cap(struct ceph_cap *cap)
+void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
- spin_lock(&caps_list_lock);
+ spin_lock(&mdsc->caps_list_lock);
dout("put_cap %p %d = %d used + %d resv + %d avail\n",
- cap, caps_total_count, caps_use_count,
- caps_reserve_count, caps_avail_count);
- caps_use_count--;
+ cap, mdsc->caps_total_count, mdsc->caps_use_count,
+ mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ mdsc->caps_use_count--;
/*
* Keep some preallocated caps around (caps_min_count), to
* avoid lots of free/alloc churn.
*/
- if (caps_avail_count >= caps_reserve_count + caps_min_count) {
- caps_total_count--;
+ if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
+ mdsc->caps_min_count) {
+ mdsc->caps_total_count--;
kmem_cache_free(ceph_cap_cachep, cap);
} else {
- caps_avail_count++;
- list_add(&cap->caps_item, &caps_list);
+ mdsc->caps_avail_count++;
+ list_add(&cap->caps_item, &mdsc->caps_list);
}
- BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
- caps_avail_count);
- spin_unlock(&caps_list_lock);
+ BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
+ mdsc->caps_reserve_count + mdsc->caps_avail_count);
+ spin_unlock(&mdsc->caps_list_lock);
}
void ceph_reservation_status(struct ceph_client *client,
int *total, int *avail, int *used, int *reserved,
int *min)
{
+ struct ceph_mds_client *mdsc = &client->mdsc;
+
if (total)
- *total = caps_total_count;
+ *total = mdsc->caps_total_count;
if (avail)
- *avail = caps_avail_count;
+ *avail = mdsc->caps_avail_count;
if (used)
- *used = caps_use_count;
+ *used = mdsc->caps_use_count;
if (reserved)
- *reserved = caps_reserve_count;
+ *reserved = mdsc->caps_reserve_count;
if (min)
- *min = caps_min_count;
+ *min = mdsc->caps_min_count;
}
/*
@@ -330,22 +327,29 @@ static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
return NULL;
}
+struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
+{
+ struct ceph_cap *cap;
+
+ spin_lock(&ci->vfs_inode.i_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ spin_unlock(&ci->vfs_inode.i_lock);
+ return cap;
+}
+
/*
- * Return id of any MDS with a cap, preferably FILE_WR|WRBUFFER|EXCL, else
- * -1.
+ * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
*/
-static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq)
+static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
struct ceph_cap *cap;
int mds = -1;
struct rb_node *p;
- /* prefer mds with WR|WRBUFFER|EXCL caps */
+ /* prefer mds with WR|BUFFER|EXCL caps */
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
cap = rb_entry(p, struct ceph_cap, ci_node);
mds = cap->mds;
- if (mseq)
- *mseq = cap->mseq;
if (cap->issued & (CEPH_CAP_FILE_WR |
CEPH_CAP_FILE_BUFFER |
CEPH_CAP_FILE_EXCL))
@@ -358,7 +362,7 @@ int ceph_get_cap_mds(struct inode *inode)
{
int mds;
spin_lock(&inode->i_lock);
- mds = __ceph_get_cap_mds(ceph_inode(inode), NULL);
+ mds = __ceph_get_cap_mds(ceph_inode(inode));
spin_unlock(&inode->i_lock);
return mds;
}
@@ -477,8 +481,8 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
* Each time we receive FILE_CACHE anew, we increment
* i_rdcache_gen.
*/
- if ((issued & CEPH_CAP_FILE_CACHE) &&
- (had & CEPH_CAP_FILE_CACHE) == 0)
+ if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
+ (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
ci->i_rdcache_gen++;
/*
@@ -537,7 +541,7 @@ retry:
new_cap = NULL;
} else {
spin_unlock(&inode->i_lock);
- new_cap = get_cap(caps_reservation);
+ new_cap = get_cap(mdsc, caps_reservation);
if (new_cap == NULL)
return -ENOMEM;
goto retry;
@@ -582,6 +586,7 @@ retry:
} else {
pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
realmino);
+ WARN_ON(!realm);
}
}
@@ -621,7 +626,7 @@ retry:
if (fmode >= 0)
__ceph_get_fmode(ci, fmode);
spin_unlock(&inode->i_lock);
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
return 0;
}
@@ -825,7 +830,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
int want = 0;
int mode;
- for (mode = 0; mode < 4; mode++)
+ for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
if (ci->i_nr_by_mode[mode])
want |= ceph_caps_for_mode(mode);
return want;
@@ -895,7 +900,7 @@ void __ceph_remove_cap(struct ceph_cap *cap)
ci->i_auth_cap = NULL;
if (removed)
- ceph_put_cap(cap);
+ ceph_put_cap(mdsc, cap);
if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
struct ceph_snap_realm *realm = ci->i_snap_realm;
@@ -981,6 +986,46 @@ static int send_cap_msg(struct ceph_mds_session *session,
return 0;
}
+static void __queue_cap_release(struct ceph_mds_session *session,
+ u64 ino, u64 cap_id, u32 migrate_seq,
+ u32 issue_seq)
+{
+ struct ceph_msg *msg;
+ struct ceph_mds_cap_release *head;
+ struct ceph_mds_cap_item *item;
+
+ spin_lock(&session->s_cap_lock);
+ BUG_ON(!session->s_num_cap_releases);
+ msg = list_first_entry(&session->s_cap_releases,
+ struct ceph_msg, list_head);
+
+ dout(" adding %llx release to mds%d msg %p (%d left)\n",
+ ino, session->s_mds, msg, session->s_num_cap_releases);
+
+ BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
+ head = msg->front.iov_base;
+ head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
+ item = msg->front.iov_base + msg->front.iov_len;
+ item->ino = cpu_to_le64(ino);
+ item->cap_id = cpu_to_le64(cap_id);
+ item->migrate_seq = cpu_to_le32(migrate_seq);
+ item->seq = cpu_to_le32(issue_seq);
+
+ session->s_num_cap_releases--;
+
+ msg->front.iov_len += sizeof(*item);
+ if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
+ dout(" release msg %p full\n", msg);
+ list_move_tail(&msg->list_head, &session->s_cap_releases_done);
+ } else {
+ dout(" release msg %p at %d/%d (%d)\n", msg,
+ (int)le32_to_cpu(head->num),
+ (int)CEPH_CAPS_PER_RELEASE,
+ (int)msg->front.iov_len);
+ }
+ spin_unlock(&session->s_cap_lock);
+}
+
/*
* Queue cap releases when an inode is dropped from our cache. Since
* the inode is about to be destroyed, there is no need for i_lock.
@@ -994,41 +1039,9 @@ void ceph_queue_caps_release(struct inode *inode)
while (p) {
struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
struct ceph_mds_session *session = cap->session;
- struct ceph_msg *msg;
- struct ceph_mds_cap_release *head;
- struct ceph_mds_cap_item *item;
- spin_lock(&session->s_cap_lock);
- BUG_ON(!session->s_num_cap_releases);
- msg = list_first_entry(&session->s_cap_releases,
- struct ceph_msg, list_head);
-
- dout(" adding %p release to mds%d msg %p (%d left)\n",
- inode, session->s_mds, msg, session->s_num_cap_releases);
-
- BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
- head = msg->front.iov_base;
- head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
- item = msg->front.iov_base + msg->front.iov_len;
- item->ino = cpu_to_le64(ceph_ino(inode));
- item->cap_id = cpu_to_le64(cap->cap_id);
- item->migrate_seq = cpu_to_le32(cap->mseq);
- item->seq = cpu_to_le32(cap->issue_seq);
-
- session->s_num_cap_releases--;
-
- msg->front.iov_len += sizeof(*item);
- if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
- dout(" release msg %p full\n", msg);
- list_move_tail(&msg->list_head,
- &session->s_cap_releases_done);
- } else {
- dout(" release msg %p at %d/%d (%d)\n", msg,
- (int)le32_to_cpu(head->num),
- (int)CEPH_CAPS_PER_RELEASE,
- (int)msg->front.iov_len);
- }
- spin_unlock(&session->s_cap_lock);
+ __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
+ cap->mseq, cap->issue_seq);
p = rb_next(p);
__ceph_remove_cap(cap);
}
@@ -1167,7 +1180,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
}
if (wake)
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
return delayed;
}
@@ -1183,6 +1196,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
*/
void __ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession)
+ __releases(ci->vfs_inode->i_lock)
+ __acquires(ci->vfs_inode->i_lock)
{
struct inode *inode = &ci->vfs_inode;
int mds;
@@ -1218,7 +1233,13 @@ retry:
BUG_ON(capsnap->dirty == 0);
/* pick mds, take s_mutex */
- mds = __ceph_get_cap_mds(ci, &mseq);
+ if (ci->i_auth_cap == NULL) {
+ dout("no auth cap (migrating?), doing nothing\n");
+ goto out;
+ }
+ mds = ci->i_auth_cap->session->s_mds;
+ mseq = ci->i_auth_cap->mseq;
+
if (session && session->s_mds != mds) {
dout("oops, wrong session %p mutex\n", session);
mutex_unlock(&session->s_mutex);
@@ -1237,8 +1258,8 @@ retry:
}
/*
* if session == NULL, we raced against a cap
- * deletion. retry, and we'll get a better
- * @mds value next time.
+ * deletion or migration. retry, and we'll
+ * get a better @mds value next time.
*/
spin_lock(&inode->i_lock);
goto retry;
@@ -1276,6 +1297,7 @@ retry:
list_del_init(&ci->i_snap_flush_item);
spin_unlock(&mdsc->snap_flush_lock);
+out:
if (psession)
*psession = session;
else if (session) {
@@ -1421,7 +1443,6 @@ static int try_nonblocking_invalidate(struct inode *inode)
*/
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session)
- __releases(session->s_mutex)
{
struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
struct ceph_mds_client *mdsc = &client->mdsc;
@@ -1496,11 +1517,13 @@ retry_locked:
ci->i_wrbuffer_ref == 0 && /* no dirty pages... */
ci->i_rdcache_gen && /* may have cached pages */
(file_wanted == 0 || /* no open files */
- (revoking & CEPH_CAP_FILE_CACHE)) && /* or revoking cache */
+ (revoking & (CEPH_CAP_FILE_CACHE|
+ CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */
!tried_invalidate) {
dout("check_caps trying to invalidate on %p\n", inode);
if (try_nonblocking_invalidate(inode) < 0) {
- if (revoking & CEPH_CAP_FILE_CACHE) {
+ if (revoking & (CEPH_CAP_FILE_CACHE|
+ CEPH_CAP_FILE_LAZYIO)) {
dout("check_caps queuing invalidate\n");
queue_invalidate = 1;
ci->i_rdcache_revoking = ci->i_rdcache_gen;
@@ -2139,7 +2162,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
else if (flushsnaps)
ceph_flush_snaps(ci);
if (wake)
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
if (put)
iput(inode);
}
@@ -2215,7 +2238,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
iput(inode);
} else if (complete_capsnap) {
ceph_flush_snaps(ci);
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
}
if (drop_capsnap)
iput(inode);
@@ -2236,8 +2259,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
struct ceph_mds_session *session,
struct ceph_cap *cap,
struct ceph_buffer *xattr_buf)
- __releases(inode->i_lock)
- __releases(session->s_mutex)
+ __releases(inode->i_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -2264,6 +2286,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
* will invalidate _after_ writeback.)
*/
if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
+ (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
!ci->i_wrbuffer_ref) {
if (try_nonblocking_invalidate(inode) == 0) {
revoked_rdcache = 1;
@@ -2355,15 +2378,22 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
/* revocation, grant, or no-op? */
if (cap->issued & ~newcaps) {
- dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
- ceph_cap_string(newcaps));
- if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
- writeback = 1; /* will delay ack */
- else if (dirty & ~newcaps)
- check_caps = 1; /* initiate writeback in check_caps */
- else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
- revoked_rdcache)
- check_caps = 2; /* send revoke ack in check_caps */
+ int revoking = cap->issued & ~newcaps;
+
+ dout("revocation: %s -> %s (revoking %s)\n",
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(newcaps),
+ ceph_cap_string(revoking));
+ if (revoking & used & CEPH_CAP_FILE_BUFFER)
+ writeback = 1; /* initiate writeback; will delay ack */
+ else if (revoking == CEPH_CAP_FILE_CACHE &&
+ (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
+ queue_invalidate)
+ ; /* do nothing yet, invalidation will be queued */
+ else if (cap == ci->i_auth_cap)
+ check_caps = 1; /* check auth cap only */
+ else
+ check_caps = 2; /* check all caps */
cap->issued = newcaps;
cap->implemented |= newcaps;
} else if (cap->issued == newcaps) {
@@ -2391,7 +2421,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
if (queue_invalidate)
ceph_queue_invalidate(inode);
if (wake)
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
if (check_caps == 1)
ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
@@ -2446,7 +2476,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
struct ceph_inode_info,
i_flushing_item)->vfs_inode);
mdsc->num_cap_flushing--;
- wake_up(&mdsc->cap_flushing_wq);
+ wake_up_all(&mdsc->cap_flushing_wq);
dout(" inode %p now !flushing\n", inode);
if (ci->i_dirty_caps == 0) {
@@ -2458,7 +2488,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
}
}
spin_unlock(&mdsc->cap_dirty_lock);
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
out:
spin_unlock(&inode->i_lock);
@@ -2554,7 +2584,8 @@ static void handle_cap_trunc(struct inode *inode,
* caller holds s_mutex
*/
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
- struct ceph_mds_session *session)
+ struct ceph_mds_session *session,
+ int *open_target_sessions)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -2586,6 +2617,12 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
ci->i_cap_exporting_mds = mds;
ci->i_cap_exporting_mseq = mseq;
ci->i_cap_exporting_issued = cap->issued;
+
+ /*
+ * make sure we have open sessions with all possible
+ * export targets, so that we get the matching IMPORT
+ */
+ *open_target_sessions = 1;
}
__ceph_remove_cap(cap);
}
@@ -2655,12 +2692,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_mds_caps *h;
int mds = session->s_mds;
int op;
- u32 seq;
+ u32 seq, mseq;
struct ceph_vino vino;
u64 cap_id;
u64 size, max_size;
u64 tid;
void *snaptrace;
+ size_t snaptrace_len;
+ void *flock;
+ u32 flock_len;
+ int open_target_sessions = 0;
dout("handle_caps from mds%d\n", mds);
@@ -2669,15 +2710,30 @@ void ceph_handle_caps(struct ceph_mds_session *session,
if (msg->front.iov_len < sizeof(*h))
goto bad;
h = msg->front.iov_base;
- snaptrace = h + 1;
op = le32_to_cpu(h->op);
vino.ino = le64_to_cpu(h->ino);
vino.snap = CEPH_NOSNAP;
cap_id = le64_to_cpu(h->cap_id);
seq = le32_to_cpu(h->seq);
+ mseq = le32_to_cpu(h->migrate_seq);
size = le64_to_cpu(h->size);
max_size = le64_to_cpu(h->max_size);
+ snaptrace = h + 1;
+ snaptrace_len = le32_to_cpu(h->snap_trace_len);
+
+ if (le16_to_cpu(msg->hdr.version) >= 2) {
+ void *p, *end;
+
+ p = snaptrace + snaptrace_len;
+ end = msg->front.iov_base + msg->front.iov_len;
+ ceph_decode_32_safe(&p, end, flock_len, bad);
+ flock = p;
+ } else {
+ flock = NULL;
+ flock_len = 0;
+ }
+
mutex_lock(&session->s_mutex);
session->s_seq++;
dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
@@ -2689,6 +2745,18 @@ void ceph_handle_caps(struct ceph_mds_session *session,
vino.snap, inode);
if (!inode) {
dout(" i don't have ino %llx\n", vino.ino);
+
+ if (op == CEPH_CAP_OP_IMPORT)
+ __queue_cap_release(session, vino.ino, cap_id,
+ mseq, seq);
+
+ /*
+ * send any full release message to try to move things
+ * along for the mds (who clearly thinks we still have this
+ * cap).
+ */
+ ceph_add_cap_releases(mdsc, session);
+ ceph_send_cap_releases(mdsc, session);
goto done;
}
@@ -2699,12 +2767,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
goto done;
case CEPH_CAP_OP_EXPORT:
- handle_cap_export(inode, h, session);
+ handle_cap_export(inode, h, session, &open_target_sessions);
goto done;
case CEPH_CAP_OP_IMPORT:
handle_cap_import(mdsc, inode, h, session,
- snaptrace, le32_to_cpu(h->snap_trace_len));
+ snaptrace, snaptrace_len);
ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
session);
goto done_unlocked;
@@ -2714,7 +2782,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
spin_lock(&inode->i_lock);
cap = __get_cap_for_mds(ceph_inode(inode), mds);
if (!cap) {
- dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
+ dout(" no cap on %p ino %llx.%llx from mds%d\n",
inode, ceph_ino(inode), ceph_snap(inode), mds);
spin_unlock(&inode->i_lock);
goto done;
@@ -2746,6 +2814,8 @@ done:
done_unlocked:
if (inode)
iput(inode);
+ if (open_target_sessions)
+ ceph_mdsc_open_export_target_sessions(mdsc, session);
return;
bad:
@@ -2865,18 +2935,19 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *cap;
struct ceph_mds_request_release *rel = *p;
+ int used, dirty;
int ret = 0;
- int used = 0;
spin_lock(&inode->i_lock);
used = __ceph_caps_used(ci);
+ dirty = __ceph_caps_dirty(ci);
- dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode,
- mds, ceph_cap_string(used), ceph_cap_string(drop),
+ dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
+ inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
ceph_cap_string(unless));
- /* only drop unused caps */
- drop &= ~used;
+ /* only drop unused, clean caps */
+ drop &= ~(used | dirty);
cap = __get_cap_for_mds(ci, mds);
if (cap && __cap_is_valid(cap)) {
@@ -2956,6 +3027,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
memcpy(*p, dentry->d_name.name, dentry->d_name.len);
*p += dentry->d_name.len;
rel->dname_seq = cpu_to_le32(di->lease_seq);
+ __ceph_mdsc_drop_dentry_lease(dentry);
}
spin_unlock(&dentry->d_lock);
return ret;
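
Most of the caps.c churn is mechanical (global pool -> per-client mdsc pool), but the accounting invariant it preserves is worth stating: at every unlock, caps_total_count == caps_use_count + caps_reserve_count + caps_avail_count, which is exactly what the repeated BUG_ON lines check. A self-contained userspace sketch of the same reserve/take/put accounting (names invented for illustration):

	#include <assert.h>

	struct pool {
		int total, used, reserved, avail;
	};

	static void reserve(struct pool *p, int need)	/* like ceph_reserve_caps() */
	{
		int have = p->avail >= need ? need : p->avail;

		p->avail -= have;
		p->reserved += have;
		p->total += need - have;	/* shortfall is freshly allocated */
		p->reserved += need - have;
		assert(p->total == p->used + p->reserved + p->avail);
	}

	static void take(struct pool *p)		/* like get_cap() */
	{
		assert(p->reserved > 0);
		p->reserved--;
		p->used++;
		assert(p->total == p->used + p->reserved + p->avail);
	}

	static void put(struct pool *p)			/* like ceph_put_cap() */
	{
		p->used--;
		p->avail++;
		assert(p->total == p->used + p->reserved + p->avail);
	}

	int main(void)
	{
		struct pool p = { 0, 0, 0, 0 };

		reserve(&p, 4);
		take(&p);
		put(&p);
		return 0;
	}

The other behavioral fix in get_cap() is that unreserved allocations (the !ctx path) now bump caps_use_count and caps_total_count, so caps taken for import/export no longer escape the counters.
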
diff --git a/fs/ceph/ceph_frag.h b/fs/ceph/ceph_frag.h
index 793f50cb7c2..5babb8e9535 100644
--- a/fs/ceph/ceph_frag.h
+++ b/fs/ceph/ceph_frag.h
@@ -1,5 +1,5 @@
-#ifndef _FS_CEPH_FRAG_H
-#define _FS_CEPH_FRAG_H
+#ifndef FS_CEPH_FRAG_H
+#define FS_CEPH_FRAG_H
/*
* "Frags" are a way to describe a subset of a 32-bit number space,
diff --git a/fs/ceph/ceph_fs.c b/fs/ceph/ceph_fs.c
index 79d76bc4303..3ac6cc7c115 100644
--- a/fs/ceph/ceph_fs.c
+++ b/fs/ceph/ceph_fs.c
@@ -29,46 +29,44 @@ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
int ceph_flags_to_mode(int flags)
{
+ int mode;
+
#ifdef O_DIRECTORY /* fixme */
if ((flags & O_DIRECTORY) == O_DIRECTORY)
return CEPH_FILE_MODE_PIN;
#endif
+ if ((flags & O_APPEND) == O_APPEND)
+ flags |= O_WRONLY;
+
+ if ((flags & O_ACCMODE) == O_RDWR)
+ mode = CEPH_FILE_MODE_RDWR;
+ else if ((flags & O_ACCMODE) == O_WRONLY)
+ mode = CEPH_FILE_MODE_WR;
+ else
+ mode = CEPH_FILE_MODE_RD;
+
#ifdef O_LAZY
if (flags & O_LAZY)
- return CEPH_FILE_MODE_LAZY;
+ mode |= CEPH_FILE_MODE_LAZY;
#endif
- if ((flags & O_APPEND) == O_APPEND)
- flags |= O_WRONLY;
- flags &= O_ACCMODE;
- if ((flags & O_RDWR) == O_RDWR)
- return CEPH_FILE_MODE_RDWR;
- if ((flags & O_WRONLY) == O_WRONLY)
- return CEPH_FILE_MODE_WR;
- return CEPH_FILE_MODE_RD;
+ return mode;
}
int ceph_caps_for_mode(int mode)
{
- switch (mode) {
- case CEPH_FILE_MODE_PIN:
- return CEPH_CAP_PIN;
- case CEPH_FILE_MODE_RD:
- return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
+ int caps = CEPH_CAP_PIN;
+
+ if (mode & CEPH_FILE_MODE_RD)
+ caps |= CEPH_CAP_FILE_SHARED |
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
- case CEPH_FILE_MODE_RDWR:
- return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
- CEPH_CAP_FILE_EXCL |
- CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE |
- CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
- CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
- CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
- case CEPH_FILE_MODE_WR:
- return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
- CEPH_CAP_FILE_EXCL |
+ if (mode & CEPH_FILE_MODE_WR)
+ caps |= CEPH_CAP_FILE_EXCL |
CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
- }
- return 0;
+ if (mode & CEPH_FILE_MODE_LAZY)
+ caps |= CEPH_CAP_FILE_LAZYIO;
+
+ return caps;
}
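
The rewrite makes the file modes composable bit flags (RD = 1, WR = 2, RDWR = RD|WR, LAZY as an extra bit) instead of an enumeration, so O_LAZY augments the access mode rather than replacing it, and ceph_caps_for_mode() can build the cap set additively. A standalone sketch of the mapping (the MODE_* constants stand in for CEPH_FILE_MODE_*):

	#include <fcntl.h>
	#include <stdio.h>

	#define MODE_RD		1
	#define MODE_WR		2
	#define MODE_RDWR	(MODE_RD | MODE_WR)

	static int flags_to_mode(int flags)
	{
		int mode;

		if ((flags & O_APPEND) == O_APPEND)
			flags |= O_WRONLY;	/* append implies write intent */

		if ((flags & O_ACCMODE) == O_RDWR)
			mode = MODE_RDWR;
		else if ((flags & O_ACCMODE) == O_WRONLY)
			mode = MODE_WR;
		else
			mode = MODE_RD;

		return mode;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       flags_to_mode(O_RDONLY),			/* 1 */
		       flags_to_mode(O_WRONLY | O_APPEND),	/* 2 */
		       flags_to_mode(O_RDWR));			/* 3 */
		return 0;
	}
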
diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h
index 2fa992eaf7d..d5619ac8671 100644
--- a/fs/ceph/ceph_fs.h
+++ b/fs/ceph/ceph_fs.h
@@ -9,27 +9,13 @@
* LGPL2
*/
-#ifndef _FS_CEPH_CEPH_FS_H
-#define _FS_CEPH_CEPH_FS_H
+#ifndef CEPH_FS_H
+#define CEPH_FS_H
#include "msgr.h"
#include "rados.h"
/*
- * Ceph release version
- */
-#define CEPH_VERSION_MAJOR 0
-#define CEPH_VERSION_MINOR 20
-#define CEPH_VERSION_PATCH 0
-
-#define _CEPH_STRINGIFY(x) #x
-#define CEPH_STRINGIFY(x) _CEPH_STRINGIFY(x)
-#define CEPH_MAKE_VERSION(x, y, z) CEPH_STRINGIFY(x) "." CEPH_STRINGIFY(y) \
- "." CEPH_STRINGIFY(z)
-#define CEPH_VERSION CEPH_MAKE_VERSION(CEPH_VERSION_MAJOR, \
- CEPH_VERSION_MINOR, CEPH_VERSION_PATCH)
-
-/*
* subprotocol versions. when specific messages types or high-level
* protocols change, bump the affected components. we keep rev
* internal cluster protocols separately from the public,
@@ -53,18 +39,10 @@
/*
* feature bits
*/
-#define CEPH_FEATURE_UID 1
-#define CEPH_FEATURE_NOSRCADDR 2
-#define CEPH_FEATURE_FLOCK 4
-
-#define CEPH_FEATURE_SUPPORTED_MON CEPH_FEATURE_UID|CEPH_FEATURE_NOSRCADDR
-#define CEPH_FEATURE_REQUIRED_MON CEPH_FEATURE_UID
-#define CEPH_FEATURE_SUPPORTED_MDS CEPH_FEATURE_UID|CEPH_FEATURE_NOSRCADDR|CEPH_FEATURE_FLOCK
-#define CEPH_FEATURE_REQUIRED_MDS CEPH_FEATURE_UID
-#define CEPH_FEATURE_SUPPORTED_OSD CEPH_FEATURE_UID|CEPH_FEATURE_NOSRCADDR
-#define CEPH_FEATURE_REQUIRED_OSD CEPH_FEATURE_UID
-#define CEPH_FEATURE_SUPPORTED_CLIENT CEPH_FEATURE_NOSRCADDR
-#define CEPH_FEATURE_REQUIRED_CLIENT CEPH_FEATURE_NOSRCADDR
+#define CEPH_FEATURE_UID (1<<0)
+#define CEPH_FEATURE_NOSRCADDR (1<<1)
+#define CEPH_FEATURE_MONCLOCKCHECK (1<<2)
+#define CEPH_FEATURE_FLOCK (1<<3)
/*
@@ -96,6 +74,8 @@ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
#define CEPH_CRYPTO_NONE 0x0
#define CEPH_CRYPTO_AES 0x1
+#define CEPH_AES_IV "cephsageyudagreg"
+
/* security/authentication protocols */
#define CEPH_AUTH_UNKNOWN 0x0
#define CEPH_AUTH_NONE 0x1
@@ -275,6 +255,7 @@ extern const char *ceph_mds_state_name(int s);
#define CEPH_LOCK_IDFT 512 /* dir frag tree */
#define CEPH_LOCK_INEST 1024 /* mds internal */
#define CEPH_LOCK_IXATTR 2048
+#define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */
#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */
/* client_session ops */
@@ -316,6 +297,8 @@ enum {
CEPH_MDS_OP_RMXATTR = 0x01106,
CEPH_MDS_OP_SETLAYOUT = 0x01107,
CEPH_MDS_OP_SETATTR = 0x01108,
+ CEPH_MDS_OP_SETFILELOCK= 0x01109,
+ CEPH_MDS_OP_GETFILELOCK= 0x00110,
CEPH_MDS_OP_MKNOD = 0x01201,
CEPH_MDS_OP_LINK = 0x01202,
@@ -386,6 +369,15 @@ union ceph_mds_request_args {
struct {
struct ceph_file_layout layout;
} __attribute__ ((packed)) setlayout;
+ struct {
+ __u8 rule; /* currently fcntl or flock */
+ __u8 type; /* shared, exclusive, remove */
+ __le64 pid; /* process id requesting the lock */
+ __le64 pid_namespace;
+ __le64 start; /* initial location to lock */
+ __le64 length; /* num bytes to lock from start */
+ __u8 wait; /* will caller wait for lock to become available? */
+ } __attribute__ ((packed)) filelock_change;
} __attribute__ ((packed));
#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
@@ -480,6 +472,23 @@ struct ceph_mds_reply_dirfrag {
__le32 dist[];
} __attribute__ ((packed));
+#define CEPH_LOCK_FCNTL 1
+#define CEPH_LOCK_FLOCK 2
+
+#define CEPH_LOCK_SHARED 1
+#define CEPH_LOCK_EXCL 2
+#define CEPH_LOCK_UNLOCK 4
+
+struct ceph_filelock {
+ __le64 start; /* file offset to start lock at */
+ __le64 length; /* num bytes to lock; 0 for all following start */
+ __le64 client; /* which client holds the lock */
+ __le64 pid; /* process id holding the lock on the client */
+ __le64 pid_namespace;
+ __u8 type; /* shared lock, exclusive lock, or unlock */
+} __attribute__ ((packed));
+
+
/* file access modes */
#define CEPH_FILE_MODE_PIN 0
#define CEPH_FILE_MODE_RD 1
@@ -508,9 +517,10 @@ int ceph_flags_to_mode(int flags);
#define CEPH_CAP_SAUTH 2
#define CEPH_CAP_SLINK 4
#define CEPH_CAP_SXATTR 6
-#define CEPH_CAP_SFILE 8 /* goes at the end (uses >2 cap bits) */
+#define CEPH_CAP_SFILE 8
+#define CEPH_CAP_SFLOCK 20
-#define CEPH_CAP_BITS 16
+#define CEPH_CAP_BITS 22
/* composed values */
#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH)
@@ -528,6 +538,9 @@ int ceph_flags_to_mode(int flags);
#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE)
+#define CEPH_CAP_FLOCK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK)
+#define CEPH_CAP_FLOCK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFLOCK)
+
/* cap masks (for getattr) */
#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN
@@ -563,7 +576,8 @@ int ceph_flags_to_mode(int flags);
CEPH_CAP_FILE_EXCL)
#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR)
#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
- CEPH_CAP_ANY_FILE_WR | CEPH_CAP_PIN)
+ CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \
+ CEPH_CAP_PIN)
#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
CEPH_LOCK_IXATTR)
@@ -653,12 +667,21 @@ struct ceph_mds_cap_reconnect {
__le64 cap_id;
__le32 wanted;
__le32 issued;
+ __le64 snaprealm;
+ __le64 pathbase; /* base ino for our path to this ino */
+ __le32 flock_len; /* size of flock state blob, if any */
+} __attribute__ ((packed));
+/* followed by flock blob */
+
+struct ceph_mds_cap_reconnect_v1 {
+ __le64 cap_id;
+ __le32 wanted;
+ __le32 issued;
__le64 size;
struct ceph_timespec mtime, atime;
__le64 snaprealm;
__le64 pathbase; /* base ino for our path to this ino */
} __attribute__ ((packed));
-/* followed by encoded string */
struct ceph_mds_snaprealm_reconnect {
__le64 ino; /* snap realm base */
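
Two things happen in this header. The feature bits become explicit (1<<n) masks, which renumbers CEPH_FEATURE_FLOCK from 4 to (1<<3) and makes room for MONCLOCKCHECK at (1<<2). And the cap space grows a flock section: each cap group is the generic shared/exclusive/... bits shifted to its section, so placing SFLOCK at bit 20 with a shared and an exclusive bit is what pushes CEPH_CAP_BITS from 16 to 22. Assuming the generic bit values defined earlier in the header (CEPH_CAP_GSHARED = 1, CEPH_CAP_GEXCL = 2), the composition is:

	#define CEPH_CAP_SFLOCK		20	/* flock caps at bits 20..21 */

	#define CEPH_CAP_FLOCK_SHARED	(CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK)	/* 1 << 20 */
	#define CEPH_CAP_FLOCK_EXCL	(CEPH_CAP_GEXCL   << CEPH_CAP_SFLOCK)	/* 1 << 21 */
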
diff --git a/fs/ceph/ceph_hash.h b/fs/ceph/ceph_hash.h
index 5ac470c433c..d099c3f9023 100644
--- a/fs/ceph/ceph_hash.h
+++ b/fs/ceph/ceph_hash.h
@@ -1,5 +1,5 @@
-#ifndef _FS_CEPH_HASH_H
-#define _FS_CEPH_HASH_H
+#ifndef FS_CEPH_HASH_H
+#define FS_CEPH_HASH_H
#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */
#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */
diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/ceph_strings.c
index 7503aee828c..c6179d3a26a 100644
--- a/fs/ceph/ceph_strings.c
+++ b/fs/ceph/ceph_strings.c
@@ -28,6 +28,7 @@ const char *ceph_osd_op_name(int op)
case CEPH_OSD_OP_TRUNCATE: return "truncate";
case CEPH_OSD_OP_ZERO: return "zero";
case CEPH_OSD_OP_WRITEFULL: return "writefull";
+ case CEPH_OSD_OP_ROLLBACK: return "rollback";
case CEPH_OSD_OP_APPEND: return "append";
case CEPH_OSD_OP_STARTSYNC: return "startsync";
@@ -129,6 +130,8 @@ const char *ceph_mds_op_name(int op)
case CEPH_MDS_OP_LSSNAP: return "lssnap";
case CEPH_MDS_OP_MKSNAP: return "mksnap";
case CEPH_MDS_OP_RMSNAP: return "rmsnap";
+ case CEPH_MDS_OP_SETFILELOCK: return "setfilelock";
+ case CEPH_MDS_OP_GETFILELOCK: return "getfilelock";
}
return "???";
}
diff --git a/fs/ceph/crush/crush.h b/fs/ceph/crush/crush.h
index dcd7e752370..97e435b191f 100644
--- a/fs/ceph/crush/crush.h
+++ b/fs/ceph/crush/crush.h
@@ -1,5 +1,5 @@
-#ifndef _CRUSH_CRUSH_H
-#define _CRUSH_CRUSH_H
+#ifndef CEPH_CRUSH_CRUSH_H
+#define CEPH_CRUSH_CRUSH_H
#include <linux/types.h>
diff --git a/fs/ceph/crush/hash.h b/fs/ceph/crush/hash.h
index ff48e110e4b..91e884230d5 100644
--- a/fs/ceph/crush/hash.h
+++ b/fs/ceph/crush/hash.h
@@ -1,5 +1,5 @@
-#ifndef _CRUSH_HASH_H
-#define _CRUSH_HASH_H
+#ifndef CEPH_CRUSH_HASH_H
+#define CEPH_CRUSH_HASH_H
#define CRUSH_HASH_RJENKINS1 0
diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c
index 9ba54efb654..a4eec133258 100644
--- a/fs/ceph/crush/mapper.c
+++ b/fs/ceph/crush/mapper.c
@@ -238,7 +238,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket,
static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
{
- dprintk("choose %d x=%d r=%d\n", in->id, x, r);
+ dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
switch (in->alg) {
case CRUSH_BUCKET_UNIFORM:
return bucket_uniform_choose((struct crush_bucket_uniform *)in,
@@ -264,7 +264,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
*/
static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
{
- if (weight[item] >= 0x1000)
+ if (weight[item] >= 0x10000)
return 0;
if (weight[item] == 0)
return 1;
@@ -305,7 +305,9 @@ static int crush_choose(struct crush_map *map,
int itemtype;
int collide, reject;
const int orig_tries = 5; /* attempts before we fall back to search */
- dprintk("choose bucket %d x %d outpos %d\n", bucket->id, x, outpos);
+
+ dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
+ bucket->id, x, outpos, numrep);
for (rep = outpos; rep < numrep; rep++) {
/* keep trying until we get a non-out, non-colliding item */
@@ -366,6 +368,7 @@ static int crush_choose(struct crush_map *map,
BUG_ON(item >= 0 ||
(-1-item) >= map->max_buckets);
in = map->buckets[-1-item];
+ retry_bucket = 1;
continue;
}
@@ -377,15 +380,25 @@ static int crush_choose(struct crush_map *map,
}
}
- if (recurse_to_leaf &&
- item < 0 &&
- crush_choose(map, map->buckets[-1-item],
- weight,
- x, outpos+1, 0,
- out2, outpos,
- firstn, 0, NULL) <= outpos) {
- reject = 1;
- } else {
+ reject = 0;
+ if (recurse_to_leaf) {
+ if (item < 0) {
+ if (crush_choose(map,
+ map->buckets[-1-item],
+ weight,
+ x, outpos+1, 0,
+ out2, outpos,
+ firstn, 0,
+ NULL) <= outpos)
+ /* didn't get leaf */
+ reject = 1;
+ } else {
+ /* we already have a leaf! */
+ out2[outpos] = item;
+ }
+ }
+
+ if (!reject) {
/* out? */
if (itemtype == 0)
reject = is_out(map, weight,
@@ -424,12 +437,12 @@ reject:
continue;
}
- dprintk("choose got %d\n", item);
+ dprintk("CHOOSE got %d\n", item);
out[outpos] = item;
outpos++;
}
- dprintk("choose returns %d\n", outpos);
+ dprintk("CHOOSE returns %d\n", outpos);
return outpos;
}
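
The is_out() change is a real bug fix: CRUSH weights are 16.16 fixed point, so full weight is 0x10000, and the old 0x1000 threshold treated anything above 1/16 of full weight as always-in. A self-contained sketch of the rejection logic (hash16() is a toy stand-in for crush_hash32_2() & 0xffff):

	#include <stdio.h>

	static unsigned hash16(unsigned x, unsigned item)
	{
		unsigned h = x * 2654435761u ^ item * 2246822519u;	/* toy mix */

		return (h ^ (h >> 16)) & 0xffff;
	}

	/* sketch: an item is kept with probability weight/0x10000 */
	static int is_out(const unsigned *weight, int item, int x)
	{
		if (weight[item] >= 0x10000)	/* full weight: never out */
			return 0;
		if (weight[item] == 0)		/* zero weight: always out */
			return 1;
		return hash16(x, item) >= weight[item];
	}

	int main(void)
	{
		unsigned weight[1] = { 0x8000 };	/* weight 0.5 */
		int x, in = 0;

		for (x = 0; x < 10000; x++)
			in += !is_out(weight, 0, x);
		printf("kept ~%d of 10000 placements\n", in);	/* roughly 5000 */
		return 0;
	}
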
diff --git a/fs/ceph/crush/mapper.h b/fs/ceph/crush/mapper.h
index 98e90046fd9..c46b99c18bb 100644
--- a/fs/ceph/crush/mapper.h
+++ b/fs/ceph/crush/mapper.h
@@ -1,5 +1,5 @@
-#ifndef _CRUSH_MAPPER_H
-#define _CRUSH_MAPPER_H
+#ifndef CEPH_CRUSH_MAPPER_H
+#define CEPH_CRUSH_MAPPER_H
/*
* CRUSH functions for finding rules and then mapping an input to an
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
index f704b3b6242..a3e627f6329 100644
--- a/fs/ceph/crypto.c
+++ b/fs/ceph/crypto.c
@@ -75,10 +75,11 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}
-const u8 *aes_iv = "cephsageyudagreg";
+static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
-int ceph_aes_encrypt(const void *key, int key_len, void *dst, size_t *dst_len,
- const void *src, size_t src_len)
+static int ceph_aes_encrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
{
struct scatterlist sg_in[2], sg_out[1];
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
@@ -126,9 +127,10 @@ int ceph_aes_encrypt(const void *key, int key_len, void *dst, size_t *dst_len,
return 0;
}
-int ceph_aes_encrypt2(const void *key, int key_len, void *dst, size_t *dst_len,
- const void *src1, size_t src1_len,
- const void *src2, size_t src2_len)
+static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
+ size_t *dst_len,
+ const void *src1, size_t src1_len,
+ const void *src2, size_t src2_len)
{
struct scatterlist sg_in[3], sg_out[1];
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
@@ -179,8 +181,9 @@ int ceph_aes_encrypt2(const void *key, int key_len, void *dst, size_t *dst_len,
return 0;
}
-int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len,
- const void *src, size_t src_len)
+static int ceph_aes_decrypt(const void *key, int key_len,
+ void *dst, size_t *dst_len,
+ const void *src, size_t src_len)
{
struct scatterlist sg_in[1], sg_out[2];
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
@@ -238,10 +241,10 @@ int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len,
return 0;
}
-int ceph_aes_decrypt2(const void *key, int key_len,
- void *dst1, size_t *dst1_len,
- void *dst2, size_t *dst2_len,
- const void *src, size_t src_len)
+static int ceph_aes_decrypt2(const void *key, int key_len,
+ void *dst1, size_t *dst1_len,
+ void *dst2, size_t *dst2_len,
+ const void *src, size_t src_len)
{
struct scatterlist sg_in[1], sg_out[3];
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h
index 40b502e6bd8..bdf38607323 100644
--- a/fs/ceph/crypto.h
+++ b/fs/ceph/crypto.h
@@ -42,7 +42,7 @@ extern int ceph_encrypt2(struct ceph_crypto_key *secret,
const void *src2, size_t src2_len);
/* armor.c */
-extern int ceph_armor(char *dst, const void *src, const void *end);
-extern int ceph_unarmor(void *dst, const char *src, const char *end);
+extern int ceph_armor(char *dst, const char *src, const char *end);
+extern int ceph_unarmor(char *dst, const char *src, const char *end);
#endif
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 3be33fb066c..360c4f22718 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -261,7 +261,7 @@ static int osdc_show(struct seq_file *s, void *pp)
static int caps_show(struct seq_file *s, void *p)
{
- struct ceph_client *client = p;
+ struct ceph_client *client = s->private;
int total, avail, used, reserved, min;
ceph_reservation_status(client, &total, &avail, &used, &reserved, &min);
@@ -291,7 +291,7 @@ static int dentry_lru_show(struct seq_file *s, void *ptr)
return 0;
}
-#define DEFINE_SHOW_FUNC(name) \
+#define DEFINE_SHOW_FUNC(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
struct seq_file *sf; \
@@ -361,8 +361,8 @@ int ceph_debugfs_client_init(struct ceph_client *client)
int ret = 0;
char name[80];
- snprintf(name, sizeof(name), FSID_FORMAT ".client%lld",
- PR_FSID(&client->fsid), client->monc.auth->global_id);
+ snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
+ client->monc.auth->global_id);
client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
if (!client->debugfs_dir)
@@ -432,11 +432,12 @@ int ceph_debugfs_client_init(struct ceph_client *client)
if (!client->debugfs_caps)
goto out;
- client->debugfs_congestion_kb = debugfs_create_file("writeback_congestion_kb",
- 0600,
- client->debugfs_dir,
- client,
- &congestion_kb_fops);
+ client->debugfs_congestion_kb =
+ debugfs_create_file("writeback_congestion_kb",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &congestion_kb_fops);
if (!client->debugfs_congestion_kb)
goto out;
@@ -466,7 +467,7 @@ void ceph_debugfs_client_cleanup(struct ceph_client *client)
debugfs_remove(client->debugfs_dir);
}
-#else // CONFIG_DEBUG_FS
+#else /* CONFIG_DEBUG_FS */
int __init ceph_debugfs_init(void)
{
@@ -486,4 +487,4 @@ void ceph_debugfs_client_cleanup(struct ceph_client *client)
{
}
-#endif // CONFIG_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
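
The caps_show() fix is seq_file plumbing: when the file is opened with single_open(file, name, NULL), the void *p handed to the show routine is the seq iterator's cookie, not the data pointer, so the client has to be recovered from s->private (filled from inode->i_private at open time). The DEFINE_SHOW_FUNC() macro whose first lines appear above plausibly expands to the usual single_open() boilerplate, roughly:

	#define DEFINE_SHOW_FUNC(name)						\
	static int name##_open(struct inode *inode, struct file *file)		\
	{									\
		struct seq_file *sf;						\
		int ret;							\
										\
		ret = single_open(file, name, NULL);				\
		sf = file->private_data;					\
		sf->private = inode->i_private;					\
		return ret;							\
	}									\
										\
	static const struct file_operations name##_fops = {			\
		.open		= name##_open,					\
		.read		= seq_read,					\
		.llseek		= seq_lseek,					\
		.release	= single_release,				\
	};
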
diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h
index 65b3e022eaf..3d25415afe6 100644
--- a/fs/ceph/decode.h
+++ b/fs/ceph/decode.h
@@ -99,11 +99,13 @@ static inline void ceph_encode_timespec(struct ceph_timespec *tv,
*/
static inline void ceph_encode_addr(struct ceph_entity_addr *a)
{
- a->in_addr.ss_family = htons(a->in_addr.ss_family);
+ __be16 ss_family = htons(a->in_addr.ss_family);
+ a->in_addr.ss_family = *(__u16 *)&ss_family;
}
static inline void ceph_decode_addr(struct ceph_entity_addr *a)
{
- a->in_addr.ss_family = ntohs(a->in_addr.ss_family);
+ __be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
+ a->in_addr.ss_family = ntohs(ss_family);
WARN_ON(a->in_addr.ss_family == 512);
}
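
The decode.h rewrite is for sparse: ss_family is converted in place, so the value is laundered through an explicitly typed __be16 temporary instead of assigning host-order and wire-order values to the same field. The WARN_ON(512) that follows is a sanity check: 512 is 0x0200, i.e. AF_INET (2) with its bytes swapped, so it catches a family that was converted one time too many (or too few). A userspace sketch of the check:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	#define AF_INET_SWAPPED	512	/* 0x0200 == AF_INET byte-swapped */

	static void decode_family(uint16_t *ss_family)
	{
		*ss_family = ntohs(*ss_family);
		if (*ss_family == AF_INET_SWAPPED)
			fprintf(stderr, "suspicious family: double decode?\n");
	}

	int main(void)
	{
		uint16_t fam = htons(2);	/* wire-order AF_INET */

		decode_family(&fam);	/* -> 2, no warning */
		decode_family(&fam);	/* on little-endian, trips the check */
		return 0;
	}
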
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index f85719310db..67bbb41d552 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -27,7 +27,7 @@
const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
-struct dentry_operations ceph_dentry_ops;
+const struct dentry_operations ceph_dentry_ops;
/*
* Initialize ceph dentry state.
@@ -94,6 +94,8 @@ static unsigned fpos_off(loff_t p)
*/
static int __dcache_readdir(struct file *filp,
void *dirent, filldir_t filldir)
+ __releases(inode->i_lock)
+ __acquires(inode->i_lock)
{
struct inode *inode = filp->f_dentry->d_inode;
struct ceph_file_info *fi = filp->private_data;
@@ -266,6 +268,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
spin_lock(&inode->i_lock);
if ((filp->f_pos == 2 || fi->dentry) &&
!ceph_test_opt(client, NOASYNCREADDIR) &&
+ ceph_snap(inode) != CEPH_SNAPDIR &&
(ci->i_ceph_flags & CEPH_I_COMPLETE) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
err = __dcache_readdir(filp, dirent, filldir);
@@ -1013,18 +1016,22 @@ out_touch:
/*
* When a dentry is released, clear the dir I_COMPLETE if it was part
- * of the current dir gen.
+ * of the current dir gen or if this is in the snapshot namespace.
*/
static void ceph_dentry_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
struct inode *parent_inode = dentry->d_parent->d_inode;
+ u64 snapid = ceph_snap(parent_inode);
- if (parent_inode) {
+ dout("dentry_release %p parent %p\n", dentry, parent_inode);
+
+ if (parent_inode && snapid != CEPH_SNAPDIR) {
struct ceph_inode_info *ci = ceph_inode(parent_inode);
spin_lock(&parent_inode->i_lock);
- if (ci->i_shared_gen == di->lease_shared_gen) {
+ if (ci->i_shared_gen == di->lease_shared_gen ||
+ snapid <= CEPH_MAXSNAP) {
dout(" clearing %p complete (d_release)\n",
parent_inode);
ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
@@ -1234,14 +1241,16 @@ const struct inode_operations ceph_dir_iops = {
.create = ceph_create,
};
-struct dentry_operations ceph_dentry_ops = {
+const struct dentry_operations ceph_dentry_ops = {
.d_revalidate = ceph_d_revalidate,
.d_release = ceph_dentry_release,
};
-struct dentry_operations ceph_snapdir_dentry_ops = {
+const struct dentry_operations ceph_snapdir_dentry_ops = {
.d_revalidate = ceph_snapdir_d_revalidate,
+ .d_release = ceph_dentry_release,
};
-struct dentry_operations ceph_snap_dentry_ops = {
+const struct dentry_operations ceph_snap_dentry_ops = {
+ .d_release = ceph_dentry_release,
};
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 6251a1574b9..8c044a4f045 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -265,7 +265,7 @@ int ceph_release(struct inode *inode, struct file *file)
kmem_cache_free(ceph_file_cachep, cf);
/* wake up anyone waiting for caps on this inode */
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
return 0;
}
@@ -317,7 +317,7 @@ void ceph_release_page_vector(struct page **pages, int num_pages)
/*
* allocate a vector of new pages
*/
-struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
+static struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
struct page **pages;
int i;
@@ -665,7 +665,7 @@ more:
* throw out any page cache pages in this range. this
* may block.
*/
- truncate_inode_pages_range(inode->i_mapping, pos,
+ truncate_inode_pages_range(inode->i_mapping, pos,
(pos+len) | (PAGE_CACHE_SIZE-1));
} else {
pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
@@ -740,28 +740,32 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *filp = iocb->ki_filp;
+ struct ceph_file_info *fi = filp->private_data;
loff_t *ppos = &iocb->ki_pos;
size_t len = iov->iov_len;
struct inode *inode = filp->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
- void *base = iov->iov_base;
+ void __user *base = iov->iov_base;
ssize_t ret;
- int got = 0;
+ int want, got = 0;
int checkeof = 0, read = 0;
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
__ceph_do_pending_vmtruncate(inode);
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
- &got, -1);
+ if (fi->fmode & CEPH_FILE_MODE_LAZY)
+ want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
+ else
+ want = CEPH_CAP_FILE_CACHE;
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
if (ret < 0)
goto out;
dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
inode, ceph_vinop(inode), pos, (unsigned)len,
ceph_cap_string(got));
- if ((got & CEPH_CAP_FILE_CACHE) == 0 ||
+ if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_filp->f_flags & O_DIRECT) ||
(inode->i_sb->s_flags & MS_SYNCHRONOUS))
/* hmm, this isn't really async... */
@@ -807,11 +811,12 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
+ struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_dentry->d_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc;
loff_t endoff = pos + iov->iov_len;
- int got = 0;
+ int want, got = 0;
int ret, err;
if (ceph_snap(inode) != CEPH_NOSNAP)
@@ -824,8 +829,11 @@ retry_snap:
dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
inode->i_size);
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
- &got, endoff);
+ if (fi->fmode & CEPH_FILE_MODE_LAZY)
+ want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
+ else
+ want = CEPH_CAP_FILE_BUFFER;
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
if (ret < 0)
goto out;
@@ -833,7 +841,7 @@ retry_snap:
inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
ceph_cap_string(got));
- if ((got & CEPH_CAP_FILE_BUFFER) == 0 ||
+ if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_filp->f_flags & O_DIRECT) ||
(inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
@@ -930,6 +938,8 @@ const struct file_operations ceph_file_fops = {
.aio_write = ceph_aio_write,
.mmap = ceph_mmap,
.fsync = ceph_fsync,
+ .lock = ceph_lock,
+ .flock = ceph_flock,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
.unlocked_ioctl = ceph_ioctl,
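
Both I/O paths above make the same page-cache decision: buffered I/O is used only when a caching cap (FILE_CACHE/FILE_BUFFER, or FILE_LAZYIO once the file is marked lazy) is held, and neither O_DIRECT nor a synchronous mount forces the sync path. A minimal sketch of that predicate as a hypothetical helper (not part of the patch):

static bool ceph_can_buffer(int got, int cache_caps, struct file *file,
			    struct inode *inode)
{
	/* cache_caps is CACHE|LAZYIO for reads, BUFFER|LAZYIO for writes */
	return (got & cache_caps) != 0 &&
	       !(file->f_flags & O_DIRECT) &&
	       !(inode->i_sb->s_flags & MS_SYNCHRONOUS);
}
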
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 226f5a50d36..5d893d31e39 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -442,8 +442,9 @@ int ceph_fill_file_size(struct inode *inode, int issued,
* the file is either opened or mmaped
*/
if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
- CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
- CEPH_CAP_FILE_EXCL)) ||
+ CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
+ CEPH_CAP_FILE_EXCL|
+ CEPH_CAP_FILE_LAZYIO)) ||
mapping_mapped(inode->i_mapping) ||
__ceph_caps_file_wanted(ci)) {
ci->i_truncate_pending++;
@@ -827,7 +828,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
spin_lock(&dcache_lock);
spin_lock(&dn->d_lock);
- list_move_tail(&dir->d_subdirs, &dn->d_u.d_child);
+ list_move(&dn->d_u.d_child, &dir->d_subdirs);
dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
dn->d_u.d_child.prev, dn->d_u.d_child.next);
spin_unlock(&dn->d_lock);
@@ -854,8 +855,8 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
d_drop(dn);
realdn = d_materialise_unique(dn, in);
if (IS_ERR(realdn)) {
- pr_err("splice_dentry error %p inode %p ino %llx.%llx\n",
- dn, in, ceph_vinop(in));
+ pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
+ PTR_ERR(realdn), dn, in, ceph_vinop(in));
if (prehash)
*prehash = false; /* don't rehash on error */
dn = realdn; /* note realdn contains the error */
@@ -1199,8 +1200,10 @@ retry_lookup:
goto out;
}
err = ceph_init_dentry(dn);
- if (err < 0)
+ if (err < 0) {
+ dput(dn);
goto out;
+ }
} else if (dn->d_inode &&
(ceph_ino(dn->d_inode) != vino.ino ||
ceph_snap(dn->d_inode) != vino.snap)) {
@@ -1234,18 +1237,23 @@ retry_lookup:
goto out;
}
dn = splice_dentry(dn, in, NULL);
+ if (IS_ERR(dn))
+ dn = NULL;
}
if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation) < 0) {
pr_err("fill_inode badness on %p\n", in);
- dput(dn);
- continue;
+ goto next_item;
}
- update_dentry_lease(dn, rinfo->dir_dlease[i],
- req->r_session, req->r_request_started);
- dput(dn);
+ if (dn)
+ update_dentry_lease(dn, rinfo->dir_dlease[i],
+ req->r_session,
+ req->r_request_started);
+next_item:
+ if (dn)
+ dput(dn);
}
req->r_did_prepopulate = true;
@@ -1494,7 +1502,7 @@ retry:
if (wrbuffer_refs == 0)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
if (wake)
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
}
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index d085f07756b..76e307d2aba 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -143,6 +143,27 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
return 0;
}
+static long ceph_ioctl_lazyio(struct file *file)
+{
+ struct ceph_file_info *fi = file->private_data;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
+ spin_lock(&inode->i_lock);
+ ci->i_nr_by_mode[fi->fmode]--;
+ fi->fmode |= CEPH_FILE_MODE_LAZY;
+ ci->i_nr_by_mode[fi->fmode]++;
+ spin_unlock(&inode->i_lock);
+ dout("ioctl_layzio: file %p marked lazy\n", file);
+
+ ceph_check_caps(ci, 0, NULL);
+ } else {
+ dout("ioctl_layzio: file %p already lazy\n", file);
+ }
+ return 0;
+}
+
long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
@@ -155,6 +176,9 @@ long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case CEPH_IOC_GET_DATALOC:
return ceph_ioctl_get_dataloc(file, (void __user *)arg);
+
+ case CEPH_IOC_LAZYIO:
+ return ceph_ioctl_lazyio(file);
}
return -ENOTTY;
}
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
index 25e4f1a9d05..88451a3b685 100644
--- a/fs/ceph/ioctl.h
+++ b/fs/ceph/ioctl.h
@@ -37,4 +37,6 @@ struct ceph_ioctl_dataloc {
#define CEPH_IOC_GET_DATALOC _IOWR(CEPH_IOCTL_MAGIC, 3, \
struct ceph_ioctl_dataloc)
+#define CEPH_IOC_LAZYIO _IO(CEPH_IOCTL_MAGIC, 4)
+
#endif
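
For illustration, userspace would enable lazy I/O on an open ceph file roughly as below. This is a sketch: the magic value is assumed to match the CEPH_IOCTL_MAGIC defined earlier in this header, and the path is hypothetical.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

#define CEPH_IOCTL_MAGIC 0x97			/* assumed to match fs/ceph/ioctl.h */
#define CEPH_IOC_LAZYIO  _IO(CEPH_IOCTL_MAGIC, 4)

int main(void)
{
	int fd = open("/mnt/ceph/somefile", O_RDWR);	/* hypothetical path */

	if (fd >= 0 && ioctl(fd, CEPH_IOC_LAZYIO) == 0)
		printf("lazy io enabled\n");	/* LAZYIO cap may now be used */
	return 0;
}
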
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
new file mode 100644
index 00000000000..ae85af06454
--- /dev/null
+++ b/fs/ceph/locks.c
@@ -0,0 +1,256 @@
+#include "ceph_debug.h"
+
+#include <linux/file.h>
+#include <linux/namei.h>
+
+#include "super.h"
+#include "mds_client.h"
+#include "pagelist.h"
+
+/**
+ * Implement fcntl and flock locking functions.
+ */
+static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
+ u64 pid, u64 pid_ns,
+ int cmd, u64 start, u64 length, u8 wait)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_mds_client *mdsc =
+ &ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_request *req;
+ int err;
+
+ req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->r_inode = igrab(inode);
+
+ dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
+ "length: %llu, wait: %d, type`: %d", (int)lock_type,
+ (int)operation, pid, start, length, wait, cmd);
+
+ req->r_args.filelock_change.rule = lock_type;
+ req->r_args.filelock_change.type = cmd;
+ req->r_args.filelock_change.pid = cpu_to_le64(pid);
+ /* This should be adjusted, but it is not clear whether
+ pid namespaces actually get id numbers */
+ req->r_args.filelock_change.pid_namespace =
+ cpu_to_le64((u64)pid_ns);
+ req->r_args.filelock_change.start = cpu_to_le64(start);
+ req->r_args.filelock_change.length = cpu_to_le64(length);
+ req->r_args.filelock_change.wait = wait;
+
+ err = ceph_mdsc_do_request(mdsc, inode, req);
+ ceph_mdsc_put_request(req);
+ dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
+ "length: %llu, wait: %d, type`: %d err code %d", (int)lock_type,
+ (int)operation, pid, start, length, wait, cmd, err);
+ return err;
+}
+
+/**
+ * Attempt to set an fcntl lock.
+ * For now, this just goes away to the server. Later it may be more awesome.
+ */
+int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
+{
+ u64 length;
+ u8 lock_cmd;
+ int err;
+ u8 wait = 0;
+ u16 op = CEPH_MDS_OP_SETFILELOCK;
+
+ fl->fl_nspid = get_pid(task_tgid(current));
+ dout("ceph_lock, fl_pid:%d", fl->fl_pid);
+
+ /* set wait bit as appropriate, then map the command to what Ceph expects */
+ if (F_SETLKW == cmd)
+ wait = 1;
+ if (F_GETLK == cmd)
+ op = CEPH_MDS_OP_GETFILELOCK;
+
+ if (F_RDLCK == fl->fl_type)
+ lock_cmd = CEPH_LOCK_SHARED;
+ else if (F_WRLCK == fl->fl_type)
+ lock_cmd = CEPH_LOCK_EXCL;
+ else
+ lock_cmd = CEPH_LOCK_UNLOCK;
+
+ if (LLONG_MAX == fl->fl_end)
+ length = 0;
+ else
+ length = fl->fl_end - fl->fl_start + 1;
+
+ err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
+ (u64)fl->fl_pid, (u64)fl->fl_nspid,
+ lock_cmd, fl->fl_start,
+ length, wait);
+ if (!err) {
+ dout("mds locked, locking locally");
+ err = posix_lock_file(file, fl, NULL);
+ if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
+ /* undo! This should only happen if the kernel detects
+ * local deadlock. */
+ ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
+ (u64)fl->fl_pid, (u64)fl->fl_nspid,
+ CEPH_LOCK_UNLOCK, fl->fl_start,
+ length, 0);
+ dout("got %d on posix_lock_file, undid lock", err);
+ }
+ } else {
+ dout("mds returned error code %d", err);
+ }
+ return err;
+}
+
+int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
+{
+ u64 length;
+ u8 lock_cmd;
+ int err;
+ u8 wait = 1;
+
+ fl->fl_nspid = get_pid(task_tgid(current));
+ dout("ceph_flock, fl_pid:%d", fl->fl_pid);
+
+ /* set wait bit, then clear it out of cmd*/
+ if (cmd & LOCK_NB)
+ wait = 0;
+ cmd = cmd & (LOCK_SH | LOCK_EX | LOCK_UN);
+ /* set command sequence that Ceph wants to see:
+ shared lock, exclusive lock, or unlock */
+ if (LOCK_SH == cmd)
+ lock_cmd = CEPH_LOCK_SHARED;
+ else if (LOCK_EX == cmd)
+ lock_cmd = CEPH_LOCK_EXCL;
+ else
+ lock_cmd = CEPH_LOCK_UNLOCK;
+ /* mds requires start and length rather than start and end */
+ if (LLONG_MAX == fl->fl_end)
+ length = 0;
+ else
+ length = fl->fl_end - fl->fl_start + 1;
+
+ err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
+ file, (u64)fl->fl_pid, (u64)fl->fl_nspid,
+ lock_cmd, fl->fl_start,
+ length, wait);
+ if (!err) {
+ err = flock_lock_file_wait(file, fl);
+ if (err) {
+ ceph_lock_message(CEPH_LOCK_FLOCK,
+ CEPH_MDS_OP_SETFILELOCK,
+ file, (u64)fl->fl_pid,
+ (u64)fl->fl_nspid,
+ CEPH_LOCK_UNLOCK, fl->fl_start,
+ length, 0);
+ dout("got %d on flock_lock_file_wait, undid lock", err);
+ }
+ } else {
+ dout("mds error code %d", err);
+ }
+ return err;
+}
+
+/**
+ * Must be called with BKL already held. Fills in the passed
+ * counter variables, so you can prepare pagelist metadata before calling
+ * ceph_encode_locks.
+ */
+void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
+{
+ struct file_lock *lock;
+
+ *fcntl_count = 0;
+ *flock_count = 0;
+
+ for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
+ if (lock->fl_flags & FL_POSIX)
+ ++(*fcntl_count);
+ else if (lock->fl_flags & FL_FLOCK)
+ ++(*flock_count);
+ }
+ dout("counted %d flock locks and %d fcntl locks",
+ *flock_count, *fcntl_count);
+}
+
+/**
+ * Encode the flock and fcntl locks for the given inode into the pagelist.
+ * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
+ * sequential flock locks.
+ * Must be called with the BKL already held, and the lock counts should
+ * have been gathered under the same lock-holding window.
+ */
+int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
+ int num_fcntl_locks, int num_flock_locks)
+{
+ struct file_lock *lock;
+ struct ceph_filelock cephlock;
+ int err = 0;
+
+ dout("encoding %d flock and %d fcntl locks", num_flock_locks,
+ num_fcntl_locks);
+ err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32));
+ if (err)
+ goto fail;
+ for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
+ if (lock->fl_flags & FL_POSIX) {
+ err = lock_to_ceph_filelock(lock, &cephlock);
+ if (err)
+ goto fail;
+ err = ceph_pagelist_append(pagelist, &cephlock,
+ sizeof(struct ceph_filelock));
+ }
+ if (err)
+ goto fail;
+ }
+
+ err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
+ if (err)
+ goto fail;
+ for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
+ if (lock->fl_flags & FL_FLOCK) {
+ err = lock_to_ceph_filelock(lock, &cephlock);
+ if (err)
+ goto fail;
+ err = ceph_pagelist_append(pagelist, &cephlock,
+ sizeof(struct ceph_filelock));
+ }
+ if (err)
+ goto fail;
+ }
+fail:
+ return err;
+}
+
+/*
+ * Given a pointer to a lock, convert it to a ceph filelock
+ */
+int lock_to_ceph_filelock(struct file_lock *lock,
+ struct ceph_filelock *cephlock)
+{
+ int err = 0;
+
+ cephlock->start = cpu_to_le64(lock->fl_start);
+ cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
+ cephlock->client = cpu_to_le64(0);
+ cephlock->pid = cpu_to_le64(lock->fl_pid);
+ cephlock->pid_namespace = cpu_to_le64((u64)lock->fl_nspid);
+
+ switch (lock->fl_type) {
+ case F_RDLCK:
+ cephlock->type = CEPH_LOCK_SHARED;
+ break;
+ case F_WRLCK:
+ cephlock->type = CEPH_LOCK_EXCL;
+ break;
+ case F_UNLCK:
+ cephlock->type = CEPH_LOCK_UNLOCK;
+ break;
+ default:
+ dout("Have unknown lock type %d", lock->fl_type);
+ err = -EINVAL;
+ }
+
+ return err;
+}
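
From userspace these hooks are reached through the ordinary locking syscalls; for example, a whole-file POSIX write lock arrives in ceph_lock() as CEPH_LOCK_EXCL with start 0 and length 0 ("to end of file", per the LLONG_MAX check above). An illustrative sketch, assuming a file on a ceph mount at a hypothetical path:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ceph/somefile", O_RDWR);	/* hypothetical path */
	struct flock fl = {
		.l_type   = F_WRLCK,	/* mapped to CEPH_LOCK_EXCL above */
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 0,		/* whole file; fl_end becomes LLONG_MAX,
					 * so ceph_lock() sends length 0 */
	};

	fcntl(fd, F_SETLKW, &fl);	/* F_SETLKW -> wait = 1 at the MDS */
	close(fd);
	return 0;
}
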
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index b49f12822cb..a75ddbf9fe3 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3,6 +3,7 @@
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/smp_lock.h>
#include "mds_client.h"
#include "mon_client.h"
@@ -37,6 +38,11 @@
* are no longer valid.
*/
+struct ceph_reconnect_state {
+ struct ceph_pagelist *pagelist;
+ bool flock;
+};
+
static void __wake_requests(struct ceph_mds_client *mdsc,
struct list_head *head);
@@ -449,7 +455,7 @@ void ceph_mdsc_release_request(struct kref *kref)
kfree(req->r_path1);
kfree(req->r_path2);
put_request_session(req);
- ceph_unreserve_caps(&req->r_caps_reservation);
+ ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
kfree(req);
}
@@ -512,7 +518,8 @@ static void __register_request(struct ceph_mds_client *mdsc,
{
req->r_tid = ++mdsc->last_tid;
if (req->r_num_caps)
- ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
+ ceph_reserve_caps(mdsc, &req->r_caps_reservation,
+ req->r_num_caps);
dout("__register_request %p tid %lld\n", req, req->r_tid);
ceph_mdsc_get_request(req);
__insert_request(mdsc, req);
@@ -704,6 +711,51 @@ static int __open_session(struct ceph_mds_client *mdsc,
}
/*
+ * open sessions for any export targets for the given mds
+ *
+ * called under mdsc->mutex
+ */
+static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_mds_info *mi;
+ struct ceph_mds_session *ts;
+ int i, mds = session->s_mds;
+ int target;
+
+ if (mds >= mdsc->mdsmap->m_max_mds)
+ return;
+ mi = &mdsc->mdsmap->m_info[mds];
+ dout("open_export_target_sessions for mds%d (%d targets)\n",
+ session->s_mds, mi->num_export_targets);
+
+ for (i = 0; i < mi->num_export_targets; i++) {
+ target = mi->export_targets[i];
+ ts = __ceph_lookup_mds_session(mdsc, target);
+ if (!ts) {
+ ts = register_session(mdsc, target);
+ if (IS_ERR(ts))
+ return;
+ }
+ if (ts->s_state == CEPH_MDS_SESSION_NEW ||
+ ts->s_state == CEPH_MDS_SESSION_CLOSING)
+ __open_session(mdsc, ts);
+ else
+ dout(" mds%d target mds%d %p is %s\n", session->s_mds,
+ target, ts, session_state_name(ts->s_state));
+ ceph_put_mds_session(ts);
+ }
+}
+
+void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ mutex_lock(&mdsc->mutex);
+ __open_export_target_sessions(mdsc, session);
+ mutex_unlock(&mdsc->mutex);
+}
+
+/*
* session caps
*/
@@ -764,7 +816,7 @@ static int iterate_session_caps(struct ceph_mds_session *session,
last_inode = NULL;
}
if (old_cap) {
- ceph_put_cap(old_cap);
+ ceph_put_cap(session->s_mdsc, old_cap);
old_cap = NULL;
}
@@ -793,7 +845,7 @@ out:
if (last_inode)
iput(last_inode);
if (old_cap)
- ceph_put_cap(old_cap);
+ ceph_put_cap(session->s_mdsc, old_cap);
return ret;
}
@@ -868,7 +920,7 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
{
struct ceph_inode_info *ci = ceph_inode(inode);
- wake_up(&ci->i_cap_wq);
+ wake_up_all(&ci->i_cap_wq);
if (arg) {
spin_lock(&inode->i_lock);
ci->i_wanted_max_size = 0;
@@ -1066,16 +1118,17 @@ static int trim_caps(struct ceph_mds_client *mdsc,
*
* Called under s_mutex.
*/
-static int add_cap_releases(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session,
- int extra)
+int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
{
- struct ceph_msg *msg;
+ struct ceph_msg *msg, *partial = NULL;
struct ceph_mds_cap_release *head;
int err = -ENOMEM;
+ int extra = mdsc->client->mount_args->cap_release_safety;
+ int num;
- if (extra < 0)
- extra = mdsc->client->mount_args->cap_release_safety;
+ dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
+ extra);
spin_lock(&session->s_cap_lock);
@@ -1084,9 +1137,14 @@ static int add_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_msg,
list_head);
head = msg->front.iov_base;
- extra += CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
+ num = le32_to_cpu(head->num);
+ if (num) {
+ dout(" partial %p with (%d/%d)\n", msg, num,
+ (int)CEPH_CAPS_PER_RELEASE);
+ extra += CEPH_CAPS_PER_RELEASE - num;
+ partial = msg;
+ }
}
-
while (session->s_num_cap_releases < session->s_nr_caps + extra) {
spin_unlock(&session->s_cap_lock);
msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
@@ -1103,19 +1161,14 @@ static int add_cap_releases(struct ceph_mds_client *mdsc,
session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
}
- if (!list_empty(&session->s_cap_releases)) {
- msg = list_first_entry(&session->s_cap_releases,
- struct ceph_msg,
- list_head);
- head = msg->front.iov_base;
- if (head->num) {
- dout(" queueing non-full %p (%d)\n", msg,
- le32_to_cpu(head->num));
- list_move_tail(&msg->list_head,
- &session->s_cap_releases_done);
- session->s_num_cap_releases -=
- CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
- }
+ if (partial) {
+ head = partial->front.iov_base;
+ num = le32_to_cpu(head->num);
+ dout(" queueing partial %p with %d/%d\n", partial, num,
+ (int)CEPH_CAPS_PER_RELEASE);
+ list_move_tail(&partial->list_head,
+ &session->s_cap_releases_done);
+ session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;
}
err = 0;
spin_unlock(&session->s_cap_lock);
@@ -1176,8 +1229,8 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
/*
* called under s_mutex
*/
-static void send_cap_releases(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session)
+void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
{
struct ceph_msg *msg;
@@ -1250,6 +1303,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
return ERR_PTR(-ENOMEM);
mutex_init(&req->r_fill_mutex);
+ req->r_mdsc = mdsc;
req->r_started = jiffies;
req->r_resend_mds = -1;
INIT_LIST_HEAD(&req->r_unsafe_dir_item);
@@ -1514,6 +1568,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
ceph_encode_filepath(&p, end, ino1, path1);
ceph_encode_filepath(&p, end, ino2, path2);
+ /* make note of release offset, in case we need to replay */
+ req->r_request_release_offset = p - msg->front.iov_base;
+
/* cap releases */
releases = 0;
if (req->r_inode_drop)
@@ -1561,7 +1618,7 @@ static void complete_request(struct ceph_mds_client *mdsc,
if (req->r_callback)
req->r_callback(mdsc, req);
else
- complete(&req->r_completion);
+ complete_all(&req->r_completion);
}
/*
@@ -1577,9 +1634,44 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
req->r_mds = mds;
req->r_attempts++;
+ if (req->r_inode) {
+ struct ceph_cap *cap =
+ ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
+
+ if (cap)
+ req->r_sent_on_mseq = cap->mseq;
+ else
+ req->r_sent_on_mseq = -1;
+ }
dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
+ if (req->r_got_unsafe) {
+ /*
+ * Replay. Do not regenerate message (and rebuild
+ * paths, etc.); just use the original message.
+ * Rebuilding paths will break for renames because
+ * d_move mangles the src name.
+ */
+ msg = req->r_request;
+ rhead = msg->front.iov_base;
+
+ flags = le32_to_cpu(rhead->flags);
+ flags |= CEPH_MDS_FLAG_REPLAY;
+ rhead->flags = cpu_to_le32(flags);
+
+ if (req->r_target_inode)
+ rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
+
+ rhead->num_retry = req->r_attempts - 1;
+
+ /* remove cap/dentry releases from message */
+ rhead->num_releases = 0;
+ msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset);
+ msg->front.iov_len = req->r_request_release_offset;
+ return 0;
+ }
+
if (req->r_request) {
ceph_msg_put(req->r_request);
req->r_request = NULL;
@@ -1601,13 +1693,9 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
rhead->flags = cpu_to_le32(flags);
rhead->num_fwd = req->r_num_fwd;
rhead->num_retry = req->r_attempts - 1;
+ rhead->ino = 0;
dout(" r_locked_dir = %p\n", req->r_locked_dir);
-
- if (req->r_target_inode && req->r_got_unsafe)
- rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
- else
- rhead->ino = 0;
return 0;
}
@@ -1889,25 +1977,44 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
result = le32_to_cpu(head->result);
/*
- * Tolerate 2 consecutive ESTALEs from the same mds.
- * FIXME: we should be looking at the cap migrate_seq.
+ * Handle an ESTALE:
+ * if we're not talking to the authority, send to them;
+ * if the authority has changed while we weren't looking,
+ * send to the new authority;
+ * otherwise we just have to return an ESTALE.
*/
if (result == -ESTALE) {
- req->r_direct_mode = USE_AUTH_MDS;
- req->r_num_stale++;
- if (req->r_num_stale <= 2) {
+ dout("got ESTALE on request %llu", req->r_tid);
+ if (!req->r_inode) {
+ /* do nothing; not an authority problem */
+ } else if (req->r_direct_mode != USE_AUTH_MDS) {
+ dout("not using auth, setting for that now");
+ req->r_direct_mode = USE_AUTH_MDS;
__do_request(mdsc, req);
mutex_unlock(&mdsc->mutex);
goto out;
+ } else {
+ struct ceph_inode_info *ci = ceph_inode(req->r_inode);
+ struct ceph_cap *cap =
+ ceph_get_cap_for_mds(ci, req->r_mds);
+
+ dout("already using auth\n");
+ if ((!cap || cap != ci->i_auth_cap) ||
+ (cap->mseq != req->r_sent_on_mseq)) {
+ dout("but cap changed, so resending\n");
+ __do_request(mdsc, req);
+ mutex_unlock(&mdsc->mutex);
+ goto out;
+ }
}
- } else {
- req->r_num_stale = 0;
+ dout("have to return ESTALE on request %llu", req->r_tid);
}
+
if (head->safe) {
req->r_got_safe = true;
__unregister_request(mdsc, req);
- complete(&req->r_safe_completion);
+ complete_all(&req->r_safe_completion);
if (req->r_got_unsafe) {
/*
@@ -1922,7 +2029,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
/* last unsafe request during umount? */
if (mdsc->stopping && !__get_oldest_req(mdsc))
- complete(&mdsc->safe_umount_waiters);
+ complete_all(&mdsc->safe_umount_waiters);
mutex_unlock(&mdsc->mutex);
goto out;
}
@@ -1960,7 +2067,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
if (err == 0) {
if (result == 0 && rinfo->dir_nr)
ceph_readdir_prepopulate(req, req->r_session);
- ceph_unreserve_caps(&req->r_caps_reservation);
+ ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
}
mutex_unlock(&req->r_fill_mutex);
@@ -1980,7 +2087,7 @@ out_err:
}
mutex_unlock(&mdsc->mutex);
- add_cap_releases(mdsc, req->r_session, -1);
+ ceph_add_cap_releases(mdsc, req->r_session);
mutex_unlock(&session->s_mutex);
/* kick calling process */
@@ -2101,7 +2208,7 @@ static void handle_session(struct ceph_mds_session *session,
pr_info("mds%d reconnect denied\n", session->s_mds);
remove_session_caps(session);
wake = 1; /* for good measure */
- complete(&mdsc->session_close_waiters);
+ complete_all(&mdsc->session_close_waiters);
kick_requests(mdsc, mds);
break;
@@ -2168,9 +2275,14 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
void *arg)
{
- struct ceph_mds_cap_reconnect rec;
+ union {
+ struct ceph_mds_cap_reconnect v2;
+ struct ceph_mds_cap_reconnect_v1 v1;
+ } rec;
+ size_t reclen;
struct ceph_inode_info *ci;
- struct ceph_pagelist *pagelist = arg;
+ struct ceph_reconnect_state *recon_state = arg;
+ struct ceph_pagelist *pagelist = recon_state->pagelist;
char *path;
int pathlen, err;
u64 pathbase;
@@ -2203,17 +2315,44 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
spin_lock(&inode->i_lock);
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
- rec.cap_id = cpu_to_le64(cap->cap_id);
- rec.pathbase = cpu_to_le64(pathbase);
- rec.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
- rec.issued = cpu_to_le32(cap->issued);
- rec.size = cpu_to_le64(inode->i_size);
- ceph_encode_timespec(&rec.mtime, &inode->i_mtime);
- ceph_encode_timespec(&rec.atime, &inode->i_atime);
- rec.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+
+ if (recon_state->flock) {
+ rec.v2.cap_id = cpu_to_le64(cap->cap_id);
+ rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
+ rec.v2.issued = cpu_to_le32(cap->issued);
+ rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+ rec.v2.pathbase = cpu_to_le64(pathbase);
+ rec.v2.flock_len = 0;
+ reclen = sizeof(rec.v2);
+ } else {
+ rec.v1.cap_id = cpu_to_le64(cap->cap_id);
+ rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
+ rec.v1.issued = cpu_to_le32(cap->issued);
+ rec.v1.size = cpu_to_le64(inode->i_size);
+ ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
+ ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
+ rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+ rec.v1.pathbase = cpu_to_le64(pathbase);
+ reclen = sizeof(rec.v1);
+ }
spin_unlock(&inode->i_lock);
- err = ceph_pagelist_append(pagelist, &rec, sizeof(rec));
+ if (recon_state->flock) {
+ int num_fcntl_locks, num_flock_locks;
+
+ lock_kernel();
+ ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
+ rec.v2.flock_len = (2*sizeof(u32) +
+ (num_fcntl_locks+num_flock_locks) *
+ sizeof(struct ceph_filelock));
+
+ err = ceph_pagelist_append(pagelist, &rec, reclen);
+ if (!err)
+ err = ceph_encode_locks(inode, pagelist,
+ num_fcntl_locks,
+ num_flock_locks);
+ unlock_kernel();
+ }
out:
kfree(path);
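
For reference, the lock payload that follows each v2 reconnect record is exactly what the flock_len computation above accounts for, spelled out term by term:

	flock_len = sizeof(u32)			/* number of fcntl locks */
		  + num_fcntl_locks * sizeof(struct ceph_filelock)
		  + sizeof(u32)			/* number of flock locks */
		  + num_flock_locks * sizeof(struct ceph_filelock);
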
@@ -2242,6 +2381,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
int mds = session->s_mds;
int err = -ENOMEM;
struct ceph_pagelist *pagelist;
+ struct ceph_reconnect_state recon_state;
pr_info("mds%d reconnect start\n", mds);
@@ -2276,7 +2416,10 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
if (err)
goto fail;
- err = iterate_session_caps(session, encode_caps_cb, pagelist);
+
+ recon_state.pagelist = pagelist;
+ recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
+ err = iterate_session_caps(session, encode_caps_cb, &recon_state);
if (err < 0)
goto fail;
@@ -2301,6 +2444,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
}
reply->pagelist = pagelist;
+ if (recon_state.flock)
+ reply->hdr.version = cpu_to_le16(2);
reply->hdr.data_len = cpu_to_le32(pagelist->length);
reply->nr_pages = calc_pages_for(0, pagelist->length);
ceph_con_send(&session->s_con, reply);
@@ -2351,9 +2496,11 @@ static void check_new_map(struct ceph_mds_client *mdsc,
oldstate = ceph_mdsmap_get_state(oldmap, i);
newstate = ceph_mdsmap_get_state(newmap, i);
- dout("check_new_map mds%d state %s -> %s (session %s)\n",
+ dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
i, ceph_mds_state_name(oldstate),
+ ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
ceph_mds_state_name(newstate),
+ ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
session_state_name(s->s_state));
if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
@@ -2403,6 +2550,21 @@ static void check_new_map(struct ceph_mds_client *mdsc,
wake_up_session_caps(s, 1);
}
}
+
+ for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
+ s = mdsc->sessions[i];
+ if (!s)
+ continue;
+ if (!ceph_mdsmap_is_laggy(newmap, i))
+ continue;
+ if (s->s_state == CEPH_MDS_SESSION_OPEN ||
+ s->s_state == CEPH_MDS_SESSION_HUNG ||
+ s->s_state == CEPH_MDS_SESSION_CLOSING) {
+ dout(" connecting to export targets of laggy mds%d\n",
+ i);
+ __open_export_target_sessions(mdsc, s);
+ }
+ }
}
@@ -2433,6 +2595,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
struct ceph_dentry_info *di;
int mds = session->s_mds;
struct ceph_mds_lease *h = msg->front.iov_base;
+ u32 seq;
struct ceph_vino vino;
int mask;
struct qstr dname;
@@ -2446,6 +2609,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
vino.ino = le64_to_cpu(h->ino);
vino.snap = CEPH_NOSNAP;
mask = le16_to_cpu(h->mask);
+ seq = le32_to_cpu(h->seq);
dname.name = (void *)h + sizeof(*h) + sizeof(u32);
dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
if (dname.len != get_unaligned_le32(h+1))
@@ -2456,8 +2620,9 @@ static void handle_lease(struct ceph_mds_client *mdsc,
/* lookup inode */
inode = ceph_find_inode(sb, vino);
- dout("handle_lease '%s', mask %d, ino %llx %p\n",
- ceph_lease_op_name(h->action), mask, vino.ino, inode);
+ dout("handle_lease %s, mask %d, ino %llx %p %.*s\n",
+ ceph_lease_op_name(h->action), mask, vino.ino, inode,
+ dname.len, dname.name);
if (inode == NULL) {
dout("handle_lease no inode %llx\n", vino.ino);
goto release;
@@ -2482,7 +2647,8 @@ static void handle_lease(struct ceph_mds_client *mdsc,
switch (h->action) {
case CEPH_MDS_LEASE_REVOKE:
if (di && di->lease_session == session) {
- h->seq = cpu_to_le32(di->lease_seq);
+ if (ceph_seq_cmp(di->lease_seq, seq) > 0)
+ h->seq = cpu_to_le32(di->lease_seq);
__ceph_mdsc_drop_dentry_lease(dentry);
}
release = 1;
@@ -2496,7 +2662,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
unsigned long duration =
le32_to_cpu(h->duration_ms) * HZ / 1000;
- di->lease_seq = le32_to_cpu(h->seq);
+ di->lease_seq = seq;
dentry->d_time = di->lease_renew_from + duration;
di->lease_renew_after = di->lease_renew_from +
(duration >> 1);
@@ -2686,10 +2852,10 @@ static void delayed_work(struct work_struct *work)
send_renew_caps(mdsc, s);
else
ceph_con_keepalive(&s->s_con);
- add_cap_releases(mdsc, s, -1);
+ ceph_add_cap_releases(mdsc, s);
if (s->s_state == CEPH_MDS_SESSION_OPEN ||
s->s_state == CEPH_MDS_SESSION_HUNG)
- send_cap_releases(mdsc, s);
+ ceph_send_cap_releases(mdsc, s);
mutex_unlock(&s->s_mutex);
ceph_put_mds_session(s);
@@ -2735,6 +2901,9 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
spin_lock_init(&mdsc->dentry_lru_lock);
INIT_LIST_HEAD(&mdsc->dentry_lru);
+ ceph_caps_init(mdsc);
+ ceph_adjust_min_caps(mdsc, client->min_caps);
+
return 0;
}
@@ -2779,6 +2948,12 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
drop_leases(mdsc);
ceph_flush_dirty_caps(mdsc);
wait_requests(mdsc);
+
+ /*
+ * wait for reply handlers to drop their request refs and
+ * their inode/dcache refs
+ */
+ ceph_msgr_flush();
}
/*
@@ -2924,6 +3099,7 @@ void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
if (mdsc->mdsmap)
ceph_mdsmap_destroy(mdsc->mdsmap);
kfree(mdsc->sessions);
+ ceph_caps_finalize(mdsc);
}
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index d9936c4f121..ab7e89f5e34 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -151,6 +151,7 @@ typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
struct ceph_mds_request {
u64 r_tid; /* transaction id */
struct rb_node r_node;
+ struct ceph_mds_client *r_mdsc;
int r_op; /* mds op code */
int r_mds;
@@ -188,6 +189,7 @@ struct ceph_mds_request {
int r_old_inode_drop, r_old_inode_unless;
struct ceph_msg *r_request; /* original request */
+ int r_request_release_offset;
struct ceph_msg *r_reply;
struct ceph_mds_reply_info_parsed r_reply_info;
int r_err;
@@ -206,8 +208,8 @@ struct ceph_mds_request {
int r_attempts; /* resend attempts */
int r_num_fwd; /* number of forward attempts */
- int r_num_stale;
int r_resend_mds; /* mds to resend to next, if any*/
+ u32 r_sent_on_mseq; /* cap mseq request was sent at*/
struct kref r_kref;
struct list_head r_wait;
@@ -266,6 +268,27 @@ struct ceph_mds_client {
spinlock_t cap_dirty_lock; /* protects above items */
wait_queue_head_t cap_flushing_wq;
+ /*
+ * Cap reservations
+ *
+ * Maintain a global pool of preallocated struct ceph_caps, referenced
+ * by struct ceph_cap_reservation. This ensures that we preallocate
+ * memory needed to successfully process an MDS response. (If an MDS
+ * sends us cap information and we fail to process it, we will have
+ * problems due to the client and MDS being out of sync.)
+ *
+ * Reservations are 'owned' by a ceph_cap_reservation context.
+ */
+ spinlock_t caps_list_lock;
+ struct list_head caps_list; /* unused (reserved or
+ unreserved) */
+ int caps_total_count; /* total caps allocated */
+ int caps_use_count; /* in use */
+ int caps_reserve_count; /* unused, reserved */
+ int caps_avail_count; /* unused, unreserved */
+ int caps_min_count; /* keep at least this many
+ (unreserved) */
+
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_file;
#endif
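
A sketch of the reservation lifecycle these fields support, using the per-mdsc helpers this series moves onto the mds_client (usage pattern taken from the patch; exact signatures assumed):

	struct ceph_cap_reservation rsv = {0};

	ceph_reserve_caps(mdsc, &rsv, need);	/* preallocate before processing */
	/* ... consume caps from rsv while handling the MDS reply ... */
	ceph_unreserve_caps(mdsc, &rsv);	/* return whatever went unused */
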
@@ -322,6 +345,11 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
kref_put(&req->r_kref, ceph_mdsc_release_request);
}
+extern int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
+extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
+
extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
@@ -336,4 +364,7 @@ extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
struct ceph_msg *msg);
+extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
+
#endif
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index c4c498e6dfe..040be6d1150 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -85,6 +85,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
struct ceph_entity_addr addr;
u32 num_export_targets;
void *pexport_targets = NULL;
+ struct ceph_timespec laggy_since;
ceph_decode_need(p, end, sizeof(u64)*2 + 1 + sizeof(u32), bad);
global_id = ceph_decode_64(p);
@@ -103,7 +104,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
state_seq = ceph_decode_64(p);
ceph_decode_copy(p, &addr, sizeof(addr));
ceph_decode_addr(&addr);
- *p += sizeof(struct ceph_timespec);
+ ceph_decode_copy(p, &laggy_since, sizeof(laggy_since));
*p += sizeof(u32);
ceph_decode_32_safe(p, end, namelen, bad);
*p += namelen;
@@ -122,6 +123,9 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
m->m_info[mds].global_id = global_id;
m->m_info[mds].state = state;
m->m_info[mds].addr = addr;
+ m->m_info[mds].laggy =
+ (laggy_since.tv_sec != 0 ||
+ laggy_since.tv_nsec != 0);
m->m_info[mds].num_export_targets = num_export_targets;
if (num_export_targets) {
m->m_info[mds].export_targets =
diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h
index eacc131aa5c..4c5cb0880bb 100644
--- a/fs/ceph/mdsmap.h
+++ b/fs/ceph/mdsmap.h
@@ -13,6 +13,7 @@ struct ceph_mds_info {
struct ceph_entity_addr addr;
s32 state;
int num_export_targets;
+ bool laggy;
u32 *export_targets;
};
@@ -47,6 +48,13 @@ static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
return m->m_info[w].state;
}
+static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
+{
+ if (w >= 0 && w < m->m_max_mds)
+ return m->m_info[w].laggy;
+ return false;
+}
+
extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 64b8b1f7863..2502d76fcec 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -43,7 +43,8 @@ static void ceph_fault(struct ceph_connection *con);
* nicely render a sockaddr as a string.
*/
#define MAX_ADDR_STR 20
-static char addr_str[MAX_ADDR_STR][40];
+#define MAX_ADDR_STR_LEN 60
+static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;
@@ -52,7 +53,6 @@ const char *pr_addr(const struct sockaddr_storage *ss)
int i;
char *s;
struct sockaddr_in *in4 = (void *)ss;
- unsigned char *quad = (void *)&in4->sin_addr.s_addr;
struct sockaddr_in6 *in6 = (void *)ss;
spin_lock(&addr_str_lock);
@@ -64,25 +64,13 @@ const char *pr_addr(const struct sockaddr_storage *ss)
switch (ss->ss_family) {
case AF_INET:
- sprintf(s, "%u.%u.%u.%u:%u",
- (unsigned int)quad[0],
- (unsigned int)quad[1],
- (unsigned int)quad[2],
- (unsigned int)quad[3],
- (unsigned int)ntohs(in4->sin_port));
+ snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
+ (unsigned int)ntohs(in4->sin_port));
break;
case AF_INET6:
- sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
- in6->sin6_addr.s6_addr16[0],
- in6->sin6_addr.s6_addr16[1],
- in6->sin6_addr.s6_addr16[2],
- in6->sin6_addr.s6_addr16[3],
- in6->sin6_addr.s6_addr16[4],
- in6->sin6_addr.s6_addr16[5],
- in6->sin6_addr.s6_addr16[6],
- in6->sin6_addr.s6_addr16[7],
- (unsigned int)ntohs(in6->sin6_port));
+ snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
+ (unsigned int)ntohs(in6->sin6_port));
break;
default:
@@ -120,7 +108,7 @@ void ceph_msgr_exit(void)
destroy_workqueue(ceph_msgr_wq);
}
-void ceph_msgr_flush()
+void ceph_msgr_flush(void)
{
flush_workqueue(ceph_msgr_wq);
}
@@ -215,12 +203,13 @@ static void set_sock_callbacks(struct socket *sock,
*/
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
- struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
+ struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
struct socket *sock;
int ret;
BUG_ON(con->sock);
- ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
+ IPPROTO_TCP, &sock);
if (ret)
return ERR_PTR(ret);
con->sock = sock;
@@ -234,7 +223,8 @@ static struct socket *ceph_tcp_connect(struct ceph_connection *con)
dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
- ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
+ ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
+ O_NONBLOCK);
if (ret == -EINPROGRESS) {
dout("connect %s EINPROGRESS sk_state = %u\n",
pr_addr(&con->peer_addr.in_addr),
@@ -657,7 +647,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
con->connect_seq, global_seq, proto);
- con->out_connect.features = CEPH_FEATURE_SUPPORTED_CLIENT;
+ con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED);
con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
con->out_connect.global_seq = cpu_to_le32(global_seq);
@@ -1009,19 +999,32 @@ int ceph_parse_ips(const char *c, const char *end,
struct sockaddr_in *in4 = (void *)ss;
struct sockaddr_in6 *in6 = (void *)ss;
int port;
+ char delim = ',';
+
+ if (*p == '[') {
+ delim = ']';
+ p++;
+ }
memset(ss, 0, sizeof(*ss));
if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
- ',', &ipend)) {
+ delim, &ipend))
ss->ss_family = AF_INET;
- } else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
- ',', &ipend)) {
+ else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
+ delim, &ipend))
ss->ss_family = AF_INET6;
- } else {
+ else
goto bad;
- }
p = ipend;
+ if (delim == ']') {
+ if (*p != ']') {
+ dout("missing matching ']'\n");
+ goto bad;
+ }
+ p++;
+ }
+
/* port? */
if (p < end && *p == ':') {
port = 0;
@@ -1055,7 +1058,7 @@ int ceph_parse_ips(const char *c, const char *end,
return 0;
bad:
- pr_err("parse_ips bad ip '%s'\n", c);
+ pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
return -EINVAL;
}
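
With the bracket handling above, a monitor address list may now mix plain IPv4 and bracketed IPv6 endpoints; e.g. (illustrative addresses):

	mount -t ceph 192.168.0.1:6789,[2001:db8::2]:6789:/ /mnt/ceph
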
@@ -1078,11 +1081,11 @@ static int process_banner(struct ceph_connection *con)
sizeof(con->peer_addr)) != 0 &&
!(addr_is_blank(&con->actual_peer_addr.in_addr) &&
con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
- pr_warning("wrong peer, want %s/%lld, got %s/%lld\n",
+ pr_warning("wrong peer, want %s/%d, got %s/%d\n",
pr_addr(&con->peer_addr.in_addr),
- le64_to_cpu(con->peer_addr.nonce),
+ (int)le32_to_cpu(con->peer_addr.nonce),
pr_addr(&con->actual_peer_addr.in_addr),
- le64_to_cpu(con->actual_peer_addr.nonce));
+ (int)le32_to_cpu(con->actual_peer_addr.nonce));
con->error_msg = "wrong peer at address";
return -1;
}
@@ -1120,8 +1123,8 @@ static void fail_protocol(struct ceph_connection *con)
static int process_connect(struct ceph_connection *con)
{
- u64 sup_feat = CEPH_FEATURE_SUPPORTED_CLIENT;
- u64 req_feat = CEPH_FEATURE_REQUIRED_CLIENT;
+ u64 sup_feat = CEPH_FEATURE_SUPPORTED;
+ u64 req_feat = CEPH_FEATURE_REQUIRED;
u64 server_feat = le64_to_cpu(con->in_reply.features);
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
@@ -1299,8 +1302,8 @@ static void process_ack(struct ceph_connection *con)
static int read_partial_message_section(struct ceph_connection *con,
- struct kvec *section, unsigned int sec_len,
- u32 *crc)
+ struct kvec *section,
+ unsigned int sec_len, u32 *crc)
{
int left;
int ret;
@@ -1396,10 +1399,12 @@ static int read_partial_message(struct ceph_connection *con)
if (!con->in_msg) {
dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
con->in_hdr.front_len, con->in_hdr.data_len);
+ skip = 0;
con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
if (skip) {
/* skip this message */
dout("alloc_msg said skip message\n");
+ BUG_ON(con->in_msg);
con->in_base_pos = -front_len - middle_len - data_len -
sizeof(m->footer);
con->in_tag = CEPH_MSGR_TAG_READY;
@@ -1429,7 +1434,8 @@ static int read_partial_message(struct ceph_connection *con)
/* middle */
if (m->middle) {
- ret = read_partial_message_section(con, &m->middle->vec, middle_len,
+ ret = read_partial_message_section(con, &m->middle->vec,
+ middle_len,
&con->in_middle_crc);
if (ret <= 0)
return ret;
@@ -1915,7 +1921,7 @@ out:
/*
* in case we faulted due to authentication, invalidate our
* current tickets so that we can get new ones.
- */
+ */
if (con->auth_retry && con->ops->invalidate_authorizer) {
dout("calling invalidate_authorizer()\n");
con->ops->invalidate_authorizer(con);
@@ -2013,20 +2019,20 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
mutex_lock(&con->mutex);
if (!list_empty(&msg->list_head)) {
- dout("con_revoke %p msg %p\n", con, msg);
+ dout("con_revoke %p msg %p - was on queue\n", con, msg);
list_del_init(&msg->list_head);
ceph_msg_put(msg);
msg->hdr.seq = 0;
- if (con->out_msg == msg) {
- ceph_msg_put(con->out_msg);
- con->out_msg = NULL;
- }
+ }
+ if (con->out_msg == msg) {
+ dout("con_revoke %p msg %p - was sending\n", con, msg);
+ con->out_msg = NULL;
if (con->out_kvec_is_msg) {
con->out_skip = con->out_kvec_bytes;
con->out_kvec_is_msg = false;
}
- } else {
- dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
+ ceph_msg_put(msg);
+ msg->hdr.seq = 0;
}
mutex_unlock(&con->mutex);
}
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
index 21c62e9b7d1..b2a5a3e4a67 100644
--- a/fs/ceph/mon_client.c
+++ b/fs/ceph/mon_client.c
@@ -345,11 +345,11 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
out:
mutex_unlock(&monc->mutex);
- wake_up(&client->auth_wq);
+ wake_up_all(&client->auth_wq);
}
/*
- * statfs
+ * generic requests (e.g., statfs, poolop)
*/
static struct ceph_mon_generic_request *__lookup_generic_req(
struct ceph_mon_client *monc, u64 tid)
@@ -400,6 +400,8 @@ static void release_generic_request(struct kref *kref)
ceph_msg_put(req->reply);
if (req->request)
ceph_msg_put(req->request);
+
+ kfree(req);
}
static void put_generic_request(struct ceph_mon_generic_request *req)
@@ -440,6 +442,35 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
return m;
}
+static int do_generic_request(struct ceph_mon_client *monc,
+ struct ceph_mon_generic_request *req)
+{
+ int err;
+
+ /* register request */
+ mutex_lock(&monc->mutex);
+ req->tid = ++monc->last_tid;
+ req->request->hdr.tid = cpu_to_le64(req->tid);
+ __insert_generic_request(monc, req);
+ monc->num_generic_requests++;
+ ceph_con_send(monc->con, ceph_msg_get(req->request));
+ mutex_unlock(&monc->mutex);
+
+ err = wait_for_completion_interruptible(&req->completion);
+
+ mutex_lock(&monc->mutex);
+ rb_erase(&req->node, &monc->generic_request_tree);
+ monc->num_generic_requests--;
+ mutex_unlock(&monc->mutex);
+
+ if (!err)
+ err = req->result;
+ return err;
+}
+
+/*
+ * statfs
+ */
static void handle_statfs_reply(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
@@ -460,13 +491,13 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
}
mutex_unlock(&monc->mutex);
if (req) {
- complete(&req->completion);
+ complete_all(&req->completion);
put_generic_request(req);
}
return;
bad:
- pr_err("corrupt generic reply, no tid\n");
+ pr_err("corrupt generic reply, tid %llu\n", tid);
ceph_msg_dump(msg);
}
@@ -485,6 +516,7 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
kref_init(&req->kref);
req->buf = buf;
+ req->buf_len = sizeof(*buf);
init_completion(&req->completion);
err = -ENOMEM;
@@ -502,33 +534,134 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
h->monhdr.session_mon_tid = 0;
h->fsid = monc->monmap->fsid;
- /* register request */
- mutex_lock(&monc->mutex);
- req->tid = ++monc->last_tid;
- req->request->hdr.tid = cpu_to_le64(req->tid);
- __insert_generic_request(monc, req);
- monc->num_generic_requests++;
- mutex_unlock(&monc->mutex);
+ err = do_generic_request(monc, req);
- /* send request and wait */
- ceph_con_send(monc->con, ceph_msg_get(req->request));
- err = wait_for_completion_interruptible(&req->completion);
+out:
+ kref_put(&req->kref, release_generic_request);
+ return err;
+}
+
+/*
+ * pool ops
+ */
+static int get_poolop_reply_buf(const char *src, size_t src_len,
+ char *dst, size_t dst_len)
+{
+ u32 buf_len;
+
+ if (src_len != sizeof(u32) + dst_len)
+ return -EINVAL;
+
+ buf_len = le32_to_cpu(*(u32 *)src);
+ if (buf_len != dst_len)
+ return -EINVAL;
+
+ memcpy(dst, src + sizeof(u32), dst_len);
+ return 0;
+}
+
+static void handle_poolop_reply(struct ceph_mon_client *monc,
+ struct ceph_msg *msg)
+{
+ struct ceph_mon_generic_request *req;
+ struct ceph_mon_poolop_reply *reply = msg->front.iov_base;
+ u64 tid = le64_to_cpu(msg->hdr.tid);
+
+ if (msg->front.iov_len < sizeof(*reply))
+ goto bad;
+ dout("handle_poolop_reply %p tid %llu\n", msg, tid);
mutex_lock(&monc->mutex);
- rb_erase(&req->node, &monc->generic_request_tree);
- monc->num_generic_requests--;
+ req = __lookup_generic_req(monc, tid);
+ if (req) {
+ if (req->buf_len &&
+ get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply),
+ msg->front.iov_len - sizeof(*reply),
+ req->buf, req->buf_len) < 0) {
+ mutex_unlock(&monc->mutex);
+ goto bad;
+ }
+ req->result = le32_to_cpu(reply->reply_code);
+ get_generic_request(req);
+ }
mutex_unlock(&monc->mutex);
+ if (req) {
+ complete_all(&req->completion);
+ put_generic_request(req);
+ }
+ return;
- if (!err)
- err = req->result;
+bad:
+ pr_err("corrupt generic reply, tid %llu\n", tid);
+ ceph_msg_dump(msg);
+}
+
+/*
+ * Do a synchronous pool op.
+ */
+int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op,
+ u32 pool, u64 snapid,
+ char *buf, int len)
+{
+ struct ceph_mon_generic_request *req;
+ struct ceph_mon_poolop *h;
+ int err;
+
+ req = kzalloc(sizeof(*req), GFP_NOFS);
+ if (!req)
+ return -ENOMEM;
+
+ kref_init(&req->kref);
+ req->buf = buf;
+ req->buf_len = len;
+ init_completion(&req->completion);
+
+ err = -ENOMEM;
+ req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS);
+ if (!req->request)
+ goto out;
+ req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS);
+ if (!req->reply)
+ goto out;
+
+ /* fill out request */
+ req->request->hdr.version = cpu_to_le16(2);
+ h = req->request->front.iov_base;
+ h->monhdr.have_version = 0;
+ h->monhdr.session_mon = cpu_to_le16(-1);
+ h->monhdr.session_mon_tid = 0;
+ h->fsid = monc->monmap->fsid;
+ h->pool = cpu_to_le32(pool);
+ h->op = cpu_to_le32(op);
+ h->auid = 0;
+ h->snapid = cpu_to_le64(snapid);
+ h->name_len = 0;
+
+ err = do_generic_request(monc, req);
out:
kref_put(&req->kref, release_generic_request);
return err;
}
+int ceph_monc_create_snapid(struct ceph_mon_client *monc,
+ u32 pool, u64 *snapid)
+{
+ return ceph_monc_do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
+ pool, 0, (char *)snapid, sizeof(*snapid));
+}
+
+int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
+ u32 pool, u64 snapid)
+{
+ return ceph_monc_do_poolop(monc, POOL_OP_DELETE_UNMANAGED_SNAP,
+ pool, snapid, 0, 0);
+}
+
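
A caller would use these helpers roughly as follows (sketch; client and pool_id stand in for an existing ceph_client and pool id, and error handling is elided):

	u64 snapid;
	int err;

	err = ceph_monc_create_snapid(&client->monc, pool_id, &snapid);
	if (!err)
		err = ceph_monc_delete_snapid(&client->monc, pool_id, snapid);
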
/*
- * Resend pending statfs requests.
+ * Resend pending generic requests.
*/
static void __resend_generic_request(struct ceph_mon_client *monc)
{
@@ -716,14 +849,15 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
monc->m_auth->front_max);
if (ret < 0) {
monc->client->auth_err = ret;
- wake_up(&monc->client->auth_wq);
+ wake_up_all(&monc->client->auth_wq);
} else if (ret > 0) {
__send_prepared_auth_request(monc, ret);
} else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
dout("authenticated, starting session\n");
monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
- monc->client->msgr->inst.name.num = monc->auth->global_id;
+ monc->client->msgr->inst.name.num =
+ cpu_to_le64(monc->auth->global_id);
__send_subscribe(monc);
__resend_generic_request(monc);
@@ -780,6 +914,10 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
handle_statfs_reply(monc, msg);
break;
+ case CEPH_MSG_POOLOP_REPLY:
+ handle_poolop_reply(monc, msg);
+ break;
+
case CEPH_MSG_MON_MAP:
ceph_monc_handle_map(monc, msg);
break;
@@ -817,6 +955,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
case CEPH_MSG_MON_SUBSCRIBE_ACK:
m = ceph_msg_get(monc->m_subscribe_ack);
break;
+ case CEPH_MSG_POOLOP_REPLY:
case CEPH_MSG_STATFS_REPLY:
return get_generic_reply(con, hdr, skip);
case CEPH_MSG_AUTH_REPLY:
diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h
index 174d794321d..8e396f2c096 100644
--- a/fs/ceph/mon_client.h
+++ b/fs/ceph/mon_client.h
@@ -50,6 +50,7 @@ struct ceph_mon_generic_request {
struct rb_node node;
int result;
void *buf;
+ int buf_len;
struct completion completion;
struct ceph_msg *request; /* original request */
struct ceph_msg *reply; /* and reply */
@@ -111,6 +112,10 @@ extern int ceph_monc_open_session(struct ceph_mon_client *monc);
extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
+extern int ceph_monc_create_snapid(struct ceph_mon_client *monc,
+ u32 pool, u64 *snapid);
+extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
+ u32 pool, u64 snapid);
#endif
diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h
index 892a0298dfd..680d3d648ca 100644
--- a/fs/ceph/msgr.h
+++ b/fs/ceph/msgr.h
@@ -1,5 +1,5 @@
-#ifndef __MSGR_H
-#define __MSGR_H
+#ifndef CEPH_MSGR_H
+#define CEPH_MSGR_H
/*
* Data types for message passing layer used by Ceph.
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index d25b4add85b..bed6391e52c 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -862,12 +862,12 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
if (req->r_callback)
req->r_callback(req, msg);
else
- complete(&req->r_completion);
+ complete_all(&req->r_completion);
if (flags & CEPH_OSD_FLAG_ONDISK) {
if (req->r_safe_callback)
req->r_safe_callback(req, msg);
- complete(&req->r_safe_completion); /* fsync waiter */
+ complete_all(&req->r_safe_completion); /* fsync waiter */
}
done:
@@ -1083,7 +1083,7 @@ done:
if (newmap)
kick_requests(osdc, NULL);
up_read(&osdc->map_sem);
- wake_up(&osdc->client->auth_wq);
+ wake_up_all(&osdc->client->auth_wq);
return;
bad:
@@ -1276,8 +1276,6 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
/* it may be a short read due to an object boundary */
req->r_pages = pages;
- num_pages = calc_pages_for(off, *plen);
- req->r_num_pages = num_pages;
dout("readpages final extent is %llu~%llu (%d pages)\n",
off, *plen, req->r_num_pages);
@@ -1319,7 +1317,6 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
/* it may be a short write due to an object boundary */
req->r_pages = pages;
- req->r_num_pages = calc_pages_for(off, len);
dout("writepages %llu~%llu (%d pages)\n", off, len,
req->r_num_pages);
@@ -1344,7 +1341,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
int type = le16_to_cpu(msg->hdr.type);
if (!osd)
- return;
+ goto out;
osdc = osd->o_osdc;
switch (type) {
@@ -1359,6 +1356,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
pr_err("received unknown message type %d %s\n", type,
ceph_msg_type_name(type));
}
+out:
ceph_msg_put(msg);
}
@@ -1475,8 +1473,8 @@ static void put_osd_con(struct ceph_connection *con)
* authentication
*/
static int get_authorizer(struct ceph_connection *con,
- void **buf, int *len, int *proto,
- void **reply_buf, int *reply_len, int force_new)
+ void **buf, int *len, int *proto,
+ void **reply_buf, int *reply_len, int force_new)
{
struct ceph_osd *o = con->private;
struct ceph_osd_client *osdc = o->o_osdc;
@@ -1496,7 +1494,7 @@ static int get_authorizer(struct ceph_connection *con,
&o->o_authorizer_reply_buf,
&o->o_authorizer_reply_buf_len);
if (ret)
- return ret;
+ return ret;
}
*proto = ac->protocol;
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index ddc656fb5c0..e31f118f139 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -424,12 +424,30 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
kfree(pi);
}
-void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
+static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
+ unsigned n, m;
+
ceph_decode_copy(p, &pi->v, sizeof(pi->v));
calc_pg_masks(pi);
- *p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
+
+ /* num_snaps * snap_info_t */
+ n = le32_to_cpu(pi->v.num_snaps);
+ while (n--) {
+ ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
+ sizeof(struct ceph_timespec), bad);
+ *p += sizeof(u64) + /* key */
+ 1 + sizeof(u64) + /* u8, snapid */
+ sizeof(struct ceph_timespec);
+ m = ceph_decode_32(p); /* snap name */
+ *p += m;
+ }
+
*p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
+ return 0;
+
+bad:
+ return -EINVAL;
}
static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
@@ -568,9 +586,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
if (ev > CEPH_PG_POOL_VERSION) {
pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
ev, CEPH_PG_POOL_VERSION);
+ kfree(pi);
goto bad;
}
- __decode_pool(p, pi);
+ err = __decode_pool(p, end, pi);
+ if (err < 0)
+ goto bad;
__insert_pg_pool(&map->pg_pools, pi);
}
@@ -707,6 +728,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
newcrush = crush_decode(*p, min(*p+len, end));
if (IS_ERR(newcrush))
return ERR_CAST(newcrush);
+ *p += len;
}
/* new flags? */
@@ -758,7 +780,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
pi->id = pool;
__insert_pg_pool(&map->pg_pools, pi);
}
- __decode_pool(p, pi);
+ err = __decode_pool(p, end, pi);
+ if (err < 0)
+ goto bad;
}
if (version >= 5 && __decode_pool_names(p, end, map) < 0)
goto bad;
@@ -829,12 +853,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
/* remove any? */
while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
node)->pgid, pgid) <= 0) {
- struct rb_node *cur = rbp;
+ struct ceph_pg_mapping *cur =
+ rb_entry(rbp, struct ceph_pg_mapping, node);
+
rbp = rb_next(rbp);
- dout(" removed pg_temp %llx\n",
- *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
- node)->pgid);
- rb_erase(cur, &map->pg_temp);
+ dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
+ rb_erase(&cur->node, &map->pg_temp);
+ kfree(cur);
}
if (pglen) {
@@ -850,19 +875,22 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
for (j = 0; j < pglen; j++)
pg->osds[j] = ceph_decode_32(p);
err = __insert_pg_mapping(pg, &map->pg_temp);
- if (err)
+ if (err) {
+ kfree(pg);
goto bad;
+ }
dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
pglen);
}
}
while (rbp) {
- struct rb_node *cur = rbp;
+ struct ceph_pg_mapping *cur =
+ rb_entry(rbp, struct ceph_pg_mapping, node);
+
rbp = rb_next(rbp);
- dout(" removed pg_temp %llx\n",
- *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
- node)->pgid);
- rb_erase(cur, &map->pg_temp);
+ dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
+ rb_erase(&cur->node, &map->pg_temp);
+ kfree(cur);
}
/* ignore the rest */
@@ -1020,8 +1048,9 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
pool->v.type, pool->v.size);
if (ruleno < 0) {
- pr_err("no crush rule pool %d type %d size %d\n",
- poolid, pool->v.type, pool->v.size);
+ pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
+ poolid, pool->v.crush_ruleset, pool->v.type,
+ pool->v.size);
return NULL;
}
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
index 8fcc023056c..6d5247f2e81 100644
--- a/fs/ceph/rados.h
+++ b/fs/ceph/rados.h
@@ -1,5 +1,5 @@
-#ifndef __RADOS_H
-#define __RADOS_H
+#ifndef CEPH_RADOS_H
+#define CEPH_RADOS_H
/*
* Data types for the Ceph distributed object storage layer RADOS
@@ -203,6 +203,7 @@ enum {
CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12,
CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
+ CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14,
/** attrs **/
/* read */
@@ -272,6 +273,10 @@ static inline int ceph_osd_op_mode_modify(int op)
return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR;
}
+/*
+ * Note that the following tmap values are also defined in Ceph's
+ * librados.h; any modification here must be mirrored there.
+ */
#define CEPH_OSD_TMAP_HDR 'h'
#define CEPH_OSD_TMAP_SET 's'
#define CEPH_OSD_TMAP_RM 'r'
@@ -297,6 +302,7 @@ enum {
CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */
CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */
CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */
+ CEPH_OSD_FLAG_EXEC_PUBLIC = 4096, /* op may exec (public) */
};
enum {
@@ -350,6 +356,9 @@ struct ceph_osd_op {
struct {
__le64 cookie, count;
} __attribute__ ((packed)) pgls;
+ struct {
+ __le64 snapid;
+ } __attribute__ ((packed)) snap;
};
__le32 payload_len;
} __attribute__ ((packed));
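
The new `snap` arm added to the op union above follows the usual wire-format convention: per-op arguments share a union inside a packed header, so adding a new, smaller arm does not change the message size. A standalone sketch (simplified types, GCC/Clang packing) of why that holds:

#include <stdint.h>
#include <stdio.h>

struct op {					/* simplified ceph_osd_op */
	uint16_t opcode;
	union {
		struct { uint64_t offset, length; } extent;
		struct { uint64_t snapid; } snap;	/* new, smaller arm */
	} u;
} __attribute__((packed));

int main(void)
{
	/* size is dictated by the largest arm, not the newest one */
	printf("%zu\n", sizeof(struct op));	/* prints 18 */
	return 0;
}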
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 4e0bee240b9..9922628532b 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -2,6 +2,7 @@
#include "ceph_debug.h"
#include <linux/backing-dev.h>
+#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
@@ -89,7 +90,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_files = le64_to_cpu(st.num_objects);
buf->f_ffree = -1;
- buf->f_namelen = PATH_MAX;
+ buf->f_namelen = NAME_MAX;
buf->f_frsize = PAGE_CACHE_SIZE;
/* leave fsid little-endian, regardless of host endianness */
@@ -101,12 +102,21 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
}
-static int ceph_syncfs(struct super_block *sb, int wait)
+static int ceph_sync_fs(struct super_block *sb, int wait)
{
- dout("sync_fs %d\n", wait);
+ struct ceph_client *client = ceph_sb_to_client(sb);
+
+ if (!wait) {
+ dout("sync_fs (non-blocking)\n");
+ ceph_flush_dirty_caps(&client->mdsc);
+ dout("sync_fs (non-blocking) done\n");
+ return 0;
+ }
+
+ dout("sync_fs (blocking)\n");
ceph_osdc_sync(&ceph_sb_to_client(sb)->osdc);
ceph_mdsc_sync(&ceph_sb_to_client(sb)->mdsc);
- dout("sync_fs %d done\n", wait);
+ dout("sync_fs (blocking) done\n");
return 0;
}
@@ -150,9 +160,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
struct ceph_mount_args *args = client->mount_args;
if (args->flags & CEPH_OPT_FSID)
- seq_printf(m, ",fsidmajor=%llu,fsidminor%llu",
- le64_to_cpu(*(__le64 *)&args->fsid.fsid[0]),
- le64_to_cpu(*(__le64 *)&args->fsid.fsid[8]));
+ seq_printf(m, ",fsid=%pU", &args->fsid);
if (args->flags & CEPH_OPT_NOSHARE)
seq_puts(m, ",noshare");
if (args->flags & CEPH_OPT_DIRSTAT)
@@ -279,7 +287,7 @@ static const struct super_operations ceph_super_ops = {
.alloc_inode = ceph_alloc_inode,
.destroy_inode = ceph_destroy_inode,
.write_inode = ceph_write_inode,
- .sync_fs = ceph_syncfs,
+ .sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
.show_options = ceph_show_options,
.statfs = ceph_statfs,
@@ -322,9 +330,6 @@ const char *ceph_msg_type_name(int type)
* mount options
*/
enum {
- Opt_fsidmajor,
- Opt_fsidminor,
- Opt_monport,
Opt_wsize,
Opt_rsize,
Opt_osdtimeout,
@@ -339,6 +344,7 @@ enum {
Opt_congestion_kb,
Opt_last_int,
/* int args above */
+ Opt_fsid,
Opt_snapdirname,
Opt_name,
Opt_secret,
@@ -355,9 +361,6 @@ enum {
};
static match_table_t arg_tokens = {
- {Opt_fsidmajor, "fsidmajor=%ld"},
- {Opt_fsidminor, "fsidminor=%ld"},
- {Opt_monport, "monport=%d"},
{Opt_wsize, "wsize=%d"},
{Opt_rsize, "rsize=%d"},
{Opt_osdtimeout, "osdtimeout=%d"},
@@ -371,6 +374,7 @@ static match_table_t arg_tokens = {
{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
{Opt_congestion_kb, "write_congestion_kb=%d"},
/* int args above */
+ {Opt_fsid, "fsid=%s"},
{Opt_snapdirname, "snapdirname=%s"},
{Opt_name, "name=%s"},
{Opt_secret, "secret=%s"},
@@ -386,6 +390,36 @@ static match_table_t arg_tokens = {
{-1, NULL}
};
+static int parse_fsid(const char *str, struct ceph_fsid *fsid)
+{
+ int i = 0;
+ char tmp[3];
+ int err = -EINVAL;
+ int d;
+
+ dout("parse_fsid '%s'\n", str);
+ tmp[2] = 0;
+ while (*str && i < 16) {
+ if (ispunct(*str)) {
+ str++;
+ continue;
+ }
+ if (!isxdigit(str[0]) || !isxdigit(str[1]))
+ break;
+ tmp[0] = str[0];
+ tmp[1] = str[1];
+ if (sscanf(tmp, "%x", &d) < 1)
+ break;
+ fsid->fsid[i] = d & 0xff;
+ i++;
+ str += 2;
+ }
+
+ if (i == 16)
+ err = 0;
+ dout("parse_fsid ret %d got fsid %pU", err, fsid);
+ return err;
+}
static struct ceph_mount_args *parse_mount_args(int flags, char *options,
const char *dev_name,
@@ -469,12 +503,6 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options,
dout("got token %d\n", token);
}
switch (token) {
- case Opt_fsidmajor:
- *(__le64 *)&args->fsid.fsid[0] = cpu_to_le64(intval);
- break;
- case Opt_fsidminor:
- *(__le64 *)&args->fsid.fsid[8] = cpu_to_le64(intval);
- break;
case Opt_ip:
err = ceph_parse_ips(argstr[0].from,
argstr[0].to,
@@ -485,6 +513,11 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options,
args->flags |= CEPH_OPT_MYIP;
break;
+ case Opt_fsid:
+ err = parse_fsid(argstr[0].from, &args->fsid);
+ if (err == 0)
+ args->flags |= CEPH_OPT_FSID;
+ break;
case Opt_snapdirname:
kfree(args->snapdir_name);
args->snapdir_name = kstrndup(argstr[0].from,
@@ -515,6 +548,9 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options,
case Opt_osdkeepalivetimeout:
args->osd_keepalive_timeout = intval;
break;
+ case Opt_osd_idle_ttl:
+ args->osd_idle_ttl = intval;
+ break;
case Opt_mount_timeout:
args->mount_timeout = intval;
break;
@@ -630,7 +666,6 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args)
/* caps */
client->min_caps = args->max_readdir;
- ceph_adjust_min_caps(client->min_caps);
/* subsystems */
err = ceph_monc_init(&client->monc, client);
@@ -680,8 +715,6 @@ static void ceph_destroy_client(struct ceph_client *client)
ceph_monc_stop(&client->monc);
- ceph_adjust_min_caps(-client->min_caps);
-
ceph_debugfs_client_cleanup(client);
destroy_workqueue(client->wb_wq);
destroy_workqueue(client->pg_inv_wq);
@@ -706,13 +739,13 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
{
if (client->have_fsid) {
if (ceph_fsid_compare(&client->fsid, fsid)) {
- pr_err("bad fsid, had " FSID_FORMAT " got " FSID_FORMAT,
- PR_FSID(&client->fsid), PR_FSID(fsid));
+ pr_err("bad fsid, had %pU got %pU",
+ &client->fsid, fsid);
return -1;
}
} else {
- pr_info("client%lld fsid " FSID_FORMAT "\n",
- client->monc.auth->global_id, PR_FSID(fsid));
+ pr_info("client%lld fsid %pU\n", client->monc.auth->global_id,
+ fsid);
memcpy(&client->fsid, fsid, sizeof(*fsid));
ceph_debugfs_client_init(client);
client->have_fsid = true;
@@ -926,7 +959,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
/*
* construct our own bdi so we can control readahead, etc.
*/
-static atomic_long_t bdi_seq = ATOMIC_INIT(0);
+static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
{
@@ -1043,8 +1076,6 @@ static int __init init_ceph(void)
if (ret)
goto out_msgr;
- ceph_caps_init();
-
ret = register_filesystem(&ceph_fs_type);
if (ret)
goto out_icache;
@@ -1069,7 +1100,6 @@ static void __exit exit_ceph(void)
{
dout("exit_ceph\n");
unregister_filesystem(&ceph_fs_type);
- ceph_caps_finalize();
destroy_caches();
ceph_msgr_exit();
ceph_debugfs_cleanup();
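
parse_fsid() above accepts the canonical UUID spelling by skipping punctuation and consuming hex digits two at a time until 16 bytes are filled. A standalone userspace sketch of the same loop (the function name and fixed 16-byte output are illustrative):

#include <ctype.h>
#include <stdio.h>

static int parse_fsid_sketch(const char *str, unsigned char out[16])
{
	unsigned int byte;
	int i = 0;

	while (*str && i < 16) {
		if (ispunct((unsigned char)*str)) {	/* skip '-' etc. */
			str++;
			continue;
		}
		if (!isxdigit((unsigned char)str[0]) ||
		    !isxdigit((unsigned char)str[1]))
			return -1;
		sscanf(str, "%2x", &byte);	/* one byte = two hex digits */
		out[i++] = (unsigned char)byte;
		str += 2;
	}
	return i == 16 ? 0 : -1;		/* need exactly 16 bytes */
}

int main(void)
{
	unsigned char fsid[16];
	int rc = parse_fsid_sketch("01234567-89ab-cdef-0123-456789abcdef",
				   fsid);

	printf("rc=%d first=%02x last=%02x\n", rc, fsid[0], fsid[15]);
	return 0;
}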
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 10a4a406e88..2482d696f0d 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -31,6 +31,12 @@
#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
/*
+ * Supported features
+ */
+#define CEPH_FEATURE_SUPPORTED (CEPH_FEATURE_NOSRCADDR | CEPH_FEATURE_FLOCK)
+#define CEPH_FEATURE_REQUIRED CEPH_FEATURE_NOSRCADDR
+
+/*
* mount options
*/
#define CEPH_OPT_FSID (1<<0)
@@ -560,11 +566,13 @@ static inline int __ceph_caps_wanted(struct ceph_inode_info *ci)
/* what the mds thinks we want */
extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci);
-extern void ceph_caps_init(void);
-extern void ceph_caps_finalize(void);
-extern void ceph_adjust_min_caps(int delta);
-extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need);
-extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx);
+extern void ceph_caps_init(struct ceph_mds_client *mdsc);
+extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
+extern void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta);
+extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
+ struct ceph_cap_reservation *ctx, int need);
+extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
+ struct ceph_cap_reservation *ctx);
extern void ceph_reservation_status(struct ceph_client *client,
int *total, int *avail, int *used,
int *reserved, int *min);
@@ -738,13 +746,6 @@ extern struct kmem_cache *ceph_file_cachep;
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-#define FSID_FORMAT "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" \
- "%02x%02x%02x%02x%02x%02x"
-#define PR_FSID(f) (f)->fsid[0], (f)->fsid[1], (f)->fsid[2], (f)->fsid[3], \
- (f)->fsid[4], (f)->fsid[5], (f)->fsid[6], (f)->fsid[7], \
- (f)->fsid[8], (f)->fsid[9], (f)->fsid[10], (f)->fsid[11], \
- (f)->fsid[12], (f)->fsid[13], (f)->fsid[14], (f)->fsid[15]
-
/* inode.c */
extern const struct inode_operations ceph_file_iops;
@@ -806,13 +807,16 @@ static inline void ceph_remove_cap(struct ceph_cap *cap)
__ceph_remove_cap(cap);
spin_unlock(&inode->i_lock);
}
-extern void ceph_put_cap(struct ceph_cap *cap);
+extern void ceph_put_cap(struct ceph_mds_client *mdsc,
+ struct ceph_cap *cap);
extern void ceph_queue_caps_release(struct inode *inode);
extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
extern int ceph_fsync(struct file *file, int datasync);
extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
+extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
+ int mds);
extern int ceph_get_cap_mds(struct inode *inode);
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
@@ -857,7 +861,7 @@ extern void ceph_release_page_vector(struct page **pages, int num_pages);
/* dir.c */
extern const struct file_operations ceph_dir_fops;
extern const struct inode_operations ceph_dir_iops;
-extern struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
+extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
ceph_snapdir_dentry_ops;
extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
@@ -888,6 +892,14 @@ extern void ceph_debugfs_cleanup(void);
extern int ceph_debugfs_client_init(struct ceph_client *client);
extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
+/* locks.c */
+extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
+extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
+extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
+extern int ceph_encode_locks(struct inode *i, struct ceph_pagelist *p,
+ int p_locks, int f_locks);
+extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c);
+
static inline struct inode *get_dentry_parent_inode(struct dentry *dentry)
{
if (dentry && dentry->d_parent)
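
The caps-reservation prototypes above now thread a struct ceph_mds_client through every call, turning what used to be module-global pools into per-client state. A standalone sketch of that refactor shape, with illustrative names: two contexts, two independent pools.

#include <stdio.h>

struct caps_ctx {		/* per-client pool, illustrative */
	int total;
	int avail;
};

static void caps_init(struct caps_ctx *c)
{
	c->total = 0;
	c->avail = 0;
}

static int caps_reserve(struct caps_ctx *c, int need)
{
	if (c->avail < need)
		return -1;	/* this client's pool is exhausted */
	c->avail -= need;
	return 0;
}

int main(void)
{
	struct caps_ctx a, b;	/* two clients, two independent pools */

	caps_init(&a);
	caps_init(&b);
	a.total = a.avail = 8;
	printf("%d %d\n", caps_reserve(&a, 4), caps_reserve(&b, 4));
	return 0;
}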
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 68aeebc6968..097a2654c00 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -337,6 +337,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
}
static int __build_xattrs(struct inode *inode)
+ __releases(inode->i_lock)
+ __acquires(inode->i_lock)
{
u32 namelen;
u32 numattr = 0;
diff --git a/fs/char_dev.c b/fs/char_dev.c
index d6db933df2b..f80a4f25123 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -20,6 +20,7 @@
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
+#include <linux/tty.h>
#include "internal.h"
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 80f35259680..917b7d449bb 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,7 +2,6 @@ config CIFS
tristate "CIFS support (advanced network filesystem, SMBFS successor)"
depends on INET
select NLS
- select SLOW_WORK
help
This is the client VFS module for the Common Internet File System
(CIFS) protocol which is the successor to the Server Message Block
@@ -71,14 +70,14 @@ config CIFS_WEAK_PW_HASH
If unsure, say N.
config CIFS_UPCALL
- bool "Kerberos/SPNEGO advanced session setup"
- depends on CIFS && KEYS
- help
- Enables an upcall mechanism for CIFS which accesses
- userspace helper utilities to provide SPNEGO packaged (RFC 4178)
- Kerberos tickets which are needed to mount to certain secure servers
- (for which more secure Kerberos authentication is required). If
- unsure, say N.
+ bool "Kerberos/SPNEGO advanced session setup"
+ depends on CIFS && KEYS
+ select DNS_RESOLVER
+ help
+ Enables an upcall mechanism for CIFS which accesses userspace helper
+ utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets
+ which are needed to mount to certain secure servers (for which more
+ secure Kerberos authentication is required). If unsure, say N.
config CIFS_XATTR
bool "CIFS extended attributes"
@@ -122,6 +121,7 @@ config CIFS_DEBUG2
config CIFS_DFS_UPCALL
bool "DFS feature support"
depends on CIFS && KEYS
+ select DNS_RESOLVER
help
Distributed File System (DFS) support is used to access shares
transparently in an enterprise name space, even if the share
@@ -131,6 +131,15 @@ config CIFS_DFS_UPCALL
IP addresses) which is needed for implicit mounts of DFS junction
points. If unsure, say N.
+config CIFS_FSCACHE
+ bool "Provide CIFS client caching support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
+ help
+ Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
+ to be cached locally on disk through the general filesystem cache
+ manager. If unsure, say N.
+
config CIFS_EXPERIMENTAL
bool "CIFS Experimental Features (EXPERIMENTAL)"
depends on CIFS && EXPERIMENTAL
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 9948c0030e8..adefa60a9bd 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -11,3 +11,5 @@ cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
+
+cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
diff --git a/fs/cifs/README b/fs/cifs/README
index a727b7cb075..7099a526f77 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -301,6 +301,16 @@ A partial list of the supported mount options follows:
gid Set the default gid for inodes (similar to above).
file_mode If CIFS Unix extensions are not supported by the server
this overrides the default mode for file inodes.
+ fsc Enable local disk caching using FS-Cache (off by default). This
+ option could be useful to improve performance on a slow link,
+ heavily loaded server and/or network where reading from the
+ disk is faster than reading from the server (over the network).
+ This could also improve scalability, as the number of
+ calls to the server is reduced. However, local caching
+ is not suitable for all workloads, e.g. read-once
+ workloads, so consider your workload carefully before
+ using this option. Currently, local disk caching is
+ functional for CIFS files opened as read-only.
dir_mode If CIFS Unix extensions are not supported by the server
this overrides the default mode for directory inodes.
port attempt to contact the server on this tcp port, before
@@ -568,8 +578,9 @@ module can be displayed via modinfo.
Misc /proc/fs/cifs Flags and Debug Info
=======================================
Informational pseudo-files:
-DebugData Displays information about active CIFS sessions
- and shares, as well as the cifs.ko version.
+DebugData Displays information about active CIFS sessions and
+ shares, features enabled as well as the cifs.ko
+ version.
Stats Lists summary resource usage information as well as per
share statistics, if CONFIG_CIFS_STATS is enabled
in the kernel configuration.
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
new file mode 100644
index 00000000000..224d7bbd1fc
--- /dev/null
+++ b/fs/cifs/cache.c
@@ -0,0 +1,331 @@
+/*
+ * fs/cifs/cache.c - CIFS filesystem cache index structure definitions
+ *
+ * Copyright (c) 2010 Novell, Inc.
+ * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "fscache.h"
+#include "cifs_debug.h"
+
+/*
+ * CIFS filesystem definition for FS-Cache
+ */
+struct fscache_netfs cifs_fscache_netfs = {
+ .name = "cifs",
+ .version = 0,
+};
+
+/*
+ * Register CIFS for caching with FS-Cache
+ */
+int cifs_fscache_register(void)
+{
+ return fscache_register_netfs(&cifs_fscache_netfs);
+}
+
+/*
+ * Unregister CIFS for caching
+ */
+void cifs_fscache_unregister(void)
+{
+ fscache_unregister_netfs(&cifs_fscache_netfs);
+}
+
+/*
+ * Key layout of CIFS server cache index object
+ */
+struct cifs_server_key {
+ uint16_t family; /* address family */
+ uint16_t port; /* IP port */
+ union {
+ struct in_addr ipv4_addr;
+ struct in6_addr ipv6_addr;
+ } addr[0];
+};
+
+/*
+ * Server object keyed by {IPaddress,port,family} tuple
+ */
+static uint16_t cifs_server_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t maxbuf)
+{
+ const struct TCP_Server_Info *server = cookie_netfs_data;
+ const struct sockaddr *sa = (struct sockaddr *) &server->addr.sockAddr;
+ struct cifs_server_key *key = buffer;
+ uint16_t key_len = sizeof(struct cifs_server_key);
+
+ memset(key, 0, key_len);
+
+ /*
+ * Should not be a problem, as sin_family/sin6_family overlays
+ * the sa_family field
+ */
+ switch (sa->sa_family) {
+ case AF_INET:
+ key->family = server->addr.sockAddr.sin_family;
+ key->port = server->addr.sockAddr.sin_port;
+ key->addr[0].ipv4_addr = server->addr.sockAddr.sin_addr;
+ key_len += sizeof(key->addr[0].ipv4_addr);
+ break;
+
+ case AF_INET6:
+ key->family = server->addr.sockAddr6.sin6_family;
+ key->port = server->addr.sockAddr6.sin6_port;
+ key->addr[0].ipv6_addr = server->addr.sockAddr6.sin6_addr;
+ key_len += sizeof(key->addr[0].ipv6_addr);
+ break;
+
+ default:
+ cERROR(1, "CIFS: Unknown network family '%d'", sa->sa_family);
+ key_len = 0;
+ break;
+ }
+
+ return key_len;
+}
+
+/*
+ * Server object for FS-Cache
+ */
+const struct fscache_cookie_def cifs_fscache_server_index_def = {
+ .name = "CIFS.server",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = cifs_server_get_key,
+};
+
+/*
+ * Auxiliary data attached to CIFS superblock within the cache
+ */
+struct cifs_fscache_super_auxdata {
+ u64 resource_id; /* unique server resource id */
+};
+
+static char *extract_sharename(const char *treename)
+{
+ const char *src;
+ char *delim, *dst;
+ int len;
+
+ /* skip the leading double backslash */
+ src = treename + 2;
+
+ /* share name is always preceded by '\\' now */
+ delim = strchr(src, '\\');
+ if (!delim)
+ return ERR_PTR(-EINVAL);
+ delim++;
+ len = strlen(delim);
+
+ /* caller has to free the memory */
+ dst = kstrndup(delim, len, GFP_KERNEL);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+
+ return dst;
+}
+
+/*
+ * Superblock object currently keyed by share name
+ */
+static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
+ uint16_t maxbuf)
+{
+ const struct cifsTconInfo *tcon = cookie_netfs_data;
+ char *sharename;
+ uint16_t len;
+
+ sharename = extract_sharename(tcon->treeName);
+ if (IS_ERR(sharename)) {
+ cFYI(1, "CIFS: couldn't extract sharename\n");
+ sharename = NULL;
+ return 0;
+ }
+
+ len = strlen(sharename);
+ if (len > maxbuf) {
+ kfree(sharename);
+ return 0;
+ }
+
+ memcpy(buffer, sharename, len);
+
+ kfree(sharename);
+
+ return len;
+}
+
+static uint16_t
+cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer,
+ uint16_t maxbuf)
+{
+ struct cifs_fscache_super_auxdata auxdata;
+ const struct cifsTconInfo *tcon = cookie_netfs_data;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+ auxdata.resource_id = tcon->resource_id;
+
+ if (maxbuf > sizeof(auxdata))
+ maxbuf = sizeof(auxdata);
+
+ memcpy(buffer, &auxdata, maxbuf);
+
+ return maxbuf;
+}
+
+static enum
+fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
+ const void *data,
+ uint16_t datalen)
+{
+ struct cifs_fscache_super_auxdata auxdata;
+ const struct cifsTconInfo *tcon = cookie_netfs_data;
+
+ if (datalen != sizeof(auxdata))
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+ auxdata.resource_id = tcon->resource_id;
+
+ if (memcmp(data, &auxdata, datalen) != 0)
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ return FSCACHE_CHECKAUX_OKAY;
+}
+
+/*
+ * Superblock object for FS-Cache
+ */
+const struct fscache_cookie_def cifs_fscache_super_index_def = {
+ .name = "CIFS.super",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = cifs_super_get_key,
+ .get_aux = cifs_fscache_super_get_aux,
+ .check_aux = cifs_fscache_super_check_aux,
+};
+
+/*
+ * Auxiliary data attached to CIFS inode within the cache
+ */
+struct cifs_fscache_inode_auxdata {
+ struct timespec last_write_time;
+ struct timespec last_change_time;
+ u64 eof;
+};
+
+static uint16_t cifs_fscache_inode_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t maxbuf)
+{
+ const struct cifsInodeInfo *cifsi = cookie_netfs_data;
+ uint16_t keylen;
+
+ /* use the UniqueId as the key */
+ keylen = sizeof(cifsi->uniqueid);
+ if (keylen > maxbuf)
+ keylen = 0;
+ else
+ memcpy(buffer, &cifsi->uniqueid, keylen);
+
+ return keylen;
+}
+
+static void
+cifs_fscache_inode_get_attr(const void *cookie_netfs_data, uint64_t *size)
+{
+ const struct cifsInodeInfo *cifsi = cookie_netfs_data;
+
+ *size = cifsi->vfs_inode.i_size;
+}
+
+static uint16_t
+cifs_fscache_inode_get_aux(const void *cookie_netfs_data, void *buffer,
+ uint16_t maxbuf)
+{
+ struct cifs_fscache_inode_auxdata auxdata;
+ const struct cifsInodeInfo *cifsi = cookie_netfs_data;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+ auxdata.eof = cifsi->server_eof;
+ auxdata.last_write_time = cifsi->vfs_inode.i_mtime;
+ auxdata.last_change_time = cifsi->vfs_inode.i_ctime;
+
+ if (maxbuf > sizeof(auxdata))
+ maxbuf = sizeof(auxdata);
+
+ memcpy(buffer, &auxdata, maxbuf);
+
+ return maxbuf;
+}
+
+static enum
+fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data,
+ const void *data,
+ uint16_t datalen)
+{
+ struct cifs_fscache_inode_auxdata auxdata;
+ struct cifsInodeInfo *cifsi = cookie_netfs_data;
+
+ if (datalen != sizeof(auxdata))
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+ auxdata.eof = cifsi->server_eof;
+ auxdata.last_write_time = cifsi->vfs_inode.i_mtime;
+ auxdata.last_change_time = cifsi->vfs_inode.i_ctime;
+
+ if (memcmp(data, &auxdata, datalen) != 0)
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ return FSCACHE_CHECKAUX_OKAY;
+}
+
+static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data)
+{
+ struct cifsInodeInfo *cifsi = cookie_netfs_data;
+ struct pagevec pvec;
+ pgoff_t first;
+ int loop, nr_pages;
+
+ pagevec_init(&pvec, 0);
+ first = 0;
+
+ cFYI(1, "cifs inode 0x%p now uncached", cifsi);
+
+ for (;;) {
+ nr_pages = pagevec_lookup(&pvec,
+ cifsi->vfs_inode.i_mapping, first,
+ PAGEVEC_SIZE - pagevec_count(&pvec));
+ if (!nr_pages)
+ break;
+
+ for (loop = 0; loop < nr_pages; loop++)
+ ClearPageFsCache(pvec.pages[loop]);
+
+ first = pvec.pages[nr_pages - 1]->index + 1;
+
+ pvec.nr = nr_pages;
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+}
+
+const struct fscache_cookie_def cifs_fscache_inode_object_def = {
+ .name = "CIFS.uniqueid",
+ .type = FSCACHE_COOKIE_TYPE_DATAFILE,
+ .get_key = cifs_fscache_inode_get_key,
+ .get_attr = cifs_fscache_inode_get_attr,
+ .get_aux = cifs_fscache_inode_get_aux,
+ .check_aux = cifs_fscache_inode_check_aux,
+ .now_uncached = cifs_fscache_inode_now_uncached,
+};
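
The cookie definitions in this new file are consumed when cookies are acquired against them. A minimal kernel-context sketch (not compilable standalone) of how the server index definition is typically used, modelled on the 2.6.36-era FS-Cache API; the helper bodies here are assumptions based on the cifs_fscache_get_client_cookie()/cifs_fscache_release_client_cookie() calls visible in the connect.c hunks below:

/* sketch only: assumes fs/cifs/fscache.c context and the 3-argument
 * fscache_acquire_cookie() of this kernel generation */
void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
{
	/* parent is the netfs primary index set up at register time */
	server->fscache =
		fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
				       &cifs_fscache_server_index_def,
				       server);
}

void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
{
	fscache_relinquish_cookie(server->fscache, 0);
	server->fscache = NULL;
}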
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 4fce6e61b34..eb1ba493489 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -119,6 +119,31 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
"Display Internal CIFS Data Structures for Debugging\n"
"---------------------------------------------------\n");
seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
+ seq_printf(m, "Features: ");
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ seq_printf(m, "dfs");
+ seq_putc(m, ' ');
+#endif
+#ifdef CONFIG_CIFS_FSCACHE
+ seq_printf(m, "fscache");
+ seq_putc(m, ' ');
+#endif
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ seq_printf(m, "lanman");
+ seq_putc(m, ' ');
+#endif
+#ifdef CONFIG_CIFS_POSIX
+ seq_printf(m, "posix");
+ seq_putc(m, ' ');
+#endif
+#ifdef CONFIG_CIFS_UPCALL
+ seq_printf(m, "spnego");
+ seq_putc(m, ' ');
+#endif
+#ifdef CONFIG_CIFS_XATTR
+ seq_printf(m, "xattr");
+#endif
+ seq_putc(m, '\n');
seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
seq_printf(m, "Servers:");
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index ac19a6f3dae..d6ced7aa23c 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -141,7 +141,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
}
rc = dns_resolve_server_name_to_ip(*devname, &srvIP);
- if (rc != 0) {
+ if (rc < 0) {
cERROR(1, "%s: Failed to resolve server part of %s to IP: %d",
__func__, *devname, rc);
goto compose_mount_options_err;
@@ -150,8 +150,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
* assuming that we have 'unc=' and 'ip=' in
* the original sb_mountdata
*/
- md_len = strlen(sb_mountdata) + strlen(srvIP) +
- strlen(ref->node_name) + 12;
+ md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12;
mountdata = kzalloc(md_len+1, GFP_KERNEL);
if (mountdata == NULL) {
rc = -ENOMEM;
@@ -230,28 +229,22 @@ compose_mount_options_err:
goto compose_mount_options_out;
}
-
-static struct vfsmount *cifs_dfs_do_refmount(const struct vfsmount *mnt_parent,
- struct dentry *dentry, const struct dfs_info3_param *ref)
+/**
+ * cifs_dfs_do_refmount - mounts specified path using provided referral
+ * @cifs_sb: parent/root superblock
+ * @fullpath: full path in UNC format
+ * @ref: server's referral
+ */
+static struct vfsmount *cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb,
+ const char *fullpath, const struct dfs_info3_param *ref)
{
- struct cifs_sb_info *cifs_sb;
struct vfsmount *mnt;
char *mountdata;
char *devname = NULL;
- char *fullpath;
-
- cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
- /*
- * this function gives us a path with a double backslash prefix. We
- * require a single backslash for DFS.
- */
- fullpath = build_path_from_dentry(dentry);
- if (!fullpath)
- return ERR_PTR(-ENOMEM);
+ /* strip first '\' from fullpath */
mountdata = cifs_compose_mount_options(cifs_sb->mountdata,
fullpath + 1, ref, &devname);
- kfree(fullpath);
if (IS_ERR(mountdata))
return (struct vfsmount *)mountdata;
@@ -357,8 +350,8 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
rc = -EINVAL;
goto out_err;
}
- mnt = cifs_dfs_do_refmount(nd->path.mnt,
- nd->path.dentry, referrals + i);
+ mnt = cifs_dfs_do_refmount(cifs_sb,
+ full_path, referrals + i);
cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
referrals[i].node_name, mnt);
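
The md_len change above relies on dns_resolve_server_name_to_ip() now returning the length of the resolved address string (or a negative errno) rather than plain zero/nonzero, which is why the `rc != 0` check becomes `rc < 0`. A standalone sketch of that return-length-or-errno contract; resolve() is a hypothetical stand-in for the resolver:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for dns_resolve_server_name_to_ip() */
static int resolve(const char *name, char **ip)
{
	const char *fake = "192.168.0.1";	/* pretend lookup result */

	(void)name;				/* no real DNS in the demo */
	*ip = strdup(fake);
	if (!*ip)
		return -12;			/* -ENOMEM */
	return (int)strlen(fake);		/* length, not just "ok" */
}

int main(void)
{
	char *ip;
	int rc = resolve("server.example.com", &ip);

	if (rc < 0)
		return 1;
	/* the caller can size follow-up allocations from rc directly */
	printf("resolved to %s (len %d)\n", ip, rc);
	free(ip);
	return 0;
}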
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 246a167cb91..9e771450c3b 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -35,6 +35,7 @@
#define CIFS_MOUNT_DYNPERM 0x1000 /* allow in-memory only mode setting */
#define CIFS_MOUNT_NOPOSIXBRL 0x2000 /* mandatory not posix byte range lock */
#define CIFS_MOUNT_NOSSYNC 0x4000 /* don't do slow SMBflush on every sync*/
+#define CIFS_MOUNT_FSCACHE 0x8000 /* local caching enabled */
struct cifs_sb_info {
struct cifsTconInfo *tcon; /* primary mount */
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 379bd7d9c05..87044906cd1 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -84,6 +84,9 @@ struct key_type cifs_spnego_key_type = {
/* strlen of ";uid=0x" */
#define UID_KEY_LEN 7
+/* strlen of ";creduid=0x" */
+#define CREDUID_KEY_LEN 11
+
/* strlen of ";user=" */
#define USER_KEY_LEN 6
@@ -107,6 +110,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
IP_KEY_LEN + INET6_ADDRSTRLEN +
MAX_MECH_STR_LEN +
UID_KEY_LEN + (sizeof(uid_t) * 2) +
+ CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
USER_KEY_LEN + strlen(sesInfo->userName) +
PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
@@ -144,6 +148,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
sprintf(dp, ";uid=0x%x", sesInfo->linux_uid);
dp = description + strlen(description);
+ sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
+
+ dp = description + strlen(description);
sprintf(dp, ";user=%s", sesInfo->userName);
dp = description + strlen(description);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 78c02eb4cb1..b7431afdd76 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -45,8 +45,8 @@
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#include <linux/key-type.h>
-#include "dns_resolve.h"
#include "cifs_spnego.h"
+#include "fscache.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
int cifsFYI = 0;
@@ -329,6 +329,14 @@ cifs_destroy_inode(struct inode *inode)
}
static void
+cifs_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+ cifs_fscache_release_inode_cookie(inode);
+}
+
+static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
seq_printf(s, ",addr=");
@@ -473,14 +481,24 @@ static int cifs_remount(struct super_block *sb, int *flags, char *data)
return 0;
}
+static int cifs_drop_inode(struct inode *inode)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+
+ /* no serverino => unconditional eviction */
+ return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
+ generic_drop_inode(inode);
+}
+
static const struct super_operations cifs_super_ops = {
.put_super = cifs_put_super,
.statfs = cifs_statfs,
.alloc_inode = cifs_alloc_inode,
.destroy_inode = cifs_destroy_inode,
-/* .drop_inode = generic_delete_inode,
- .delete_inode = cifs_delete_inode, */ /* Do not need above two
- functions unless later we add lazy close of inodes or unless the
+ .drop_inode = cifs_drop_inode,
+ .evict_inode = cifs_evict_inode,
+/* .delete_inode = cifs_delete_inode, */ /* Do not need above
+ function unless later we add lazy close of inodes or unless the
kernel forgets to call us with the same number of releases (closes)
as opens */
.show_options = cifs_show_options,
@@ -892,6 +910,10 @@ init_cifs(void)
cFYI(1, "cifs_max_pending set to max of 256");
}
+ rc = cifs_fscache_register();
+ if (rc)
+ goto out;
+
rc = cifs_init_inodecache();
if (rc)
goto out_clean_proc;
@@ -912,27 +934,13 @@ init_cifs(void)
if (rc)
goto out_unregister_filesystem;
#endif
-#ifdef CONFIG_CIFS_DFS_UPCALL
- rc = register_key_type(&key_type_dns_resolver);
- if (rc)
- goto out_unregister_key_type;
-#endif
- rc = slow_work_register_user(THIS_MODULE);
- if (rc)
- goto out_unregister_resolver_key;
return 0;
- out_unregister_resolver_key:
-#ifdef CONFIG_CIFS_DFS_UPCALL
- unregister_key_type(&key_type_dns_resolver);
- out_unregister_key_type:
-#endif
#ifdef CONFIG_CIFS_UPCALL
- unregister_key_type(&cifs_spnego_key_type);
out_unregister_filesystem:
-#endif
unregister_filesystem(&cifs_fs_type);
+#endif
out_destroy_request_bufs:
cifs_destroy_request_bufs();
out_destroy_mids:
@@ -941,6 +949,8 @@ init_cifs(void)
cifs_destroy_inodecache();
out_clean_proc:
cifs_proc_clean();
+ cifs_fscache_unregister();
+ out:
return rc;
}
@@ -949,9 +959,9 @@ exit_cifs(void)
{
cFYI(DBG2, "exit_cifs");
cifs_proc_clean();
+ cifs_fscache_unregister();
#ifdef CONFIG_CIFS_DFS_UPCALL
cifs_dfs_release_automount_timer();
- unregister_key_type(&key_type_dns_resolver);
#endif
#ifdef CONFIG_CIFS_UPCALL
unregister_key_type(&cifs_spnego_key_type);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index a7eb65c84b1..d82f5fb4761 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -114,5 +114,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.64"
+#define CIFS_VERSION "1.65"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a88479ceaad..0cdfb8c32ac 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -16,10 +16,13 @@
* the GNU Lesser General Public License for more details.
*
*/
+#ifndef _CIFS_GLOB_H
+#define _CIFS_GLOB_H
+
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
-#include <linux/slow-work.h>
+#include <linux/workqueue.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
/*
@@ -34,7 +37,7 @@
#define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */
#define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null
termination then *2 for unicode versions */
-#define MAX_PASSWORD_SIZE 16
+#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
#define CIFS_MIN_RCV_POOL 4
@@ -80,8 +83,7 @@ enum statusEnum {
};
enum securityEnum {
- PLAINTXT = 0, /* Legacy with Plaintext passwords */
- LANMAN, /* Legacy LANMAN auth */
+ LANMAN = 0, /* Legacy LANMAN auth */
NTLM, /* Legacy NTLM012 auth with NTLM hash */
NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */
@@ -142,7 +144,6 @@ struct TCP_Server_Info {
struct list_head pending_mid_q;
void *Server_NlsInfo; /* BB - placeholder for future NLS info */
unsigned short server_codepage; /* codepage for the server */
- unsigned long ip_address; /* IP addr for the server if known */
enum protocolEnum protocolType;
char versionMajor;
char versionMinor;
@@ -190,19 +191,9 @@ struct TCP_Server_Info {
bool sec_mskerberos; /* supports legacy MS Kerberos */
bool sec_kerberosu2u; /* supports U2U Kerberos */
bool sec_ntlmssp; /* supports NTLMSSP */
-};
-
-/*
- * The following is our shortcut to user information. We surface the uid,
- * and name. We always get the password on the fly in case it
- * has changed. We also hang a list of sessions owned by this user off here.
- */
-struct cifsUidInfo {
- struct list_head userList;
- struct list_head sessionList; /* SMB sessions for this user */
- uid_t linux_uid;
- char user[MAX_USERNAME_SIZE + 1]; /* ascii name of user */
- /* BB may need ptr or callback for PAM or WinBind info */
+#ifdef CONFIG_CIFS_FSCACHE
+ struct fscache_cookie *fscache; /* client index cache cookie */
+#endif
};
/*
@@ -212,9 +203,6 @@ struct cifsSesInfo {
struct list_head smb_ses_list;
struct list_head tcon_list;
struct mutex session_mutex;
-#if 0
- struct cifsUidInfo *uidInfo; /* pointer to user info */
-#endif
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
enum statusEnum status;
@@ -226,7 +214,8 @@ struct cifsSesInfo {
char *serverNOS; /* name of network operating system of server */
char *serverDomain; /* security realm of server */
int Suid; /* remote smb uid */
- uid_t linux_uid; /* local Linux uid */
+ uid_t linux_uid; /* overriding owner of files on the mount */
+ uid_t cred_uid; /* owner of credentials */
int capabilities;
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
@@ -311,6 +300,10 @@ struct cifsTconInfo {
bool local_lease:1; /* check leases (only) on local system not remote */
bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
bool need_reconnect:1; /* connection reset, tid now invalid */
+#ifdef CONFIG_CIFS_FSCACHE
+ u64 resource_id; /* server resource id */
+ struct fscache_cookie *fscache; /* cookie for share */
+#endif
/* BB add field for back pointer to sb struct(s)? */
};
@@ -363,7 +356,7 @@ struct cifsFileInfo {
atomic_t count; /* reference count */
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
struct cifs_search_info srch_inf;
- struct slow_work oplock_break; /* slow_work job for oplock breaks */
+ struct work_struct oplock_break; /* work for oplock breaks */
};
/* Take a reference on the file private data */
@@ -398,6 +391,9 @@ struct cifsInodeInfo {
bool invalid_mapping:1; /* pagecache is invalid */
u64 server_eof; /* current file size on server */
u64 uniqueid; /* server inode number */
+#ifdef CONFIG_CIFS_FSCACHE
+ struct fscache_cookie *fscache;
+#endif
struct inode vfs_inode;
};
@@ -732,4 +728,10 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
+void cifs_oplock_break(struct work_struct *work);
+void cifs_oplock_break_get(struct cifsFileInfo *cfile);
+void cifs_oplock_break_put(struct cifsFileInfo *cfile);
+
-extern const struct slow_work_ops cifs_oplock_break_ops;
+
+#endif /* _CIFS_GLOB_H */
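
The slow_work member in cifsFileInfo above becomes a plain work_struct, and the matching dir.c hunk below switches slow_work_init() to INIT_WORK(). A kernel-context sketch (not standalone, names illustrative) of the converted pattern; the handler recovers its containing object with container_of():

struct cifsFileInfo_sketch {
	struct work_struct oplock_break;
	/* ... */
};

static void oplock_break_handler(struct work_struct *work)
{
	struct cifsFileInfo_sketch *cfile =
		container_of(work, struct cifsFileInfo_sketch, oplock_break);

	/* ... handle the oplock break for cfile ... */
}

/* setup:  INIT_WORK(&cfile->oplock_break, oplock_break_handler);
 * firing: schedule_work(&cfile->oplock_break); */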
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index fb1657e0fdb..1f545081408 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -86,7 +86,9 @@ extern unsigned int smbCalcSize(struct smb_hdr *ptr);
extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
extern int decode_negTokenInit(unsigned char *security_blob, int length,
struct TCP_Server_Info *server);
-extern int cifs_convert_address(char *src, void *dst);
+extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
+extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
+ unsigned short int port);
extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
@@ -106,7 +108,6 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
__u16 fileHandle, struct file *file,
struct vfsmount *mnt, unsigned int oflags);
extern int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt,
struct super_block *sb,
int mode, int oflags,
__u32 *poplock, __u16 *pnetfid, int xid);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2208f06e4c4..95c2ea67edf 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -48,6 +48,7 @@
#include "nterr.h"
#include "rfc1002pdu.h"
#include "cn_cifs.h"
+#include "fscache.h"
#define CIFS_PORT 445
#define RFC1001_PORT 139
@@ -66,6 +67,7 @@ struct smb_vol {
char *iocharset; /* local code page for mapping to and from Unicode */
char source_rfc1001_name[16]; /* netbios name of client */
char target_rfc1001_name[16]; /* netbios name of server for Win9x/ME */
+ uid_t cred_uid;
uid_t linux_uid;
gid_t linux_gid;
mode_t file_mode;
@@ -97,6 +99,7 @@ struct smb_vol {
bool noblocksnd:1;
bool noautotune:1;
bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
+ bool fsc:1; /* enable fscache */
unsigned int rsize;
unsigned int wsize;
bool sockopt_tcp_nodelay:1;
@@ -830,7 +833,8 @@ cifs_parse_mount_options(char *options, const char *devname,
/* null target name indicates to use *SMBSERVR default called name
if we end up sending RFC1001 session initialize */
vol->target_rfc1001_name[0] = 0;
- vol->linux_uid = current_uid(); /* use current_euid() instead? */
+ vol->cred_uid = current_uid();
+ vol->linux_uid = current_uid();
vol->linux_gid = current_gid();
/* default to only allowing write access to owner of the mount */
@@ -1257,6 +1261,12 @@ cifs_parse_mount_options(char *options, const char *devname,
} else if ((strnicmp(data, "nocase", 6) == 0) ||
(strnicmp(data, "ignorecase", 10) == 0)) {
vol->nocase = 1;
+ } else if (strnicmp(data, "mand", 4) == 0) {
+ /* ignore */
+ } else if (strnicmp(data, "nomand", 6) == 0) {
+ /* ignore */
+ } else if (strnicmp(data, "_netdev", 7) == 0) {
+ /* ignore */
} else if (strnicmp(data, "brl", 3) == 0) {
vol->nobrl = 0;
} else if ((strnicmp(data, "nobrl", 5) == 0) ||
@@ -1331,6 +1341,8 @@ cifs_parse_mount_options(char *options, const char *devname,
printk(KERN_WARNING "CIFS: Mount option noac not "
"supported. Instead set "
"/proc/fs/cifs/LookupCacheEnabled to 0\n");
+ } else if (strnicmp(data, "fsc", 3) == 0) {
+ vol->fsc = true;
} else
printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
data);
@@ -1380,18 +1392,92 @@ cifs_parse_mount_options(char *options, const char *devname,
return 0;
}
+static bool
+match_address(struct TCP_Server_Info *server, struct sockaddr *addr)
+{
+ struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
+ struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ if (addr4->sin_addr.s_addr !=
+ server->addr.sockAddr.sin_addr.s_addr)
+ return false;
+ if (addr4->sin_port &&
+ addr4->sin_port != server->addr.sockAddr.sin_port)
+ return false;
+ break;
+ case AF_INET6:
+ if (!ipv6_addr_equal(&addr6->sin6_addr,
+ &server->addr.sockAddr6.sin6_addr))
+ return false;
+ if (addr6->sin6_scope_id !=
+ server->addr.sockAddr6.sin6_scope_id)
+ return false;
+ if (addr6->sin6_port &&
+ addr6->sin6_port != server->addr.sockAddr6.sin6_port)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static bool
+match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
+{
+ unsigned int secFlags;
+
+ if (vol->secFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
+ secFlags = vol->secFlg;
+ else
+ secFlags = global_secflags | vol->secFlg;
+
+ switch (server->secType) {
+ case LANMAN:
+ if (!(secFlags & (CIFSSEC_MAY_LANMAN|CIFSSEC_MAY_PLNTXT)))
+ return false;
+ break;
+ case NTLMv2:
+ if (!(secFlags & CIFSSEC_MAY_NTLMV2))
+ return false;
+ break;
+ case NTLM:
+ if (!(secFlags & CIFSSEC_MAY_NTLM))
+ return false;
+ break;
+ case Kerberos:
+ if (!(secFlags & CIFSSEC_MAY_KRB5))
+ return false;
+ break;
+ case RawNTLMSSP:
+ if (!(secFlags & CIFSSEC_MAY_NTLMSSP))
+ return false;
+ break;
+ default:
+ /* shouldn't happen */
+ return false;
+ }
+
+ /* now check if signing mode is acceptable */
+ if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
+ (server->secMode & SECMODE_SIGN_REQUIRED))
+ return false;
+ else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) &&
+ (server->secMode &
+ (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0)
+ return false;
+
+ return true;
+}
+
static struct TCP_Server_Info *
-cifs_find_tcp_session(struct sockaddr_storage *addr, unsigned short int port)
+cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
{
- struct list_head *tmp;
struct TCP_Server_Info *server;
- struct sockaddr_in *addr4 = (struct sockaddr_in *) addr;
- struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) addr;
write_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &cifs_tcp_ses_list) {
- server = list_entry(tmp, struct TCP_Server_Info,
- tcp_ses_list);
+ list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
/*
* the demux thread can exit on its own while still in CifsNew
* so don't accept any sockets in that state. Since the
@@ -1401,37 +1487,11 @@ cifs_find_tcp_session(struct sockaddr_storage *addr, unsigned short int port)
if (server->tcpStatus == CifsNew)
continue;
- switch (addr->ss_family) {
- case AF_INET:
- if (addr4->sin_addr.s_addr ==
- server->addr.sockAddr.sin_addr.s_addr) {
- addr4->sin_port = htons(port);
- /* user overrode default port? */
- if (addr4->sin_port) {
- if (addr4->sin_port !=
- server->addr.sockAddr.sin_port)
- continue;
- }
- break;
- } else
- continue;
+ if (!match_address(server, addr))
+ continue;
- case AF_INET6:
- if (ipv6_addr_equal(&addr6->sin6_addr,
- &server->addr.sockAddr6.sin6_addr) &&
- (addr6->sin6_scope_id ==
- server->addr.sockAddr6.sin6_scope_id)) {
- addr6->sin6_port = htons(port);
- /* user overrode default port? */
- if (addr6->sin6_port) {
- if (addr6->sin6_port !=
- server->addr.sockAddr6.sin6_port)
- continue;
- }
- break;
- } else
- continue;
- }
+ if (!match_security(server, vol))
+ continue;
++server->srv_count;
write_unlock(&cifs_tcp_ses_lock);
@@ -1460,6 +1520,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
+ cifs_fscache_release_client_cookie(server);
+
task = xchg(&server->tsk, NULL);
if (task)
force_sig(SIGKILL, task);
@@ -1479,7 +1541,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip);
if (volume_info->UNCip && volume_info->UNC) {
- rc = cifs_convert_address(volume_info->UNCip, &addr);
+ rc = cifs_fill_sockaddr((struct sockaddr *)&addr,
+ volume_info->UNCip,
+ strlen(volume_info->UNCip),
+ volume_info->port);
if (!rc) {
/* we failed translating address */
rc = -EINVAL;
@@ -1499,7 +1564,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
}
/* see if we already have a matching tcp_ses */
- tcp_ses = cifs_find_tcp_session(&addr, volume_info->port);
+ tcp_ses = cifs_find_tcp_session((struct sockaddr *)&addr, volume_info);
if (tcp_ses)
return tcp_ses;
@@ -1543,12 +1608,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
cFYI(1, "attempting ipv6 connect");
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
- sin_server6->sin6_port = htons(volume_info->port);
memcpy(&tcp_ses->addr.sockAddr6, sin_server6,
sizeof(struct sockaddr_in6));
rc = ipv6_connect(tcp_ses);
} else {
- sin_server->sin_port = htons(volume_info->port);
memcpy(&tcp_ses->addr.sockAddr, sin_server,
sizeof(struct sockaddr_in));
rc = ipv4_connect(tcp_ses);
@@ -1577,6 +1640,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
write_unlock(&cifs_tcp_ses_lock);
+ cifs_fscache_get_client_cookie(tcp_ses);
+
return tcp_ses;
out_err:
@@ -1591,17 +1656,27 @@ out_err:
}
static struct cifsSesInfo *
-cifs_find_smb_ses(struct TCP_Server_Info *server, char *username)
+cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
{
- struct list_head *tmp;
struct cifsSesInfo *ses;
write_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
- if (strncmp(ses->userName, username, MAX_USERNAME_SIZE))
- continue;
-
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ switch (server->secType) {
+ case Kerberos:
+ if (vol->cred_uid != ses->cred_uid)
+ continue;
+ break;
+ default:
+ /* anything else takes username/password */
+ if (strncmp(ses->userName, vol->username,
+ MAX_USERNAME_SIZE))
+ continue;
+ if (strlen(vol->username) != 0 &&
+ strncmp(ses->password, vol->password,
+ MAX_PASSWORD_SIZE))
+ continue;
+ }
++ses->ses_count;
write_unlock(&cifs_tcp_ses_lock);
return ses;
@@ -1643,7 +1718,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
xid = GetXid();
- ses = cifs_find_smb_ses(server, volume_info->username);
+ ses = cifs_find_smb_ses(server, volume_info);
if (ses) {
cFYI(1, "Existing smb sess found (status=%d)", ses->status);
@@ -1706,6 +1781,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
if (ses->domainName)
strcpy(ses->domainName, volume_info->domainname);
}
+ ses->cred_uid = volume_info->cred_uid;
ses->linux_uid = volume_info->linux_uid;
ses->overrideSecFlg = volume_info->secFlg;
@@ -1773,6 +1849,7 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
CIFSSMBTDis(xid, tcon);
_FreeXid(xid);
+ cifs_fscache_release_super_cookie(tcon);
tconInfoFree(tcon);
cifs_put_smb_ses(ses);
}
@@ -1843,6 +1920,8 @@ cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
list_add(&tcon->tcon_list, &ses->tcon_list);
write_unlock(&cifs_tcp_ses_lock);
+ cifs_fscache_get_super_cookie(tcon);
+
return tcon;
out_fail:
@@ -2397,6 +2476,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
if (pvolume_info->dynperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
+ if (pvolume_info->fsc)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE;
if (pvolume_info->direct_io) {
cFYI(1, "mounting share using direct i/o");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 391816b461c..578d88c5b46 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/file.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
@@ -129,12 +130,6 @@ cifs_bp_rename_retry:
return full_path;
}
-/*
- * When called with struct file pointer set to NULL, there is no way we could
- * update file->private_data, but getting it stuck on openFileList provides a
- * way to access it from cifs_fill_filedata and thereby set file->private_data
- * from cifs_open.
- */
struct cifsFileInfo *
cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
struct file *file, struct vfsmount *mnt, unsigned int oflags)
@@ -162,7 +157,7 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
mutex_init(&pCifsFile->lock_mutex);
INIT_LIST_HEAD(&pCifsFile->llist);
atomic_set(&pCifsFile->count, 1);
- slow_work_init(&pCifsFile->oplock_break, &cifs_oplock_break_ops);
+ INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
write_lock(&GlobalSMBSeslock);
list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
@@ -184,12 +179,13 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
}
write_unlock(&GlobalSMBSeslock);
+ file->private_data = pCifsFile;
+
return pCifsFile;
}
int cifs_posix_open(char *full_path, struct inode **pinode,
- struct vfsmount *mnt, struct super_block *sb,
- int mode, int oflags,
+ struct super_block *sb, int mode, int oflags,
__u32 *poplock, __u16 *pnetfid, int xid)
{
int rc;
@@ -258,19 +254,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
cifs_fattr_to_inode(*pinode, &fattr);
}
- /*
- * cifs_fill_filedata() takes care of setting cifsFileInfo pointer to
- * file->private_data.
- */
- if (mnt) {
- struct cifsFileInfo *pfile_info;
-
- pfile_info = cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt,
- oflags);
- if (pfile_info == NULL)
- rc = -ENOMEM;
- }
-
posix_open_ret:
kfree(presp_data);
return rc;
@@ -298,7 +281,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
int create_options = CREATE_NOT_DIR;
__u32 oplock = 0;
int oflags;
- bool posix_create = false;
/*
* BB below access is probably too much for mknod to request
* but we have to do query and setpathinfo so requesting
@@ -339,7 +321,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = cifs_posix_open(full_path, &newinode,
- nd ? nd->path.mnt : NULL,
inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
/* EIO could indicate that (posix open) operation is not
supported, despite what server claimed in capability
@@ -347,7 +328,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
handled in posix open */
if (rc == 0) {
- posix_create = true;
if (newinode == NULL) /* query inode info */
goto cifs_create_get_file_info;
else /* success, no need to query */
@@ -478,21 +458,28 @@ cifs_create_set_dentry:
else
cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
- /* nfsd case - nfs srv does not set nd */
- if ((nd == NULL) || (!(nd->flags & LOOKUP_OPEN))) {
- /* mknod case - do not leave file open */
- CIFSSMBClose(xid, tcon, fileHandle);
- } else if (!(posix_create) && (newinode)) {
+ if (newinode && nd && (nd->flags & LOOKUP_OPEN)) {
struct cifsFileInfo *pfile_info;
- /*
- * cifs_fill_filedata() takes care of setting cifsFileInfo
- * pointer to file->private_data.
- */
- pfile_info = cifs_new_fileinfo(newinode, fileHandle, NULL,
+ struct file *filp;
+
+ filp = lookup_instantiate_filp(nd, direntry, generic_file_open);
+ if (IS_ERR(filp)) {
+ rc = PTR_ERR(filp);
+ CIFSSMBClose(xid, tcon, fileHandle);
+ goto cifs_create_out;
+ }
+
+ pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp,
nd->path.mnt, oflags);
- if (pfile_info == NULL)
+ if (pfile_info == NULL) {
+ fput(filp);
+ CIFSSMBClose(xid, tcon, fileHandle);
rc = -ENOMEM;
+ }
+ } else {
+ CIFSSMBClose(xid, tcon, fileHandle);
}
+
cifs_create_out:
kfree(buf);
kfree(full_path);
@@ -636,6 +623,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
bool posix_open = false;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
+ struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
char *full_path = NULL;
struct file *filp;
@@ -703,7 +691,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
(nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
(nd->intent.open.flags & O_CREAT)) {
- rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
+ rc = cifs_posix_open(full_path, &newInode,
parent_dir_inode->i_sb,
nd->intent.open.create_mode,
nd->intent.open.flags, &oplock,
@@ -733,8 +721,25 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
else
direntry->d_op = &cifs_dentry_ops;
d_add(direntry, newInode);
- if (posix_open)
- filp = lookup_instantiate_filp(nd, direntry, NULL);
+ if (posix_open) {
+ filp = lookup_instantiate_filp(nd, direntry,
+ generic_file_open);
+ if (IS_ERR(filp)) {
+ rc = PTR_ERR(filp);
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ goto lookup_out;
+ }
+
+ cfile = cifs_new_fileinfo(newInode, fileHandle, filp,
+ nd->path.mnt,
+ nd->intent.open.flags);
+ if (cfile == NULL) {
+ fput(filp);
+ CIFSSMBClose(xid, pTcon, fileHandle);
+ rc = -ENOMEM;
+ goto lookup_out;
+ }
+ }
/* since paths are not looked up by component - the parent
directories are presumed to be good here */
renew_parental_timestamps(direntry);
@@ -755,6 +760,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
is a common return code */
}
+lookup_out:
kfree(full_path);
FreeXid(xid);
return ERR_PTR(rc);
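
With cifs_fill_filedata() gone, the create and lookup paths above finish the open themselves: instantiate the file with lookup_instantiate_filp(), attach the cifsFileInfo, and on failure release the file (fput) and close the server handle rather than leaking either. A standalone sketch of that unwind ordering, with all names illustrative:

#include <stdio.h>
#include <stdlib.h>

struct filp { int dummy; };

static struct filp *instantiate_filp(void) { return malloc(sizeof(struct filp)); }
static void put_filp(struct filp *f) { free(f); }		/* fput analogue */
static void close_server_handle(int fid) { printf("closed fid %d\n", fid); }

static int finish_open(int fid, int fail_fileinfo)
{
	struct filp *filp = instantiate_filp();

	if (!filp) {
		close_server_handle(fid);
		return -1;
	}
	if (fail_fileinfo) {
		/* private-data setup failed: unwind both acquisitions */
		put_filp(filp);
		close_server_handle(fid);
		return -1;
	}
	put_filp(filp);		/* demo teardown of the success path */
	return 0;
}

int main(void)
{
	printf("%d\n", finish_open(7, 1));
	return 0;
}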
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 4db2c5e7283..0eb87026cad 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -4,6 +4,8 @@
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov (niallain@gmail.com)
* Steve French (sfrench@us.ibm.com)
+ * Wang Lei (wang840925@gmail.com)
+ * David Howells (dhowells@redhat.com)
*
* Contains the CIFS DFS upcall routines used for hostname to
* IP address translation.
@@ -24,145 +26,73 @@
*/
#include <linux/slab.h>
-#include <keys/user-type.h>
+#include <linux/dns_resolver.h>
#include "dns_resolve.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
-/* Checks if supplied name is IP address
- * returns:
- * 1 - name is IP
- * 0 - name is not IP
- */
-static int
-is_ip(char *name)
-{
- struct sockaddr_storage ss;
-
- return cifs_convert_address(name, &ss);
-}
-
-static int
-dns_resolver_instantiate(struct key *key, const void *data,
- size_t datalen)
-{
- int rc = 0;
- char *ip;
-
- ip = kmalloc(datalen + 1, GFP_KERNEL);
- if (!ip)
- return -ENOMEM;
-
- memcpy(ip, data, datalen);
- ip[datalen] = '\0';
-
- /* make sure this looks like an address */
- if (!is_ip(ip)) {
- kfree(ip);
- return -EINVAL;
- }
-
- key->type_data.x[0] = datalen;
- key->payload.data = ip;
-
- return rc;
-}
-
-static void
-dns_resolver_destroy(struct key *key)
-{
- kfree(key->payload.data);
-}
-
-struct key_type key_type_dns_resolver = {
- .name = "dns_resolver",
- .def_datalen = sizeof(struct in_addr),
- .describe = user_describe,
- .instantiate = dns_resolver_instantiate,
- .destroy = dns_resolver_destroy,
- .match = user_match,
-};
-
-/* Resolves server name to ip address.
- * input:
- * unc - server UNC
- * output:
- * *ip_addr - pointer to server ip, caller responcible for freeing it.
- * return 0 on success
+/**
+ * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address.
+ * @unc: UNC path specifying the server
+ * @ip_addr: Where to return the IP address.
+ *
+ * The IP address will be returned in string form, and the caller is
+ * responsible for freeing it.
+ *
+ * Returns length of result on success, -ve on error.
*/
int
dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
{
- int rc = -EAGAIN;
- struct key *rkey = ERR_PTR(-EAGAIN);
+ struct sockaddr_storage ss;
+ const char *hostname, *sep;
char *name;
- char *data = NULL;
- int len;
+ int len, rc;
if (!ip_addr || !unc)
return -EINVAL;
- /* search for server name delimiter */
len = strlen(unc);
if (len < 3) {
cFYI(1, "%s: unc is too short: %s", __func__, unc);
return -EINVAL;
}
+
+ /* Discount leading slashes for cifs */
len -= 2;
- name = memchr(unc+2, '\\', len);
- if (!name) {
+ hostname = unc + 2;
+
+ /* Search for server name delimiter */
+ sep = memchr(hostname, '\\', len);
+ if (sep)
+ len = sep - unc;
+ else
cFYI(1, "%s: probably server name is whole unc: %s",
- __func__, unc);
- } else {
- len = (name - unc) - 2/* leading // */;
- }
+ __func__, unc);
+
+ /* Try to interpret hostname as an IPv4 or IPv6 address */
+ rc = cifs_convert_address((struct sockaddr *)&ss, hostname, len);
+ if (rc > 0)
+ goto name_is_IP_address;
+
+ /* Perform the upcall */
+ rc = dns_query(NULL, hostname, len, NULL, ip_addr, NULL);
+ if (rc < 0)
+ cERROR(1, "%s: unable to resolve: %*.*s",
+ __func__, len, len, hostname);
+ else
+ cFYI(1, "%s: resolved: %*.*s to %s",
+ __func__, len, len, hostname, *ip_addr);
+ return rc;
- name = kmalloc(len+1, GFP_KERNEL);
- if (!name) {
- rc = -ENOMEM;
- return rc;
- }
- memcpy(name, unc+2, len);
+name_is_IP_address:
+ name = kmalloc(len + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ memcpy(name, hostname, len);
name[len] = 0;
-
- if (is_ip(name)) {
- cFYI(1, "%s: it is IP, skipping dns upcall: %s",
- __func__, name);
- data = name;
- goto skip_upcall;
- }
-
- rkey = request_key(&key_type_dns_resolver, name, "");
- if (!IS_ERR(rkey)) {
- len = rkey->type_data.x[0];
- data = rkey->payload.data;
- } else {
- cERROR(1, "%s: unable to resolve: %s", __func__, name);
- goto out;
- }
-
-skip_upcall:
- if (data) {
- *ip_addr = kmalloc(len + 1, GFP_KERNEL);
- if (*ip_addr) {
- memcpy(*ip_addr, data, len + 1);
- if (!IS_ERR(rkey))
- cFYI(1, "%s: resolved: %s to %s", __func__,
- name,
- *ip_addr
- );
- rc = 0;
- } else {
- rc = -ENOMEM;
- }
- if (!IS_ERR(rkey))
- key_put(rkey);
- }
-
-out:
- kfree(name);
- return rc;
+ cFYI(1, "%s: unc is IP, skipping dns upcall: %s", __func__, name);
+ *ip_addr = name;
+ return 0;
}
-
-
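
As an aside on the rewritten parsing above: the new code strips the two
leading slashes, bounds the hostname at the next '\' delimiter, and only
falls back to the dns_query() upcall when the name is not a literal
address. A minimal userspace sketch of that UNC-parsing step, in standard
C (extract_unc_hostname() is an illustrative name, not a kernel symbol):

#include <stdio.h>
#include <string.h>

static int extract_unc_hostname(const char *unc, const char **host,
				size_t *hlen)
{
	size_t len = strlen(unc);
	const char *sep;

	if (len < 3 || unc[0] != '\\' || unc[1] != '\\')
		return -1;		/* too short to be \\server\share */

	*host = unc + 2;		/* discount the leading slashes */
	len -= 2;

	sep = memchr(*host, '\\', len);	/* server name delimiter, if any */
	*hlen = sep ? (size_t)(sep - *host) : len;
	return 0;
}

int main(void)
{
	const char *host;
	size_t hlen;

	if (extract_unc_hostname("\\\\server\\share", &host, &hlen) == 0)
		printf("hostname: %.*s\n", (int)hlen, host);
	return 0;
}
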
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 966e9288930..d3f5d27f4d0 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -24,8 +24,6 @@
#define _DNS_RESOLVE_H
#ifdef __KERNEL__
-#include <linux/key-type.h>
-extern struct key_type key_type_dns_resolver;
extern int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr);
#endif /* KERNEL */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index f1ff785b229..db11fdef0e9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -40,6 +40,7 @@
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
+#include "fscache.h"
static inline int cifs_convert_flags(unsigned int flags)
{
@@ -162,44 +163,12 @@ psx_client_can_cache:
return 0;
}
-static struct cifsFileInfo *
-cifs_fill_filedata(struct file *file)
-{
- struct list_head *tmp;
- struct cifsFileInfo *pCifsFile = NULL;
- struct cifsInodeInfo *pCifsInode = NULL;
-
- /* search inode for this file and fill in file->private_data */
- pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
- read_lock(&GlobalSMBSeslock);
- list_for_each(tmp, &pCifsInode->openFileList) {
- pCifsFile = list_entry(tmp, struct cifsFileInfo, flist);
- if ((pCifsFile->pfile == NULL) &&
- (pCifsFile->pid == current->tgid)) {
- /* mode set in cifs_create */
-
- /* needed for writepage */
- pCifsFile->pfile = file;
- file->private_data = pCifsFile;
- break;
- }
- }
- read_unlock(&GlobalSMBSeslock);
-
- if (file->private_data != NULL) {
- return pCifsFile;
- } else if ((file->f_flags & O_CREAT) && (file->f_flags & O_EXCL))
- cERROR(1, "could not find file instance for "
- "new file %p", file);
- return NULL;
-}
-
/* all arguments to this function must be checked for validity in caller */
-static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
- struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
+static inline int cifs_open_inode_helper(struct inode *inode,
struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
char *full_path, int xid)
{
+ struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
struct timespec temp;
int rc;
@@ -213,36 +182,35 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
/* if not oplocked, invalidate inode pages if mtime or file
size changed */
temp = cifs_NTtimeToUnix(buf->LastWriteTime);
- if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
- (file->f_path.dentry->d_inode->i_size ==
+ if (timespec_equal(&inode->i_mtime, &temp) &&
+ (inode->i_size ==
(loff_t)le64_to_cpu(buf->EndOfFile))) {
cFYI(1, "inode unchanged on server");
} else {
- if (file->f_path.dentry->d_inode->i_mapping) {
+ if (inode->i_mapping) {
/* BB no need to lock inode until after invalidate
since namei code should already have it locked? */
- rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
+ rc = filemap_write_and_wait(inode->i_mapping);
if (rc != 0)
- CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
+ pCifsInode->write_behind_rc = rc;
}
cFYI(1, "invalidating remote inode since open detected it "
"changed");
- invalidate_remote_inode(file->f_path.dentry->d_inode);
+ invalidate_remote_inode(inode);
}
client_can_cache:
if (pTcon->unix_ext)
- rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
- full_path, inode->i_sb, xid);
+ rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
+ xid);
else
- rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
- full_path, buf, inode->i_sb, xid, NULL);
+ rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
+ xid, NULL);
if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
- cFYI(1, "Exclusive Oplock granted on inode %p",
- file->f_path.dentry->d_inode);
+ cFYI(1, "Exclusive Oplock granted on inode %p", inode);
} else if ((*oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
@@ -256,7 +224,7 @@ int cifs_open(struct inode *inode, struct file *file)
__u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
- struct cifsFileInfo *pCifsFile;
+ struct cifsFileInfo *pCifsFile = NULL;
struct cifsInodeInfo *pCifsInode;
char *full_path = NULL;
int desiredAccess;
@@ -270,12 +238,6 @@ int cifs_open(struct inode *inode, struct file *file)
tcon = cifs_sb->tcon;
pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
- pCifsFile = cifs_fill_filedata(file);
- if (pCifsFile) {
- rc = 0;
- FreeXid(xid);
- return rc;
- }
full_path = build_path_from_dentry(file->f_path.dentry);
if (full_path == NULL) {
@@ -299,8 +261,7 @@ int cifs_open(struct inode *inode, struct file *file)
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
oflags |= SMB_O_CREAT;
/* can not refresh inode info since size could be stale */
- rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
- inode->i_sb,
+ rc = cifs_posix_open(full_path, &inode, inode->i_sb,
cifs_sb->mnt_file_mode /* ignored */,
oflags, &oplock, &netfid, xid);
if (rc == 0) {
@@ -308,9 +269,23 @@ int cifs_open(struct inode *inode, struct file *file)
/* no need for special case handling of setting mode
on read only files needed here */
- pCifsFile = cifs_fill_filedata(file);
- cifs_posix_open_inode_helper(inode, file, pCifsInode,
- oplock, netfid);
+ rc = cifs_posix_open_inode_helper(inode, file,
+ pCifsInode, oplock, netfid);
+ if (rc != 0) {
+ CIFSSMBClose(xid, tcon, netfid);
+ goto out;
+ }
+
+ pCifsFile = cifs_new_fileinfo(inode, netfid, file,
+ file->f_path.mnt,
+ oflags);
+ if (pCifsFile == NULL) {
+ CIFSSMBClose(xid, tcon, netfid);
+ rc = -ENOMEM;
+ }
+
+ cifs_fscache_set_inode_cookie(inode, file);
+
goto out;
} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
if (tcon->ses->serverNOS)
@@ -391,16 +366,18 @@ int cifs_open(struct inode *inode, struct file *file)
goto out;
}
+ rc = cifs_open_inode_helper(inode, tcon, &oplock, buf, full_path, xid);
+ if (rc != 0)
+ goto out;
+
pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
file->f_flags);
- file->private_data = pCifsFile;
- if (file->private_data == NULL) {
+ if (pCifsFile == NULL) {
rc = -ENOMEM;
goto out;
}
- rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
- &oplock, buf, full_path, xid);
+ cifs_fscache_set_inode_cookie(inode, file);
if (oplock & CIFS_CREATE_ACTION) {
/* time to set mode which we can not set earlier due to
@@ -456,7 +433,7 @@ static int cifs_reopen_file(struct file *file, bool can_flush)
__u16 netfid;
if (file->private_data)
- pCifsFile = (struct cifsFileInfo *)file->private_data;
+ pCifsFile = file->private_data;
else
return -EBADF;
@@ -513,8 +490,7 @@ reopen_error_exit:
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
int oflags = (int) cifs_posix_convert_flags(file->f_flags);
/* can not refresh inode info since size could be stale */
- rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
- inode->i_sb,
+ rc = cifs_posix_open(full_path, NULL, inode->i_sb,
cifs_sb->mnt_file_mode /* ignored */,
oflags, &oplock, &netfid, xid);
if (rc == 0) {
@@ -595,8 +571,7 @@ int cifs_close(struct inode *inode, struct file *file)
int xid, timeout;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
- struct cifsFileInfo *pSMBFile =
- (struct cifsFileInfo *)file->private_data;
+ struct cifsFileInfo *pSMBFile = file->private_data;
xid = GetXid();
@@ -671,8 +646,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
{
int rc = 0;
int xid;
- struct cifsFileInfo *pCFileStruct =
- (struct cifsFileInfo *)file->private_data;
+ struct cifsFileInfo *pCFileStruct = file->private_data;
char *ptmp;
cFYI(1, "Closedir inode = 0x%p", inode);
@@ -893,8 +867,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
length, pfLock,
posix_lock_type, wait_flag);
} else {
- struct cifsFileInfo *fid =
- (struct cifsFileInfo *)file->private_data;
+ struct cifsFileInfo *fid = file->private_data;
if (numLock) {
rc = CIFSSMBLock(xid, tcon, netfid, length,
@@ -995,7 +968,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
if (file->private_data == NULL)
return -EBADF;
- open_file = (struct cifsFileInfo *) file->private_data;
+ open_file = file->private_data;
rc = generic_write_checks(file, poffset, &write_size, 0);
if (rc)
@@ -1097,7 +1070,7 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
if (file->private_data == NULL)
return -EBADF;
- open_file = (struct cifsFileInfo *)file->private_data;
+ open_file = file->private_data;
xid = GetXid();
@@ -1681,8 +1654,7 @@ int cifs_fsync(struct file *file, int datasync)
int xid;
int rc = 0;
struct cifsTconInfo *tcon;
- struct cifsFileInfo *smbfile =
- (struct cifsFileInfo *)file->private_data;
+ struct cifsFileInfo *smbfile = file->private_data;
struct inode *inode = file->f_path.dentry->d_inode;
xid = GetXid();
@@ -1786,7 +1758,7 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
FreeXid(xid);
return rc;
}
- open_file = (struct cifsFileInfo *)file->private_data;
+ open_file = file->private_data;
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
@@ -1867,7 +1839,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
FreeXid(xid);
return rc;
}
- open_file = (struct cifsFileInfo *)file->private_data;
+ open_file = file->private_data;
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
@@ -1952,6 +1924,7 @@ static void cifs_copy_cache_pages(struct address_space *mapping,
bytes_read -= PAGE_CACHE_SIZE;
continue;
}
+ page_cache_release(page);
target = kmap_atomic(page, KM_USER0);
@@ -1971,6 +1944,9 @@ static void cifs_copy_cache_pages(struct address_space *mapping,
SetPageUptodate(page);
unlock_page(page);
data += PAGE_CACHE_SIZE;
+
+ /* add page to FS-Cache */
+ cifs_readpage_to_fscache(mapping->host, page);
}
return;
}
@@ -1997,10 +1973,19 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
FreeXid(xid);
return rc;
}
- open_file = (struct cifsFileInfo *)file->private_data;
+ open_file = file->private_data;
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
pTcon = cifs_sb->tcon;
+ /*
+ * Reads as many pages as possible from fscache. Returns -ENOBUFS
+ * immediately if the cookie is negative
+ */
+ rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
+ &num_pages);
+ if (rc == 0)
+ goto read_complete;
+
cFYI(DBG2, "rpages: num pages %d", num_pages);
for (i = 0; i < num_pages; ) {
unsigned contig_pages;
@@ -2111,6 +2096,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
smb_read_data = NULL;
}
+read_complete:
FreeXid(xid);
return rc;
}
@@ -2121,6 +2107,11 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
char *read_data;
int rc;
+ /* Is the page cached? */
+ rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
+ if (rc == 0)
+ goto read_complete;
+
page_cache_get(page);
read_data = kmap(page);
/* for reads over a certain size could initiate async read ahead */
@@ -2140,11 +2131,17 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
flush_dcache_page(page);
SetPageUptodate(page);
+
+ /* send this page to the cache */
+ cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
+
rc = 0;
io_error:
kunmap(page);
page_cache_release(page);
+
+read_complete:
return rc;
}
@@ -2294,8 +2291,23 @@ out:
return rc;
}
-static void
-cifs_oplock_break(struct slow_work *work)
+static int cifs_release_page(struct page *page, gfp_t gfp)
+{
+ if (PagePrivate(page))
+ return 0;
+
+ return cifs_fscache_release_page(page, gfp);
+}
+
+static void cifs_invalidate_page(struct page *page, unsigned long offset)
+{
+ struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
+
+ if (offset == 0)
+ cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
+}
+
+void cifs_oplock_break(struct work_struct *work)
{
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
oplock_break);
@@ -2332,33 +2344,30 @@ cifs_oplock_break(struct slow_work *work)
LOCKING_ANDX_OPLOCK_RELEASE, false);
cFYI(1, "Oplock release rc = %d", rc);
}
+
+ /*
+ * We might have kicked in before is_valid_oplock_break()
+ * finished grabbing reference for us. Make sure it's done by
+ * waiting for GlobalSMBSeslock.
+ */
+ write_lock(&GlobalSMBSeslock);
+ write_unlock(&GlobalSMBSeslock);
+
+ cifs_oplock_break_put(cfile);
}
-static int
-cifs_oplock_break_get(struct slow_work *work)
+void cifs_oplock_break_get(struct cifsFileInfo *cfile)
{
- struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
- oplock_break);
mntget(cfile->mnt);
cifsFileInfo_get(cfile);
- return 0;
}
-static void
-cifs_oplock_break_put(struct slow_work *work)
+void cifs_oplock_break_put(struct cifsFileInfo *cfile)
{
- struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
- oplock_break);
mntput(cfile->mnt);
cifsFileInfo_put(cfile);
}
-const struct slow_work_ops cifs_oplock_break_ops = {
- .get_ref = cifs_oplock_break_get,
- .put_ref = cifs_oplock_break_put,
- .execute = cifs_oplock_break,
-};
-
const struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
.readpages = cifs_readpages,
@@ -2367,6 +2376,8 @@ const struct address_space_operations cifs_addr_ops = {
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.set_page_dirty = __set_page_dirty_nobuffers,
+ .releasepage = cifs_release_page,
+ .invalidatepage = cifs_invalidate_page,
/* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
@@ -2383,6 +2394,8 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
.set_page_dirty = __set_page_dirty_nobuffers,
+ .releasepage = cifs_release_page,
+ .invalidatepage = cifs_invalidate_page,
/* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
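
The conversion from slow_work to the shared workqueue above relies on a
reference being pinned before queue_work() and dropped as the last step of
the work function. A hedged userspace sketch of that pattern, with an
atomic refcount and a pthread standing in for the kernel workqueue
(obj_get(), obj_put() and worker() are illustrative names):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcount, 1); }

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(o);
	}
}

static void *worker(void *arg)
{
	struct obj *o = arg;
	/* ... do the queued work ... */
	obj_put(o);		/* matches the obj_get() taken at queue time */
	return NULL;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	pthread_t t;

	atomic_init(&o->refcount, 1);	/* caller's reference */
	obj_get(o);			/* reference for the worker */
	pthread_create(&t, NULL, worker, o);
	pthread_join(t, NULL);
	obj_put(o);			/* caller's reference */
	return 0;
}
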
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
new file mode 100644
index 00000000000..9f3f5c4be16
--- /dev/null
+++ b/fs/cifs/fscache.c
@@ -0,0 +1,236 @@
+/*
+ * fs/cifs/fscache.c - CIFS filesystem cache interface
+ *
+ * Copyright (c) 2010 Novell, Inc.
+ * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "fscache.h"
+#include "cifsglob.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+
+void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
+{
+ server->fscache =
+ fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
+ &cifs_fscache_server_index_def, server);
+ cFYI(1, "CIFS: get client cookie (0x%p/0x%p)", server,
+ server->fscache);
+}
+
+void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
+{
+ cFYI(1, "CIFS: release client cookie (0x%p/0x%p)", server,
+ server->fscache);
+ fscache_relinquish_cookie(server->fscache, 0);
+ server->fscache = NULL;
+}
+
+void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon)
+{
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ tcon->fscache =
+ fscache_acquire_cookie(server->fscache,
+ &cifs_fscache_super_index_def, tcon);
+ cFYI(1, "CIFS: get superblock cookie (0x%p/0x%p)",
+ server->fscache, tcon->fscache);
+}
+
+void cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon)
+{
+ cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache);
+ fscache_relinquish_cookie(tcon->fscache, 0);
+ tcon->fscache = NULL;
+}
+
+static void cifs_fscache_enable_inode_cookie(struct inode *inode)
+{
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+
+ if (cifsi->fscache)
+ return;
+
+ cifsi->fscache = fscache_acquire_cookie(cifs_sb->tcon->fscache,
+ &cifs_fscache_inode_object_def,
+ cifsi);
+ cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)",
+ cifs_sb->tcon->fscache, cifsi->fscache);
+}
+
+void cifs_fscache_release_inode_cookie(struct inode *inode)
+{
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+ if (cifsi->fscache) {
+ cFYI(1, "CIFS releasing inode cookie (0x%p)",
+ cifsi->fscache);
+ fscache_relinquish_cookie(cifsi->fscache, 0);
+ cifsi->fscache = NULL;
+ }
+}
+
+static void cifs_fscache_disable_inode_cookie(struct inode *inode)
+{
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+ if (cifsi->fscache) {
+ cFYI(1, "CIFS disabling inode cookie (0x%p)",
+ cifsi->fscache);
+ fscache_relinquish_cookie(cifsi->fscache, 1);
+ cifsi->fscache = NULL;
+ }
+}
+
+void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
+{
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ cifs_fscache_disable_inode_cookie(inode);
+ else {
+ cifs_fscache_enable_inode_cookie(inode);
+ cFYI(1, "CIFS: fscache inode cookie set");
+ }
+}
+
+void cifs_fscache_reset_inode_cookie(struct inode *inode)
+{
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct fscache_cookie *old = cifsi->fscache;
+
+ if (cifsi->fscache) {
+ /* retire the current fscache cache and get a new one */
+ fscache_relinquish_cookie(cifsi->fscache, 1);
+
+ cifsi->fscache = fscache_acquire_cookie(cifs_sb->tcon->fscache,
+ &cifs_fscache_inode_object_def,
+ cifsi);
+ cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p",
+ cifsi->fscache, old);
+ }
+}
+
+int cifs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ if (PageFsCache(page)) {
+ struct inode *inode = page->mapping->host;
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+ cFYI(1, "CIFS: fscache release page (0x%p/0x%p)",
+ page, cifsi->fscache);
+ if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
+ return 0;
+ }
+
+ return 1;
+}
+
+static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx,
+ int error)
+{
+ cFYI(1, "CFS: readpage_from_fscache_complete (0x%p/%d)",
+ page, error);
+ if (!error)
+ SetPageUptodate(page);
+ unlock_page(page);
+}
+
+/*
+ * Retrieve a page from FS-Cache
+ */
+int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+{
+ int ret;
+
+ cFYI(1, "CIFS: readpage_from_fscache(fsc:%p, p:%p, i:0x%p",
+ CIFS_I(inode)->fscache, page, inode);
+ ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page,
+ cifs_readpage_from_fscache_complete,
+ NULL,
+ GFP_KERNEL);
+ switch (ret) {
+
+ case 0: /* page found in fscache, read submitted */
+ cFYI(1, "CIFS: readpage_from_fscache: submitted");
+ return ret;
+ case -ENOBUFS: /* page won't be cached */
+ case -ENODATA: /* page not in cache */
+ cFYI(1, "CIFS: readpage_from_fscache %d", ret);
+ return 1;
+
+ default:
+ cERROR(1, "unknown error ret = %d", ret);
+ }
+ return ret;
+}
+
+/*
+ * Retrieve a set of pages from FS-Cache
+ */
+int __cifs_readpages_from_fscache(struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ int ret;
+
+ cFYI(1, "CIFS: __cifs_readpages_from_fscache (0x%p/%u/0x%p)",
+ CIFS_I(inode)->fscache, *nr_pages, inode);
+ ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping,
+ pages, nr_pages,
+ cifs_readpage_from_fscache_complete,
+ NULL,
+ mapping_gfp_mask(mapping));
+ switch (ret) {
+ case 0: /* read submitted to the cache for all pages */
+ cFYI(1, "CIFS: readpages_from_fscache: submitted");
+ return ret;
+
+ case -ENOBUFS: /* some pages are not cached and can't be */
+ case -ENODATA: /* some pages are not cached */
+ cFYI(1, "CIFS: readpages_from_fscache: no page");
+ return 1;
+
+ default:
+ cFYI(1, "unknown error ret = %d", ret);
+ }
+
+ return ret;
+}
+
+void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+{
+ int ret;
+
+ cFYI(1, "CIFS: readpage_to_fscache(fsc: %p, p: %p, i: %p",
+ CIFS_I(inode)->fscache, page, inode);
+ ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL);
+ if (ret != 0)
+ fscache_uncache_page(CIFS_I(inode)->fscache, page);
+}
+
+void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
+{
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct fscache_cookie *cookie = cifsi->fscache;
+
+ cFYI(1, "CIFS: fscache invalidatepage (0x%p/0x%p)", page, cookie);
+ fscache_wait_on_page_write(cookie, page);
+ fscache_uncache_page(cookie, page);
+}
+
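
The fscache read helpers above follow one return-code convention: 0 means
the cache took the page and the read was submitted, while -ENOBUFS and
-ENODATA mean the caller must fetch the data from the server itself. A
small sketch of that dispatch, with hypothetical cache_read() and
server_read() helpers standing in for the fscache and CIFS calls:

#include <errno.h>
#include <stdio.h>

static int cache_read(int cached)
{
	return cached ? 0 : -ENODATA;	/* 0: submitted; -ENODATA: miss */
}

static void server_read(void)
{
	printf("falling back to a server read\n");
}

static void read_page(int cached)
{
	switch (cache_read(cached)) {
	case 0:			/* read submitted by the cache */
		printf("cache hit, read submitted\n");
		break;
	case -ENOBUFS:		/* page cannot be cached */
	case -ENODATA:		/* page not in the cache */
		server_read();
		break;
	}
}

int main(void)
{
	read_page(1);
	read_page(0);
	return 0;
}
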
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
new file mode 100644
index 00000000000..31b88ec2341
--- /dev/null
+++ b/fs/cifs/fscache.h
@@ -0,0 +1,136 @@
+/*
+ * fs/cifs/fscache.h - CIFS filesystem cache interface definitions
+ *
+ * Copyright (c) 2010 Novell, Inc.
+ * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _CIFS_FSCACHE_H
+#define _CIFS_FSCACHE_H
+
+#include <linux/fscache.h>
+
+#include "cifsglob.h"
+
+#ifdef CONFIG_CIFS_FSCACHE
+
+extern struct fscache_netfs cifs_fscache_netfs;
+extern const struct fscache_cookie_def cifs_fscache_server_index_def;
+extern const struct fscache_cookie_def cifs_fscache_super_index_def;
+extern const struct fscache_cookie_def cifs_fscache_inode_object_def;
+
+extern int cifs_fscache_register(void);
+extern void cifs_fscache_unregister(void);
+
+/*
+ * fscache.c
+ */
+extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *);
+extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *);
+extern void cifs_fscache_get_super_cookie(struct cifsTconInfo *);
+extern void cifs_fscache_release_super_cookie(struct cifsTconInfo *);
+
+extern void cifs_fscache_release_inode_cookie(struct inode *);
+extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *);
+extern void cifs_fscache_reset_inode_cookie(struct inode *);
+
+extern void __cifs_fscache_invalidate_page(struct page *, struct inode *);
+extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
+extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
+extern int __cifs_readpages_from_fscache(struct inode *,
+ struct address_space *,
+ struct list_head *,
+ unsigned *);
+
+extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
+
+static inline void cifs_fscache_invalidate_page(struct page *page,
+ struct inode *inode)
+{
+ if (PageFsCache(page))
+ __cifs_fscache_invalidate_page(page, inode);
+}
+
+static inline int cifs_readpage_from_fscache(struct inode *inode,
+ struct page *page)
+{
+ if (CIFS_I(inode)->fscache)
+ return __cifs_readpage_from_fscache(inode, page);
+
+ return -ENOBUFS;
+}
+
+static inline int cifs_readpages_from_fscache(struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ if (CIFS_I(inode)->fscache)
+ return __cifs_readpages_from_fscache(inode, mapping, pages,
+ nr_pages);
+ return -ENOBUFS;
+}
+
+static inline void cifs_readpage_to_fscache(struct inode *inode,
+ struct page *page)
+{
+ if (PageFsCache(page))
+ __cifs_readpage_to_fscache(inode, page);
+}
+
+#else /* CONFIG_CIFS_FSCACHE */
+static inline int cifs_fscache_register(void) { return 0; }
+static inline void cifs_fscache_unregister(void) {}
+
+static inline void
+cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {}
+static inline void
+cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {}
+static inline void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) {}
+static inline void
+cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) {}
+
+static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
+static inline void cifs_fscache_set_inode_cookie(struct inode *inode,
+ struct file *filp) {}
+static inline void cifs_fscache_reset_inode_cookie(struct inode *inode) {}
+static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ return 1; /* May release page */
+}
+
+static inline void cifs_fscache_invalidate_page(struct page *page,
+ struct inode *inode) {}
+static inline int
+cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+{
+ return -ENOBUFS;
+}
+
+static inline int cifs_readpages_from_fscache(struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ return -ENOBUFS;
+}
+
+static inline void cifs_readpage_to_fscache(struct inode *inode,
+ struct page *page) {}
+
+#endif /* CONFIG_CIFS_FSCACHE */
+
+#endif /* _CIFS_FSCACHE_H */
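
The header above keeps every call site free of #ifdefs: with
CONFIG_CIFS_FSCACHE enabled the inline wrappers do a cheap check before
calling the real __cifs_* function, and with it disabled they compile to
empty stubs. A minimal sketch of that stub pattern under an assumed
FEATURE_CACHE switch (not a real kernel option):

#include <stdio.h>

#define FEATURE_CACHE 1		/* flip to 0 to compile the stubs instead */

#if FEATURE_CACHE
static void __cache_store(int key) { printf("stored %d\n", key); }

static inline void cache_store(int key)
{
	if (key >= 0)		/* cheap check before the real call */
		__cache_store(key);
}
#else
static inline void cache_store(int key) { (void)key; }
#endif

int main(void)
{
	cache_store(42);
	cache_store(-1);	/* filtered out by the wrapper */
	return 0;
}
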
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 62b324f26a5..4bc47e5b5f2 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -29,6 +29,7 @@
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
+#include "fscache.h"
static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
@@ -288,7 +289,7 @@ int cifs_get_file_info_unix(struct file *filp)
struct inode *inode = filp->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsTconInfo *tcon = cifs_sb->tcon;
- struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
+ struct cifsFileInfo *cfile = filp->private_data;
xid = GetXid();
rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -515,7 +516,7 @@ int cifs_get_file_info(struct file *filp)
struct inode *inode = filp->f_path.dentry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsTconInfo *tcon = cifs_sb->tcon;
- struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;
+ struct cifsFileInfo *cfile = filp->private_data;
xid = GetXid();
rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -723,18 +724,17 @@ cifs_find_inode(struct inode *inode, void *opaque)
{
struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
+ /* don't match inode with different uniqueid */
if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
return 0;
- /*
- * uh oh -- it's a directory. We can't use it since hardlinked dirs are
- * verboten. Disable serverino and return it as if it were found, the
- * caller can discard it, generate a uniqueid and retry the find
- */
- if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
+ /* don't match inode of different type */
+ if ((inode->i_mode & S_IFMT) != (fattr->cf_mode & S_IFMT))
+ return 0;
+
+ /* if it's a directory that already has dentries, flag a collision */
+ if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry))
fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
- cifs_autodisable_serverino(CIFS_SB(inode->i_sb));
- }
return 1;
}
@@ -748,6 +748,27 @@ cifs_init_inode(struct inode *inode, void *opaque)
return 0;
}
+/*
+ * walk dentry list for an inode and report whether it has aliases that
+ * are hashed. We use this to determine if a directory inode can actually
+ * be used.
+ */
+static bool
+inode_has_hashed_dentries(struct inode *inode)
+{
+ struct dentry *dentry;
+
+ spin_lock(&dcache_lock);
+ list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
+ spin_unlock(&dcache_lock);
+ return true;
+ }
+ }
+ spin_unlock(&dcache_lock);
+ return false;
+}
+
/* Given fattrs, get a corresponding inode */
struct inode *
cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
@@ -763,12 +784,16 @@ retry_iget5_locked:
inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
if (inode) {
- /* was there a problematic inode number collision? */
+ /* was there a potentially problematic inode collision? */
if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
- iput(inode);
- fattr->cf_uniqueid = iunique(sb, ROOT_I);
fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;
- goto retry_iget5_locked;
+
+ if (inode_has_hashed_dentries(inode)) {
+ cifs_autodisable_serverino(CIFS_SB(sb));
+ iput(inode);
+ fattr->cf_uniqueid = iunique(sb, ROOT_I);
+ goto retry_iget5_locked;
+ }
}
cifs_fattr_to_inode(inode, fattr);
@@ -776,6 +801,10 @@ retry_iget5_locked:
inode->i_flags |= S_NOATIME | S_NOCMTIME;
if (inode->i_state & I_NEW) {
inode->i_ino = hash;
+#ifdef CONFIG_CIFS_FSCACHE
+ /* initialize per-inode cache cookie pointer */
+ CIFS_I(inode)->fscache = NULL;
+#endif
unlock_new_inode(inode);
}
}
@@ -807,6 +836,11 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
if (!inode)
return ERR_PTR(-ENOMEM);
+#ifdef CONFIG_CIFS_FSCACHE
+ /* populate tcon->resource_id */
+ cifs_sb->tcon->resource_id = CIFS_I(inode)->uniqueid;
+#endif
+
if (rc && cifs_sb->tcon->ipc) {
cFYI(1, "ipc connection - fake read inode");
inode->i_mode |= S_IFDIR;
@@ -1401,6 +1435,10 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
if (rc == 0 || rc != -ETXTBSY)
return rc;
+ /* open-file renames don't work across directories */
+ if (to_dentry->d_parent != from_dentry->d_parent)
+ return rc;
+
/* open the file to be renamed -- we need DELETE perms */
rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE,
CREATE_NOT_DIR, &srcfid, &oplock, NULL,
@@ -1564,6 +1602,7 @@ cifs_invalidate_mapping(struct inode *inode)
cifs_i->write_behind_rc = rc;
}
invalidate_remote_inode(inode);
+ cifs_fscache_reset_inode_cookie(inode);
}
int cifs_revalidate_file(struct file *filp)
@@ -1659,26 +1698,16 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
return rc;
}
-static int cifs_vmtruncate(struct inode *inode, loff_t offset)
+static void cifs_setsize(struct inode *inode, loff_t offset)
{
loff_t oldsize;
- int err;
spin_lock(&inode->i_lock);
- err = inode_newsize_ok(inode, offset);
- if (err) {
- spin_unlock(&inode->i_lock);
- goto out;
- }
-
oldsize = inode->i_size;
i_size_write(inode, offset);
spin_unlock(&inode->i_lock);
+
truncate_pagecache(inode, oldsize, offset);
- if (inode->i_op->truncate)
- inode->i_op->truncate(inode);
-out:
- return err;
}
static int
@@ -1751,7 +1780,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
if (rc == 0) {
cifsInode->server_eof = attrs->ia_size;
- rc = cifs_vmtruncate(inode, attrs->ia_size);
+ cifs_setsize(inode, attrs->ia_size);
cifs_truncate_page(inode->i_mapping, inode->i_size);
}
@@ -1776,14 +1805,12 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
xid = GetXid();
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) == 0) {
- /* check if we have permission to change attrs */
- rc = inode_change_ok(inode, attrs);
- if (rc < 0)
- goto out;
- else
- rc = 0;
- }
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+ attrs->ia_valid |= ATTR_FORCE;
+
+ rc = inode_change_ok(inode, attrs);
+ if (rc < 0)
+ goto out;
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -1869,18 +1896,24 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
CIFS_MOUNT_MAP_SPECIAL_CHR);
}
- if (!rc) {
- rc = inode_setattr(inode, attrs);
+ if (rc)
+ goto out;
- /* force revalidate when any of these times are set since some
- of the fs types (eg ext3, fat) do not have fine enough
- time granularity to match protocol, and we do not have a
- a way (yet) to query the server fs's time granularity (and
- whether it rounds times down).
- */
- if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
- cifsInode->time = 0;
- }
+ if ((attrs->ia_valid & ATTR_SIZE) &&
+ attrs->ia_size != i_size_read(inode))
+ truncate_setsize(inode, attrs->ia_size);
+
+ setattr_copy(inode, attrs);
+ mark_inode_dirty(inode);
+
+ /* force revalidate when any of these times are set since some
+ of the fs types (eg ext3, fat) do not have fine enough
+ time granularity to match protocol, and we do not have
+ a way (yet) to query the server fs's time granularity (and
+ whether it rounds times down).
+ */
+ if (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))
+ cifsInode->time = 0;
out:
kfree(args);
kfree(full_path);
@@ -1905,14 +1938,13 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
cFYI(1, "setattr on file %s attrs->iavalid 0x%x",
direntry->d_name.name, attrs->ia_valid);
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) == 0) {
- /* check if we have permission to change attrs */
- rc = inode_change_ok(inode, attrs);
- if (rc < 0) {
- FreeXid(xid);
- return rc;
- } else
- rc = 0;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+ attrs->ia_valid |= ATTR_FORCE;
+
+ rc = inode_change_ok(inode, attrs);
+ if (rc < 0) {
+ FreeXid(xid);
+ return rc;
}
full_path = build_path_from_dentry(direntry);
@@ -2020,8 +2052,17 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
/* do not need local check to inode_change_ok since the server does
that */
- if (!rc)
- rc = inode_setattr(inode, attrs);
+ if (rc)
+ goto cifs_setattr_exit;
+
+ if ((attrs->ia_valid & ATTR_SIZE) &&
+ attrs->ia_size != i_size_read(inode))
+ truncate_setsize(inode, attrs->ia_size);
+
+ setattr_copy(inode, attrs);
+ mark_inode_dirty(inode);
+ return 0;
+
cifs_setattr_exit:
kfree(full_path);
FreeXid(xid);
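
The setattr changes above replace inode_setattr() with an explicit
truncate_setsize()/setattr_copy()/mark_inode_dirty() sequence, and lean on
ATTR_FORCE so inode_change_ok() skips permission checks on no-perm mounts
where the server enforces them. A sketch of the ATTR_FORCE idea only, with
illustrative flag values and a hypothetical change_ok() helper:

#include <errno.h>
#include <stdio.h>

#define ATTR_MODE  0x1
#define ATTR_FORCE 0x2		/* skip permission checks entirely */

static int change_ok(unsigned int valid, int owner)
{
	if (valid & ATTR_FORCE)
		return 0;	/* the server (or caller) enforces perms */
	if ((valid & ATTR_MODE) && !owner)
		return -EPERM;
	return 0;
}

int main(void)
{
	printf("plain, not owner: %d\n", change_ok(ATTR_MODE, 0));
	printf("forced, not owner: %d\n", change_ok(ATTR_MODE | ATTR_FORCE, 0));
	return 0;
}
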
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 505926f1ee6..9d38a71c8e1 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -41,8 +41,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
__u64 ExtAttrMask = 0;
__u64 caps;
struct cifsTconInfo *tcon;
- struct cifsFileInfo *pSMBFile =
- (struct cifsFileInfo *)filep->private_data;
+ struct cifsFileInfo *pSMBFile = filep->private_data;
#endif /* CONFIG_CIFS_POSIX */
xid = GetXid();
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 1394aa37f26..3ccadc1326d 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -498,7 +498,6 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
struct cifsTconInfo *tcon;
struct cifsInodeInfo *pCifsInode;
struct cifsFileInfo *netfile;
- int rc;
cFYI(1, "Checking for oplock break or dnotify response");
if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
@@ -583,13 +582,18 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
pCifsInode->clientCanCacheAll = false;
if (pSMB->OplockLevel == 0)
pCifsInode->clientCanCacheRead = false;
- rc = slow_work_enqueue(&netfile->oplock_break);
- if (rc) {
- cERROR(1, "failed to enqueue oplock "
- "break: %d\n", rc);
- } else {
- netfile->oplock_break_cancelled = false;
- }
+
+ /*
+ * cifs_oplock_break_put() can't be called
+ * from here. Get reference after queueing
+ * succeeded. cifs_oplock_break() will
+ * synchronize using GlobalSMBSeslock.
+ */
+ if (queue_work(system_nrt_wq,
+ &netfile->oplock_break))
+ cifs_oplock_break_get(netfile);
+ netfile->oplock_break_cancelled = false;
+
read_unlock(&GlobalSMBSeslock);
read_unlock(&cifs_tcp_ses_lock);
return true;
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index d35d52889cb..f97851119e6 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -61,6 +61,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
{ERRremcd, -EACCES},
{ERRdiffdevice, -EXDEV},
{ERRnofiles, -ENOENT},
+ {ERRwriteprot, -EROFS},
{ERRbadshare, -ETXTBSY},
{ERRlock, -EACCES},
{ERRunsup, -EINVAL},
@@ -139,17 +140,18 @@ static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
* Returns 0 on failure.
*/
static int
-cifs_inet_pton(const int address_family, const char *cp, void *dst)
+cifs_inet_pton(const int address_family, const char *cp, int len, void *dst)
{
int ret = 0;
/* calculate length by finding first slash or NULL */
if (address_family == AF_INET)
- ret = in4_pton(cp, -1 /* len */, dst, '\\', NULL);
+ ret = in4_pton(cp, len, dst, '\\', NULL);
else if (address_family == AF_INET6)
- ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL);
+ ret = in6_pton(cp, len, dst , '\\', NULL);
- cFYI(DBG2, "address conversion returned %d for %s", ret, cp);
+ cFYI(DBG2, "address conversion returned %d for %*.*s",
+ ret, len, len, cp);
if (ret > 0)
ret = 1;
return ret;
@@ -164,43 +166,66 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst)
* Returns 0 on failure.
*/
int
-cifs_convert_address(char *src, void *dst)
+cifs_convert_address(struct sockaddr *dst, const char *src, int len)
{
- int rc;
- char *pct, *endp;
+ int rc, alen, slen;
+ const char *pct;
+ char *endp, scope_id[13];
struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
/* IPv4 address */
- if (cifs_inet_pton(AF_INET, src, &s4->sin_addr.s_addr)) {
+ if (cifs_inet_pton(AF_INET, src, len, &s4->sin_addr.s_addr)) {
s4->sin_family = AF_INET;
return 1;
}
- /* temporarily terminate string */
- pct = strchr(src, '%');
- if (pct)
- *pct = '\0';
-
- rc = cifs_inet_pton(AF_INET6, src, &s6->sin6_addr.s6_addr);
-
- /* repair temp termination (if any) and make pct point to scopeid */
- if (pct)
- *pct++ = '%';
+ /* attempt to exclude the scope ID from the address part */
+ pct = memchr(src, '%', len);
+ alen = pct ? pct - src : len;
+ rc = cifs_inet_pton(AF_INET6, src, alen, &s6->sin6_addr.s6_addr);
if (!rc)
return rc;
s6->sin6_family = AF_INET6;
if (pct) {
+ /* grab the scope ID */
+ slen = len - (alen + 1);
+ if (slen <= 0 || slen > 12)
+ return 0;
+ memcpy(scope_id, pct + 1, slen);
+ scope_id[slen] = '\0';
+
s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0);
- if (!*pct || *endp)
+ if (endp != scope_id + slen)
return 0;
}
return rc;
}
+int
+cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
+ const unsigned short int port)
+{
+ if (!cifs_convert_address(dst, src, len))
+ return 0;
+
+ switch (dst->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)dst)->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)dst)->sin6_port = htons(port);
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
/*****************************************************************************
convert a NT status code to a dos class/code
*****************************************************************************/
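
The scope-ID handling added above copies the text after '%' into a bounded
buffer and accepts it only if strtoul consumes every character of it. The
same logic as a runnable userspace sketch (parse_scope_id() is an
illustrative helper, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_scope_id(const char *src, size_t len, unsigned long *scope)
{
	const char *pct = memchr(src, '%', len);
	char buf[13], *endp;
	size_t slen;

	if (!pct)
		return 0;		/* no scope ID present */

	slen = len - (size_t)(pct - src) - 1;
	if (slen == 0 || slen > sizeof(buf) - 1)
		return -1;		/* empty or implausibly long */

	memcpy(buf, pct + 1, slen);
	buf[slen] = '\0';

	*scope = strtoul(buf, &endp, 0);
	return (endp == buf + slen) ? 1 : -1;	/* must consume everything */
}

int main(void)
{
	unsigned long scope;
	const char *addr = "fe80::1%2";

	if (parse_scope_id(addr, strlen(addr), &scope) == 1)
		printf("scope id: %lu\n", scope);
	return 0;
}
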
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index daf1753af67..d5e591fab47 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -847,6 +847,11 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);
+ if (tmp_buf == NULL) {
+ rc = -ENOMEM;
+ break;
+ }
+
for (i = 0; (i < num_to_fill) && (rc == 0); i++) {
if (current_entry == NULL) {
/* evaluate whether this case is an error */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7707389bdf2..0a57cb7db5d 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -730,15 +730,7 @@ ssetup_ntlmssp_authenticate:
/* calculate session key */
setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
- if (first_time) /* should this be moved into common code
- with similar ntlmv2 path? */
- /* cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
- response BB FIXME, v2_sess_key); */
-
- /* copy session key */
-
- /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
- bcc_ptr += LM2_SESS_KEY_SIZE; */
+ /* FIXME: calculate MAC key */
memcpy(bcc_ptr, (char *)v2_sess_key,
sizeof(struct ntlmv2_resp));
bcc_ptr += sizeof(struct ntlmv2_resp);
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index c5084d27db7..7f16cb825fe 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -76,6 +76,7 @@
#define ERRnofiles 18 /* A File Search command can find no
more files matching the specified
criteria. */
+#define ERRwriteprot 19 /* media is write protected */
#define ERRgeneral 31
#define ERRbadshare 32 /* The sharing mode specified for an
Open conflicts with existing FIDs on
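
The new ERRwriteprot entry slots into the SMB-to-POSIX mapping table that
the error paths scan linearly. A toy version of that lookup, with a table
trimmed to a few of the entries shown in the hunk and a hypothetical
map_smb_error() helper (the -EIO default is an assumption):

#include <errno.h>
#include <stdio.h>

struct smb_to_posix_error {
	unsigned short smb_err;
	int posix_err;
};

static const struct smb_to_posix_error mapping_table[] = {
	{ 18, -ENOENT },	/* ERRnofiles */
	{ 19, -EROFS },		/* ERRwriteprot: media is write protected */
	{ 32, -ETXTBSY },	/* ERRbadshare */
	{ 0, 0 },
};

static int map_smb_error(unsigned short smb_err)
{
	const struct smb_to_posix_error *e;

	for (e = mapping_table; e->smb_err; e++)
		if (e->smb_err == smb_err)
			return e->posix_err;
	return -EIO;		/* illustrative default */
}

int main(void)
{
	printf("ERRwriteprot maps to %d\n", map_smb_error(19));
	return 0;
}
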
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index d97f9935a02..6526e6f21ec 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -35,7 +35,7 @@
#include "coda_int.h"
/* VFS super_block ops */
-static void coda_clear_inode(struct inode *);
+static void coda_evict_inode(struct inode *);
static void coda_put_super(struct super_block *);
static int coda_statfs(struct dentry *dentry, struct kstatfs *buf);
@@ -93,7 +93,7 @@ static const struct super_operations coda_super_operations =
{
.alloc_inode = coda_alloc_inode,
.destroy_inode = coda_destroy_inode,
- .clear_inode = coda_clear_inode,
+ .evict_inode = coda_evict_inode,
.put_super = coda_put_super,
.statfs = coda_statfs,
.remount_fs = coda_remount,
@@ -224,8 +224,10 @@ static void coda_put_super(struct super_block *sb)
printk("Coda: Bye bye.\n");
}
-static void coda_clear_inode(struct inode *inode)
+static void coda_evict_inode(struct inode *inode)
{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
coda_cache_clear_inode(inode);
}
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 66b9cf79c5b..de89645777c 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -177,7 +177,7 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
nbytes = req->uc_outSize; /* don't have more space! */
}
if (copy_from_user(req->uc_data, buf, nbytes)) {
- req->uc_flags |= REQ_ABORT;
+ req->uc_flags |= CODA_REQ_ABORT;
wake_up(&req->uc_sleep);
retval = -EFAULT;
goto out;
@@ -254,8 +254,8 @@ static ssize_t coda_psdev_read(struct file * file, char __user * buf,
retval = -EFAULT;
/* If request was not a signal, enqueue and don't free */
- if (!(req->uc_flags & REQ_ASYNC)) {
- req->uc_flags |= REQ_READ;
+ if (!(req->uc_flags & CODA_REQ_ASYNC)) {
+ req->uc_flags |= CODA_REQ_READ;
list_add_tail(&(req->uc_chain), &vcp->vc_processing);
goto out;
}
@@ -315,19 +315,19 @@ static int coda_psdev_release(struct inode * inode, struct file * file)
list_del(&req->uc_chain);
/* Async requests need to be freed here */
- if (req->uc_flags & REQ_ASYNC) {
+ if (req->uc_flags & CODA_REQ_ASYNC) {
CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
kfree(req);
continue;
}
- req->uc_flags |= REQ_ABORT;
+ req->uc_flags |= CODA_REQ_ABORT;
wake_up(&req->uc_sleep);
}
list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) {
list_del(&req->uc_chain);
- req->uc_flags |= REQ_ABORT;
+ req->uc_flags |= CODA_REQ_ABORT;
wake_up(&req->uc_sleep);
}
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index f09c5ed76f6..b8893ab6f9e 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -604,7 +604,7 @@ static void coda_unblock_signals(sigset_t *old)
(((r)->uc_opcode != CODA_CLOSE && \
(r)->uc_opcode != CODA_STORE && \
(r)->uc_opcode != CODA_RELEASE) || \
- (r)->uc_flags & REQ_READ))
+ (r)->uc_flags & CODA_REQ_READ))
static inline void coda_waitfor_upcall(struct upc_req *req)
{
@@ -624,7 +624,7 @@ static inline void coda_waitfor_upcall(struct upc_req *req)
set_current_state(TASK_UNINTERRUPTIBLE);
/* got a reply */
- if (req->uc_flags & (REQ_WRITE | REQ_ABORT))
+ if (req->uc_flags & (CODA_REQ_WRITE | CODA_REQ_ABORT))
break;
if (blocked && time_after(jiffies, timeout) &&
@@ -708,7 +708,7 @@ static int coda_upcall(struct venus_comm *vcp,
coda_waitfor_upcall(req);
/* Op went through, interrupt or not... */
- if (req->uc_flags & REQ_WRITE) {
+ if (req->uc_flags & CODA_REQ_WRITE) {
out = (union outputArgs *)req->uc_data;
/* here we map positive Venus errors to kernel errors */
error = -out->oh.result;
@@ -717,13 +717,13 @@ static int coda_upcall(struct venus_comm *vcp,
}
error = -EINTR;
- if ((req->uc_flags & REQ_ABORT) || !signal_pending(current)) {
+ if ((req->uc_flags & CODA_REQ_ABORT) || !signal_pending(current)) {
printk(KERN_WARNING "coda: Unexpected interruption.\n");
goto exit;
}
/* Interrupted before venus read it. */
- if (!(req->uc_flags & REQ_READ))
+ if (!(req->uc_flags & CODA_REQ_READ))
goto exit;
/* Venus saw the upcall, make sure we can send interrupt signal */
@@ -747,7 +747,7 @@ static int coda_upcall(struct venus_comm *vcp,
sig_inputArgs->ih.opcode = CODA_SIGNAL;
sig_inputArgs->ih.unique = req->uc_unique;
- sig_req->uc_flags = REQ_ASYNC;
+ sig_req->uc_flags = CODA_REQ_ASYNC;
sig_req->uc_opcode = sig_inputArgs->ih.opcode;
sig_req->uc_unique = sig_inputArgs->ih.unique;
sig_req->uc_inSize = sizeof(struct coda_in_hdr);
diff --git a/fs/compat.c b/fs/compat.c
index f0b391c5055..718c7062aec 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -8,13 +8,14 @@
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
- * Copyright (C) 2003 Pavel Machek (pavel@suse.cz)
+ * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/compat.h>
@@ -76,7 +77,8 @@ int compat_printk(const char *fmt, ...)
* Not all architectures have sys_utime, so implement this in terms
* of sys_utimes.
*/
-asmlinkage long compat_sys_utime(char __user *filename, struct compat_utimbuf __user *t)
+asmlinkage long compat_sys_utime(const char __user *filename,
+ struct compat_utimbuf __user *t)
{
struct timespec tv[2];
@@ -90,7 +92,7 @@ asmlinkage long compat_sys_utime(char __user *filename, struct compat_utimbuf __
return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
}
-asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename, struct compat_timespec __user *t, int flags)
+asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filename, struct compat_timespec __user *t, int flags)
{
struct timespec tv[2];
@@ -105,7 +107,7 @@ asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename, st
return do_utimes(dfd, filename, t ? tv : NULL, flags);
}
-asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename, struct compat_timeval __user *t)
+asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filename, struct compat_timeval __user *t)
{
struct timespec tv[2];
@@ -124,7 +126,7 @@ asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename, st
return do_utimes(dfd, filename, t ? tv : NULL, 0);
}
-asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval __user *t)
+asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_timeval __user *t)
{
return compat_sys_futimesat(AT_FDCWD, filename, t);
}
@@ -168,7 +170,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
return err;
}
-asmlinkage long compat_sys_newstat(char __user * filename,
+asmlinkage long compat_sys_newstat(const char __user * filename,
struct compat_stat __user *statbuf)
{
struct kstat stat;
@@ -180,7 +182,7 @@ asmlinkage long compat_sys_newstat(char __user * filename,
return cp_compat_stat(&stat, statbuf);
}
-asmlinkage long compat_sys_newlstat(char __user * filename,
+asmlinkage long compat_sys_newlstat(const char __user * filename,
struct compat_stat __user *statbuf)
{
struct kstat stat;
@@ -193,7 +195,8 @@ asmlinkage long compat_sys_newlstat(char __user * filename,
}
#ifndef __ARCH_WANT_STAT64
-asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user *filename,
+asmlinkage long compat_sys_newfstatat(unsigned int dfd,
+ const char __user *filename,
struct compat_stat __user *statbuf, int flag)
{
struct kstat stat;
@@ -266,7 +269,7 @@ asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_sta
error = user_path(pathname, &path);
if (!error) {
struct kstatfs tmp;
- error = vfs_statfs(path.dentry, &tmp);
+ error = vfs_statfs(&path, &tmp);
if (!error)
error = put_compat_statfs(buf, &tmp);
path_put(&path);
@@ -284,7 +287,7 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs(file->f_path.dentry, &tmp);
+ error = vfs_statfs(&file->f_path, &tmp);
if (!error)
error = put_compat_statfs(buf, &tmp);
fput(file);
@@ -334,7 +337,7 @@ asmlinkage long compat_sys_statfs64(const char __user *pathname, compat_size_t s
error = user_path(pathname, &path);
if (!error) {
struct kstatfs tmp;
- error = vfs_statfs(path.dentry, &tmp);
+ error = vfs_statfs(&path, &tmp);
if (!error)
error = put_compat_statfs64(buf, &tmp);
path_put(&path);
@@ -355,7 +358,7 @@ asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct c
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs(file->f_path.dentry, &tmp);
+ error = vfs_statfs(&file->f_path, &tmp);
if (!error)
error = put_compat_statfs64(buf, &tmp);
fput(file);
@@ -378,7 +381,7 @@ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
sb = user_get_super(new_decode_dev(dev));
if (!sb)
return -EINVAL;
- err = vfs_statfs(sb->s_root, &sbuf);
+ err = statfs_by_dentry(sb->s_root, &sbuf);
drop_super(sb);
if (err)
return err;
@@ -626,7 +629,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
tot_len += len;
if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
goto out;
- if (!access_ok(vrfy_dir(type), buf, len)) {
+ if (!access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
ret = -EFAULT;
goto out;
}
@@ -836,9 +839,10 @@ static int do_nfs4_super_data_conv(void *raw_data)
#define NCPFS_NAME "ncpfs"
#define NFS4_NAME "nfs4"
-asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name,
- char __user * type, unsigned long flags,
- void __user * data)
+asmlinkage long compat_sys_mount(const char __user * dev_name,
+ const char __user * dir_name,
+ const char __user * type, unsigned long flags,
+ const void __user * data)
{
char *kernel_type;
unsigned long data_page;
@@ -891,8 +895,6 @@ asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name,
return retval;
}
-#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
-
struct compat_old_linux_dirent {
compat_ulong_t d_ino;
compat_ulong_t d_offset;
@@ -981,7 +983,8 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
struct compat_linux_dirent __user * dirent;
struct compat_getdents_callback *buf = __buf;
compat_ulong_t d_ino;
- int reclen = ALIGN(NAME_OFFSET(dirent) + namlen + 2, sizeof(compat_long_t));
+ int reclen = ALIGN(offsetof(struct compat_linux_dirent, d_name) +
+ namlen + 2, sizeof(compat_long_t));
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
@@ -1068,8 +1071,8 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
{
struct linux_dirent64 __user *dirent;
struct compat_getdents_callback64 *buf = __buf;
- int jj = NAME_OFFSET(dirent);
- int reclen = ALIGN(jj + namlen + 1, sizeof(u64));
+ int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
+ sizeof(u64));
u64 off;
buf->error = -EINVAL; /* only used if we fail.. */
@@ -1193,11 +1196,10 @@ out:
if (iov != iovstack)
kfree(iov);
if ((ret + (type == READ)) > 0) {
- struct dentry *dentry = file->f_path.dentry;
if (type == READ)
- fsnotify_access(dentry);
+ fsnotify_access(file);
else
- fsnotify_modify(dentry);
+ fsnotify_modify(file);
}
return ret;
}
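
The NAME_OFFSET(de) macro removed above computed the name offset from a
dangling pointer; offsetof() expresses the same record-length calculation
portably. A runnable sketch with an illustrative dirent layout (not the
kernel's struct):

#include <stddef.h>
#include <stdio.h>

struct dirent_rec {
	unsigned long d_ino;
	unsigned long d_off;
	unsigned short d_reclen;
	char d_name[];		/* flexible array member */
};

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

static size_t reclen_for(size_t namlen)
{
	/* header + name + NUL + type byte, aligned to a long */
	return ALIGN_UP(offsetof(struct dirent_rec, d_name) + namlen + 2,
			sizeof(long));
}

int main(void)
{
	printf("reclen for a 5-char name: %zu\n", reclen_for(5));
	return 0;
}
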
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 641640dc7ae..03e59aa318e 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -4,7 +4,7 @@
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
- * Copyright (C) 2003 Pavel Machek (pavel@suse.cz)
+ * Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
*
* These routines maintain argument size conversion between 32bit and 64bit
* ioctls.
@@ -131,23 +131,6 @@ static int w_long(unsigned int fd, unsigned int cmd,
return err;
}
-static int rw_long(unsigned int fd, unsigned int cmd,
- compat_ulong_t __user *argp)
-{
- mm_segment_t old_fs = get_fs();
- int err;
- unsigned long val;
-
- if(get_user(val, argp))
- return -EFAULT;
- set_fs (KERNEL_DS);
- err = sys_ioctl(fd, cmd, (unsigned long)&val);
- set_fs (old_fs);
- if (!err && put_user(val, argp))
- return -EFAULT;
- return err;
-}
-
struct compat_video_event {
int32_t type;
compat_time_t timestamp;
@@ -594,15 +577,12 @@ static int do_smb_getmountuid(unsigned int fd, unsigned int cmd,
return err;
}
-static int ioc_settimeout(unsigned int fd, unsigned int cmd,
- compat_ulong_t __user *argp)
-{
- return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, argp);
-}
-
/* Bluetooth ioctls */
-#define HCIUARTSETPROTO _IOW('U', 200, int)
-#define HCIUARTGETPROTO _IOR('U', 201, int)
+#define HCIUARTSETPROTO _IOW('U', 200, int)
+#define HCIUARTGETPROTO _IOR('U', 201, int)
+#define HCIUARTGETDEVICE _IOR('U', 202, int)
+#define HCIUARTSETFLAGS _IOW('U', 203, int)
+#define HCIUARTGETFLAGS _IOR('U', 204, int)
#define BNEPCONNADD _IOW('B', 200, int)
#define BNEPCONNDEL _IOW('B', 201, int)
@@ -966,6 +946,7 @@ COMPATIBLE_IOCTL(TIOCGPGRP)
COMPATIBLE_IOCTL(TIOCGPTN)
COMPATIBLE_IOCTL(TIOCSPTLCK)
COMPATIBLE_IOCTL(TIOCSERGETLSR)
+COMPATIBLE_IOCTL(TIOCSIG)
#ifdef TCGETS2
COMPATIBLE_IOCTL(TCGETS2)
COMPATIBLE_IOCTL(TCSETS2)
@@ -1281,13 +1262,6 @@ COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5)
COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
COMPATIBLE_IOCTL(OSS_GETVERSION)
-/* AUTOFS */
-COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC)
-COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER)
-COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE)
-COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
-COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOSUBVER)
-COMPATIBLE_IOCTL(AUTOFS_IOC_ASKUMOUNT)
/* Raw devices */
COMPATIBLE_IOCTL(RAW_SETBIND)
COMPATIBLE_IOCTL(RAW_GETBIND)
@@ -1328,6 +1302,8 @@ COMPATIBLE_IOCTL(HCISETLINKPOL)
COMPATIBLE_IOCTL(HCISETLINKMODE)
COMPATIBLE_IOCTL(HCISETACLMTU)
COMPATIBLE_IOCTL(HCISETSCOMTU)
+COMPATIBLE_IOCTL(HCIBLOCKADDR)
+COMPATIBLE_IOCTL(HCIUNBLOCKADDR)
COMPATIBLE_IOCTL(HCIINQUIRY)
COMPATIBLE_IOCTL(HCIUARTSETPROTO)
COMPATIBLE_IOCTL(HCIUARTGETPROTO)
@@ -1552,9 +1528,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd,
case RAW_GETBIND:
return raw_ioctl(fd, cmd, argp);
#endif
-#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,unsigned int)
- case AUTOFS_IOC_SETTIMEOUT32:
- return ioc_settimeout(fd, cmd, argp);
/* One SMB ioctl needs translations. */
#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, compat_uid_t)
case SMB_IOC_GETMOUNTUID_32:
@@ -1609,9 +1582,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd,
case KDSKBMETA:
case KDSKBLED:
case KDSETLED:
- /* AUTOFS */
- case AUTOFS_IOC_READY:
- case AUTOFS_IOC_FAIL:
/* NBD */
case NBD_SET_SOCK:
case NBD_SET_BLKSIZE:
@@ -1729,8 +1699,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
goto out_fput;
}
- if (!filp->f_op ||
- (!filp->f_op->ioctl && !filp->f_op->unlocked_ioctl))
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
goto do_ioctl;
break;
}
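
With the BKL-era ->ioctl gone from compat_sys_ioctl's fast path, a driver is only reached through ->unlocked_ioctl (plus ->compat_ioctl for 32-bit callers). A minimal sketch, with hypothetical demo_* names, of file_operations under that rule:

#include <linux/module.h>
#include <linux/fs.h>

static long demo_unlocked_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	/* Runs without the BKL; take driver locks here as needed. */
	return -ENOTTY;
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	/* Commands whose layout is identical on 32/64-bit can share it. */
	.compat_ioctl	= demo_unlocked_ioctl,
#endif
};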
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 41645142b88..cf78d44a8d6 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -72,10 +72,6 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
if (!sd)
return -EINVAL;
- error = simple_setattr(dentry, iattr);
- if (error)
- return error;
-
sd_iattr = sd->s_iattr;
if (!sd_iattr) {
/* setting attributes for the first time, allocate now */
@@ -89,9 +85,12 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
sd_iattr->ia_atime = sd_iattr->ia_mtime = sd_iattr->ia_ctime = CURRENT_TIME;
sd->s_iattr = sd_iattr;
}
-
/* attributes were changed at least once in the past */
+ error = simple_setattr(dentry, iattr);
+ if (error)
+ return error;
+
if (ia_valid & ATTR_UID)
sd_iattr->ia_uid = iattr->ia_uid;
if (ia_valid & ATTR_GID)
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index dd3634e4c96..a53b130b366 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -39,66 +39,55 @@ static DEFINE_MUTEX(read_mutex);
#define CRAMINO(x) (((x)->offset && (x)->size)?(x)->offset<<2:1)
#define OFFSET(x) ((x)->i_ino)
-
-static int cramfs_iget5_test(struct inode *inode, void *opaque)
-{
- struct cramfs_inode *cramfs_inode = opaque;
- return inode->i_ino == CRAMINO(cramfs_inode) && inode->i_ino != 1;
-}
-
-static int cramfs_iget5_set(struct inode *inode, void *opaque)
+static void setup_inode(struct inode *inode, struct cramfs_inode * cramfs_inode)
{
- struct cramfs_inode *cramfs_inode = opaque;
- inode->i_ino = CRAMINO(cramfs_inode);
- return 0;
+ static struct timespec zerotime;
+ inode->i_mode = cramfs_inode->mode;
+ inode->i_uid = cramfs_inode->uid;
+ inode->i_size = cramfs_inode->size;
+ inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
+ inode->i_gid = cramfs_inode->gid;
+ /* Struct copy intentional */
+ inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
+ /* inode->i_nlink is left 1 - arguably wrong for directories,
+ but it's the best we can do without reading the directory
+ contents. 1 yields the right result in GNU find, even
+ without -noleaf option. */
+ if (S_ISREG(inode->i_mode)) {
+ inode->i_fop = &generic_ro_fops;
+ inode->i_data.a_ops = &cramfs_aops;
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op = &cramfs_dir_inode_operations;
+ inode->i_fop = &cramfs_directory_operations;
+ } else if (S_ISLNK(inode->i_mode)) {
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_data.a_ops = &cramfs_aops;
+ } else {
+ init_special_inode(inode, inode->i_mode,
+ old_decode_dev(cramfs_inode->size));
+ }
}
static struct inode *get_cramfs_inode(struct super_block *sb,
struct cramfs_inode * cramfs_inode)
{
- struct inode *inode = iget5_locked(sb, CRAMINO(cramfs_inode),
- cramfs_iget5_test, cramfs_iget5_set,
- cramfs_inode);
- static struct timespec zerotime;
-
- if (inode && (inode->i_state & I_NEW)) {
- inode->i_mode = cramfs_inode->mode;
- inode->i_uid = cramfs_inode->uid;
- inode->i_size = cramfs_inode->size;
- inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
- inode->i_gid = cramfs_inode->gid;
- /* Struct copy intentional */
- inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
- /* inode->i_nlink is left 1 - arguably wrong for directories,
- but it's the best we can do without reading the directory
- contents. 1 yields the right result in GNU find, even
- without -noleaf option. */
- if (S_ISREG(inode->i_mode)) {
- inode->i_fop = &generic_ro_fops;
- inode->i_data.a_ops = &cramfs_aops;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_op = &cramfs_dir_inode_operations;
- inode->i_fop = &cramfs_directory_operations;
- } else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &page_symlink_inode_operations;
- inode->i_data.a_ops = &cramfs_aops;
- } else {
- init_special_inode(inode, inode->i_mode,
- old_decode_dev(cramfs_inode->size));
+ struct inode *inode;
+ if (CRAMINO(cramfs_inode) == 1) {
+ inode = new_inode(sb);
+ if (inode) {
+ inode->i_ino = 1;
+ setup_inode(inode, cramfs_inode);
+ }
+ } else {
+ inode = iget_locked(sb, CRAMINO(cramfs_inode));
+ if (inode) {
+ setup_inode(inode, cramfs_inode);
+ unlock_new_inode(inode);
}
- unlock_new_inode(inode);
}
return inode;
}
-static void cramfs_drop_inode(struct inode *inode)
-{
- if (inode->i_ino == 1)
- generic_delete_inode(inode);
- else
- generic_drop_inode(inode);
-}
-
/*
* We have our own block cache: don't fill up the buffer cache
* with the rom-image, because the way the filesystem is set
@@ -542,7 +531,6 @@ static const struct super_operations cramfs_ops = {
.put_super = cramfs_put_super,
.remount_fs = cramfs_remount,
.statfs = cramfs_statfs,
- .drop_inode = cramfs_drop_inode,
};
static int cramfs_get_sb(struct file_system_type *fs_type,
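
The cramfs rework above separates the root inode (fixed i_ino 1, allocated with new_inode() and never hashed) from regular inodes fetched via iget_locked(). For comparison, a sketch of the usual iget_locked() get-or-create idiom (fs-specific setup elided; note the hunk above calls setup_inode() unconditionally, while the conventional guard is the I_NEW test):

#include <linux/fs.h>
#include <linux/err.h>

static struct inode *demo_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* found in the inode cache */

	/* first instantiation: fill in mode, size, i_op/i_fop, ... */
	unlock_new_inode(inode);
	return inode;
}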
diff --git a/fs/dcache.c b/fs/dcache.c
index d96047b4a63..4d13bf50b7b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -536,7 +536,7 @@ restart:
*/
static void prune_dcache(int count)
{
- struct super_block *sb, *n;
+ struct super_block *sb, *p = NULL;
int w_count;
int unused = dentry_stat.nr_unused;
int prune_ratio;
@@ -550,7 +550,7 @@ static void prune_dcache(int count)
else
prune_ratio = unused / count;
spin_lock(&sb_lock);
- list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
+ list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_nr_dentry_unused == 0)
@@ -590,12 +590,16 @@ static void prune_dcache(int count)
up_read(&sb->s_umount);
}
spin_lock(&sb_lock);
+ if (p)
+ __put_super(p);
count -= pruned;
- __put_super(sb);
+ p = sb;
/* more work left to do? */
if (count <= 0)
break;
}
+ if (p)
+ __put_super(p);
spin_unlock(&sb_lock);
spin_unlock(&dcache_lock);
}
@@ -894,7 +898,7 @@ EXPORT_SYMBOL(shrink_dcache_parent);
*
* In this case we return -1 to tell the caller that we bailed.
*/
-static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
if (nr) {
if (!(gfp_mask & __GFP_FS))
@@ -1901,48 +1905,30 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
}
/**
- * __d_path - return the path of a dentry
+ * prepend_path - Prepend path string to a buffer
+ *
* @path: the dentry/vfsmount to report
* @root: root vfsmnt/dentry (may be modified by this function)
- * @buffer: buffer to return value in
- * @buflen: buffer length
- *
- * Convert a dentry into an ASCII path name. If the entry has been deleted
- * the string " (deleted)" is appended. Note that this is ambiguous.
- *
- * Returns a pointer into the buffer or an error code if the
- * path was too long.
+ * @buffer: pointer to the end of the buffer
+ * @buflen: pointer to buffer length
*
- * "buflen" should be positive. Caller holds the dcache_lock.
+ * Caller holds the dcache_lock.
*
* If path is not reachable from the supplied root, then the value of
* root is changed (without modifying refcounts).
*/
-char *__d_path(const struct path *path, struct path *root,
- char *buffer, int buflen)
+static int prepend_path(const struct path *path, struct path *root,
+ char **buffer, int *buflen)
{
struct dentry *dentry = path->dentry;
struct vfsmount *vfsmnt = path->mnt;
- char *end = buffer + buflen;
- char *retval;
+ bool slash = false;
+ int error = 0;
spin_lock(&vfsmount_lock);
- prepend(&end, &buflen, "\0", 1);
- if (d_unlinked(dentry) &&
- (prepend(&end, &buflen, " (deleted)", 10) != 0))
- goto Elong;
-
- if (buflen < 1)
- goto Elong;
- /* Get '/' right */
- retval = end-1;
- *retval = '/';
-
- for (;;) {
+ while (dentry != root->dentry || vfsmnt != root->mnt) {
struct dentry * parent;
- if (dentry == root->dentry && vfsmnt == root->mnt)
- break;
if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
/* Global root? */
if (vfsmnt->mnt_parent == vfsmnt) {
@@ -1954,28 +1940,88 @@ char *__d_path(const struct path *path, struct path *root,
}
parent = dentry->d_parent;
prefetch(parent);
- if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
- (prepend(&end, &buflen, "/", 1) != 0))
- goto Elong;
- retval = end;
+ error = prepend_name(buffer, buflen, &dentry->d_name);
+ if (!error)
+ error = prepend(buffer, buflen, "/", 1);
+ if (error)
+ break;
+
+ slash = true;
dentry = parent;
}
out:
+ if (!error && !slash)
+ error = prepend(buffer, buflen, "/", 1);
+
spin_unlock(&vfsmount_lock);
- return retval;
+ return error;
global_root:
- retval += 1; /* hit the slash */
- if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
- goto Elong;
+ /*
+ * Filesystems needing to implement special "root names"
+ * should do so with ->d_dname()
+ */
+ if (IS_ROOT(dentry) &&
+ (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
+ WARN(1, "Root dentry has weird name <%.*s>\n",
+ (int) dentry->d_name.len, dentry->d_name.name);
+ }
root->mnt = vfsmnt;
root->dentry = dentry;
goto out;
+}
-Elong:
- retval = ERR_PTR(-ENAMETOOLONG);
- goto out;
+/**
+ * __d_path - return the path of a dentry
+ * @path: the dentry/vfsmount to report
+ * @root: root vfsmnt/dentry (may be modified by this function)
+ * @buf: buffer to return value in
+ * @buflen: buffer length
+ *
+ * Convert a dentry into an ASCII path name.
+ *
+ * Returns a pointer into the buffer or an error code if the
+ * path was too long.
+ *
+ * "buflen" should be positive. Caller holds the dcache_lock.
+ *
+ * If path is not reachable from the supplied root, then the value of
+ * root is changed (without modifying refcounts).
+ */
+char *__d_path(const struct path *path, struct path *root,
+ char *buf, int buflen)
+{
+ char *res = buf + buflen;
+ int error;
+
+ prepend(&res, &buflen, "\0", 1);
+ error = prepend_path(path, root, &res, &buflen);
+ if (error)
+ return ERR_PTR(error);
+
+ return res;
+}
+
+/*
+ * same as __d_path but appends "(deleted)" for unlinked files.
+ */
+static int path_with_deleted(const struct path *path, struct path *root,
+ char **buf, int *buflen)
+{
+ prepend(buf, buflen, "\0", 1);
+ if (d_unlinked(path->dentry)) {
+ int error = prepend(buf, buflen, " (deleted)", 10);
+ if (error)
+ return error;
+ }
+
+ return prepend_path(path, root, buf, buflen);
+}
+
+static int prepend_unreachable(char **buffer, int *buflen)
+{
+ return prepend(buffer, buflen, "(unreachable)", 13);
}
/**
@@ -1996,9 +2042,10 @@ Elong:
*/
char *d_path(const struct path *path, char *buf, int buflen)
{
- char *res;
+ char *res = buf + buflen;
struct path root;
struct path tmp;
+ int error;
/*
* We have various synthetic filesystems that never get mounted. On
@@ -2010,19 +2057,51 @@ char *d_path(const struct path *path, char *buf, int buflen)
if (path->dentry->d_op && path->dentry->d_op->d_dname)
return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
- read_lock(&current->fs->lock);
- root = current->fs->root;
- path_get(&root);
- read_unlock(&current->fs->lock);
+ get_fs_root(current->fs, &root);
spin_lock(&dcache_lock);
tmp = root;
- res = __d_path(path, &tmp, buf, buflen);
+ error = path_with_deleted(path, &tmp, &res, &buflen);
+ if (error)
+ res = ERR_PTR(error);
spin_unlock(&dcache_lock);
path_put(&root);
return res;
}
EXPORT_SYMBOL(d_path);
+/**
+ * d_path_with_unreachable - return the path of a dentry
+ * @path: path to report
+ * @buf: buffer to return value in
+ * @buflen: buffer length
+ *
+ * The difference from d_path() is that this prepends "(unreachable)"
+ * to paths which are unreachable from the current process' root.
+ */
+char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
+{
+ char *res = buf + buflen;
+ struct path root;
+ struct path tmp;
+ int error;
+
+ if (path->dentry->d_op && path->dentry->d_op->d_dname)
+ return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
+
+ get_fs_root(current->fs, &root);
+ spin_lock(&dcache_lock);
+ tmp = root;
+ error = path_with_deleted(path, &tmp, &res, &buflen);
+ if (!error && !path_equal(&tmp, &root))
+ error = prepend_unreachable(&res, &buflen);
+ spin_unlock(&dcache_lock);
+ path_put(&root);
+ if (error)
+ res = ERR_PTR(error);
+
+ return res;
+}
+
/*
* Helper function for dentry_operations.d_dname() members
*/
@@ -2047,16 +2126,12 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
/*
* Write full pathname from the root of the filesystem into the buffer.
*/
-char *dentry_path(struct dentry *dentry, char *buf, int buflen)
+char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
char *end = buf + buflen;
char *retval;
- spin_lock(&dcache_lock);
prepend(&end, &buflen, "\0", 1);
- if (d_unlinked(dentry) &&
- (prepend(&end, &buflen, "//deleted", 9) != 0))
- goto Elong;
if (buflen < 1)
goto Elong;
/* Get '/' right */
@@ -2074,7 +2149,28 @@ char *dentry_path(struct dentry *dentry, char *buf, int buflen)
retval = end;
dentry = parent;
}
+ return retval;
+Elong:
+ return ERR_PTR(-ENAMETOOLONG);
+}
+EXPORT_SYMBOL(__dentry_path);
+
+char *dentry_path(struct dentry *dentry, char *buf, int buflen)
+{
+ char *p = NULL;
+ char *retval;
+
+ spin_lock(&dcache_lock);
+ if (d_unlinked(dentry)) {
+ p = buf + buflen;
+ if (prepend(&p, &buflen, "//deleted", 10) != 0)
+ goto Elong;
+ buflen++;
+ }
+ retval = __dentry_path(dentry, buf, buflen);
spin_unlock(&dcache_lock);
+ if (!IS_ERR(retval) && p)
+ *p = '/'; /* restore '/' overridden with '\0' */
return retval;
Elong:
spin_unlock(&dcache_lock);
@@ -2108,27 +2204,30 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
if (!page)
return -ENOMEM;
- read_lock(&current->fs->lock);
- pwd = current->fs->pwd;
- path_get(&pwd);
- root = current->fs->root;
- path_get(&root);
- read_unlock(&current->fs->lock);
+ get_fs_root_and_pwd(current->fs, &root, &pwd);
error = -ENOENT;
spin_lock(&dcache_lock);
if (!d_unlinked(pwd.dentry)) {
unsigned long len;
struct path tmp = root;
- char * cwd;
+ char *cwd = page + PAGE_SIZE;
+ int buflen = PAGE_SIZE;
- cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
+ prepend(&cwd, &buflen, "\0", 1);
+ error = prepend_path(&pwd, &tmp, &cwd, &buflen);
spin_unlock(&dcache_lock);
- error = PTR_ERR(cwd);
- if (IS_ERR(cwd))
+ if (error)
goto out;
+ /* Unreachable from current root */
+ if (!path_equal(&tmp, &root)) {
+ error = prepend_unreachable(&cwd, &buflen);
+ if (error)
+ goto out;
+ }
+
error = -ERANGE;
len = PAGE_SIZE + page - cwd;
if (len <= size) {
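
Throughout the dcache rewrite, paths are assembled backwards: the cursor starts at buf + buflen and prepend() walks it toward the front, so no component length needs to be known in advance. A small self-contained illustration of the technique (plain userspace C, not kernel code):

#include <stdio.h>
#include <string.h>

/* Copy len bytes in front of *buffer, failing when space runs out
 * (the kernel version returns -ENAMETOOLONG instead of -1). */
static int prepend(char **buffer, int *buflen, const char *str, int len)
{
	*buflen -= len;
	if (*buflen < 0)
		return -1;
	*buffer -= len;
	memcpy(*buffer, str, len);
	return 0;
}

int main(void)
{
	char buf[64];
	char *end = buf + sizeof(buf);
	int buflen = sizeof(buf);

	/* Terminator first, then components from leaf toward root. */
	prepend(&end, &buflen, "\0", 1);
	prepend(&end, &buflen, "leaf", 4);
	prepend(&end, &buflen, "/", 1);
	prepend(&end, &buflen, "dir", 3);
	prepend(&end, &buflen, "/", 1);
	printf("%s\n", end);	/* prints /dir/leaf */
	return 0;
}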
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 7600aacf531..51f270b479b 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio)
* filesystems can use it to hold additional state between get_block calls and
* dio_complete.
*/
-static int dio_complete(struct dio *dio, loff_t offset, int ret)
+static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async)
{
ssize_t transferred = 0;
@@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
transferred = dio->i_size - offset;
}
- if (dio->end_io && dio->result)
- dio->end_io(dio->iocb, offset, transferred,
- dio->map_bh.b_private);
-
- if (dio->flags & DIO_LOCKING)
- /* lockdep: non-owner release */
- up_read_non_owner(&dio->inode->i_alloc_sem);
-
if (ret == 0)
ret = dio->page_errors;
if (ret == 0)
@@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
if (ret == 0)
ret = transferred;
+ if (dio->end_io && dio->result) {
+ dio->end_io(dio->iocb, offset, transferred,
+ dio->map_bh.b_private, ret, is_async);
+ } else if (is_async) {
+ aio_complete(dio->iocb, ret, 0);
+ }
+
+ if (dio->flags & DIO_LOCKING)
+ /* lockdep: non-owner release */
+ up_read_non_owner(&dio->inode->i_alloc_sem);
+
return ret;
}
@@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (remaining == 0) {
- int ret = dio_complete(dio, dio->iocb->ki_pos, 0);
- aio_complete(dio->iocb, ret, 0);
+ dio_complete(dio, dio->iocb->ki_pos, 0, true);
kfree(dio);
}
}
@@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (ret2 == 0) {
- ret = dio_complete(dio, offset, ret);
+ ret = dio_complete(dio, offset, ret, false);
kfree(dio);
} else
BUG_ON(ret != -EIOCBQUEUED);
@@ -1134,8 +1136,27 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
return ret;
}
+/*
+ * This is a library function for use by filesystem drivers.
+ *
+ * The locking rules are governed by the flags parameter:
+ * - if the flags value contains DIO_LOCKING we use a fancy locking
+ * scheme for dumb filesystems.
+ * For writes this function is called under i_mutex and returns with
+ * i_mutex held, for reads, i_mutex is not held on entry, but it is
+ * taken and dropped again before returning.
+ * For reads and writes i_alloc_sem is taken in shared mode and released
+ * on I/O completion (which may happen asynchronously after returning to
+ * the caller).
+ *
+ * - if the flags value does NOT contain DIO_LOCKING we don't use any
+ * internal locking but rather rely on the filesystem to synchronize
+ * direct I/O reads/writes versus each other and truncate.
+ * For reads and writes both i_mutex and i_alloc_sem are not held on
+ * entry and are never taken.
+ */
ssize_t
-__blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
+__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
@@ -1231,57 +1252,4 @@ __blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
out:
return retval;
}
-EXPORT_SYMBOL(__blockdev_direct_IO_newtrunc);
-
-/*
- * This is a library function for use by filesystem drivers.
- *
- * The locking rules are governed by the flags parameter:
- * - if the flags value contains DIO_LOCKING we use a fancy locking
- * scheme for dumb filesystems.
- * For writes this function is called under i_mutex and returns with
- * i_mutex held, for reads, i_mutex is not held on entry, but it is
- * taken and dropped again before returning.
- * For reads and writes i_alloc_sem is taken in shared mode and released
- * on I/O completion (which may happen asynchronously after returning to
- * the caller).
- *
- * - if the flags value does NOT contain DIO_LOCKING we don't use any
- * internal locking but rather rely on the filesystem to synchronize
- * direct I/O reads/writes versus each other and truncate.
- * For reads and writes both i_mutex and i_alloc_sem are not held on
- * entry and are never taken.
- */
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, const struct iovec *iov, loff_t offset,
- unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags)
-{
- ssize_t retval;
-
- retval = __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov,
- offset, nr_segs, get_block, end_io, submit_io, flags);
- /*
- * In case of error extending write may have instantiated a few
- * blocks outside i_size. Trim these off again for DIO_LOCKING.
- * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this in
- * their own manner. This is a further example of where the old
- * truncate sequence is inadequate.
- *
- * NOTE: filesystems with their own locking have to handle this
- * on their own.
- */
- if (flags & DIO_LOCKING) {
- if (unlikely((rw & WRITE) && retval < 0)) {
- loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iov, nr_segs);
-
- if (end > isize)
- vmtruncate(inode, isize);
- }
- }
-
- return retval;
-}
EXPORT_SYMBOL(__blockdev_direct_IO);
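
Note the contract change: dio_complete() now invokes ->end_io with the final ret and an is_async flag, and only calls aio_complete() itself when no ->end_io is set, so a filesystem overriding completion must finish the aio on the async path. A minimal sketch of a conforming callback, using the signature visible in the hunk above (names hypothetical):

#include <linux/fs.h>
#include <linux/aio.h>

static void demo_end_io(struct kiocb *iocb, loff_t offset, ssize_t size,
			void *private, int ret, bool is_async)
{
	/* ... per-fs completion work (unwritten extents etc.) ... */

	if (is_async)
		aio_complete(iocb, ret, 0);	/* dio_complete() no longer does this */
}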
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index c0d35c62052..37a34c2c622 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -248,7 +248,7 @@ static struct connection *assoc2con(int assoc_id)
for (i = 0 ; i < CONN_HASH_SIZE; i++) {
hlist_for_each_entry(con, h, &connection_hash[i], list) {
- if (con && con->sctp_assoc == assoc_id) {
+ if (con->sctp_assoc == assoc_id) {
mutex_unlock(&connections_lock);
return con;
}
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index 2c6ad518100..ef17e0169da 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -81,24 +81,11 @@ static struct genl_ops dlm_nl_ops = {
int __init dlm_netlink_init(void)
{
- int rv;
-
- rv = genl_register_family(&family);
- if (rv)
- return rv;
-
- rv = genl_register_ops(&family, &dlm_nl_ops);
- if (rv < 0)
- goto err;
- return 0;
- err:
- genl_unregister_family(&family);
- return rv;
+ return genl_register_family_with_ops(&family, &dlm_nl_ops, 1);
}
void dlm_netlink_exit(void)
{
- genl_unregister_ops(&family, &dlm_nl_ops);
genl_unregister_family(&family);
}
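
genl_register_family_with_ops() performs exactly the deleted sequence (family registration, per-op registration, unwind on failure) in one call. A sketch of the consolidated pattern, with hypothetical demo_* definitions:

#include <linux/init.h>
#include <linux/kernel.h>
#include <net/genetlink.h>

static struct genl_family demo_family = {
	.id		= GENL_ID_GENERATE,
	.name		= "demo",
	.version	= 1,
	.maxattr	= 0,
};

static int demo_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* real handler elided */
}

static struct genl_ops demo_ops[] = {
	{ .cmd = 1, .doit = demo_doit },
};

static int __init demo_netlink_init(void)
{
	/* Registers the family and each op; unregisters the family
	 * again if any op registration fails. */
	return genl_register_family_with_ops(&demo_family, demo_ops,
					     ARRAY_SIZE(demo_ops));
}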
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 83c4f600786..2195c213ab2 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
continue;
if (inode->i_mapping->nrpages == 0)
continue;
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 1cc087635a5..a2e3b562e65 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -762,7 +762,7 @@ ecryptfs_decrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
/**
* ecryptfs_init_crypt_ctx
- * @crypt_stat: Uninitilized crypt stats structure
+ * @crypt_stat: Uninitialized crypt stats structure
*
* Initialize the crypto context.
*
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index e8fcf4e2ed7..622c9514080 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -199,7 +199,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
"the persistent file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
- goto out;
+ goto out_free;
}
}
if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
@@ -207,7 +207,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
rc = -EPERM;
printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
"file must hence be opened RO\n", __func__);
- goto out;
+ goto out_free;
}
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
@@ -292,12 +292,40 @@ static int ecryptfs_fasync(int fd, struct file *file, int flag)
return rc;
}
-static int ecryptfs_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long
+ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct file *lower_file = NULL;
+ long rc = -ENOTTY;
+
+ if (ecryptfs_file_to_private(file))
+ lower_file = ecryptfs_file_to_lower(file);
+ if (lower_file && lower_file->f_op && lower_file->f_op->unlocked_ioctl)
+ rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long
+ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct file *lower_file = NULL;
+ long rc = -ENOIOCTLCMD;
+
+ if (ecryptfs_file_to_private(file))
+ lower_file = ecryptfs_file_to_lower(file);
+ if (lower_file && lower_file->f_op && lower_file->f_op->compat_ioctl)
+ rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
+ return rc;
+}
+#endif
const struct file_operations ecryptfs_dir_fops = {
.readdir = ecryptfs_readdir,
- .ioctl = ecryptfs_ioctl,
+ .unlocked_ioctl = ecryptfs_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
+#endif
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
@@ -313,7 +341,10 @@ const struct file_operations ecryptfs_main_fops = {
.write = do_sync_write,
.aio_write = generic_file_aio_write,
.readdir = ecryptfs_readdir,
- .ioctl = ecryptfs_ioctl,
+ .unlocked_ioctl = ecryptfs_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ecryptfs_compat_ioctl,
+#endif
.mmap = generic_file_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
@@ -322,20 +353,3 @@ const struct file_operations ecryptfs_main_fops = {
.fasync = ecryptfs_fasync,
.splice_read = generic_file_splice_read,
};
-
-static int
-ecryptfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int rc = 0;
- struct file *lower_file = NULL;
-
- if (ecryptfs_file_to_private(file))
- lower_file = ecryptfs_file_to_lower(file);
- if (lower_file && lower_file->f_op && lower_file->f_op->ioctl)
- rc = lower_file->f_op->ioctl(ecryptfs_inode_to_lower(inode),
- lower_file, cmd, arg);
- else
- rc = -ENOTTY;
- return rc;
-}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 31ef5252f0f..6c55113e722 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -264,7 +264,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
printk(KERN_ERR "%s: Out of memory whilst attempting "
"to allocate ecryptfs_dentry_info struct\n",
__func__);
- goto out_dput;
+ goto out_put;
}
ecryptfs_set_dentry_lower(ecryptfs_dentry, lower_dentry);
ecryptfs_set_dentry_lower_mnt(ecryptfs_dentry, lower_mnt);
@@ -339,14 +339,85 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
out_free_kmem:
kmem_cache_free(ecryptfs_header_cache_2, page_virt);
goto out;
-out_dput:
+out_put:
dput(lower_dentry);
+ mntput(lower_mnt);
d_drop(ecryptfs_dentry);
out:
return rc;
}
/**
+ * ecryptfs_new_lower_dentry
+ * @name: The name of the new dentry.
+ * @lower_dir_dentry: Parent directory of the new dentry.
+ * @nd: nameidata from last lookup.
+ *
+ * Create a new dentry or get it from lower parent dir.
+ */
+static struct dentry *
+ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
+ struct nameidata *nd)
+{
+ struct dentry *new_dentry;
+ struct dentry *tmp;
+ struct inode *lower_dir_inode;
+
+ lower_dir_inode = lower_dir_dentry->d_inode;
+
+ tmp = d_alloc(lower_dir_dentry, name);
+ if (!tmp)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&lower_dir_inode->i_mutex);
+ new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd);
+ mutex_unlock(&lower_dir_inode->i_mutex);
+
+ if (!new_dentry)
+ new_dentry = tmp;
+ else
+ dput(tmp);
+
+ return new_dentry;
+}
+
+
+/**
+ * ecryptfs_lookup_one_lower
+ * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
+ * @lower_dir_dentry: lower parent directory
+ *
+ * Get the lower dentry from vfs. If lower dentry does not exist yet,
+ * create it.
+ */
+static struct dentry *
+ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
+ struct dentry *lower_dir_dentry)
+{
+ struct nameidata nd;
+ struct vfsmount *lower_mnt;
+ struct qstr *name;
+ int err;
+
+ name = &ecryptfs_dentry->d_name;
+ lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
+ ecryptfs_dentry->d_parent));
+ err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
+ mntput(lower_mnt);
+
+ if (!err) {
+ /* we don't need the mount */
+ mntput(nd.path.mnt);
+ return nd.path.dentry;
+ }
+ if (err != -ENOENT)
+ return ERR_PTR(err);
+
+ /* create a new lower dentry */
+ return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd);
+}
+
+/**
* ecryptfs_lookup
* @ecryptfs_dir_inode: The eCryptfs directory inode
* @ecryptfs_dentry: The eCryptfs dentry that we are looking up
@@ -373,14 +444,12 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
goto out_d_drop;
}
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
- mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
- lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
- lower_dir_dentry,
- ecryptfs_dentry->d_name.len);
- mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
+
+ lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
+ lower_dir_dentry);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
- ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
"[%d] on lower_dentry = [%s]\n", __func__, rc,
encrypted_and_encoded_name);
goto out_d_drop;
@@ -402,14 +471,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
"filename; rc = [%d]\n", __func__, rc);
goto out_d_drop;
}
- mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
- lower_dentry = lookup_one_len(encrypted_and_encoded_name,
- lower_dir_dentry,
- encrypted_and_encoded_name_size - 1);
- mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
+ lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
+ lower_dir_dentry);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
- ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
+ ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned "
"[%d] on lower_dentry = [%s]\n", __func__, rc,
encrypted_and_encoded_name);
goto out_d_drop;
@@ -804,10 +870,20 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
size_t num_zeros = (PAGE_CACHE_SIZE
- (ia->ia_size & ~PAGE_CACHE_MASK));
+
+ /*
+ * XXX(truncate) this should really happen at the beginning
+ * of ->setattr. But the code is too messy to do that as part
+ * of a larger patch. ecryptfs is also totally missing out
+ * on the inode_change_ok check at the beginning of
+ * ->setattr, which would include this.
+ */
+ rc = inode_newsize_ok(inode, ia->ia_size);
+ if (rc)
+ goto out;
+
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
- rc = simple_setsize(inode, ia->ia_size);
- if (rc)
- goto out;
+ truncate_setsize(inode, ia->ia_size);
lower_ia->ia_size = ia->ia_size;
lower_ia->ia_valid |= ATTR_SIZE;
goto out;
@@ -830,7 +906,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
goto out;
}
}
- simple_setsize(inode, ia->ia_size);
+ truncate_setsize(inode, ia->ia_size);
rc = ecryptfs_write_inode_size_to_metadata(inode);
if (rc) {
printk(KERN_ERR "Problem with "
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 2d8dbce9d48..bcb68c0cb1f 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -31,9 +31,9 @@ static struct mutex ecryptfs_msg_ctx_lists_mux;
static struct hlist_head *ecryptfs_daemon_hash;
struct mutex ecryptfs_daemon_hash_mux;
-static int ecryptfs_hash_buckets;
+static int ecryptfs_hash_bits;
#define ecryptfs_uid_hash(uid) \
- hash_long((unsigned long)uid, ecryptfs_hash_buckets)
+ hash_long((unsigned long)uid, ecryptfs_hash_bits)
static u32 ecryptfs_msg_counter;
static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
@@ -274,7 +274,7 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
struct user_namespace *user_ns, struct pid *pid,
u32 seq)
{
- struct ecryptfs_daemon *daemon;
+ struct ecryptfs_daemon *uninitialized_var(daemon);
struct ecryptfs_msg_ctx *msg_ctx;
size_t msg_size;
struct nsproxy *nsproxy;
@@ -486,18 +486,19 @@ int ecryptfs_init_messaging(void)
}
mutex_init(&ecryptfs_daemon_hash_mux);
mutex_lock(&ecryptfs_daemon_hash_mux);
- ecryptfs_hash_buckets = 1;
- while (ecryptfs_number_of_users >> ecryptfs_hash_buckets)
- ecryptfs_hash_buckets++;
+ ecryptfs_hash_bits = 1;
+ while (ecryptfs_number_of_users >> ecryptfs_hash_bits)
+ ecryptfs_hash_bits++;
ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
- * ecryptfs_hash_buckets), GFP_KERNEL);
+ * (1 << ecryptfs_hash_bits)),
+ GFP_KERNEL);
if (!ecryptfs_daemon_hash) {
rc = -ENOMEM;
printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
mutex_unlock(&ecryptfs_daemon_hash_mux);
goto out;
}
- for (i = 0; i < ecryptfs_hash_buckets; i++)
+ for (i = 0; i < (1 << ecryptfs_hash_bits); i++)
INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
mutex_unlock(&ecryptfs_daemon_hash_mux);
ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
@@ -554,7 +555,7 @@ void ecryptfs_release_messaging(void)
int i;
mutex_lock(&ecryptfs_daemon_hash_mux);
- for (i = 0; i < ecryptfs_hash_buckets; i++) {
+ for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
int rc;
hlist_for_each_entry(daemon, elem,
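
The rename above untangles a unit bug: hash_long() expects a number of bits, while allocation and iteration need 1 << bits buckets, and the old code fed the same variable to both. A small sketch of the corrected sizing logic (hypothetical helper, same arithmetic as the hunk):

#include <linux/hash.h>
#include <linux/list.h>
#include <linux/slab.h>

static struct hlist_head *demo_alloc_hash(unsigned int nr_users,
					  unsigned int *bits_out)
{
	unsigned int bits = 1;
	struct hlist_head *table;
	unsigned int i;

	/* smallest bits such that (1 << bits) > nr_users */
	while (nr_users >> bits)
		bits++;

	table = kmalloc(sizeof(*table) * (1 << bits), GFP_KERNEL);
	if (!table)
		return NULL;
	for (i = 0; i < (1U << bits); i++)
		INIT_HLIST_HEAD(&table[i]);

	*bits_out = bits;	/* index later with hash_long(uid, bits) */
	return table;
}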
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 0435886e4a9..f7fc286a3aa 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -118,11 +118,15 @@ void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode)
*/
static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
- return vfs_statfs(ecryptfs_dentry_to_lower(dentry), buf);
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+
+ if (!lower_dentry->d_sb->s_op->statfs)
+ return -ENOSYS;
+ return lower_dentry->d_sb->s_op->statfs(lower_dentry, buf);
}
/**
- * ecryptfs_clear_inode
+ * ecryptfs_evict_inode
* @inode - The ecryptfs inode
*
* Called by iput() when the inode reference count reached zero
@@ -131,8 +135,10 @@ static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* on the inode free list. We use this to drop out reference to the
* lower inode.
*/
-static void ecryptfs_clear_inode(struct inode *inode)
+static void ecryptfs_evict_inode(struct inode *inode)
{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
iput(ecryptfs_inode_to_lower(inode));
}
@@ -184,6 +190,6 @@ const struct super_operations ecryptfs_sops = {
.drop_inode = generic_delete_inode,
.statfs = ecryptfs_statfs,
.remount_fs = NULL,
- .clear_inode = ecryptfs_clear_inode,
+ .evict_inode = ecryptfs_evict_inode,
.show_options = ecryptfs_show_options
};
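
ecryptfs is one of many conversions in this merge from ->clear_inode/->delete_inode to the unified ->evict_inode, which must itself truncate the page cache and call end_writeback() before dropping per-inode state. A minimal skeleton of the new shape (stacking-fs flavour; the lower-inode helper name is hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

static void demo_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);	/* drop cached pages */
	end_writeback(inode);				/* detach from writeback lists */
	/*
	 * Release fs-private state here, e.g. the reference a stacking
	 * filesystem holds on its lower inode:
	 *	iput(demo_inode_to_lower(inode));
	 */
}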
diff --git a/fs/exec.c b/fs/exec.c
index e19de6a8033..7761837e450 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -28,7 +28,6 @@
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
-#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
@@ -129,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- fsnotify_open(file->f_path.dentry);
+ fsnotify_open(file);
error = -ENOEXEC;
if(file->f_op) {
@@ -653,6 +652,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
else
stack_base = vma->vm_start - stack_expand;
#endif
+ current->mm->start_stack = bprm->p;
ret = expand_stack(vma, stack_base);
if (ret)
ret = -EFAULT;
@@ -683,7 +683,7 @@ struct file *open_exec(const char *name)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- fsnotify_open(file->f_path.dentry);
+ fsnotify_open(file);
err = deny_write_access(file);
if (err)
@@ -1891,13 +1891,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
*/
clear_thread_flag(TIF_SIGPENDING);
- /*
- * lock_kernel() because format_corename() is controlled by sysctl, which
- * uses lock_kernel()
- */
- lock_kernel();
ispipe = format_corename(corename, signr);
- unlock_kernel();
if (ispipe) {
int dump_count;
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index 22721b2fd89..2dc925fa101 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -256,7 +256,6 @@ static inline int exofs_oi_read(struct exofs_i_info *oi,
}
/* inode.c */
-void exofs_truncate(struct inode *inode);
int exofs_setattr(struct dentry *, struct iattr *);
int exofs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -264,7 +263,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
extern struct inode *exofs_iget(struct super_block *, unsigned long);
struct inode *exofs_new_inode(struct inode *, int);
extern int exofs_write_inode(struct inode *, struct writeback_control *wbc);
-extern void exofs_delete_inode(struct inode *);
+extern void exofs_evict_inode(struct inode *);
/* dir.c: */
int exofs_add_link(struct dentry *, struct inode *);
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index fef6899be39..68cb23e3bb9 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -30,9 +30,6 @@
* along with exofs; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-
-#include <linux/buffer_head.h>
-
#include "exofs.h"
static int exofs_release_file(struct inode *inode, struct file *filp)
@@ -40,19 +37,27 @@ static int exofs_release_file(struct inode *inode, struct file *filp)
return 0;
}
+/* exofs_file_fsync - flush the inode to disk
+ *
+ * Note, in exofs all metadata is written as part of the inode, regardless.
+ * The writeout is synchronous.
+ */
static int exofs_file_fsync(struct file *filp, int datasync)
{
int ret;
- struct address_space *mapping = filp->f_mapping;
- struct inode *inode = mapping->host;
+ struct inode *inode = filp->f_mapping->host;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0, /* metadata-only; caller takes care of data */
+ };
struct super_block *sb;
- ret = filemap_write_and_wait(mapping);
- if (ret)
- return ret;
+ if (!(inode->i_state & I_DIRTY))
+ return 0;
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ return 0;
- /* sync the inode attributes */
- ret = write_inode_now(inode, 1);
+ ret = sync_inode(inode, &wbc);
/* This is a good place to write the sb */
/* TODO: Schedule an sb-sync on create */
@@ -65,9 +70,9 @@ static int exofs_file_fsync(struct file *filp, int datasync)
static int exofs_flush(struct file *file, fl_owner_t id)
{
- exofs_file_fsync(file, 1);
+ int ret = vfs_fsync(file, 0);
/* TODO: Flush the OSD target */
- return 0;
+ return ret;
}
const struct file_operations exofs_file_operations = {
@@ -86,6 +91,5 @@ const struct file_operations exofs_file_operations = {
};
const struct inode_operations exofs_file_inode_operations = {
- .truncate = exofs_truncate,
.setattr = exofs_setattr,
};
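
The new exofs_file_fsync() above goes through sync_inode() with an explicit writeback_control rather than write_inode_now(), and short-circuits when the inode carries no relevant dirty state. A sketch of that shape in isolation (hypothetical helper name):

#include <linux/fs.h>
#include <linux/writeback.h>

static int demo_fsync_metadata(struct inode *inode, int datasync)
{
	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,
		.nr_to_write = 0,	/* metadata only; data flushed by the caller */
	};

	if (!(inode->i_state & I_DIRTY))
		return 0;		/* nothing to write at all */
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;		/* fdatasync() may skip pure timestamp updates */

	return sync_inode(inode, &wbc);
}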
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 4bb6ef822e4..eb7368ebd8c 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -32,9 +32,6 @@
*/
#include <linux/slab.h>
-#include <linux/writeback.h>
-#include <linux/buffer_head.h>
-#include <scsi/scsi_device.h>
#include "exofs.h"
@@ -697,6 +694,13 @@ static int exofs_writepage(struct page *page, struct writeback_control *wbc)
return write_exec(&pcol);
}
+/* i_mutex held, so we can use inode->i_size directly */
+static void _write_failed(struct inode *inode, loff_t to)
+{
+ if (to > inode->i_size)
+ truncate_pagecache(inode, to, inode->i_size);
+}
+
int exofs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -710,7 +714,7 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
fsdata);
if (ret) {
EXOFS_DBGMSG("simple_write_begin faild\n");
- return ret;
+ goto out;
}
page = *pagep;
@@ -725,6 +729,9 @@ int exofs_write_begin(struct file *file, struct address_space *mapping,
EXOFS_DBGMSG("__readpage_filler faild\n");
}
}
+out:
+ if (unlikely(ret))
+ _write_failed(mapping->host, pos + len);
return ret;
}
@@ -750,6 +757,10 @@ static int exofs_write_end(struct file *file, struct address_space *mapping,
int ret;
ret = simple_write_end(file, mapping,pos, len, copied, page, fsdata);
+ if (unlikely(ret))
+ _write_failed(inode, pos + len);
+
+ /* TODO: remove this once simple_write_end marks the inode dirty */
if (i_size != inode->i_size)
mark_inode_dirty(inode);
return ret;
@@ -759,15 +770,13 @@ static int exofs_releasepage(struct page *page, gfp_t gfp)
{
EXOFS_DBGMSG("page 0x%lx\n", page->index);
WARN_ON(1);
- return try_to_free_buffers(page);
+ return 0;
}
static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
- EXOFS_DBGMSG("page_has_buffers=>%d\n", page_has_buffers(page));
+ EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
WARN_ON(1);
-
- block_invalidatepage(page, offset);
}
const struct address_space_operations exofs_aops = {
@@ -808,87 +817,55 @@ static inline int exofs_inode_is_fast_symlink(struct inode *inode)
return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}
-/*
- * get_block_t - Fill in a buffer_head
- * An OSD takes care of block allocation so we just fake an allocation by
- * putting in the inode's sector_t in the buffer_head.
- * TODO: What about the case of create==0 and @iblock does not exist in the
- * object?
- */
-static int exofs_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- map_bh(bh_result, inode->i_sb, iblock);
- return 0;
-}
-
const struct osd_attr g_attr_logical_length = ATTR_DEF(
OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
-static int _do_truncate(struct inode *inode)
+static int _do_truncate(struct inode *inode, loff_t newsize)
{
struct exofs_i_info *oi = exofs_i(inode);
- loff_t isize = i_size_read(inode);
int ret;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- nobh_truncate_page(inode->i_mapping, isize, exofs_get_block);
+ ret = exofs_oi_truncate(oi, (u64)newsize);
+ if (likely(!ret))
+ truncate_setsize(inode, newsize);
- ret = exofs_oi_truncate(oi, (u64)isize);
- EXOFS_DBGMSG("(0x%lx) size=0x%llx\n", inode->i_ino, isize);
+ EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
+ inode->i_ino, newsize, ret);
return ret;
}
/*
- * Truncate a file to the specified size - all we have to do is set the size
- * attribute. We make sure the object exists first.
- */
-void exofs_truncate(struct inode *inode)
-{
- struct exofs_i_info *oi = exofs_i(inode);
- int ret;
-
- if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
- || S_ISLNK(inode->i_mode)))
- return;
- if (exofs_inode_is_fast_symlink(inode))
- return;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
- return;
-
- /* if we are about to truncate an object, and it hasn't been
- * created yet, wait
- */
- if (unlikely(wait_obj_created(oi)))
- goto fail;
-
- ret = _do_truncate(inode);
- if (ret)
- goto fail;
-
-out:
- mark_inode_dirty(inode);
- return;
-fail:
- make_bad_inode(inode);
- goto out;
-}
-
-/*
- * Set inode attributes - just call generic functions.
+ * Set inode attributes - update size attribute on OSD if needed,
+ * otherwise just call generic functions.
*/
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = dentry->d_inode;
int error;
+ /* if we are about to modify an object, and it hasn't been
+ * created yet, wait
+ */
+ error = wait_obj_created(exofs_i(inode));
+ if (unlikely(error))
+ return error;
+
error = inode_change_ok(inode, iattr);
- if (error)
+ if (unlikely(error))
return error;
- error = inode_setattr(inode, iattr);
- return error;
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(inode)) {
+ error = _do_truncate(inode, iattr->ia_size);
+ if (unlikely(error))
+ return error;
+ }
+
+ setattr_copy(inode, iattr);
+ mark_inode_dirty(inode);
+ return 0;
}
static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
@@ -1325,7 +1302,7 @@ static void delete_done(struct exofs_io_state *ios, void *p)
* from the OSD here. We make sure the object was created before we try and
* delete it.
*/
-void exofs_delete_inode(struct inode *inode)
+void exofs_evict_inode(struct inode *inode)
{
struct exofs_i_info *oi = exofs_i(inode);
struct super_block *sb = inode->i_sb;
@@ -1335,30 +1312,27 @@ void exofs_delete_inode(struct inode *inode)
truncate_inode_pages(&inode->i_data, 0);
- if (is_bad_inode(inode))
+ /* TODO: should do better here */
+ if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
- mark_inode_dirty(inode);
- exofs_update_inode(inode, inode_needs_sync(inode));
-
inode->i_size = 0;
- if (inode->i_blocks)
- exofs_truncate(inode);
+ end_writeback(inode);
- clear_inode(inode);
+ /* if we are deleting an obj that hasn't been created yet, wait */
+ if (!obj_created(oi)) {
+ BUG_ON(!obj_2bcreated(oi));
+ wait_event(oi->i_wq, obj_created(oi));
+ /* ignore the error; attempt a remove anyway */
+ }
+ /* Now Remove the OSD objects */
ret = exofs_get_io_state(&sbi->layout, &ios);
if (unlikely(ret)) {
EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
return;
}
- /* if we are deleting an obj that hasn't been created yet, wait */
- if (!obj_created(oi)) {
- BUG_ON(!obj_2bcreated(oi));
- wait_event(oi->i_wq, obj_created(oi));
- }
-
ios->obj.id = exofs_oi_objno(oi);
ios->done = delete_done;
ios->private = sbi;
@@ -1374,5 +1348,5 @@ void exofs_delete_inode(struct inode *inode)
return;
no_delete:
- clear_inode(inode);
+ end_writeback(inode);
}
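
exofs_setattr() above now follows the post-inode_setattr() recipe used across this merge: validate with inode_change_ok(), perform any size change explicitly, then setattr_copy() and mark the inode dirty. The generic skeleton, for reference (hypothetical demo_* name; fs-specific truncation elided):

#include <linux/fs.h>

static int demo_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		/* fs-specific work first (exofs truncates the OSD object),
		 * then update i_size and trim the page cache: */
		truncate_setsize(inode, iattr->ia_size);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}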
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index 4337cad7777..6550bf70e41 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -305,8 +305,6 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
struct _striping_info {
u64 obj_offset;
u64 group_length;
- u64 total_group_length;
- u64 Major;
unsigned dev;
unsigned unit_off;
};
@@ -343,8 +341,6 @@ static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
(M * group_depth * stripe_unit);
si->group_length = T - H;
- si->total_group_length = T;
- si->Major = M;
}
static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
@@ -392,20 +388,19 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
}
static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
- struct _striping_info *si, unsigned first_comp)
+ struct _striping_info *si)
{
unsigned stripe_unit = ios->layout->stripe_unit;
unsigned mirrors_p1 = ios->layout->mirrors_p1;
unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
unsigned dev = si->dev;
unsigned first_dev = dev - (dev % devs_in_group);
- unsigned comp = first_comp + (dev - first_dev);
unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0;
unsigned cur_pg = ios->pages_consumed;
int ret = 0;
while (length) {
- struct exofs_per_dev_state *per_dev = &ios->per_dev[comp];
+ struct exofs_per_dev_state *per_dev = &ios->per_dev[dev];
unsigned cur_len, page_off = 0;
if (!per_dev->length) {
@@ -424,11 +419,8 @@ static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
cur_len = stripe_unit;
}
- if (max_comp < comp)
- max_comp = comp;
-
- dev += mirrors_p1;
- dev = (dev % devs_in_group) + first_dev;
+ if (max_comp < dev)
+ max_comp = dev;
} else {
cur_len = stripe_unit;
}
@@ -440,8 +432,8 @@ static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
if (unlikely(ret))
goto out;
- comp += mirrors_p1;
- comp = (comp % devs_in_group) + first_comp;
+ dev += mirrors_p1;
+ dev = (dev % devs_in_group) + first_dev;
length -= cur_len;
}
@@ -454,18 +446,15 @@ out:
static int _prepare_for_striping(struct exofs_io_state *ios)
{
u64 length = ios->length;
+ u64 offset = ios->offset;
struct _striping_info si;
- unsigned devs_in_group = ios->layout->group_width *
- ios->layout->mirrors_p1;
- unsigned first_comp = 0;
int ret = 0;
- _calc_stripe_info(ios, ios->offset, &si);
-
if (!ios->pages) {
if (ios->kern_buff) {
struct exofs_per_dev_state *per_dev = &ios->per_dev[0];
+ _calc_stripe_info(ios, ios->offset, &si);
per_dev->offset = si.obj_offset;
per_dev->dev = si.dev;
@@ -479,26 +468,17 @@ static int _prepare_for_striping(struct exofs_io_state *ios)
}
while (length) {
+ _calc_stripe_info(ios, offset, &si);
+
if (length < si.group_length)
si.group_length = length;
- ret = _prepare_one_group(ios, si.group_length, &si, first_comp);
+ ret = _prepare_one_group(ios, si.group_length, &si);
if (unlikely(ret))
goto out;
+ offset += si.group_length;
length -= si.group_length;
-
- si.group_length = si.total_group_length;
- si.unit_off = 0;
- ++si.Major;
- si.obj_offset = si.Major * ios->layout->stripe_unit *
- ios->layout->group_depth;
-
- si.dev = (si.dev - (si.dev % devs_in_group)) + devs_in_group;
- si.dev %= ios->layout->s_numdevs;
-
- first_comp += devs_in_group;
- first_comp %= ios->layout->s_numdevs;
}
out:
@@ -599,7 +579,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
} else {
bio = master_dev->bio;
/* FIXME: bio_set_dir() */
- bio->bi_rw |= (1 << BIO_RW);
+ bio->bi_rw |= REQ_WRITE;
}
osd_req_write(or, &ios->obj, per_dev->offset, bio,
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 03149b9a517..047e92fa3af 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -31,7 +31,6 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/vfs.h>
@@ -743,7 +742,7 @@ static const struct super_operations exofs_sops = {
.alloc_inode = exofs_alloc_inode,
.destroy_inode = exofs_destroy_inode,
.write_inode = exofs_write_inode,
- .delete_inode = exofs_delete_inode,
+ .evict_inode = exofs_evict_inode,
.put_super = exofs_put_super,
.write_super = exofs_write_super,
.sync_fs = exofs_sync_fs,
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index ca7e2a0ed98..2bcc0431bad 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -200,6 +200,7 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return error;
else {
inode->i_mode = mode;
+ inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
if (error == 0)
acl = NULL;
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index e8766a39677..c6c684b44ea 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -571,7 +571,7 @@ do_more:
error_return:
brelse(bitmap_bh);
release_blocks(sb, freed);
- dquot_free_block(inode, freed);
+ dquot_free_block_nodirty(inode, freed);
}
/**
@@ -1418,7 +1418,8 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
- dquot_free_block(inode, *count-num);
+ dquot_free_block_nodirty(inode, *count-num);
+ mark_inode_dirty(inode);
*count = num;
return ret_block;
@@ -1428,8 +1429,10 @@ out:
/*
* Undo the block allocation
*/
- if (!performed_allocation)
- dquot_free_block(inode, *count);
+ if (!performed_allocation) {
+ dquot_free_block_nodirty(inode, *count);
+ mark_inode_dirty(inode);
+ }
brelse(bitmap_bh);
return 0;
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 7516957273e..764109886ec 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -448,6 +448,11 @@ ino_t ext2_inode_by_name(struct inode *dir, struct qstr *child)
return res;
}
+static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len)
+{
+ return __block_write_begin(page, pos, len, ext2_get_block);
+}
+
/* Releases the page */
void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
struct page *page, struct inode *inode, int update_times)
@@ -458,8 +463,7 @@ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
int err;
lock_page(page);
- err = __ext2_write_begin(NULL, page->mapping, pos, len,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = ext2_prepare_chunk(page, pos, len);
BUG_ON(err);
de->inode = cpu_to_le32(inode->i_ino);
ext2_set_de_type(de, inode);
@@ -542,8 +546,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
got_it:
pos = page_offset(page) +
(char*)de - (char*)page_address(page);
- err = __ext2_write_begin(NULL, page->mapping, pos, rec_len, 0,
- &page, NULL);
+ err = ext2_prepare_chunk(page, pos, rec_len);
if (err)
goto out_unlock;
if (de->inode) {
@@ -576,8 +579,7 @@ out_unlock:
*/
int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
{
- struct address_space *mapping = page->mapping;
- struct inode *inode = mapping->host;
+ struct inode *inode = page->mapping->host;
char *kaddr = page_address(page);
unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
unsigned to = ((char *)dir - kaddr) +
@@ -601,8 +603,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
from = (char*)pde - (char*)page_address(page);
pos = page_offset(page) + from;
lock_page(page);
- err = __ext2_write_begin(NULL, page->mapping, pos, to - from, 0,
- &page, NULL);
+ err = ext2_prepare_chunk(page, pos, to - from);
BUG_ON(err);
if (pde)
pde->rec_len = ext2_rec_len_to_disk(to - from);
@@ -621,8 +622,7 @@ out:
*/
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
- struct address_space *mapping = inode->i_mapping;
- struct page *page = grab_cache_page(mapping, 0);
+ struct page *page = grab_cache_page(inode->i_mapping, 0);
unsigned chunk_size = ext2_chunk_size(inode);
struct ext2_dir_entry_2 * de;
int err;
@@ -631,8 +631,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
if (!page)
return -ENOMEM;
- err = __ext2_write_begin(NULL, page->mapping, 0, chunk_size, 0,
- &page, NULL);
+ err = ext2_prepare_chunk(page, 0, chunk_size);
if (err) {
unlock_page(page);
goto fail;
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 52b34f1d273..416daa62242 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -119,7 +119,7 @@ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
/* inode.c */
extern struct inode *ext2_iget (struct super_block *, unsigned long);
extern int ext2_write_inode (struct inode *, struct writeback_control *);
-extern void ext2_delete_inode (struct inode *);
+extern void ext2_evict_inode(struct inode *);
extern int ext2_sync_inode (struct inode *);
extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern int ext2_setattr (struct dentry *, struct iattr *);
@@ -127,9 +127,6 @@ extern void ext2_set_inode_flags(struct inode *inode);
extern void ext2_get_inode_flags(struct ext2_inode_info *);
extern int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
-int __ext2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
/* ioctl.c */
extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 938dbc739d0..ad70479aabf 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -118,19 +118,14 @@ void ext2_free_inode (struct inode * inode)
* Note: we must free any quota before locking the superblock,
* as writing the quota to disk may need the lock as well.
*/
- if (!is_bad_inode(inode)) {
- /* Quota is already initialized in iput() */
- ext2_xattr_delete_inode(inode);
- dquot_free_inode(inode);
- dquot_drop(inode);
- }
+ /* Quota is already initialized in iput() */
+ ext2_xattr_delete_inode(inode);
+ dquot_free_inode(inode);
+ dquot_drop(inode);
es = EXT2_SB(sb)->s_es;
is_directory = S_ISDIR(inode->i_mode);
- /* Do this BEFORE marking the inode not in use or returning an error */
- clear_inode (inode);
-
if (ino < EXT2_FIRST_INO(sb) ||
ino > le32_to_cpu(es->s_inodes_count)) {
ext2_error (sb, "ext2_free_inode",
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 19214435b75..940c9616886 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -69,26 +69,42 @@ static void ext2_write_failed(struct address_space *mapping, loff_t to)
/*
* Called at the last iput() if i_nlink is zero.
*/
-void ext2_delete_inode (struct inode * inode)
+void ext2_evict_inode(struct inode * inode)
{
- if (!is_bad_inode(inode))
+ struct ext2_block_alloc_info *rsv;
+ int want_delete = 0;
+
+ if (!inode->i_nlink && !is_bad_inode(inode)) {
+ want_delete = 1;
dquot_initialize(inode);
+ } else {
+ dquot_drop(inode);
+ }
+
truncate_inode_pages(&inode->i_data, 0);
- if (is_bad_inode(inode))
- goto no_delete;
- EXT2_I(inode)->i_dtime = get_seconds();
- mark_inode_dirty(inode);
- __ext2_write_inode(inode, inode_needs_sync(inode));
+ if (want_delete) {
+ /* set dtime */
+ EXT2_I(inode)->i_dtime = get_seconds();
+ mark_inode_dirty(inode);
+ __ext2_write_inode(inode, inode_needs_sync(inode));
+ /* truncate to 0 */
+ inode->i_size = 0;
+ if (inode->i_blocks)
+ ext2_truncate_blocks(inode, 0);
+ }
- inode->i_size = 0;
- if (inode->i_blocks)
- ext2_truncate_blocks(inode, 0);
- ext2_free_inode (inode);
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
- return;
-no_delete:
- clear_inode(inode); /* We must guarantee clearing of inode... */
+ ext2_discard_reservation(inode);
+ rsv = EXT2_I(inode)->i_block_alloc_info;
+ EXT2_I(inode)->i_block_alloc_info = NULL;
+ if (unlikely(rsv))
+ kfree(rsv);
+
+ if (want_delete)
+ ext2_free_inode(inode);
}
typedef struct {
@@ -423,6 +439,8 @@ static int ext2_alloc_blocks(struct inode *inode,
failed_out:
for (i = 0; i <index; i++)
ext2_free_blocks(inode, new_blocks[i], 1);
+ if (index)
+ mark_inode_dirty(inode);
return ret;
}
@@ -765,14 +783,6 @@ ext2_readpages(struct file *file, struct address_space *mapping,
return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}
-int __ext2_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- return block_write_begin_newtrunc(file, mapping, pos, len, flags,
- pagep, fsdata, ext2_get_block);
-}
-
static int
ext2_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
@@ -780,8 +790,8 @@ ext2_write_begin(struct file *file, struct address_space *mapping,
{
int ret;
- *pagep = NULL;
- ret = __ext2_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
+ ret = block_write_begin(mapping, pos, len, flags, pagep,
+ ext2_get_block);
if (ret < 0)
ext2_write_failed(mapping, pos + len);
return ret;
@@ -806,13 +816,8 @@ ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
{
int ret;
- /*
- * Dir-in-pagecache still uses ext2_write_begin. Would have to rework
- * directory handling code to pass around offsets rather than struct
- * pages in order to make this work easily.
- */
- ret = nobh_write_begin_newtrunc(file, mapping, pos, len, flags, pagep,
- fsdata, ext2_get_block);
+ ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
+ ext2_get_block);
if (ret < 0)
ext2_write_failed(mapping, pos + len);
return ret;
@@ -838,7 +843,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
struct inode *inode = mapping->host;
ssize_t ret;
- ret = blockdev_direct_IO_newtrunc(rw, iocb, inode, inode->i_sb->s_bdev,
+ ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
iov, offset, nr_segs, ext2_get_block, NULL);
if (ret < 0 && (rw & WRITE))
ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
@@ -1006,8 +1011,8 @@ static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
else if (block_to_free == nr - count)
count++;
else {
- mark_inode_dirty(inode);
ext2_free_blocks (inode, block_to_free, count);
+ mark_inode_dirty(inode);
free_this:
block_to_free = nr;
count = 1;
@@ -1015,8 +1020,8 @@ static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
}
}
if (count > 0) {
- mark_inode_dirty(inode);
ext2_free_blocks (inode, block_to_free, count);
+ mark_inode_dirty(inode);
}
}
@@ -1169,15 +1174,10 @@ static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
__ext2_truncate_blocks(inode, offset);
}
-int ext2_setsize(struct inode *inode, loff_t newsize)
+static int ext2_setsize(struct inode *inode, loff_t newsize)
{
- loff_t oldsize;
int error;
- error = inode_newsize_ok(inode, newsize);
- if (error)
- return error;
-
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
return -EINVAL;
@@ -1197,10 +1197,7 @@ int ext2_setsize(struct inode *inode, loff_t newsize)
if (error)
return error;
- oldsize = inode->i_size;
- i_size_write(inode, newsize);
- truncate_pagecache(inode, oldsize, newsize);
-
+ truncate_setsize(inode, newsize);
__ext2_truncate_blocks(inode, newsize);
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
@@ -1552,12 +1549,12 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
if (error)
return error;
}
- if (iattr->ia_valid & ATTR_SIZE) {
+ if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
error = ext2_setsize(inode, iattr->ia_size);
if (error)
return error;
}
- generic_setattr(inode, iattr);
+ setattr_copy(inode, iattr);
if (iattr->ia_valid & ATTR_MODE)
error = ext2_acl_chmod(inode);
mark_inode_dirty(inode);
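Taken together, the fs/ext2/inode.c hunks fold the old delete_inode/clear_inode pair into a single eviction hook. A condensed sketch of the resulting flow; the function name is hypothetical, everything else appears in the hunks above, and error handling and sync details are elided:

	static void ext2_evict_inode_sketch(struct inode *inode)
	{
		int want_delete = !inode->i_nlink && !is_bad_inode(inode);

		if (want_delete)
			dquot_initialize(inode);	/* quota ready for the free */
		else
			dquot_drop(inode);		/* plain eviction: detach quota */
		truncate_inode_pages(&inode->i_data, 0);
		if (want_delete) {
			EXT2_I(inode)->i_dtime = get_seconds();
			mark_inode_dirty(inode);
			/* write the inode out, then truncate its blocks to zero */
		}
		invalidate_inode_buffers(inode);
		end_writeback(inode);			/* replaces clear_inode() */
		/* discard the reservation window (moved here from ext2_clear_inode()) */
		if (want_delete)
			ext2_free_inode(inode);
	}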
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7ff43f4a59c..1ec602673ea 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -195,17 +195,6 @@ static void destroy_inodecache(void)
kmem_cache_destroy(ext2_inode_cachep);
}
-static void ext2_clear_inode(struct inode *inode)
-{
- struct ext2_block_alloc_info *rsv = EXT2_I(inode)->i_block_alloc_info;
-
- dquot_drop(inode);
- ext2_discard_reservation(inode);
- EXT2_I(inode)->i_block_alloc_info = NULL;
- if (unlikely(rsv))
- kfree(rsv);
-}
-
static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
struct super_block *sb = vfs->mnt_sb;
@@ -299,13 +288,12 @@ static const struct super_operations ext2_sops = {
.alloc_inode = ext2_alloc_inode,
.destroy_inode = ext2_destroy_inode,
.write_inode = ext2_write_inode,
- .delete_inode = ext2_delete_inode,
+ .evict_inode = ext2_evict_inode,
.put_super = ext2_put_super,
.write_super = ext2_write_super,
.sync_fs = ext2_sync_fs,
.statfs = ext2_statfs,
.remount_fs = ext2_remount,
- .clear_inode = ext2_clear_inode,
.show_options = ext2_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext2_quota_read,
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 7c3915780b1..8c29ae15129 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -674,6 +674,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
new_bh = sb_getblk(sb, block);
if (!new_bh) {
ext2_free_blocks(inode, block, 1);
+ mark_inode_dirty(inode);
error = -EIO;
goto cleanup;
}
@@ -703,8 +704,10 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* written (only some dirty data were not) so we just proceed
* as if nothing happened and clean up the unused block */
if (error && error != -ENOSPC) {
- if (new_bh && new_bh != old_bh)
- dquot_free_block(inode, 1);
+ if (new_bh && new_bh != old_bh) {
+ dquot_free_block_nodirty(inode, 1);
+ mark_inode_dirty(inode);
+ }
goto cleanup;
}
} else
@@ -727,6 +730,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
mb_cache_entry_free(ce);
ea_bdebug(old_bh, "freeing");
ext2_free_blocks(inode, old_bh->b_blocknr, 1);
+ mark_inode_dirty(inode);
/* We let our caller release old_bh, so we
* need to duplicate the buffer before. */
get_bh(old_bh);
@@ -736,7 +740,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
- dquot_free_block(inode, 1);
+ dquot_free_block_nodirty(inode, 1);
+ mark_inode_dirty(inode);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
le32_to_cpu(HDR(old_bh)->h_refcount));
@@ -799,7 +804,7 @@ ext2_xattr_delete_inode(struct inode *inode)
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
- dquot_free_block(inode, 1);
+ dquot_free_block_nodirty(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;
@@ -838,7 +843,7 @@ ext2_xattr_cache_insert(struct buffer_head *bh)
ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
if (!ce)
return -ENOMEM;
- error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+ error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
if (error) {
mb_cache_entry_free(ce);
if (error == -EBUSY) {
@@ -912,8 +917,8 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
return NULL; /* never share */
ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
- ce = mb_cache_entry_find_first(ext2_xattr_cache, 0,
- inode->i_sb->s_bdev, hash);
+ ce = mb_cache_entry_find_first(ext2_xattr_cache, inode->i_sb->s_bdev,
+ hash);
while (ce) {
struct buffer_head *bh;
@@ -945,7 +950,7 @@ again:
unlock_buffer(bh);
brelse(bh);
}
- ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+ ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
}
return NULL;
}
@@ -1021,9 +1026,7 @@ static void ext2_xattr_rehash(struct ext2_xattr_header *header,
int __init
init_ext2_xattr(void)
{
- ext2_xattr_cache = mb_cache_create("ext2_xattr", NULL,
- sizeof(struct mb_cache_entry) +
- sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+ ext2_xattr_cache = mb_cache_create("ext2_xattr", 6);
if (!ext2_xattr_cache)
return -ENOMEM;
return 0;
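The xattr hunks also track the slimmed-down mb_cache API: a cache is now created from just a name and one sizing parameter, entries are keyed by (bdev, block) alone, and the hash is passed by value. A hedged sketch of the insert path, using only the calls visible above (not the full ext2_xattr_cache_insert()):

	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
	if (!ce)
		return -ENOMEM;
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
	if (error)
		mb_cache_entry_free(ce);	/* e.g. -EBUSY: already cached */
	else
		mb_cache_entry_release(ce);	/* drop our reference */
	return error;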
diff --git a/fs/ext3/Kconfig b/fs/ext3/Kconfig
index 522b15498f4..e8c6ba0e4a3 100644
--- a/fs/ext3/Kconfig
+++ b/fs/ext3/Kconfig
@@ -31,6 +31,7 @@ config EXT3_FS
config EXT3_DEFAULTS_TO_ORDERED
bool "Default to 'data=ordered' in ext3"
depends on EXT3_FS
+ default y
help
The journal mode options for ext3 have different tradeoffs
between when data is guaranteed to be on disk and
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 01552abbca3..8a11fe21218 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -205,6 +205,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
return error;
else {
inode->i_mode = mode;
+ inode->i_ctime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, inode);
if (error == 0)
acl = NULL;
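The one-line addition matters for POSIX semantics: writing an ACL can rewrite i_mode, and a mode change must surface as a ctime update. The pattern as the hunk applies it:

	/* mode changed by an ACL write: bump ctime before dirtying */
	inode->i_mode = mode;
	inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);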
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 498021eb88f..4ab72db3559 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -119,20 +119,8 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)
ino = inode->i_ino;
ext3_debug ("freeing inode %lu\n", ino);
- /*
- * Note: we must free any quota before locking the superblock,
- * as writing the quota to disk may need the lock as well.
- */
- dquot_initialize(inode);
- ext3_xattr_delete_inode(handle, inode);
- dquot_free_inode(inode);
- dquot_drop(inode);
-
is_directory = S_ISDIR(inode->i_mode);
- /* Do this BEFORE marking the inode not in use or returning an error */
- clear_inode (inode);
-
es = EXT3_SB(sb)->s_es;
if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
ext3_error (sb, "ext3_free_inode",
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 735f0190ec2..5e0faf4cda7 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -190,18 +190,28 @@ static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
}
/*
- * Called at the last iput() if i_nlink is zero.
+ * Called at inode eviction from icache
*/
-void ext3_delete_inode (struct inode * inode)
+void ext3_evict_inode (struct inode *inode)
{
+ struct ext3_block_alloc_info *rsv;
handle_t *handle;
+ int want_delete = 0;
- if (!is_bad_inode(inode))
+ if (!inode->i_nlink && !is_bad_inode(inode)) {
dquot_initialize(inode);
+ want_delete = 1;
+ }
truncate_inode_pages(&inode->i_data, 0);
- if (is_bad_inode(inode))
+ ext3_discard_reservation(inode);
+ rsv = EXT3_I(inode)->i_block_alloc_info;
+ EXT3_I(inode)->i_block_alloc_info = NULL;
+ if (unlikely(rsv))
+ kfree(rsv);
+
+ if (!want_delete)
goto no_delete;
handle = start_transaction(inode);
@@ -238,15 +248,22 @@ void ext3_delete_inode (struct inode * inode)
* having errors), but we can't free the inode if the mark_dirty
* fails.
*/
- if (ext3_mark_inode_dirty(handle, inode))
- /* If that failed, just do the required in-core inode clear. */
- clear_inode(inode);
- else
+ if (ext3_mark_inode_dirty(handle, inode)) {
+ /* If that failed, just dquot_drop() and be done with that */
+ dquot_drop(inode);
+ end_writeback(inode);
+ } else {
+ ext3_xattr_delete_inode(handle, inode);
+ dquot_free_inode(inode);
+ dquot_drop(inode);
+ end_writeback(inode);
ext3_free_inode(handle, inode);
+ }
ext3_journal_stop(handle);
return;
no_delete:
- clear_inode(inode); /* We must guarantee clearing of inode... */
+ end_writeback(inode);
+ dquot_drop(inode);
}
typedef struct {
@@ -1149,9 +1166,25 @@ static int walk_page_buffers( handle_t *handle,
static int do_journal_get_write_access(handle_t *handle,
struct buffer_head *bh)
{
+ int dirty = buffer_dirty(bh);
+ int ret;
+
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
- return ext3_journal_get_write_access(handle, bh);
+ /*
+ * __block_prepare_write() could have dirtied some buffers. Clean
+ * the dirty bit as jbd2_journal_get_write_access() could complain
+ * otherwise about fs integrity issues. Setting of the dirty bit
+ * by __block_prepare_write() isn't a real problem here as we clear
+ * the bit before releasing a page lock and thus writeback cannot
+ * ever write the buffer.
+ */
+ if (dirty)
+ clear_buffer_dirty(bh);
+ ret = ext3_journal_get_write_access(handle, bh);
+ if (!ret && dirty)
+ ret = ext3_journal_dirty_metadata(handle, bh);
+ return ret;
}
/*
@@ -1196,8 +1229,7 @@ retry:
ret = PTR_ERR(handle);
goto out;
}
- ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- ext3_get_block);
+ ret = __block_write_begin(page, pos, len, ext3_get_block);
if (ret)
goto write_begin_failed;
@@ -1625,10 +1657,7 @@ static int ext3_writeback_writepage(struct page *page,
goto out_fail;
}
- if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
- ret = nobh_writepage(page, ext3_get_block, wbc);
- else
- ret = block_write_full_page(page, ext3_get_block, wbc);
+ ret = block_write_full_page(page, ext3_get_block, wbc);
err = ext3_journal_stop(handle);
if (!ret)
@@ -1785,6 +1814,17 @@ retry:
ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext3_get_block, NULL);
+ /*
+ * In case of error extending write may have instantiated a few
+ * blocks outside i_size. Trim these off again.
+ */
+ if (unlikely((rw & WRITE) && ret < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
goto retry;
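The error-path trim above is easiest to see with concrete numbers; a worked illustration (all values hypothetical):

	/*
	 * An extending DIO write at offset 4096 for 8192 bytes fails with
	 * ret = -EIO after a few blocks were instantiated.  i_size is
	 * still 4096 and end = 4096 + 8192 = 12288 > 4096, so
	 * vmtruncate(inode, 4096) releases the blocks allocated past the
	 * old end of file.
	 */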
@@ -1922,17 +1962,6 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
length = blocksize - (offset & (blocksize - 1));
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
- /*
- * For "nobh" option, we can only work if we don't need to
- * read-in the page - otherwise we create buffers to do the IO.
- */
- if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
- ext3_should_writeback_data(inode) && PageUptodate(page)) {
- zero_user(page, offset, length);
- set_page_dirty(page);
- goto unlock;
- }
-
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
@@ -2284,27 +2313,6 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
depth);
/*
- * We've probably journalled the indirect block several
- * times during the truncate. But it's no longer
- * needed and we now drop it from the transaction via
- * journal_revoke().
- *
- * That's easy if it's exclusively part of this
- * transaction. But if it's part of the committing
- * transaction then journal_forget() will simply
- * brelse() it. That means that if the underlying
- * block is reallocated in ext3_get_block(),
- * unmap_underlying_metadata() will find this block
- * and will try to get rid of it. damn, damn.
- *
- * If this block has already been committed to the
- * journal, a revoke record will be written. And
- * revoke records must be emitted *before* clearing
- * this block's bit in the bitmaps.
- */
- ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
-
- /*
* Everything below this pointer has been
* released. Now let this top-of-subtree go.
*
@@ -2327,6 +2335,31 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
truncate_restart_transaction(handle, inode);
}
+ /*
+ * We've probably journalled the indirect block several
+ * times during the truncate. But it's no longer
+ * needed and we now drop it from the transaction via
+ * journal_revoke().
+ *
+ * That's easy if it's exclusively part of this
+ * transaction. But if it's part of the committing
+ * transaction then journal_forget() will simply
+ * brelse() it. That means that if the underlying
+ * block is reallocated in ext3_get_block(),
+ * unmap_underlying_metadata() will find this block
+ * and will try to get rid of it. damn, damn. Thus
+ * we don't allow a block to be reallocated until
+ * a transaction freeing it has fully committed.
+ *
+ * We also have to make sure journal replay after a
+ * crash does not overwrite non-journaled data blocks
+ * with old metadata when the block got reallocated for
+ * data. Thus we have to store a revoke record for a
+ * block in the same transaction in which we free the
+ * block.
+ */
+ ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
+
ext3_free_blocks(handle, inode, nr, 1);
if (parent_bh) {
@@ -2554,7 +2587,7 @@ out_stop:
* If this was a simple ftruncate(), and the file will remain alive
* then we need to clear up the orphan record which we created above.
* However, if this was a real unlink then we were called by
- * ext3_delete_inode(), and we allow that function to clean up the
+ * ext3_evict_inode(), and we allow that function to clean up the
* orphan info for us.
*/
if (inode->i_nlink)
@@ -3198,9 +3231,17 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
ext3_journal_stop(handle);
}
- rc = inode_setattr(inode, attr);
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ rc = vmtruncate(inode, attr->ia_size);
+ if (rc)
+ goto err_out;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
- if (!rc && (ia_valid & ATTR_MODE))
+ if (ia_valid & ATTR_MODE)
rc = ext3_acl_chmod(inode);
err_out:
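The reordering in ext3_free_branches() is the key invariant in this file: the revoke record and the block free must land in the same transaction, or journal replay after a crash could overwrite a reallocated data block with stale metadata. Reduced to its core, with names from the hunks above:

	/* Sketch: revoke in the same transaction that frees the block */
	ext3_forget(handle, 1, inode, bh, bh->b_blocknr);	/* revoke record first */
	ext3_free_blocks(handle, inode, nr, 1);			/* then free the block */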
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index ee184084ca4..2b35ddb70d6 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1447,7 +1447,6 @@ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
- unsigned long offset;
struct buffer_head * bh;
struct ext3_dir_entry_2 *de;
struct super_block * sb;
@@ -1469,7 +1468,7 @@ static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
ext3_mark_inode_dirty(handle, dir);
}
blocks = dir->i_size >> sb->s_blocksize_bits;
- for (block = 0, offset = 0; block < blocks; block++) {
+ for (block = 0; block < blocks; block++) {
bh = ext3_bread(handle, dir, block, 0, &retval);
if(!bh)
return retval;
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 54351ac7cef..0ccd7b12b73 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -964,7 +964,6 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
ext3_fsblk_t n_blocks_count)
{
ext3_fsblk_t o_blocks_count;
- unsigned long o_groups_count;
ext3_grpblk_t last;
ext3_grpblk_t add;
struct buffer_head * bh;
@@ -976,7 +975,6 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
* yet: we're going to revalidate es->s_blocks_count after
* taking the s_resize_lock below. */
o_blocks_count = le32_to_cpu(es->s_blocks_count);
- o_groups_count = EXT3_SB(sb)->s_groups_count;
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK" upto "E3FSBLK" blocks\n",
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 6c953bb255e..5dbf4dba03c 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -527,17 +527,6 @@ static void destroy_inodecache(void)
kmem_cache_destroy(ext3_inode_cachep);
}
-static void ext3_clear_inode(struct inode *inode)
-{
- struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
-
- dquot_drop(inode);
- ext3_discard_reservation(inode);
- EXT3_I(inode)->i_block_alloc_info = NULL;
- if (unlikely(rsv))
- kfree(rsv);
-}
-
static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
@@ -661,9 +650,6 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
*/
seq_puts(seq, ",barrier=");
seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
- if (test_opt(sb, NOBH))
- seq_puts(seq, ",nobh");
-
seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
if (test_opt(sb, DATA_ERR_ABORT))
seq_puts(seq, ",data_err=abort");
@@ -783,14 +769,13 @@ static const struct super_operations ext3_sops = {
.destroy_inode = ext3_destroy_inode,
.write_inode = ext3_write_inode,
.dirty_inode = ext3_dirty_inode,
- .delete_inode = ext3_delete_inode,
+ .evict_inode = ext3_evict_inode,
.put_super = ext3_put_super,
.sync_fs = ext3_sync_fs,
.freeze_fs = ext3_freeze,
.unfreeze_fs = ext3_unfreeze,
.statfs = ext3_statfs,
.remount_fs = ext3_remount,
- .clear_inode = ext3_clear_inode,
.show_options = ext3_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext3_quota_read,
@@ -1255,10 +1240,12 @@ set_qf_format:
*n_blocks_count = option;
break;
case Opt_nobh:
- set_opt(sbi->s_mount_opt, NOBH);
+ ext3_msg(sb, KERN_WARNING,
+ "warning: ignoring deprecated nobh option");
break;
case Opt_bh:
- clear_opt(sbi->s_mount_opt, NOBH);
+ ext3_msg(sb, KERN_WARNING,
+ "warning: ignoring deprecated bh option");
break;
default:
ext3_msg(sb, KERN_ERR,
@@ -2001,14 +1988,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
break;
}
- if (test_opt(sb, NOBH)) {
- if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) {
- ext3_msg(sb, KERN_WARNING,
- "warning: ignoring nobh option - "
- "it is supported only with writeback mode");
- clear_opt(sbi->s_mount_opt, NOBH);
- }
- }
/*
* The journal_load will have done any necessary log recovery,
* so we can safely mount the rest of the filesystem now.
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 71fb8d65e54..e69dc6dfaa8 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -1139,7 +1139,7 @@ ext3_xattr_cache_insert(struct buffer_head *bh)
ea_bdebug(bh, "out of memory");
return;
}
- error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+ error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
if (error) {
mb_cache_entry_free(ce);
if (error == -EBUSY) {
@@ -1211,8 +1211,8 @@ ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header,
return NULL; /* never share */
ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
- ce = mb_cache_entry_find_first(ext3_xattr_cache, 0,
- inode->i_sb->s_bdev, hash);
+ ce = mb_cache_entry_find_first(ext3_xattr_cache, inode->i_sb->s_bdev,
+ hash);
while (ce) {
struct buffer_head *bh;
@@ -1237,7 +1237,7 @@ again:
return bh;
}
brelse(bh);
- ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+ ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
}
return NULL;
}
@@ -1313,9 +1313,7 @@ static void ext3_xattr_rehash(struct ext3_xattr_header *header,
int __init
init_ext3_xattr(void)
{
- ext3_xattr_cache = mb_cache_create("ext3_xattr", NULL,
- sizeof(struct mb_cache_entry) +
- sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+ ext3_xattr_cache = mb_cache_create("ext3_xattr", 6);
if (!ext3_xattr_cache)
return -ENOMEM;
return 0;
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index feaf498feaa..5e2ed4504ea 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -204,6 +204,7 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
return error;
else {
inode->i_mode = mode;
+ inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
if (error == 0)
acl = NULL;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 95b7594c76f..bd30799a43e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -377,14 +377,11 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
ext4_grpblk_t bit;
unsigned int i;
struct ext4_group_desc *desc;
- struct ext4_super_block *es;
- struct ext4_sb_info *sbi;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
int err = 0, ret, blk_free_count;
ext4_grpblk_t blocks_freed;
struct ext4_group_info *grp;
- sbi = EXT4_SB(sb);
- es = sbi->s_es;
ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
@@ -477,7 +474,6 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
if (!err)
err = ret;
- sb->s_dirt = 1;
error_return:
brelse(bitmap_bh);
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 5b6973fbf1b..3db5084db9b 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -229,16 +229,20 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
(start_blk + count < start_blk) ||
- (start_blk + count > ext4_blocks_count(sbi->s_es)))
+ (start_blk + count > ext4_blocks_count(sbi->s_es))) {
+ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
return 0;
+ }
while (n) {
entry = rb_entry(n, struct ext4_system_zone, node);
if (start_blk + count - 1 < entry->start_blk)
n = n->rb_left;
else if (start_blk >= (entry->start_blk + entry->count))
n = n->rb_right;
- else
+ else {
+ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
return 0;
+ }
}
return 1;
}
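The two new assignments give ext4_data_block_valid() a persistent trace: whichever check rejects the extent, the offending start block is stamped into the superblock's s_last_error_block (the field itself is added in the ext4.h hunk further down). Worked rejection cases, assuming s_first_data_block == 1 and a 1000-block filesystem (values illustrative only):

	/*
	 * start_blk = 1,   count = 10 -> rejected: overlaps metadata start
	 * start_blk = ~0,  count = 2  -> rejected: start_blk + count wraps
	 * start_blk = 995, count = 10 -> rejected: runs past block 1000
	 * Each rejection also records start_blk in s_last_error_block.
	 */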
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ea5e6cb7e2a..374510f72ba 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -61,10 +61,11 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
}
-int ext4_check_dir_entry(const char *function, struct inode *dir,
- struct ext4_dir_entry_2 *de,
- struct buffer_head *bh,
- unsigned int offset)
+int __ext4_check_dir_entry(const char *function, unsigned int line,
+ struct inode *dir,
+ struct ext4_dir_entry_2 *de,
+ struct buffer_head *bh,
+ unsigned int offset)
{
const char *error_msg = NULL;
const int rlen = ext4_rec_len_from_disk(de->rec_len,
@@ -83,11 +84,10 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
error_msg = "inode out of bounds";
if (error_msg != NULL)
- ext4_error_inode(function, dir,
- "bad entry in directory: %s - block=%llu"
+ ext4_error_inode(dir, function, line, bh->b_blocknr,
+ "bad entry in directory: %s - "
"offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned long long) bh->b_blocknr,
- (unsigned) (offset%bh->b_size), offset,
+ error_msg, (unsigned) (offset%bh->b_size), offset,
le32_to_cpu(de->inode),
rlen, de->name_len);
return error_msg == NULL ? 1 : 0;
@@ -121,7 +121,8 @@ static int ext4_readdir(struct file *filp,
* We don't set the inode dirty flag since it's not
* critical that it get flushed back to the disk.
*/
- ext4_clear_inode_flag(filp->f_path.dentry->d_inode, EXT4_INODE_INDEX);
+ ext4_clear_inode_flag(filp->f_path.dentry->d_inode,
+ EXT4_INODE_INDEX);
}
stored = 0;
offset = filp->f_pos & (sb->s_blocksize - 1);
@@ -193,7 +194,7 @@ revalidate:
while (!error && filp->f_pos < inode->i_size
&& offset < sb->s_blocksize) {
de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
- if (!ext4_check_dir_entry("ext4_readdir", inode, de,
+ if (!ext4_check_dir_entry(inode, de,
bh, offset)) {
/*
* On error, skip the f_pos to the next block
@@ -343,7 +344,7 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
struct dir_private_info *info;
int len;
- info = (struct dir_private_info *) dir_file->private_data;
+ info = dir_file->private_data;
p = &info->root.rb_node;
/* Create and allocate the fname structure */
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 19a4de57128..889ec9d5e6a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -57,10 +57,13 @@
#endif
#define EXT4_ERROR_INODE(inode, fmt, a...) \
- ext4_error_inode(__func__, (inode), (fmt), ## a)
+ ext4_error_inode((inode), __func__, __LINE__, 0, (fmt), ## a)
+
+#define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...) \
+ ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
#define EXT4_ERROR_FILE(file, fmt, a...) \
- ext4_error_file(__func__, (file), (fmt), ## a)
+ ext4_error_file(__func__, __LINE__, (file), (fmt), ## a)
/* data type for block offset of block group */
typedef int ext4_grpblk_t;
@@ -167,13 +170,15 @@ struct mpage_da_data {
};
#define EXT4_IO_UNWRITTEN 0x1
typedef struct ext4_io_end {
- struct list_head list; /* per-file finished AIO list */
+ struct list_head list; /* per-file finished IO list */
struct inode *inode; /* file being written to */
unsigned int flag; /* unwritten or not */
struct page *page; /* page struct for buffer write */
loff_t offset; /* offset in the file */
ssize_t size; /* size of the extent */
struct work_struct work; /* data work queue */
+ struct kiocb *iocb; /* iocb struct for AIO */
+ int result; /* error value for AIO */
} ext4_io_end_t;
/*
@@ -460,7 +465,7 @@ struct ext4_new_group_data {
};
/*
- * Flags used by ext4_get_blocks()
+ * Flags used by ext4_map_blocks()
*/
/* Allocate any needed blocks and/or convert an uninitialized
extent to an initialized one */
@@ -873,7 +878,6 @@ struct ext4_inode_info {
#define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
#define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */
#define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */
-#define EXT4_MOUNT_NOBH 0x40000 /* No bufferheads */
#define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */
#define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
#define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
@@ -982,7 +986,7 @@ struct ext4_super_block {
__le32 s_last_orphan; /* start of list of inodes to delete */
__le32 s_hash_seed[4]; /* HTREE hash seed */
__u8 s_def_hash_version; /* Default hash version to use */
- __u8 s_reserved_char_pad;
+ __u8 s_jnl_backup_type;
__le16 s_desc_size; /* size of group descriptor */
/*100*/ __le32 s_default_mount_opts;
__le32 s_first_meta_bg; /* First metablock block group */
@@ -1000,12 +1004,34 @@ struct ext4_super_block {
__le64 s_mmp_block; /* Block for multi-mount protection */
__le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
__u8 s_log_groups_per_flex; /* FLEX_BG group size */
- __u8 s_reserved_char_pad2;
+ __u8 s_reserved_char_pad;
__le16 s_reserved_pad;
__le64 s_kbytes_written; /* nr of lifetime kilobytes written */
- __u32 s_reserved[160]; /* Padding to the end of the block */
+ __le32 s_snapshot_inum; /* Inode number of active snapshot */
+ __le32 s_snapshot_id; /* sequential ID of active snapshot */
+ __le64 s_snapshot_r_blocks_count; /* reserved blocks for active
+ snapshot's future use */
+ __le32 s_snapshot_list; /* inode number of the head of the
+ on-disk snapshot list */
+#define EXT4_S_ERR_START offsetof(struct ext4_super_block, s_error_count)
+ __le32 s_error_count; /* number of fs errors */
+ __le32 s_first_error_time; /* first time an error happened */
+ __le32 s_first_error_ino; /* inode involved in first error */
+ __le64 s_first_error_block; /* block involved of first error */
+ __u8 s_first_error_func[32]; /* function where the error happened */
+ __le32 s_first_error_line; /* line number where error happened */
+ __le32 s_last_error_time; /* most recent time of an error */
+ __le32 s_last_error_ino; /* inode involved in last error */
+ __le32 s_last_error_line; /* line number where error happened */
+ __le64 s_last_error_block; /* block involved of last error */
+ __u8 s_last_error_func[32]; /* function where the error happened */
+#define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
+ __u8 s_mount_opts[64];
+ __le32 s_reserved[112]; /* Padding to the end of the block */
};
+#define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START)
+
#ifdef __KERNEL__
/*
@@ -1143,6 +1169,9 @@ struct ext4_sb_info {
/* workqueue for dio unwritten */
struct workqueue_struct *dio_unwritten_wq;
+
+ /* timer for periodic error stats printing */
+ struct timer_list s_err_report;
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1313,6 +1342,10 @@ EXT4_INODE_BIT_FNS(state, state_flags)
#define EXT4_DEFM_JMODE_DATA 0x0020
#define EXT4_DEFM_JMODE_ORDERED 0x0040
#define EXT4_DEFM_JMODE_WBACK 0x0060
+#define EXT4_DEFM_NOBARRIER 0x0100
+#define EXT4_DEFM_BLOCK_VALIDITY 0x0200
+#define EXT4_DEFM_DISCARD 0x0400
+#define EXT4_DEFM_NODELALLOC 0x0800
/*
* Default journal batch times
@@ -1379,6 +1412,43 @@ struct ext4_dir_entry_2 {
#define EXT4_MAX_REC_LEN ((1<<16)-1)
/*
+ * If we ever get support for fs block sizes > page_size, we'll need
+ * to remove the #if statements in the next two functions...
+ */
+static inline unsigned int
+ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
+{
+ unsigned len = le16_to_cpu(dlen);
+
+#if (PAGE_CACHE_SIZE >= 65536)
+ if (len == EXT4_MAX_REC_LEN || len == 0)
+ return blocksize;
+ return (len & 65532) | ((len & 3) << 16);
+#else
+ return len;
+#endif
+}
+
+static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
+{
+ if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
+ BUG();
+#if (PAGE_CACHE_SIZE >= 65536)
+ if (len < 65536)
+ return cpu_to_le16(len);
+ if (len == blocksize) {
+ if (blocksize == 65536)
+ return cpu_to_le16(EXT4_MAX_REC_LEN);
+ else
+ return cpu_to_le16(0);
+ }
+ return cpu_to_le16((len & 65532) | ((len >> 16) & 3));
+#else
+ return cpu_to_le16(len);
+#endif
+}
+
+/*
* Hash Tree Directory indexing
* (c) Daniel Phillips, 2001
*/
@@ -1510,9 +1580,11 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb,
ext4_init_block_bitmap(sb, NULL, group, desc)
/* dir.c */
-extern int ext4_check_dir_entry(const char *, struct inode *,
- struct ext4_dir_entry_2 *,
- struct buffer_head *, unsigned int);
+extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
+ struct ext4_dir_entry_2 *,
+ struct buffer_head *, unsigned int);
+#define ext4_check_dir_entry(dir, de, bh, offset) \
+ __ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset))
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext4_dir_entry_2 *dirent);
@@ -1571,7 +1643,8 @@ extern int ext4_write_inode(struct inode *, struct writeback_control *);
extern int ext4_setattr(struct dentry *, struct iattr *);
extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
-extern void ext4_delete_inode(struct inode *);
+extern void ext4_evict_inode(struct inode *);
+extern void ext4_clear_inode(struct inode *);
extern int ext4_sync_inode(handle_t *, struct inode *);
extern void ext4_dirty_inode(struct inode *);
extern int ext4_change_inode_journal_flag(struct inode *, int);
@@ -1601,8 +1674,6 @@ extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
extern int ext4_ext_migrate(struct inode *);
/* namei.c */
-extern unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize);
-extern __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize);
extern int ext4_orphan_add(handle_t *, struct inode *);
extern int ext4_orphan_del(handle_t *, struct inode *);
extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
@@ -1616,25 +1687,38 @@ extern int ext4_group_extend(struct super_block *sb,
ext4_fsblk_t n_blocks_count);
/* super.c */
-extern void __ext4_error(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
-#define ext4_error(sb, message...) __ext4_error(sb, __func__, ## message)
-extern void ext4_error_inode(const char *, struct inode *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
-extern void ext4_error_file(const char *, struct file *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
-extern void __ext4_std_error(struct super_block *, const char *, int);
-extern void ext4_abort(struct super_block *, const char *, const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
-extern void __ext4_warning(struct super_block *, const char *,
+extern void __ext4_error(struct super_block *, const char *, unsigned int,
+ const char *, ...)
+ __attribute__ ((format (printf, 4, 5)));
+#define ext4_error(sb, message...) __ext4_error(sb, __func__, \
+ __LINE__, ## message)
+extern void ext4_error_inode(struct inode *, const char *, unsigned int,
+ ext4_fsblk_t, const char *, ...)
+ __attribute__ ((format (printf, 5, 6)));
+extern void ext4_error_file(struct file *, const char *, unsigned int,
+ const char *, ...)
+ __attribute__ ((format (printf, 4, 5)));
+extern void __ext4_std_error(struct super_block *, const char *,
+ unsigned int, int);
+extern void __ext4_abort(struct super_block *, const char *, unsigned int,
+ const char *, ...)
+ __attribute__ ((format (printf, 4, 5)));
+#define ext4_abort(sb, message...) __ext4_abort(sb, __func__, \
+ __LINE__, ## message)
+extern void __ext4_warning(struct super_block *, const char *, unsigned int,
const char *, ...)
- __attribute__ ((format (printf, 3, 4)));
-#define ext4_warning(sb, message...) __ext4_warning(sb, __func__, ## message)
+ __attribute__ ((format (printf, 4, 5)));
+#define ext4_warning(sb, message...) __ext4_warning(sb, __func__, \
+ __LINE__, ## message)
extern void ext4_msg(struct super_block *, const char *, const char *, ...)
__attribute__ ((format (printf, 3, 4)));
-extern void ext4_grp_locked_error(struct super_block *, ext4_group_t,
- const char *, const char *, ...)
- __attribute__ ((format (printf, 4, 5)));
+extern void __ext4_grp_locked_error(const char *, unsigned int, \
+ struct super_block *, ext4_group_t, \
+ unsigned long, ext4_fsblk_t, \
+ const char *, ...)
+ __attribute__ ((format (printf, 7, 8)));
+#define ext4_grp_locked_error(sb, grp, message...) \
+ __ext4_grp_locked_error(__func__, __LINE__, (sb), (grp), ## message)
extern void ext4_update_dynamic_rev(struct super_block *sb);
extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
__u32 compat);
@@ -1768,7 +1852,7 @@ static inline unsigned int ext4_flex_bg_size(struct ext4_sb_info *sbi)
#define ext4_std_error(sb, errno) \
do { \
if ((errno)) \
- __ext4_std_error((sb), __func__, (errno)); \
+ __ext4_std_error((sb), __func__, __LINE__, (errno)); \
} while (0)
#ifdef CONFIG_SMP
@@ -1860,6 +1944,12 @@ static inline void ext4_unlock_group(struct super_block *sb,
spin_unlock(ext4_group_lock_ptr(sb, group));
}
+static inline void ext4_mark_super_dirty(struct super_block *sb)
+{
+ if (EXT4_SB(sb)->s_journal == NULL)
+ sb->s_dirt = 1;
+}
+
/*
* Inodes and files operations
*/
@@ -1905,9 +1995,6 @@ extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
ssize_t len);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
-extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
- sector_t block, unsigned int max_blocks,
- struct buffer_head *bh, int flags);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
/* move_extent.c */
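The rec_len helpers moved into ext4.h deserve a worked example. Directory record lengths are always multiples of 4, so on filesystems with 64KB-and-up blocks the two free low bits of the 16-bit on-disk field carry bits 16-17 of the real length:

	/*
	 * len = 65540 (0x10004):
	 *   to_disk:   (65540 & 65532) | ((65540 >> 16) & 3) = 4 | 1 = 5
	 *   from_disk: (5 & 65532) | ((5 & 3) << 16) = 4 | 65536 = 65540
	 *
	 * A record covering a whole 65536-byte block is special-cased to
	 * EXT4_MAX_REC_LEN (65535) on disk and decoded back to blocksize;
	 * larger blocksizes use 0 for the same purpose.
	 */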
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 53d2764d71c..6e272ef6ba9 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -6,29 +6,29 @@
#include <trace/events/ext4.h>
-int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
- struct buffer_head *bh)
+int __ext4_journal_get_undo_access(const char *where, unsigned int line,
+ handle_t *handle, struct buffer_head *bh)
{
int err = 0;
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_undo_access(handle, bh);
if (err)
- ext4_journal_abort_handle(where, __func__, bh,
+ ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
}
return err;
}
-int __ext4_journal_get_write_access(const char *where, handle_t *handle,
- struct buffer_head *bh)
+int __ext4_journal_get_write_access(const char *where, unsigned int line,
+ handle_t *handle, struct buffer_head *bh)
{
int err = 0;
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_write_access(handle, bh);
if (err)
- ext4_journal_abort_handle(where, __func__, bh,
+ ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
}
return err;
@@ -46,9 +46,9 @@ int __ext4_journal_get_write_access(const char *where, handle_t *handle,
* If the handle isn't valid we're not journaling, but we still need to
* call into ext4_journal_revoke() to put the buffer head.
*/
-int __ext4_forget(const char *where, handle_t *handle, int is_metadata,
- struct inode *inode, struct buffer_head *bh,
- ext4_fsblk_t blocknr)
+int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
+ int is_metadata, struct inode *inode,
+ struct buffer_head *bh, ext4_fsblk_t blocknr)
{
int err;
@@ -79,8 +79,8 @@ int __ext4_forget(const char *where, handle_t *handle, int is_metadata,
BUFFER_TRACE(bh, "call jbd2_journal_forget");
err = jbd2_journal_forget(handle, bh);
if (err)
- ext4_journal_abort_handle(where, __func__, bh,
- handle, err);
+ ext4_journal_abort_handle(where, line, __func__,
+ bh, handle, err);
return err;
}
return 0;
@@ -92,15 +92,16 @@ int __ext4_forget(const char *where, handle_t *handle, int is_metadata,
BUFFER_TRACE(bh, "call jbd2_journal_revoke");
err = jbd2_journal_revoke(handle, blocknr, bh);
if (err) {
- ext4_journal_abort_handle(where, __func__, bh, handle, err);
- ext4_abort(inode->i_sb, __func__,
+ ext4_journal_abort_handle(where, line, __func__,
+ bh, handle, err);
+ __ext4_abort(inode->i_sb, where, line,
"error %d when attempting revoke", err);
}
BUFFER_TRACE(bh, "exit");
return err;
}
-int __ext4_journal_get_create_access(const char *where,
+int __ext4_journal_get_create_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh)
{
int err = 0;
@@ -108,22 +109,23 @@ int __ext4_journal_get_create_access(const char *where,
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_create_access(handle, bh);
if (err)
- ext4_journal_abort_handle(where, __func__, bh,
- handle, err);
+ ext4_journal_abort_handle(where, line, __func__,
+ bh, handle, err);
}
return err;
}
-int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
- struct inode *inode, struct buffer_head *bh)
+int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+ handle_t *handle, struct inode *inode,
+ struct buffer_head *bh)
{
int err = 0;
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
if (err)
- ext4_journal_abort_handle(where, __func__, bh,
- handle, err);
+ ext4_journal_abort_handle(where, line, __func__,
+ bh, handle, err);
} else {
if (inode)
mark_buffer_dirty_inode(bh, inode);
@@ -132,14 +134,33 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
if (inode && inode_needs_sync(inode)) {
sync_dirty_buffer(bh);
if (buffer_req(bh) && !buffer_uptodate(bh)) {
- ext4_error(inode->i_sb,
- "IO error syncing inode, "
- "inode=%lu, block=%llu",
- inode->i_ino,
- (unsigned long long) bh->b_blocknr);
+ struct ext4_super_block *es;
+
+ es = EXT4_SB(inode->i_sb)->s_es;
+ es->s_last_error_block =
+ cpu_to_le64(bh->b_blocknr);
+ ext4_error_inode(inode, where, line,
+ bh->b_blocknr,
+ "IO error syncing itable block");
err = -EIO;
}
}
}
return err;
}
+
+int __ext4_handle_dirty_super(const char *where, unsigned int line,
+ handle_t *handle, struct super_block *sb)
+{
+ struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
+ int err = 0;
+
+ if (ext4_handle_valid(handle)) {
+ err = jbd2_journal_dirty_metadata(handle, bh);
+ if (err)
+ ext4_journal_abort_handle(where, line, __func__,
+ bh, handle, err);
+ } else
+ sb->s_dirt = 1;
+ return err;
+}
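All of these wrappers follow one pattern: the worker gains (where, line) parameters and a macro captures __func__/__LINE__ at the call site, so an aborted handle reports where it was actually used rather than the wrapper itself. A minimal sketch of the pattern; my_op, __my_op and do_work are hypothetical names, the abort call matches the diff:

	int __my_op(const char *where, unsigned int line, handle_t *handle,
		    struct buffer_head *bh)
	{
		int err = do_work(handle, bh);	/* placeholder for the jbd2 call */

		if (err)
			ext4_journal_abort_handle(where, line, __func__, bh,
						  handle, err);
		return err;
	}
	#define my_op(handle, bh) __my_op(__func__, __LINE__, (handle), (bh))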
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index dade0c02479..b0bd792c58c 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -122,39 +122,47 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
/*
* Wrapper functions with which ext4 calls into JBD.
*/
-void ext4_journal_abort_handle(const char *caller, const char *err_fn,
+void ext4_journal_abort_handle(const char *caller, unsigned int line,
+ const char *err_fn,
struct buffer_head *bh, handle_t *handle, int err);
-int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
- struct buffer_head *bh);
+int __ext4_journal_get_undo_access(const char *where, unsigned int line,
+ handle_t *handle, struct buffer_head *bh);
-int __ext4_journal_get_write_access(const char *where, handle_t *handle,
- struct buffer_head *bh);
+int __ext4_journal_get_write_access(const char *where, unsigned int line,
+ handle_t *handle, struct buffer_head *bh);
-int __ext4_forget(const char *where, handle_t *handle, int is_metadata,
- struct inode *inode, struct buffer_head *bh,
- ext4_fsblk_t blocknr);
+int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
+ int is_metadata, struct inode *inode,
+ struct buffer_head *bh, ext4_fsblk_t blocknr);
-int __ext4_journal_get_create_access(const char *where,
+int __ext4_journal_get_create_access(const char *where, unsigned int line,
handle_t *handle, struct buffer_head *bh);
-int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
- struct inode *inode, struct buffer_head *bh);
+int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
+ handle_t *handle, struct inode *inode,
+ struct buffer_head *bh);
+
+int __ext4_handle_dirty_super(const char *where, unsigned int line,
+ handle_t *handle, struct super_block *sb);
#define ext4_journal_get_undo_access(handle, bh) \
- __ext4_journal_get_undo_access(__func__, (handle), (bh))
+ __ext4_journal_get_undo_access(__func__, __LINE__, (handle), (bh))
#define ext4_journal_get_write_access(handle, bh) \
- __ext4_journal_get_write_access(__func__, (handle), (bh))
+ __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
#define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
- __ext4_forget(__func__, (handle), (is_metadata), (inode), (bh),\
- (block_nr))
+ __ext4_forget(__func__, __LINE__, (handle), (is_metadata), (inode), \
+ (bh), (block_nr))
#define ext4_journal_get_create_access(handle, bh) \
- __ext4_journal_get_create_access(__func__, (handle), (bh))
+ __ext4_journal_get_create_access(__func__, __LINE__, (handle), (bh))
#define ext4_handle_dirty_metadata(handle, inode, bh) \
- __ext4_handle_dirty_metadata(__func__, (handle), (inode), (bh))
+ __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
+ (bh))
+#define ext4_handle_dirty_super(handle, sb) \
+ __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
-int __ext4_journal_stop(const char *where, handle_t *handle);
+int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
#define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)
@@ -207,7 +215,7 @@ static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
}
#define ext4_journal_stop(handle) \
- __ext4_journal_stop(__func__, (handle))
+ __ext4_journal_stop(__func__, __LINE__, (handle))
static inline handle_t *ext4_journal_current_handle(void)
{
@@ -308,17 +316,15 @@ static inline int ext4_should_writeback_data(struct inode *inode)
* This function controls whether or not we should try to go down the
* dioread_nolock code paths, which makes it safe to avoid taking
* i_mutex for direct I/O reads. This only works for extent-based
- * files, and it doesn't work for nobh or if data journaling is
- * enabled, since the dioread_nolock code uses b_private to pass
- * information back to the I/O completion handler, and this conflicts
- * with the jbd's use of b_private.
+ * files, and it doesn't work if data journaling is enabled, since the
+ * dioread_nolock code uses b_private to pass information back to the
+ * I/O completion handler, and this conflicts with the jbd's use of
+ * b_private.
*/
static inline int ext4_should_dioread_nolock(struct inode *inode)
{
if (!test_opt(inode->i_sb, DIOREAD_NOLOCK))
return 0;
- if (test_opt(inode->i_sb, NOBH))
- return 0;
if (!S_ISREG(inode->i_mode))
return 0;
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 377309c1af6..06328d3e571 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -401,9 +401,9 @@ static int ext4_valid_extent_entries(struct inode *inode,
return 1;
}
-static int __ext4_ext_check(const char *function, struct inode *inode,
- struct ext4_extent_header *eh,
- int depth)
+static int __ext4_ext_check(const char *function, unsigned int line,
+ struct inode *inode, struct ext4_extent_header *eh,
+ int depth)
{
const char *error_msg;
int max = 0;
@@ -436,7 +436,7 @@ static int __ext4_ext_check(const char *function, struct inode *inode,
return 0;
corrupted:
- ext4_error_inode(function, inode,
+ ext4_error_inode(inode, function, line, 0,
"bad header/extent: %s - magic %x, "
"entries %u, max %u(%u), depth %u(%u)",
error_msg, le16_to_cpu(eh->eh_magic),
@@ -447,7 +447,7 @@ corrupted:
}
#define ext4_ext_check(inode, eh, depth) \
- __ext4_ext_check(__func__, inode, eh, depth)
+ __ext4_ext_check(__func__, __LINE__, inode, eh, depth)
int ext4_ext_check_inode(struct inode *inode)
{
@@ -1083,7 +1083,6 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
{
struct ext4_ext_path *curp = path;
struct ext4_extent_header *neh;
- struct ext4_extent_idx *fidx;
struct buffer_head *bh;
ext4_fsblk_t newblock;
int err = 0;
@@ -1144,10 +1143,10 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
ext4_idx_store_pblock(curp->p_idx, newblock);
neh = ext_inode_hdr(inode);
- fidx = EXT_FIRST_INDEX(neh);
ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
- le32_to_cpu(fidx->ei_block), idx_pblock(fidx));
+ le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
+ idx_pblock(EXT_FIRST_INDEX(neh)));
neh->eh_depth = cpu_to_le16(path->p_depth + 1);
err = ext4_ext_dirty(handle, inode, curp);
@@ -2937,7 +2936,7 @@ fix_extent_len:
* One or more index blocks may be needed if the extent tree grows after
* the uninitialized extent split. To prevent ENOSPC occurring at IO
* completion, we need to split the uninitialized extent before DIO submits
- * the IO. The uninitilized extent called at this time will be split
+ * the IO. The uninitialized extent called at this time will be split
* into three uninitialized extent(at most). After IO complete, the part
* being filled will be convert to initialized by the end_io callback function
* via ext4_convert_unwritten_extents().
@@ -2954,7 +2953,6 @@ static int ext4_split_unwritten_extents(handle_t *handle,
struct ext4_extent *ex1 = NULL;
struct ext4_extent *ex2 = NULL;
struct ext4_extent *ex3 = NULL;
- struct ext4_extent_header *eh;
ext4_lblk_t ee_block, eof_block;
unsigned int allocated, ee_len, depth;
ext4_fsblk_t newblock;
@@ -2971,7 +2969,6 @@ static int ext4_split_unwritten_extents(handle_t *handle,
eof_block = map->m_lblk + map->m_len;
depth = ext_depth(inode);
- eh = path[depth].p_hdr;
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
@@ -3058,7 +3055,6 @@ static int ext4_split_unwritten_extents(handle_t *handle,
err = PTR_ERR(path);
goto out;
}
- eh = path[depth].p_hdr;
ex = path[depth].p_ext;
if (ex2 != &newex)
ex2 = ex;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5313ae4cda2..ee92b66d455 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -70,7 +70,8 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
size_t length = iov_length(iov, nr_segs);
- if (pos > sbi->s_bitmap_maxbytes)
+ if (pos > sbi->s_bitmap_maxbytes ||
+ (pos == sbi->s_bitmap_maxbytes && length > 0))
return -EFBIG;
if (pos + length > sbi->s_bitmap_maxbytes) {
@@ -123,7 +124,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
if (!IS_ERR(cp)) {
memcpy(sbi->s_es->s_last_mounted, cp,
sizeof(sbi->s_es->s_last_mounted));
- sb->s_dirt = 1;
+ ext4_mark_super_dirty(sb);
}
}
return dquot_file_open(inode, filp);
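This hunk and the ialloc.c conversions below route superblock dirtying through ext4_mark_super_dirty(), added in the ext4.h hunk earlier. The helper only sets s_dirt when there is no journal; journaled filesystems are expected to carry superblock updates inside a handle instead, e.g. via ext4_handle_dirty_super(). An annotated restatement of the helper from that hunk:

	static inline void ext4_mark_super_dirty(struct super_block *sb)
	{
		if (EXT4_SB(sb)->s_journal == NULL)
			sb->s_dirt = 1;	/* no journal: VFS writeback handles it */
		/* journaled: the superblock goes through the running handle */
	}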
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 25c4b3173fd..45853e0d1f2 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -222,7 +222,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
is_directory = S_ISDIR(inode->i_mode);
/* Do this BEFORE marking the inode not in use or returning an error */
- clear_inode(inode);
+ ext4_clear_inode(inode);
es = EXT4_SB(sb)->s_es;
if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
@@ -279,7 +279,7 @@ out:
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!fatal)
fatal = err;
- sb->s_dirt = 1;
+ ext4_mark_super_dirty(sb);
} else
ext4_error(sb, "bit already cleared for inode %lu", ino);
@@ -965,7 +965,7 @@ got:
percpu_counter_dec(&sbi->s_freeinodes_counter);
if (S_ISDIR(mode))
percpu_counter_inc(&sbi->s_dirs_counter);
- sb->s_dirt = 1;
+ ext4_mark_super_dirty(sb);
if (sbi->s_log_groups_per_flex) {
flex_group = ext4_flex_group(sbi, group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 19df61c321f..4b8debeb396 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -167,11 +167,16 @@ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
/*
* Called at the last iput() if i_nlink is zero.
*/
-void ext4_delete_inode(struct inode *inode)
+void ext4_evict_inode(struct inode *inode)
{
handle_t *handle;
int err;
+ if (inode->i_nlink) {
+ truncate_inode_pages(&inode->i_data, 0);
+ goto no_delete;
+ }
+
if (!is_bad_inode(inode))
dquot_initialize(inode);
@@ -221,6 +226,7 @@ void ext4_delete_inode(struct inode *inode)
"couldn't extend journal (err %d)", err);
stop_handle:
ext4_journal_stop(handle);
+ ext4_orphan_del(NULL, inode);
goto no_delete;
}
}
@@ -245,13 +251,13 @@ void ext4_delete_inode(struct inode *inode)
*/
if (ext4_mark_inode_dirty(handle, inode))
/* If that failed, just do the required in-core inode clear. */
- clear_inode(inode);
+ ext4_clear_inode(inode);
else
ext4_free_inode(handle, inode);
ext4_journal_stop(handle);
return;
no_delete:
- clear_inode(inode); /* We must guarantee clearing of inode... */
+ ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
typedef struct {
@@ -337,9 +343,11 @@ static int ext4_block_to_path(struct inode *inode,
return n;
}
-static int __ext4_check_blockref(const char *function, struct inode *inode,
+static int __ext4_check_blockref(const char *function, unsigned int line,
+ struct inode *inode,
__le32 *p, unsigned int max)
{
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
__le32 *bref = p;
unsigned int blk;
@@ -348,8 +356,9 @@ static int __ext4_check_blockref(const char *function, struct inode *inode,
if (blk &&
unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
blk, 1))) {
- ext4_error_inode(function, inode,
- "invalid block reference %u", blk);
+ es->s_last_error_block = cpu_to_le64(blk);
+ ext4_error_inode(inode, function, line, blk,
+ "invalid block");
return -EIO;
}
}
@@ -358,11 +367,13 @@ static int __ext4_check_blockref(const char *function, struct inode *inode,
#define ext4_check_indirect_blockref(inode, bh) \
- __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data, \
+ __ext4_check_blockref(__func__, __LINE__, inode, \
+ (__le32 *)(bh)->b_data, \
EXT4_ADDR_PER_BLOCK((inode)->i_sb))
#define ext4_check_inode_blockref(inode) \
- __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data, \
+ __ext4_check_blockref(__func__, __LINE__, inode, \
+ EXT4_I(inode)->i_data, \
EXT4_NDIR_BLOCKS)
/**
@@ -1128,20 +1139,24 @@ void ext4_da_update_reserve_space(struct inode *inode,
ext4_discard_preallocations(inode);
}
-static int check_block_validity(struct inode *inode, const char *func,
+static int __check_block_validity(struct inode *inode, const char *func,
+ unsigned int line,
struct ext4_map_blocks *map)
{
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
map->m_len)) {
- ext4_error_inode(func, inode,
- "lblock %lu mapped to illegal pblock %llu "
- "(length %d)", (unsigned long) map->m_lblk,
- map->m_pblk, map->m_len);
+ ext4_error_inode(inode, func, line, map->m_pblk,
+ "lblock %lu mapped to illegal pblock "
+ "(length %d)", (unsigned long) map->m_lblk,
+ map->m_len);
return -EIO;
}
return 0;
}
+#define check_block_validity(inode, map) \
+ __check_block_validity((inode), __func__, __LINE__, (map))
+
/*
* Return the number of contiguous dirty pages in a given inode
* starting at page frame idx.
@@ -1244,7 +1259,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
up_read((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret = check_block_validity(inode, __func__, map);
+ int ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
@@ -1324,9 +1339,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
- int ret = check_block_validity(inode,
- "ext4_map_blocks_after_alloc",
- map);
+ int ret = check_block_validity(inode, map);
if (ret != 0)
return ret;
}
@@ -1519,9 +1532,25 @@ static int walk_page_buffers(handle_t *handle,
static int do_journal_get_write_access(handle_t *handle,
struct buffer_head *bh)
{
+ int dirty = buffer_dirty(bh);
+ int ret;
+
if (!buffer_mapped(bh) || buffer_freed(bh))
return 0;
- return ext4_journal_get_write_access(handle, bh);
+ /*
+ * __block_prepare_write() could have dirtied some buffers. Clean
+ * the dirty bit as jbd2_journal_get_write_access() could complain
+ * otherwise about fs integrity issues. Setting of the dirty bit
+ * by __block_prepare_write() isn't a real problem here as we clear
+ * the bit before releasing a page lock and thus writeback cannot
+ * ever write the buffer.
+ */
+ if (dirty)
+ clear_buffer_dirty(bh);
+ ret = ext4_journal_get_write_access(handle, bh);
+ if (!ret && dirty)
+ ret = ext4_handle_dirty_metadata(handle, NULL, bh);
+ return ret;
}
/*
@@ -1578,11 +1607,9 @@ retry:
*pagep = page;
if (ext4_should_dioread_nolock(inode))
- ret = block_write_begin(file, mapping, pos, len, flags, pagep,
- fsdata, ext4_get_block_write);
+ ret = __block_write_begin(page, pos, len, ext4_get_block_write);
else
- ret = block_write_begin(file, mapping, pos, len, flags, pagep,
- fsdata, ext4_get_block);
+ ret = __block_write_begin(page, pos, len, ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = walk_page_buffers(handle, page_buffers(page),
@@ -1593,7 +1620,7 @@ retry:
unlock_page(page);
page_cache_release(page);
/*
- * block_write_begin may have instantiated a few blocks
+ * __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_mutex.
*
@@ -2194,7 +2221,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
BUG_ON(!handle);
/*
- * Call ext4_get_blocks() to allocate any delayed allocation
+ * Call ext4_map_blocks() to allocate any delayed allocation
* blocks, or to convert an uninitialized extent to be
* initialized (in the case where we have written into
* one or more preallocated blocks).
@@ -2203,7 +2230,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
* indicate that we are on the delayed allocation path. This
* affects functions in many different parts of the allocation
* call path. This flag exists primarily because we don't
- * want to change *many* call functions, so ext4_get_blocks()
+ * want to change *many* call functions, so ext4_map_blocks()
* will set the magic i_delalloc_reserved_flag once the
* inode's allocation semaphore is taken.
*
@@ -2221,6 +2248,8 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
if (blks < 0) {
+ struct super_block *sb = mpd->inode->i_sb;
+
err = blks;
/*
* If get block returns with error we simply
@@ -2231,7 +2260,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
return 0;
if (err == -ENOSPC &&
- ext4_count_free_blocks(mpd->inode->i_sb)) {
+ ext4_count_free_blocks(sb)) {
mpd->retval = err;
return 0;
}
@@ -2243,16 +2272,17 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
* writepage and writepages will again try to write
* the same.
*/
- ext4_msg(mpd->inode->i_sb, KERN_CRIT,
- "delayed block allocation failed for inode %lu at "
- "logical offset %llu with max blocks %zd with "
- "error %d", mpd->inode->i_ino,
- (unsigned long long) next,
- mpd->b_size >> mpd->inode->i_blkbits, err);
- printk(KERN_CRIT "This should not happen!! "
- "Data will be lost\n");
- if (err == -ENOSPC) {
- ext4_print_free_blocks(mpd->inode);
+ if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
+ ext4_msg(sb, KERN_CRIT,
+ "delayed block allocation failed for inode %lu "
+ "at logical offset %llu with max blocks %zd "
+ "with error %d", mpd->inode->i_ino,
+ (unsigned long long) next,
+ mpd->b_size >> mpd->inode->i_blkbits, err);
+ ext4_msg(sb, KERN_CRIT,
+ "This should not happen!! Data will be lost\n");
+ if (err == -ENOSPC)
+ ext4_print_free_blocks(mpd->inode);
}
/* invalidate all the pages */
ext4_da_block_invalidatepages(mpd, next,
@@ -2320,7 +2350,7 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
* XXX Don't go larger than mballoc is willing to allocate
* This is a stopgap solution. We eventually need to fold
* mpage_da_submit_io() into this function and then call
- * ext4_get_blocks() multiple times in a loop
+ * ext4_map_blocks() multiple times in a loop
*/
if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
goto flush_it;
@@ -2553,18 +2583,16 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
/*
 * This function is used as a standard get_block_t callback function
* when there is no desire to allocate any blocks. It is used as a
- * callback function for block_prepare_write(), nobh_writepage(), and
- * block_write_full_page(). These functions should only try to map a
- * single block at a time.
+ * callback function for block_prepare_write() and block_write_full_page().
+ * These functions should only try to map a single block at a time.
*
* Since this function doesn't do block allocations even if the caller
* requests it by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads returned
* by this function are either all already mapped or marked for
- * delayed allocation before calling nobh_writepage() or
- * block_write_full_page(). Otherwise, b_blocknr could be left
- * unitialized, and the page write functions will be taken by
- * surprise.
+ * delayed allocation before calling block_write_full_page(). Otherwise,
+ * b_blocknr could be left uninitialized, and the page write functions will
+ * be taken by surprise.
*/
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
@@ -2749,9 +2777,7 @@ static int ext4_writepage(struct page *page,
return __ext4_journalled_writepage(page, len);
}
- if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
- ret = nobh_writepage(page, noalloc_get_block_write, wbc);
- else if (page_bufs && buffer_uninit(page_bufs)) {
+ if (page_bufs && buffer_uninit(page_bufs)) {
ext4_set_bh_endio(page_bufs, inode);
ret = block_write_full_page_endio(page, noalloc_get_block_write,
wbc, ext4_end_io_buffer_write);
@@ -3146,13 +3172,10 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
int ret, retries = 0;
struct page *page;
pgoff_t index;
- unsigned from, to;
struct inode *inode = mapping->host;
handle_t *handle;
index = pos >> PAGE_CACHE_SHIFT;
- from = pos & (PAGE_CACHE_SIZE - 1);
- to = from + len;
if (ext4_nonda_switch(inode->i_sb)) {
*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
@@ -3185,8 +3208,7 @@ retry:
}
*pagep = page;
- ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- ext4_da_get_block_prep);
+ ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
@@ -3545,15 +3567,24 @@ static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
retry:
if (rw == READ && ext4_should_dioread_nolock(inode))
- ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
+ ret = __blockdev_direct_IO(rw, iocb, inode,
inode->i_sb->s_bdev, iov,
offset, nr_segs,
- ext4_get_block, NULL);
- else
+ ext4_get_block, NULL, NULL, 0);
+ else {
ret = blockdev_direct_IO(rw, iocb, inode,
inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext4_get_block, NULL);
+
+ if (unlikely((rw & WRITE) && ret < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
+ }
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
@@ -3668,6 +3699,8 @@ static int ext4_end_io_nolock(ext4_io_end_t *io)
return ret;
}
+ if (io->iocb)
+ aio_complete(io->iocb, io->result, 0);
/* clear the DIO AIO unwritten flag */
io->flag = 0;
return ret;
@@ -3767,6 +3800,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
io->offset = 0;
io->size = 0;
io->page = NULL;
+ io->iocb = NULL;
+ io->result = 0;
INIT_WORK(&io->work, ext4_end_io_work);
INIT_LIST_HEAD(&io->list);
}
@@ -3775,7 +3810,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
}
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
- ssize_t size, void *private)
+ ssize_t size, void *private, int ret,
+ bool is_async)
{
ext4_io_end_t *io_end = iocb->private;
struct workqueue_struct *wq;
@@ -3784,7 +3820,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
/* if not async direct IO or dio with 0 bytes write, just return */
if (!io_end || !size)
- return;
+ goto out;
ext_debug("ext4_end_io_dio(): io_end 0x%p"
"for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
@@ -3795,12 +3831,18 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
if (io_end->flag != EXT4_IO_UNWRITTEN){
ext4_free_io_end(io_end);
iocb->private = NULL;
+out:
+ if (is_async)
+ aio_complete(iocb, ret, 0);
return;
}
io_end->offset = offset;
io_end->size = size;
- io_end->flag = EXT4_IO_UNWRITTEN;
+ if (is_async) {
+ io_end->iocb = iocb;
+ io_end->result = ret;
+ }
wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
/* queue the work to convert unwritten extents to written */
@@ -3937,7 +3979,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
return -ENOMEM;
/*
* we save the io structure for current async
- * direct IO, so that later ext4_get_blocks()
+ * direct IO, so that later ext4_map_blocks()
* could flag the io structure whether there
 * are unwritten extents that need to be converted
* when IO is completed.
@@ -4128,17 +4170,6 @@ int ext4_block_truncate_page(handle_t *handle,
length = blocksize - (offset & (blocksize - 1));
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
- /*
- * For "nobh" option, we can only work if we don't need to
- * read-in the page - otherwise we create buffers to do the IO.
- */
- if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
- ext4_should_writeback_data(inode) && PageUptodate(page)) {
- zero_user(page, offset, length);
- set_page_dirty(page);
- goto unlock;
- }
-
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
@@ -4488,9 +4519,8 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
* (should be rare).
*/
if (!bh) {
- EXT4_ERROR_INODE(inode,
- "Read failure block=%llu",
- (unsigned long long) nr);
+ EXT4_ERROR_INODE_BLOCK(inode, nr,
+ "Read failure");
continue;
}
@@ -4502,27 +4532,6 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
depth);
/*
- * We've probably journalled the indirect block several
- * times during the truncate. But it's no longer
- * needed and we now drop it from the transaction via
- * jbd2_journal_revoke().
- *
- * That's easy if it's exclusively part of this
- * transaction. But if it's part of the committing
- * transaction then jbd2_journal_forget() will simply
- * brelse() it. That means that if the underlying
- * block is reallocated in ext4_get_block(),
- * unmap_underlying_metadata() will find this block
- * and will try to get rid of it. damn, damn.
- *
- * If this block has already been committed to the
- * journal, a revoke record will be written. And
- * revoke records must be emitted *before* clearing
- * this block's bit in the bitmaps.
- */
- ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
-
- /*
 * Everything below this pointer has been
* released. Now let this top-of-subtree go.
*
@@ -4546,8 +4555,20 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
blocks_for_truncate(inode));
}
+ /*
+ * The forget flag here is critical because if
+ * we are journaling (and not doing data
+ * journaling), we have to make sure a revoke
+ * record is written to prevent the journal
+ * replay from overwriting the (former)
+ * indirect block if it gets reallocated as a
+ * data block. This must happen in the same
+ * transaction where the data blocks are
+ * actually freed.
+ */
ext4_free_blocks(handle, inode, 0, nr, 1,
- EXT4_FREE_BLOCKS_METADATA);
+ EXT4_FREE_BLOCKS_METADATA|
+ EXT4_FREE_BLOCKS_FORGET);
if (parent_bh) {
/*
@@ -4805,8 +4826,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
bh = sb_getblk(sb, block);
if (!bh) {
- EXT4_ERROR_INODE(inode, "unable to read inode block - "
- "block %llu", block);
+ EXT4_ERROR_INODE_BLOCK(inode, block,
+ "unable to read itable block");
return -EIO;
}
if (!buffer_uptodate(bh)) {
@@ -4904,8 +4925,8 @@ make_io:
submit_bh(READ_META, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
- EXT4_ERROR_INODE(inode, "unable to read inode "
- "block %llu", block);
+ EXT4_ERROR_INODE_BLOCK(inode, block,
+ "unable to read itable block");
brelse(bh);
return -EIO;
}
@@ -4942,20 +4963,26 @@ void ext4_set_inode_flags(struct inode *inode)
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
- unsigned int flags = ei->vfs_inode.i_flags;
-
- ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
- EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
- if (flags & S_SYNC)
- ei->i_flags |= EXT4_SYNC_FL;
- if (flags & S_APPEND)
- ei->i_flags |= EXT4_APPEND_FL;
- if (flags & S_IMMUTABLE)
- ei->i_flags |= EXT4_IMMUTABLE_FL;
- if (flags & S_NOATIME)
- ei->i_flags |= EXT4_NOATIME_FL;
- if (flags & S_DIRSYNC)
- ei->i_flags |= EXT4_DIRSYNC_FL;
+ unsigned int vfs_fl;
+ unsigned long old_fl, new_fl;
+
+ do {
+ vfs_fl = ei->vfs_inode.i_flags;
+ old_fl = ei->i_flags;
+ new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
+ EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
+ EXT4_DIRSYNC_FL);
+ if (vfs_fl & S_SYNC)
+ new_fl |= EXT4_SYNC_FL;
+ if (vfs_fl & S_APPEND)
+ new_fl |= EXT4_APPEND_FL;
+ if (vfs_fl & S_IMMUTABLE)
+ new_fl |= EXT4_IMMUTABLE_FL;
+ if (vfs_fl & S_NOATIME)
+ new_fl |= EXT4_NOATIME_FL;
+ if (vfs_fl & S_DIRSYNC)
+ new_fl |= EXT4_DIRSYNC_FL;
+ } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}
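The do/while-cmpxchg loop above is the standard lockless read-modify-write idiom: snapshot the field, compute the new value from that snapshot, and retry if another CPU changed the field in the meantime. A userspace analogue using C11 atomics, as a sketch only (the kernel's cmpxchg() works on a plain field and returns the old value):

#include <stdatomic.h>

/* Set the bits in `mask` without taking a lock: retry until the
 * compare-and-swap sees the same value we computed from. */
static void set_flags_lockless(_Atomic unsigned long *flags,
                               unsigned long mask)
{
        unsigned long old_fl, new_fl;

        do {
                old_fl = atomic_load(flags);
                new_fl = old_fl | mask;
        } while (!atomic_compare_exchange_weak(flags, &old_fl, new_fl));
}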
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
@@ -4970,7 +4997,7 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
/* we are using combined 48 bit field */
i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
le32_to_cpu(raw_inode->i_blocks_lo);
- if (ei->i_flags & EXT4_HUGE_FILE_FL) {
+ if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
/* i_blocks represent file system block size */
return i_blocks << (inode->i_blkbits - 9);
} else {
@@ -5066,7 +5093,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
transaction_t *transaction;
tid_t tid;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
if (journal->j_running_transaction)
transaction = journal->j_running_transaction;
else
@@ -5075,7 +5102,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
tid = transaction->t_tid;
else
tid = journal->j_commit_sequence;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
ei->i_sync_tid = tid;
ei->i_datasync_tid = tid;
}
@@ -5120,7 +5147,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ei->i_file_acl);
ret = -EIO;
goto bad_inode;
- } else if (ei->i_flags & EXT4_EXTENTS_FL) {
+ } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
(S_ISLNK(inode->i_mode) &&
!ext4_inode_is_fast_symlink(inode)))
@@ -5191,7 +5218,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = 0;
- ei->i_flags &= ~EXT4_HUGE_FILE_FL;
+ ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
return 0;
}
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
@@ -5204,9 +5231,9 @@ static int ext4_inode_blocks_set(handle_t *handle,
*/
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
- ei->i_flags &= ~EXT4_HUGE_FILE_FL;
+ ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
} else {
- ei->i_flags |= EXT4_HUGE_FILE_FL;
+ ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
/* i_block is stored in file system block size */
i_blocks = i_blocks >> (inode->i_blkbits - 9);
raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
@@ -5400,9 +5427,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->sync_mode == WB_SYNC_ALL)
sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
- EXT4_ERROR_INODE(inode,
- "IO error syncing inode (block=%llu)",
- (unsigned long long) iloc.bh->b_blocknr);
+ EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
+ "IO error syncing inode");
err = -EIO;
}
brelse(iloc.bh);
@@ -5477,10 +5503,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- if (attr->ia_size > sbi->s_bitmap_maxbytes) {
- error = -EFBIG;
- goto err_out;
- }
+ if (attr->ia_size > sbi->s_bitmap_maxbytes)
+ return -EFBIG;
}
}
@@ -5523,11 +5547,19 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
ext4_truncate(inode);
}
- rc = inode_setattr(inode, attr);
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode))
+ rc = vmtruncate(inode, attr->ia_size);
+
+ if (!rc) {
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ }
- /* If inode_setattr's call to ext4_truncate failed to get a
- * transaction handle at all, we need to clean up the in-core
- * orphan list manually. */
+ /*
+ * If the call to ext4_truncate failed to get a transaction handle at
+ * all, we need to clean up the in-core orphan list manually.
+ */
if (inode->i_nlink)
ext4_orphan_del(NULL, inode);
@@ -5682,7 +5714,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
* Calculate the journal credits for a chunk of data modification.
*
* This is called from DIO, fallocate or whoever calling
- * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
+ * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
*
* journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
@@ -5748,7 +5780,6 @@ static int ext4_expand_extra_isize(struct inode *inode,
{
struct ext4_inode *raw_inode;
struct ext4_xattr_ibody_header *header;
- struct ext4_xattr_entry *entry;
if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
return 0;
@@ -5756,7 +5787,6 @@ static int ext4_expand_extra_isize(struct inode *inode,
raw_inode = ext4_raw_inode(&iloc);
header = IHDR(inode, raw_inode);
- entry = IFIRST(header);
/* No extended attributes present */
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 12b3bc026a6..4b4ad4b7ce5 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -446,10 +446,11 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += first + i;
ext4_grp_locked_error(sb, e4b->bd_group,
- __func__, "double-free of inode"
- " %lu's block %llu(bit %u in group %u)",
- inode ? inode->i_ino : 0, blocknr,
- first + i, e4b->bd_group);
+ inode ? inode->i_ino : 0,
+ blocknr,
+ "freeing block already freed "
+ "(bit %u)",
+ first + i);
}
mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
}
@@ -712,9 +713,9 @@ void ext4_mb_generate_buddy(struct super_block *sb,
grp->bb_fragments = fragments;
if (free != grp->bb_free) {
- ext4_grp_locked_error(sb, group, __func__,
- "EXT4-fs: group %u: %u blocks in bitmap, %u in gd",
- group, free, grp->bb_free);
+ ext4_grp_locked_error(sb, group, 0, 0,
+ "%u blocks in bitmap, %u in gd",
+ free, grp->bb_free);
/*
 * If we intend to continue, we consider group descriptor
* corrupt and update bb_free using bitmap value
@@ -1296,10 +1297,10 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += block;
ext4_grp_locked_error(sb, e4b->bd_group,
- __func__, "double-free of inode"
- " %lu's block %llu(bit %u in group %u)",
- inode ? inode->i_ino : 0, blocknr, block,
- e4b->bd_group);
+ inode ? inode->i_ino : 0,
+ blocknr,
+ "freeing already freed block "
+ "(bit %u)", block);
}
mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
e4b->bd_info->bb_counters[order]++;
@@ -1788,8 +1789,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
* free blocks even though group info says we
 * have free blocks
*/
- ext4_grp_locked_error(sb, e4b->bd_group,
- __func__, "%d free blocks as per "
+ ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
+ "%d free blocks as per "
"group info. But bitmap says 0",
free);
break;
@@ -1798,8 +1799,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
BUG_ON(ex.fe_len <= 0);
if (free < ex.fe_len) {
- ext4_grp_locked_error(sb, e4b->bd_group,
- __func__, "%d free blocks as per "
+ ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
+ "%d free blocks as per "
"group info. But got %d blocks",
free, ex.fe_len);
/*
@@ -1821,8 +1822,7 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
/*
* This is a special case for storages like raid5
- * we try to find stripe-aligned chunks for stripe-size requests
- * XXX should do so at least for multiples of stripe size as well
+ * we try to find stripe-aligned chunks for stripe-size-multiple requests
*/
static noinline_for_stack
void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
@@ -1999,7 +1999,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
ext4_group_t ngroups, group, i;
int cr;
int err = 0;
- int bsbits;
struct ext4_sb_info *sbi;
struct super_block *sb;
struct ext4_buddy e4b;
@@ -2041,8 +2040,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
ac->ac_2order = i - 1;
}
- bsbits = ac->ac_sb->s_blocksize_bits;
-
/* if stream allocation is enabled, use global goal */
if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
/* TBD: may be hot point */
@@ -2094,8 +2091,8 @@ repeat:
ac->ac_groups_scanned++;
if (cr == 0)
ext4_mb_simple_scan_group(ac, &e4b);
- else if (cr == 1 &&
- ac->ac_g_ex.fe_len == sbi->s_stripe)
+ else if (cr == 1 && sbi->s_stripe &&
+ !(ac->ac_g_ex.fe_len % sbi->s_stripe))
ext4_mb_scan_aligned(ac, &e4b);
else
ext4_mb_complex_scan_group(ac, &e4b);
@@ -2221,7 +2218,7 @@ static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
rc = seq_open(file, &ext4_mb_seq_groups_ops);
if (rc == 0) {
- struct seq_file *m = (struct seq_file *)file->private_data;
+ struct seq_file *m = file->private_data;
m->private = sb;
}
return rc;
@@ -2560,6 +2557,22 @@ int ext4_mb_release(struct super_block *sb)
return 0;
}
+static inline void ext4_issue_discard(struct super_block *sb,
+ ext4_group_t block_group, ext4_grpblk_t block, int count)
+{
+ int ret;
+ ext4_fsblk_t discard_block;
+
+ discard_block = block + ext4_group_first_block_no(sb, block_group);
+ trace_ext4_discard_blocks(sb,
+ (unsigned long long) discard_block, count);
+ ret = sb_issue_discard(sb, discard_block, count);
+ if (ret == EOPNOTSUPP) {
+ ext4_warning(sb, "discard not supported, disabling");
+ clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
+ }
+}
+
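The new helper folds the "try once, then stop trying" logic into one place: issue the optional operation, and if the device reports it as unsupported, warn and clear the mount flag so later frees skip it entirely. A self-contained sketch of the same shape (hypothetical names, userspace errno convention):

#include <errno.h>
#include <stdio.h>

#define F_DISCARD 0x1

/* Stand-in for the block-layer call; returns an errno code. */
static int issue_discard(unsigned long long block, int count)
{
        (void) block; (void) count;
        return EOPNOTSUPP;              /* pretend the device refused */
}

static void maybe_discard(unsigned int *mount_flags,
                          unsigned long long block, int count)
{
        if (!(*mount_flags & F_DISCARD))
                return;
        if (issue_discard(block, count) == EOPNOTSUPP) {
                fprintf(stderr, "discard not supported, disabling\n");
                *mount_flags &= ~F_DISCARD;     /* never retried */
        }
}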
/*
* This function is called by the jbd2 layer once the commit has finished,
* so we know we can free the blocks that were released with that commit.
@@ -2579,22 +2592,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->count, entry->group, entry);
- if (test_opt(sb, DISCARD)) {
- int ret;
- ext4_fsblk_t discard_block;
-
- discard_block = entry->start_blk +
- ext4_group_first_block_no(sb, entry->group);
- trace_ext4_discard_blocks(sb,
- (unsigned long long)discard_block,
- entry->count);
- ret = sb_issue_discard(sb, discard_block, entry->count);
- if (ret == EOPNOTSUPP) {
- ext4_warning(sb,
- "discard not supported, disabling");
- clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
- }
- }
+ if (test_opt(sb, DISCARD))
+ ext4_issue_discard(sb, entry->group,
+ entry->start_blk, entry->count);
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
/* we expect to find existing buddy because it's pinned */
@@ -2704,7 +2704,7 @@ void exit_ext4_mballoc(void)
/*
- * Check quota and mark choosed space (ac->ac_b_ex) non-free in bitmaps
+ * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
* Returns 0 if success or error code
*/
static noinline_for_stack int
@@ -2712,7 +2712,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
handle_t *handle, unsigned int reserv_blks)
{
struct buffer_head *bitmap_bh = NULL;
- struct ext4_super_block *es;
struct ext4_group_desc *gdp;
struct buffer_head *gdp_bh;
struct ext4_sb_info *sbi;
@@ -2725,8 +2724,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
sb = ac->ac_sb;
sbi = EXT4_SB(sb);
- es = sbi->s_es;
-
err = -EIO;
bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
@@ -2812,7 +2809,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
out_err:
- sb->s_dirt = 1;
+ ext4_mark_super_dirty(sb);
brelse(bitmap_bh);
return err;
}
@@ -2850,7 +2847,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
int bsbits, max;
ext4_lblk_t end;
loff_t size, orig_size, start_off;
- ext4_lblk_t start, orig_start;
+ ext4_lblk_t start;
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_prealloc_space *pa;
@@ -2881,6 +2878,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
size = size << bsbits;
if (size < i_size_read(ac->ac_inode))
size = i_size_read(ac->ac_inode);
+ orig_size = size;
/* max size of free chunks */
max = 2 << bsbits;
@@ -2922,8 +2920,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
size = ac->ac_o_ex.fe_len << bsbits;
}
- orig_size = size = size >> bsbits;
- orig_start = start = start_off >> bsbits;
+ size = size >> bsbits;
+ start = start_off >> bsbits;
/* don't cover already allocated blocks in selected range */
if (ar->pleft && start <= ar->lleft) {
@@ -3547,7 +3545,6 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
ext4_group_t group;
ext4_grpblk_t bit;
unsigned long long grp_blk_start;
- sector_t start;
int err = 0;
int free = 0;
@@ -3567,10 +3564,9 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
if (bit >= end)
break;
next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
- start = ext4_group_first_block_no(sb, group) + bit;
mb_debug(1, " free preallocated %u/%u in group %u\n",
- (unsigned) start, (unsigned) next - bit,
- (unsigned) group);
+ (unsigned) ext4_group_first_block_no(sb, group) + bit,
+ (unsigned) next - bit, (unsigned) group);
free += next - bit;
if (ac) {
@@ -3581,7 +3577,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
trace_ext4_mballoc_discard(ac);
}
- trace_ext4_mb_release_inode_pa(ac, pa, grp_blk_start + bit,
+ trace_ext4_mb_release_inode_pa(sb, ac, pa, grp_blk_start + bit,
next - bit);
mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
bit = next + 1;
@@ -3591,8 +3587,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
pa, (unsigned long) pa->pa_lstart,
(unsigned long) pa->pa_pstart,
(unsigned long) pa->pa_len);
- ext4_grp_locked_error(sb, group,
- __func__, "free %u, pa_free %u",
+ ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
free, pa->pa_free);
/*
* pa is already deleted so we use the value obtained
@@ -3613,7 +3608,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
ext4_group_t group;
ext4_grpblk_t bit;
- trace_ext4_mb_release_group_pa(ac, pa);
+ trace_ext4_mb_release_group_pa(sb, ac, pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
@@ -3889,6 +3884,9 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
struct super_block *sb = ac->ac_sb;
ext4_group_t ngroups, i;
+ if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+ return;
+
printk(KERN_ERR "EXT4-fs: Can't allocate:"
" Allocation context details:\n");
printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
@@ -4255,7 +4253,7 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
* to usual allocation
*/
ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
- struct ext4_allocation_request *ar, int *errp)
+ struct ext4_allocation_request *ar, int *errp)
{
int freed;
struct ext4_allocation_context *ac = NULL;
@@ -4299,7 +4297,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
inquota = ar->len;
if (ar->len == 0) {
*errp = -EDQUOT;
- goto out3;
+ goto out;
}
}
@@ -4307,13 +4305,13 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
if (!ac) {
ar->len = 0;
*errp = -ENOMEM;
- goto out1;
+ goto out;
}
*errp = ext4_mb_initialize_context(ac, ar);
if (*errp) {
ar->len = 0;
- goto out2;
+ goto out;
}
ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
@@ -4322,7 +4320,9 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
ext4_mb_normalize_request(ac, ar);
repeat:
/* allocate space in core */
- ext4_mb_regular_allocator(ac);
+ *errp = ext4_mb_regular_allocator(ac);
+ if (*errp)
+ goto errout;
/* as we've just preallocated more space than
 * user requested originally, we store allocated
@@ -4333,7 +4333,7 @@ repeat:
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
- if (*errp == -EAGAIN) {
+ if (*errp == -EAGAIN) {
/*
* drop the reference that we took
* in ext4_mb_use_best_found
@@ -4344,12 +4344,10 @@ repeat:
ac->ac_b_ex.fe_len = 0;
ac->ac_status = AC_STATUS_CONTINUE;
goto repeat;
- } else if (*errp) {
+ } else if (*errp)
+ errout:
ext4_discard_allocated_blocks(ac);
- ac->ac_b_ex.fe_len = 0;
- ar->len = 0;
- ext4_mb_show_ac(ac);
- } else {
+ else {
block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
ar->len = ac->ac_b_ex.fe_len;
}
@@ -4358,19 +4356,19 @@ repeat:
if (freed)
goto repeat;
*errp = -ENOSPC;
+ }
+
+ if (*errp) {
ac->ac_b_ex.fe_len = 0;
ar->len = 0;
ext4_mb_show_ac(ac);
}
-
ext4_mb_release_context(ac);
-
-out2:
- kmem_cache_free(ext4_ac_cachep, ac);
-out1:
+out:
+ if (ac)
+ kmem_cache_free(ext4_ac_cachep, ac);
if (inquota && ar->len < inquota)
dquot_free_block(ar->inode, inquota - ar->len);
-out3:
if (!ar->len) {
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
/* release all the reserved blocks if non delalloc */
@@ -4402,6 +4400,7 @@ static noinline_for_stack int
ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
struct ext4_free_data *new_entry)
{
+ ext4_group_t group = e4b->bd_group;
ext4_grpblk_t block;
struct ext4_free_data *entry;
struct ext4_group_info *db = e4b->bd_info;
@@ -4434,9 +4433,9 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
else if (block >= (entry->start_blk + entry->count))
n = &(*n)->rb_right;
else {
- ext4_grp_locked_error(sb, e4b->bd_group, __func__,
- "Double free of blocks %d (%d %d)",
- block, entry->start_blk, entry->count);
+ ext4_grp_locked_error(sb, group, 0,
+ ext4_group_first_block_no(sb, group) + block,
+ "Block already on to-be-freed list");
return 0;
}
}
@@ -4494,7 +4493,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
struct super_block *sb = inode->i_sb;
struct ext4_allocation_context *ac = NULL;
struct ext4_group_desc *gdp;
- struct ext4_super_block *es;
unsigned long freed = 0;
unsigned int overflow;
ext4_grpblk_t bit;
@@ -4513,7 +4511,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
}
sbi = EXT4_SB(sb);
- es = EXT4_SB(sb)->s_es;
if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
!ext4_data_block_valid(sbi, block, count)) {
ext4_error(sb, "Freeing blocks not in datazone - "
@@ -4647,6 +4644,8 @@ do_more:
mb_clear_bits(bitmap_bh->b_data, bit, count);
mb_free_blocks(inode, &e4b, bit, count);
ext4_mb_return_to_preallocation(inode, &e4b, block, count);
+ if (test_opt(sb, DISCARD))
+ ext4_issue_discard(sb, block_group, bit, count);
}
ret = ext4_free_blks_count(sb, gdp) + count;
@@ -4680,7 +4679,7 @@ do_more:
put_bh(bitmap_bh);
goto do_more;
}
- sb->s_dirt = 1;
+ ext4_mark_super_dirty(sb);
error_return:
if (freed)
dquot_free_block(inode, freed);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 6f3a27ec30b..1765c2c50a9 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -376,7 +376,7 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
* We have the extent map build with the tmp inode.
* Now copy the i_data across
*/
- ei->i_flags |= EXT4_EXTENTS_FL;
+ ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));
/*
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 3a6c92ac131..5f1ed9fc913 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -148,17 +148,17 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
*/
static int
mext_check_null_inode(struct inode *inode1, struct inode *inode2,
- const char *function)
+ const char *function, unsigned int line)
{
int ret = 0;
if (inode1 == NULL) {
- __ext4_error(inode2->i_sb, function,
+ __ext4_error(inode2->i_sb, function, line,
"Both inodes should not be NULL: "
"inode1 NULL inode2 %lu", inode2->i_ino);
ret = -EIO;
} else if (inode2 == NULL) {
- __ext4_error(inode1->i_sb, function,
+ __ext4_error(inode1->i_sb, function, line,
"Both inodes should not be NULL: "
"inode1 %lu inode2 NULL", inode1->i_ino);
ret = -EIO;
@@ -960,6 +960,9 @@ mext_check_arguments(struct inode *orig_inode,
return -EINVAL;
}
+ if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
+ return -EPERM;
+
/* Ext4 move extent does not support swapfile */
if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should "
@@ -1081,7 +1084,7 @@ mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
BUG_ON(inode1 == NULL && inode2 == NULL);
- ret = mext_check_null_inode(inode1, inode2, __func__);
+ ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
if (ret < 0)
goto out;
@@ -1118,7 +1121,7 @@ mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
BUG_ON(inode1 == NULL && inode2 == NULL);
- ret = mext_check_null_inode(inode1, inode2, __func__);
+ ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
if (ret < 0)
goto out;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index a43e6617b35..314c0d3b3fa 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -179,30 +179,6 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode);
-unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
-{
- unsigned len = le16_to_cpu(dlen);
-
- if (len == EXT4_MAX_REC_LEN || len == 0)
- return blocksize;
- return (len & 65532) | ((len & 3) << 16);
-}
-
-__le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
-{
- if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
- BUG();
- if (len < 65536)
- return cpu_to_le16(len);
- if (len == blocksize) {
- if (blocksize == 65536)
- return cpu_to_le16(EXT4_MAX_REC_LEN);
- else
- return cpu_to_le16(0);
- }
- return cpu_to_le16((len & 65532) | ((len >> 16) & 3));
-}
-
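For readers of the helpers being removed here (they move out of namei.c): directory record lengths are 4-byte aligned, so their two low bits are always zero on disk, and the encoding reuses those bits to store bits 16-17 of lengths that would overflow a __le16 on large-blocksize filesystems. A worked round-trip, ignoring the EXT4_MAX_REC_LEN special case:

#include <assert.h>

unsigned encode(unsigned len) { return (len & 65532) | ((len >> 16) & 3); }
unsigned decode(unsigned d)   { return (d & 65532) | ((d & 3) << 16); }

int main(void)
{
        assert(encode(65540) == 5);     /* 0x10004 packs into 0x0005 */
        assert(decode(5) == 65540);     /* and unpacks losslessly */
        return 0;
}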
/*
* p is at least 6 bytes before the end of page
*/
@@ -605,7 +581,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
dir->i_sb->s_blocksize -
EXT4_DIR_REC_LEN(0));
for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
- if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+ if (!ext4_check_dir_entry(dir, de, bh,
(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+((char *)de - bh->b_data))) {
/* On error, skip the f_pos to the next block. */
@@ -844,8 +820,7 @@ static inline int search_dirblock(struct buffer_head *bh,
if ((char *) de + namelen <= dlimit &&
ext4_match (namelen, name, de)) {
/* found a match - just to be sure, do a full check */
- if (!ext4_check_dir_entry("ext4_find_entry",
- dir, de, bh, offset))
+ if (!ext4_check_dir_entry(dir, de, bh, offset))
return -1;
*res_dir = de;
return 1;
@@ -1019,7 +994,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
int off = (block << EXT4_BLOCK_SIZE_BITS(sb))
+ ((char *) de - bh->b_data);
- if (!ext4_check_dir_entry(__func__, dir, de, bh, off)) {
+ if (!ext4_check_dir_entry(dir, de, bh, off)) {
brelse(bh);
*err = ERR_BAD_DX_DIR;
goto errout;
@@ -1088,7 +1063,6 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
struct dentry *ext4_get_parent(struct dentry *child)
{
__u32 ino;
- struct inode *inode;
static const struct qstr dotdot = {
.name = "..",
.len = 2,
@@ -1097,7 +1071,6 @@ struct dentry *ext4_get_parent(struct dentry *child)
struct buffer_head *bh;
bh = ext4_find_entry(child->d_inode, &dotdot, &de);
- inode = NULL;
if (!bh)
return ERR_PTR(-ENOENT);
ino = le32_to_cpu(de->inode);
@@ -1305,8 +1278,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
de = (struct ext4_dir_entry_2 *)bh->b_data;
top = bh->b_data + blocksize - reclen;
while ((char *) de <= top) {
- if (!ext4_check_dir_entry("ext4_add_entry", dir, de,
- bh, offset))
+ if (!ext4_check_dir_entry(dir, de, bh, offset))
return -EIO;
if (ext4_match(namelen, name, de))
return -EEXIST;
@@ -1673,7 +1645,7 @@ static int ext4_delete_entry(handle_t *handle,
pde = NULL;
de = (struct ext4_dir_entry_2 *) bh->b_data;
while (i < bh->b_size) {
- if (!ext4_check_dir_entry("ext4_delete_entry", dir, de, bh, i))
+ if (!ext4_check_dir_entry(dir, de, bh, i))
return -EIO;
if (de == de_del) {
BUFFER_TRACE(bh, "get_write_access");
@@ -1956,7 +1928,7 @@ static int empty_dir(struct inode *inode)
}
de = (struct ext4_dir_entry_2 *) bh->b_data;
}
- if (!ext4_check_dir_entry("empty_dir", inode, de, bh, offset)) {
+ if (!ext4_check_dir_entry(inode, de, bh, offset)) {
de = (struct ext4_dir_entry_2 *)(bh->b_data +
sb->s_blocksize);
offset = (offset | (sb->s_blocksize - 1)) + 1;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 6df797eb9ae..ca5c8aa00a2 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -921,8 +921,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
&sbi->s_flex_groups[flex_group].free_inodes);
}
- ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
- sb->s_dirt = 1;
+ ext4_handle_dirty_super(handle, sb);
exit_journal:
mutex_unlock(&sbi->s_resize_lock);
@@ -953,7 +952,6 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
ext4_fsblk_t n_blocks_count)
{
ext4_fsblk_t o_blocks_count;
- ext4_group_t o_groups_count;
ext4_grpblk_t last;
ext4_grpblk_t add;
struct buffer_head *bh;
@@ -965,7 +963,6 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
* yet: we're going to revalidate es->s_blocks_count after
* taking the s_resize_lock below. */
o_blocks_count = ext4_blocks_count(es);
- o_groups_count = EXT4_SB(sb)->s_groups_count;
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: extending last group from %llu uto %llu blocks\n",
@@ -1045,13 +1042,12 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
goto exit_put;
}
ext4_blocks_count_set(es, o_blocks_count + add);
- ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
- sb->s_dirt = 1;
mutex_unlock(&EXT4_SB(sb)->s_resize_lock);
ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
o_blocks_count + add);
/* We add the blocks to the bitmap and set the group need init bit */
ext4_add_groupblocks(handle, sb, o_blocks_count, add);
+ ext4_handle_dirty_super(handle, sb);
ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
o_blocks_count + add);
if ((err = ext4_journal_stop(handle)))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 4e8983a9811..26147746c27 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -241,14 +241,14 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
- vfs_check_frozen(sb, SB_FREEZE_WRITE);
+ vfs_check_frozen(sb, SB_FREEZE_TRANS);
/* Special case here: if the journal has aborted behind our
* backs (eg. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly. */
journal = EXT4_SB(sb)->s_journal;
if (journal) {
if (is_journal_aborted(journal)) {
- ext4_abort(sb, __func__, "Detected aborted journal");
+ ext4_abort(sb, "Detected aborted journal");
return ERR_PTR(-EROFS);
}
return jbd2_journal_start(journal, nblocks);
@@ -262,7 +262,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
* that sync() will call the filesystem's write_super callback if
* appropriate.
*/
-int __ext4_journal_stop(const char *where, handle_t *handle)
+int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
struct super_block *sb;
int err;
@@ -279,12 +279,13 @@ int __ext4_journal_stop(const char *where, handle_t *handle)
if (!err)
err = rc;
if (err)
- __ext4_std_error(sb, where, err);
+ __ext4_std_error(sb, where, line, err);
return err;
}
-void ext4_journal_abort_handle(const char *caller, const char *err_fn,
- struct buffer_head *bh, handle_t *handle, int err)
+void ext4_journal_abort_handle(const char *caller, unsigned int line,
+ const char *err_fn, struct buffer_head *bh,
+ handle_t *handle, int err)
{
char nbuf[16];
const char *errstr = ext4_decode_error(NULL, err, nbuf);
@@ -300,12 +301,47 @@ void ext4_journal_abort_handle(const char *caller, const char *err_fn,
if (is_handle_aborted(handle))
return;
- printk(KERN_ERR "%s: aborting transaction: %s in %s\n",
- caller, errstr, err_fn);
+ printk(KERN_ERR "%s:%d: aborting transaction: %s in %s\n",
+ caller, line, errstr, err_fn);
jbd2_journal_abort_handle(handle);
}
+static void __save_error_info(struct super_block *sb, const char *func,
+ unsigned int line)
+{
+ struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+ EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+ es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+ es->s_last_error_time = cpu_to_le32(get_seconds());
+ strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
+ es->s_last_error_line = cpu_to_le32(line);
+ if (!es->s_first_error_time) {
+ es->s_first_error_time = es->s_last_error_time;
+ strncpy(es->s_first_error_func, func,
+ sizeof(es->s_first_error_func));
+ es->s_first_error_line = cpu_to_le32(line);
+ es->s_first_error_ino = es->s_last_error_ino;
+ es->s_first_error_block = es->s_last_error_block;
+ }
+ /*
+ * Start the daily error reporting function if it hasn't been
+ * started already
+ */
+ if (!es->s_error_count)
+ mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
+ es->s_error_count = cpu_to_le32(le32_to_cpu(es->s_error_count) + 1);
+}
+
+static void save_error_info(struct super_block *sb, const char *func,
+ unsigned int line)
+{
+ __save_error_info(sb, func, line);
+ ext4_commit_super(sb, 1);
+}
+
+
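The two functions above implement a simple bookkeeping rule for the new superblock error fields: the "last error" slots are overwritten on every failure, while the "first error" slots are filled once and then stick. The rule in isolation, as a sketch with a hypothetical struct:

struct err_log {
        unsigned int first_time;        /* 0 means never set */
        unsigned int last_time;
};

static void log_error(struct err_log *log, unsigned int now)
{
        log->last_time = now;           /* always tracks the newest */
        if (!log->first_time)
                log->first_time = now;  /* written once, then preserved */
}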
/* Deal with the reporting of failure conditions on a filesystem such as
* inconsistencies detected or read IO failures.
*
@@ -323,11 +359,6 @@ void ext4_journal_abort_handle(const char *caller, const char *err_fn,
static void ext4_handle_error(struct super_block *sb)
{
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-
- EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
- es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
-
if (sb->s_flags & MS_RDONLY)
return;
@@ -342,19 +373,19 @@ static void ext4_handle_error(struct super_block *sb)
ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
sb->s_flags |= MS_RDONLY;
}
- ext4_commit_super(sb, 1);
if (test_opt(sb, ERRORS_PANIC))
panic("EXT4-fs (device %s): panic forced after error\n",
sb->s_id);
}
void __ext4_error(struct super_block *sb, const char *function,
- const char *fmt, ...)
+ unsigned int line, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
- printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
+ sb->s_id, function, line, current->comm);
vprintk(fmt, args);
printk("\n");
va_end(args);
@@ -362,14 +393,22 @@ void __ext4_error(struct super_block *sb, const char *function,
ext4_handle_error(sb);
}
-void ext4_error_inode(const char *function, struct inode *inode,
+void ext4_error_inode(struct inode *inode, const char *function,
+ unsigned int line, ext4_fsblk_t block,
const char *fmt, ...)
{
va_list args;
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ es->s_last_error_ino = cpu_to_le32(inode->i_ino);
+ es->s_last_error_block = cpu_to_le64(block);
+ save_error_info(inode->i_sb, function, line);
va_start(args, fmt);
- printk(KERN_CRIT "EXT4-fs error (device %s): %s: inode #%lu: (comm %s) ",
- inode->i_sb->s_id, function, inode->i_ino, current->comm);
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
+ inode->i_sb->s_id, function, line, inode->i_ino);
+ if (block)
+ printk("block %llu: ", block);
+ printk("comm %s: ", current->comm);
vprintk(fmt, args);
printk("\n");
va_end(args);
@@ -377,20 +416,26 @@ void ext4_error_inode(const char *function, struct inode *inode,
ext4_handle_error(inode->i_sb);
}
-void ext4_error_file(const char *function, struct file *file,
- const char *fmt, ...)
+void ext4_error_file(struct file *file, const char *function,
+ unsigned int line, const char *fmt, ...)
{
va_list args;
+ struct ext4_super_block *es;
struct inode *inode = file->f_dentry->d_inode;
char pathname[80], *path;
+ es = EXT4_SB(inode->i_sb)->s_es;
+ es->s_last_error_ino = cpu_to_le32(inode->i_ino);
+ save_error_info(inode->i_sb, function, line);
va_start(args, fmt);
path = d_path(&(file->f_path), pathname, sizeof(pathname));
if (!path)
path = "(unknown)";
printk(KERN_CRIT
- "EXT4-fs error (device %s): %s: inode #%lu (comm %s path %s): ",
- inode->i_sb->s_id, function, inode->i_ino, current->comm, path);
+ "EXT4-fs error (device %s): %s:%d: inode #%lu "
+ "(comm %s path %s): ",
+ inode->i_sb->s_id, function, line, inode->i_ino,
+ current->comm, path);
vprintk(fmt, args);
printk("\n");
va_end(args);
@@ -435,7 +480,8 @@ static const char *ext4_decode_error(struct super_block *sb, int errno,
/* __ext4_std_error decodes expected errors from journaling functions
* automatically and invokes the appropriate error response. */
-void __ext4_std_error(struct super_block *sb, const char *function, int errno)
+void __ext4_std_error(struct super_block *sb, const char *function,
+ unsigned int line, int errno)
{
char nbuf[16];
const char *errstr;
@@ -448,8 +494,9 @@ void __ext4_std_error(struct super_block *sb, const char *function, int errno)
return;
errstr = ext4_decode_error(sb, errno, nbuf);
- printk(KERN_CRIT "EXT4-fs error (device %s) in %s: %s\n",
- sb->s_id, function, errstr);
+ printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
+ sb->s_id, function, line, errstr);
+ save_error_info(sb, function, line);
ext4_handle_error(sb);
}
@@ -464,29 +511,29 @@ void __ext4_std_error(struct super_block *sb, const char *function, int errno)
* case we take the easy way out and panic immediately.
*/
-void ext4_abort(struct super_block *sb, const char *function,
- const char *fmt, ...)
+void __ext4_abort(struct super_block *sb, const char *function,
+ unsigned int line, const char *fmt, ...)
{
va_list args;
+ save_error_info(sb, function, line);
va_start(args, fmt);
- printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
+ function, line);
vprintk(fmt, args);
printk("\n");
va_end(args);
+ if ((sb->s_flags & MS_RDONLY) == 0) {
+ ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
+ sb->s_flags |= MS_RDONLY;
+ EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
+ if (EXT4_SB(sb)->s_journal)
+ jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
+ save_error_info(sb, function, line);
+ }
if (test_opt(sb, ERRORS_PANIC))
panic("EXT4-fs panic from previous error\n");
-
- if (sb->s_flags & MS_RDONLY)
- return;
-
- ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
- EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
- sb->s_flags |= MS_RDONLY;
- EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
- if (EXT4_SB(sb)->s_journal)
- jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
}
void ext4_msg (struct super_block * sb, const char *prefix,
@@ -502,38 +549,47 @@ void ext4_msg (struct super_block * sb, const char *prefix,
}
void __ext4_warning(struct super_block *sb, const char *function,
- const char *fmt, ...)
+ unsigned int line, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
- printk(KERN_WARNING "EXT4-fs warning (device %s): %s: ",
- sb->s_id, function);
+ printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
+ sb->s_id, function, line);
vprintk(fmt, args);
printk("\n");
va_end(args);
}
-void ext4_grp_locked_error(struct super_block *sb, ext4_group_t grp,
- const char *function, const char *fmt, ...)
+void __ext4_grp_locked_error(const char *function, unsigned int line,
+ struct super_block *sb, ext4_group_t grp,
+ unsigned long ino, ext4_fsblk_t block,
+ const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
va_list args;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ es->s_last_error_ino = cpu_to_le32(ino);
+ es->s_last_error_block = cpu_to_le64(block);
+ __save_error_info(sb, function, line);
va_start(args, fmt);
- printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
+ sb->s_id, function, line, grp);
+ if (ino)
+ printk("inode %lu: ", ino);
+ if (block)
+ printk("block %llu:", (unsigned long long) block);
vprintk(fmt, args);
printk("\n");
va_end(args);
if (test_opt(sb, ERRORS_CONT)) {
- EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
- es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
ext4_commit_super(sb, 0);
return;
}
+
ext4_unlock_group(sb, grp);
ext4_handle_error(sb);
/*
@@ -660,8 +716,7 @@ static void ext4_put_super(struct super_block *sb)
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
if (err < 0)
- ext4_abort(sb, __func__,
- "Couldn't clean up the journal");
+ ext4_abort(sb, "Couldn't clean up the journal");
}
ext4_release_system_zone(sb);
@@ -813,8 +868,10 @@ static void destroy_inodecache(void)
kmem_cache_destroy(ext4_inode_cachep);
}
-static void ext4_clear_inode(struct inode *inode)
+void ext4_clear_inode(struct inode *inode)
{
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
dquot_drop(inode);
ext4_discard_preallocations(inode);
if (EXT4_JOURNAL(inode))
@@ -946,14 +1003,12 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, ",journal_async_commit");
else if (test_opt(sb, JOURNAL_CHECKSUM))
seq_puts(seq, ",journal_checksum");
- if (test_opt(sb, NOBH))
- seq_puts(seq, ",nobh");
if (test_opt(sb, I_VERSION))
seq_puts(seq, ",i_version");
- if (!test_opt(sb, DELALLOC))
+ if (!test_opt(sb, DELALLOC) &&
+ !(def_mount_opts & EXT4_DEFM_NODELALLOC))
seq_puts(seq, ",nodelalloc");
-
if (sbi->s_stripe)
seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
/*
@@ -977,7 +1032,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (test_opt(sb, NO_AUTO_DA_ALLOC))
seq_puts(seq, ",noauto_da_alloc");
- if (test_opt(sb, DISCARD))
+ if (test_opt(sb, DISCARD) && !(def_mount_opts & EXT4_DEFM_DISCARD))
seq_puts(seq, ",discard");
if (test_opt(sb, NOLOAD))
@@ -986,6 +1041,10 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
if (test_opt(sb, DIOREAD_NOLOCK))
seq_puts(seq, ",dioread_nolock");
+ if (test_opt(sb, BLOCK_VALIDITY) &&
+ !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY))
+ seq_puts(seq, ",block_validity");
+
ext4_show_quota_options(seq, sb);
return 0;
@@ -1065,6 +1124,7 @@ static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
char *path);
+static int ext4_quota_off(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
@@ -1086,7 +1146,7 @@ static const struct dquot_operations ext4_quota_operations = {
static const struct quotactl_ops ext4_qctl_operations = {
.quota_on = ext4_quota_on,
- .quota_off = dquot_quota_off,
+ .quota_off = ext4_quota_off,
.quota_sync = dquot_quota_sync,
.get_info = dquot_get_dqinfo,
.set_info = dquot_set_dqinfo,
@@ -1100,14 +1160,13 @@ static const struct super_operations ext4_sops = {
.destroy_inode = ext4_destroy_inode,
.write_inode = ext4_write_inode,
.dirty_inode = ext4_dirty_inode,
- .delete_inode = ext4_delete_inode,
+ .evict_inode = ext4_evict_inode,
.put_super = ext4_put_super,
.sync_fs = ext4_sync_fs,
.freeze_fs = ext4_freeze,
.unfreeze_fs = ext4_unfreeze,
.statfs = ext4_statfs,
.remount_fs = ext4_remount,
- .clear_inode = ext4_clear_inode,
.show_options = ext4_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext4_quota_read,
@@ -1121,12 +1180,11 @@ static const struct super_operations ext4_nojournal_sops = {
.destroy_inode = ext4_destroy_inode,
.write_inode = ext4_write_inode,
.dirty_inode = ext4_dirty_inode,
- .delete_inode = ext4_delete_inode,
+ .evict_inode = ext4_evict_inode,
.write_super = ext4_write_super,
.put_super = ext4_put_super,
.statfs = ext4_statfs,
.remount_fs = ext4_remount,
- .clear_inode = ext4_clear_inode,
.show_options = ext4_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext4_quota_read,
@@ -1624,10 +1682,12 @@ set_qf_format:
*n_blocks_count = option;
break;
case Opt_nobh:
- set_opt(sbi->s_mount_opt, NOBH);
+ ext4_msg(sb, KERN_WARNING,
+ "Ignoring deprecated nobh option");
break;
case Opt_bh:
- clear_opt(sbi->s_mount_opt, NOBH);
+ ext4_msg(sb, KERN_WARNING,
+ "Ignoring deprecated bh option");
break;
case Opt_i_version:
set_opt(sbi->s_mount_opt, I_VERSION);
@@ -2249,6 +2309,8 @@ static ssize_t session_write_kbytes_show(struct ext4_attr *a,
{
struct super_block *sb = sbi->s_buddy_cache->i_sb;
+ if (!sb->s_bdev->bd_part)
+ return snprintf(buf, PAGE_SIZE, "0\n");
return snprintf(buf, PAGE_SIZE, "%lu\n",
(part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
sbi->s_sectors_written_start) >> 1);
@@ -2259,6 +2321,8 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a,
{
struct super_block *sb = sbi->s_buddy_cache->i_sb;
+ if (!sb->s_bdev->bd_part)
+ return snprintf(buf, PAGE_SIZE, "0\n");
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)(sbi->s_kbytes_written +
((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
@@ -2431,6 +2495,53 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 1;
}
+/*
+ * This function is called once a day if we have errors logged
+ * on the file system
+ */
+static void print_daily_error_info(unsigned long arg)
+{
+ struct super_block *sb = (struct super_block *) arg;
+ struct ext4_sb_info *sbi;
+ struct ext4_super_block *es;
+
+ sbi = EXT4_SB(sb);
+ es = sbi->s_es;
+
+ if (es->s_error_count)
+ ext4_msg(sb, KERN_NOTICE, "error count: %u",
+ le32_to_cpu(es->s_error_count));
+ if (es->s_first_error_time) {
+ printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
+ sb->s_id, le32_to_cpu(es->s_first_error_time),
+ (int) sizeof(es->s_first_error_func),
+ es->s_first_error_func,
+ le32_to_cpu(es->s_first_error_line));
+ if (es->s_first_error_ino)
+ printk(": inode %u",
+ le32_to_cpu(es->s_first_error_ino));
+ if (es->s_first_error_block)
+ printk(": block %llu", (unsigned long long)
+ le64_to_cpu(es->s_first_error_block));
+ printk("\n");
+ }
+ if (es->s_last_error_time) {
+ printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
+ sb->s_id, le32_to_cpu(es->s_last_error_time),
+ (int) sizeof(es->s_last_error_func),
+ es->s_last_error_func,
+ le32_to_cpu(es->s_last_error_line));
+ if (es->s_last_error_ino)
+ printk(": inode %u",
+ le32_to_cpu(es->s_last_error_ino));
+ if (es->s_last_error_block)
+ printk(": block %llu", (unsigned long long)
+ le64_to_cpu(es->s_last_error_block));
+ printk("\n");
+ }
+ mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
+}
+
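This callback is wired up near the end of ext4_fill_super() below with open-coded init_timer()/.function/.data assignments; the setup_timer() shorthand of this kernel generation would express the same thing in one call. A sketch, not the patch's code (the helper name is hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Bind the callback and its argument, then arm the first expiry
 * one day out; print_daily_error_info() re-arms itself after that. */
static void arm_error_report_timer(struct ext4_sb_info *sbi,
                                   struct super_block *sb)
{
        setup_timer(&sbi->s_err_report, print_daily_error_info,
                    (unsigned long) sb);
        mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);
}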
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
__releases(kernel_lock)
__acquires(kernel_lock)
@@ -2448,7 +2559,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root;
char *cp;
const char *descr;
- int ret = -EINVAL;
+ int ret = -ENOMEM;
int blocksize;
unsigned int db_count;
unsigned int i;
@@ -2459,13 +2570,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
- return -ENOMEM;
+ goto out_free_orig;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
- return -ENOMEM;
+ goto out_free_orig;
}
sb->s_fs_info = sbi;
sbi->s_mount_opt = 0;
@@ -2473,8 +2584,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_resgid = EXT4_DEF_RESGID;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
sbi->s_sb_block = sb_block;
- sbi->s_sectors_written_start = part_stat_read(sb->s_bdev->bd_part,
- sectors[1]);
+ if (sb->s_bdev->bd_part)
+ sbi->s_sectors_written_start =
+ part_stat_read(sb->s_bdev->bd_part, sectors[1]);
unlock_kernel();
@@ -2482,6 +2594,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
*cp = '!';
+ ret = -EINVAL;
blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
if (!blocksize) {
ext4_msg(sb, KERN_ERR, "unable to set blocksize");
@@ -2546,6 +2659,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
else
set_opt(sbi->s_mount_opt, ERRORS_RO);
+ if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
+ set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+ if (def_mount_opts & EXT4_DEFM_DISCARD)
+ set_opt(sbi->s_mount_opt, DISCARD);
sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -2553,15 +2670,23 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
- set_opt(sbi->s_mount_opt, BARRIER);
+ if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
+ set_opt(sbi->s_mount_opt, BARRIER);
/*
* enable delayed allocation by default
* Use -o nodelalloc to turn it off
*/
- if (!IS_EXT3_SB(sb))
+ if (!IS_EXT3_SB(sb) &&
+ ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
set_opt(sbi->s_mount_opt, DELALLOC);
+ if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
+ &journal_devnum, &journal_ioprio, NULL, 0)) {
+ ext4_msg(sb, KERN_WARNING,
+ "failed to parse options in superblock: %s",
+ sbi->s_es->s_mount_opts);
+ }
if (!parse_options((char *) data, sb, &journal_devnum,
&journal_ioprio, NULL, 0))
goto failed_mount;
@@ -2912,18 +3037,7 @@ no_journal:
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount_wq;
}
- if (test_opt(sb, NOBH)) {
- if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
- ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
- "its supported only with writeback mode");
- clear_opt(sbi->s_mount_opt, NOBH);
- }
- if (test_opt(sb, DIOREAD_NOLOCK)) {
- ext4_msg(sb, KERN_WARNING, "dioread_nolock option is "
- "not supported with nobh mode");
- goto failed_mount_wq;
- }
- }
+
EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
if (!EXT4_SB(sb)->dio_unwritten_wq) {
printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
@@ -3010,7 +3124,7 @@ no_journal:
ext4_ext_init(sb);
err = ext4_mb_init(sb, needs_recovery);
if (err) {
- ext4_msg(sb, KERN_ERR, "failed to initalize mballoc (%d)",
+ ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
err);
goto failed_mount4;
}
@@ -3043,7 +3157,14 @@ no_journal:
descr = "out journal";
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
- "Opts: %s", descr, orig_data);
+ "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+ *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
+
+ init_timer(&sbi->s_err_report);
+ sbi->s_err_report.function = print_daily_error_info;
+ sbi->s_err_report.data = (unsigned long) sb;
+ if (es->s_error_count)
+ mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
lock_kernel();
kfree(orig_data);
@@ -3093,6 +3214,7 @@ out_fail:
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
lock_kernel();
+out_free_orig:
kfree(orig_data);
return ret;
}
@@ -3110,7 +3232,7 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
journal->j_min_batch_time = sbi->s_min_batch_time;
journal->j_max_batch_time = sbi->s_max_batch_time;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (test_opt(sb, BARRIER))
journal->j_flags |= JBD2_BARRIER;
else
@@ -3119,7 +3241,7 @@ static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
else
journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
static journal_t *ext4_get_journal(struct super_block *sb,
@@ -3327,8 +3449,17 @@ static int ext4_load_journal(struct super_block *sb,
if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER))
err = jbd2_journal_wipe(journal, !really_read_only);
- if (!err)
+ if (!err) {
+ char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
+ if (save)
+ memcpy(save, ((char *) es) +
+ EXT4_S_ERR_START, EXT4_S_ERR_LEN);
err = jbd2_journal_load(journal);
+ if (save)
+ memcpy(((char *) es) + EXT4_S_ERR_START,
+ save, EXT4_S_ERR_LEN);
+ kfree(save);
+ }
if (err) {
ext4_msg(sb, KERN_ERR, "error loading journal");
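
The kmalloc/memcpy bracket around jbd2_journal_load() above exists because journal recovery may replay an older on-disk copy of the superblock, overwriting the error fields this patch now maintains. Stripped to its essentials, the pattern is (region, LEN and do_recovery are hypothetical):

    char *save = kmalloc(LEN, GFP_KERNEL);   /* best effort; NULL tolerated */
    if (save)
            memcpy(save, region, LEN);       /* snapshot before recovery */
    err = do_recovery();                     /* may rewrite the whole block */
    if (save)
            memcpy(region, save, LEN);       /* restore the live values */
    kfree(save);                             /* kfree(NULL) is a no-op */
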
@@ -3384,10 +3515,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
*/
if (!(sb->s_flags & MS_RDONLY))
es->s_wtime = cpu_to_le32(get_seconds());
- es->s_kbytes_written =
- cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
+ if (sb->s_bdev->bd_part)
+ es->s_kbytes_written =
+ cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
EXT4_SB(sb)->s_sectors_written_start) >> 1));
+ else
+ es->s_kbytes_written =
+ cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeblocks_counter));
es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
@@ -3491,7 +3626,7 @@ int ext4_force_commit(struct super_block *sb)
journal = EXT4_SB(sb)->s_journal;
if (journal) {
- vfs_check_frozen(sb, SB_FREEZE_WRITE);
+ vfs_check_frozen(sb, SB_FREEZE_TRANS);
ret = ext4_journal_force_commit(journal);
}
@@ -3616,7 +3751,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
- ext4_abort(sb, __func__, "Abort forced by user");
+ ext4_abort(sb, "Abort forced by user");
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
@@ -3981,6 +4116,18 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
return err;
}
+static int ext4_quota_off(struct super_block *sb, int type)
+{
+ /* Force all delayed allocation blocks to be allocated */
+ if (test_opt(sb, DELALLOC)) {
+ down_read(&sb->s_umount);
+ sync_filesystem(sb);
+ up_read(&sb->s_umount);
+ }
+
+ return dquot_quota_off(sb, type);
+}
+
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
* itself serializes the operations (and noone else should touch the files)
@@ -4030,7 +4177,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
int err = 0;
int offset = off & (sb->s_blocksize - 1);
- int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL;
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
@@ -4055,24 +4201,16 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
bh = ext4_bread(handle, inode, blk, 1, &err);
if (!bh)
goto out;
- if (journal_quota) {
- err = ext4_journal_get_write_access(handle, bh);
- if (err) {
- brelse(bh);
- goto out;
- }
+ err = ext4_journal_get_write_access(handle, bh);
+ if (err) {
+ brelse(bh);
+ goto out;
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, len);
flush_dcache_page(bh->b_page);
unlock_buffer(bh);
- if (journal_quota)
- err = ext4_handle_dirty_metadata(handle, NULL, bh);
- else {
- /* Always do at least ordered writes for quotas */
- err = ext4_jbd2_file_inode(handle, inode);
- mark_buffer_dirty(bh);
- }
+ err = ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
out:
if (err) {
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 04338009793..3a8cd8dff1a 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -458,8 +458,7 @@ static void ext4_xattr_update_super_block(handle_t *handle,
if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
- sb->s_dirt = 1;
- ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+ ext4_handle_dirty_super(handle, sb);
}
}
@@ -1418,7 +1417,7 @@ ext4_xattr_cache_insert(struct buffer_head *bh)
ea_bdebug(bh, "out of memory");
return;
}
- error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+ error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
if (error) {
mb_cache_entry_free(ce);
if (error == -EBUSY) {
@@ -1490,8 +1489,8 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
return NULL; /* never share */
ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
- ce = mb_cache_entry_find_first(ext4_xattr_cache, 0,
- inode->i_sb->s_bdev, hash);
+ ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev,
+ hash);
while (ce) {
struct buffer_head *bh;
@@ -1515,7 +1514,7 @@ again:
return bh;
}
brelse(bh);
- ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+ ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
}
return NULL;
}
@@ -1591,9 +1590,7 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,
int __init
init_ext4_xattr(void)
{
- ext4_xattr_cache = mb_cache_create("ext4_xattr", NULL,
- sizeof(struct mb_cache_entry) +
- sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+ ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
if (!ext4_xattr_cache)
return -ENOMEM;
return 0;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 27ac2572595..d75a77f85c2 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -306,7 +306,6 @@ extern long fat_generic_ioctl(struct file *filp, unsigned int cmd,
extern const struct file_operations fat_file_operations;
extern const struct inode_operations fat_file_inode_operations;
extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
-extern int fat_setsize(struct inode *inode, loff_t offset);
extern void fat_truncate_blocks(struct inode *inode, loff_t offset);
extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 990dfae022e..7257752b6d5 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -364,18 +364,6 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
return 0;
}
-int fat_setsize(struct inode *inode, loff_t offset)
-{
- int error;
-
- error = simple_setsize(inode, offset);
- if (error)
- return error;
- fat_truncate_blocks(inode, offset);
-
- return error;
-}
-
#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
/* valid file mode bits */
#define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO)
@@ -387,21 +375,6 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
unsigned int ia_valid;
int error;
- /*
- * Expand the file. Since inode_setattr() updates ->i_size
- * before calling the ->truncate(), but FAT needs to fill the
- * hole before it. XXX: this is no longer true with new truncate
- * sequence.
- */
- if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size > inode->i_size) {
- error = fat_cont_expand(inode, attr->ia_size);
- if (error || attr->ia_valid == ATTR_SIZE)
- goto out;
- attr->ia_valid &= ~ATTR_SIZE;
- }
- }
-
/* Check for setting the inode time. */
ia_valid = attr->ia_valid;
if (ia_valid & TIMES_SET_FLAGS) {
@@ -417,6 +390,21 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
goto out;
}
+ /*
+ * Expand the file. Since inode_setattr() updates ->i_size
+ * before calling the ->truncate(), but FAT needs to fill the
+ * hole before it. XXX: this is no longer true with new truncate
+ * sequence.
+ */
+ if (attr->ia_valid & ATTR_SIZE) {
+ if (attr->ia_size > inode->i_size) {
+ error = fat_cont_expand(inode, attr->ia_size);
+ if (error || attr->ia_valid == ATTR_SIZE)
+ goto out;
+ attr->ia_valid &= ~ATTR_SIZE;
+ }
+ }
+
if (((attr->ia_valid & ATTR_UID) &&
(attr->ia_uid != sbi->options.fs_uid)) ||
((attr->ia_valid & ATTR_GID) &&
@@ -441,12 +429,11 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- error = fat_setsize(inode, attr->ia_size);
- if (error)
- goto out;
+ truncate_setsize(inode, attr->ia_size);
+ fat_truncate_blocks(inode, attr->ia_size);
}
- generic_setattr(inode, attr);
+ setattr_copy(inode, attr);
mark_inode_dirty(inode);
out:
return error;
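
Dropping fat_setsize()/generic_setattr() in favour of truncate_setsize(), fat_truncate_blocks() and setattr_copy() matches the new VFS truncate sequence: the filesystem adjusts i_size and the page cache first, frees its own disk blocks, then copies the remaining attributes. A sketch of the resulting ->setattr tail (myfs_truncate_blocks is a hypothetical stand-in):

    if (attr->ia_valid & ATTR_SIZE) {
            truncate_setsize(inode, attr->ia_size);     /* i_size + pagecache */
            myfs_truncate_blocks(inode, attr->ia_size); /* on-disk blocks */
    }
    setattr_copy(inode, attr);  /* uid/gid/mode/times into the in-core inode */
    mark_inode_dirty(inode);
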
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 7bf45aee56d..830058057d3 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -159,7 +159,7 @@ static int fat_write_begin(struct file *file, struct address_space *mapping,
int err;
*pagep = NULL;
- err = cont_write_begin_newtrunc(file, mapping, pos, len, flags,
+ err = cont_write_begin(file, mapping, pos, len, flags,
pagep, fsdata, fat_get_block,
&MSDOS_I(mapping->host)->mmu_private);
if (err < 0)
@@ -212,8 +212,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
* FAT need to use the DIO_LOCKING for avoiding the race
* condition of fat_get_block() and ->truncate().
*/
- ret = blockdev_direct_IO_newtrunc(rw, iocb, inode, inode->i_sb->s_bdev,
- iov, offset, nr_segs, fat_get_block, NULL);
+ ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+ iov, offset, nr_segs, fat_get_block, NULL);
if (ret < 0 && (rw & WRITE))
fat_write_failed(mapping, offset + iov_length(iov, nr_segs));
@@ -263,7 +263,7 @@ static const struct address_space_operations fat_aops = {
* check if the location is still valid and retry if it
* isn't. Otherwise we do changes.
* 5. Spinlock is used to protect hash/unhash/location check/lookup
- * 6. fat_clear_inode() unhashes the F-d-c entry.
+ * 6. fat_evict_inode() unhashes the F-d-c entry.
* 7. lookup() and readdir() do igrab() if they find a F-d-c entry
* and consider negative result as cache miss.
*/
@@ -448,16 +448,15 @@ out:
EXPORT_SYMBOL_GPL(fat_build_inode);
-static void fat_delete_inode(struct inode *inode)
+static void fat_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- inode->i_size = 0;
- fat_truncate_blocks(inode, 0);
- clear_inode(inode);
-}
-
-static void fat_clear_inode(struct inode *inode)
-{
+ if (!inode->i_nlink) {
+ inode->i_size = 0;
+ fat_truncate_blocks(inode, 0);
+ }
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
fat_cache_inval_inode(inode);
fat_detach(inode);
}
@@ -674,12 +673,11 @@ static const struct super_operations fat_sops = {
.alloc_inode = fat_alloc_inode,
.destroy_inode = fat_destroy_inode,
.write_inode = fat_write_inode,
- .delete_inode = fat_delete_inode,
+ .evict_inode = fat_evict_inode,
.put_super = fat_put_super,
.write_super = fat_write_super,
.sync_fs = fat_sync_fs,
.statfs = fat_statfs,
- .clear_inode = fat_clear_inode,
.remount_fs = fat_remount,
.show_options = fat_show_options,
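
The fat conversion above shows the general shape of ->evict_inode(), which replaces the old ->delete_inode()/->clear_inode() pair: one callback runs for every inode leaving memory and uses i_nlink to distinguish deletion from plain eviction. A hedged skeleton (myfs_evict_inode is illustrative):

    static void myfs_evict_inode(struct inode *inode)
    {
            truncate_inode_pages(&inode->i_data, 0);
            if (!inode->i_nlink) {
                    /* deletion path: release on-disk resources */
            }
            invalidate_inode_buffers(inode);
            end_writeback(inode);
            /* per-fs teardown formerly done in ->clear_inode() */
    }
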
diff --git a/fs/fcntl.c b/fs/fcntl.c
index f74d270ba15..6769fd0f35b 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -274,7 +274,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
ret = copy_from_user(&owner, owner_p, sizeof(owner));
if (ret)
- return ret;
+ return -EFAULT;
switch (owner.type) {
case F_OWNER_TID:
@@ -332,8 +332,11 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
}
read_unlock(&filp->f_owner.lock);
- if (!ret)
+ if (!ret) {
ret = copy_to_user(owner_p, &owner, sizeof(owner));
+ if (ret)
+ ret = -EFAULT;
+ }
return ret;
}
@@ -730,12 +733,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
while (fa) {
struct fown_struct *fown;
+ unsigned long flags;
+
if (fa->magic != FASYNC_MAGIC) {
printk(KERN_ERR "kill_fasync: bad magic number in "
"fasync_struct!\n");
return;
}
- spin_lock(&fa->fa_lock);
+ spin_lock_irqsave(&fa->fa_lock, flags);
if (fa->fa_file) {
fown = &fa->fa_file->f_owner;
/* Don't send SIGURG to processes which have not set a
@@ -744,7 +749,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
if (!(sig == SIGURG && fown->signum == 0))
send_sigio(fown, fa->fa_fd, band);
}
- spin_unlock(&fa->fa_lock);
+ spin_unlock_irqrestore(&fa->fa_lock, flags);
fa = rcu_dereference(fa->fa_next);
}
}
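
The move from spin_lock() to spin_lock_irqsave() on fa_lock matters because kill_fasync() can be reached from interrupt context; taking the lock with interrupts enabled could deadlock against an IRQ handler contending for the same lock. The general pattern:

    unsigned long flags;

    spin_lock_irqsave(&lock, flags);      /* disables local IRQs, saves state */
    /* ... touch data that interrupt handlers may also reach ... */
    spin_unlock_irqrestore(&lock, flags); /* restores the saved IRQ state */
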
@@ -762,11 +767,22 @@ void kill_fasync(struct fasync_struct **fp, int sig, int band)
}
EXPORT_SYMBOL(kill_fasync);
-static int __init fasync_init(void)
+static int __init fcntl_init(void)
{
+ /* please add new bits here to ensure allocation uniqueness */
+ BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ O_RDONLY | O_WRONLY | O_RDWR |
+ O_CREAT | O_EXCL | O_NOCTTY |
+ O_TRUNC | O_APPEND | O_NONBLOCK |
+ __O_SYNC | O_DSYNC | FASYNC |
+ O_DIRECT | O_LARGEFILE | O_DIRECTORY |
+ O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
+ FMODE_EXEC
+ ));
+
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
return 0;
}
-module_init(fasync_init)
+module_init(fcntl_init)
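
The BUILD_BUG_ON() in fcntl_init() verifies at compile time that the listed O_* flags occupy distinct bits: 19 flags are listed, one of them (O_RDONLY) is zero, so the population count of their bitwise OR must be 18. A toy version of the idiom with hypothetical flags:

    #define FLAG_A 0x1
    #define FLAG_B 0x2
    #define FLAG_C 0x4

    /* 3 distinct non-zero flags => their union must have 3 bits set;
     * a shared bit would lower the popcount and fail the build */
    BUILD_BUG_ON(3 != HWEIGHT32(FLAG_A | FLAG_B | FLAG_C));
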
diff --git a/fs/file.c b/fs/file.c
index 34bb7f71d99..0be344755c0 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -39,28 +39,27 @@ int sysctl_nr_open_max = 1024 * 1024; /* raised later */
*/
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
-static inline void * alloc_fdmem(unsigned int size)
+static inline void *alloc_fdmem(unsigned int size)
{
- if (size <= PAGE_SIZE)
- return kmalloc(size, GFP_KERNEL);
- else
- return vmalloc(size);
+ void *data;
+
+ data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
+ if (data != NULL)
+ return data;
+
+ return vmalloc(size);
}
-static inline void free_fdarr(struct fdtable *fdt)
+static void free_fdmem(void *ptr)
{
- if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
- kfree(fdt->fd);
- else
- vfree(fdt->fd);
+ is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}
-static inline void free_fdset(struct fdtable *fdt)
+static void __free_fdtable(struct fdtable *fdt)
{
- if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
- kfree(fdt->open_fds);
- else
- vfree(fdt->open_fds);
+ free_fdmem(fdt->fd);
+ free_fdmem(fdt->open_fds);
+ kfree(fdt);
}
static void free_fdtable_work(struct work_struct *work)
@@ -75,9 +74,8 @@ static void free_fdtable_work(struct work_struct *work)
spin_unlock_bh(&f->lock);
while(fdt) {
struct fdtable *next = fdt->next;
- vfree(fdt->fd);
- free_fdset(fdt);
- kfree(fdt);
+
+ __free_fdtable(fdt);
fdt = next;
}
}
@@ -98,7 +96,7 @@ void free_fdtable_rcu(struct rcu_head *rcu)
container_of(fdt, struct files_struct, fdtab));
return;
}
- if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
+ if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
kfree(fdt->fd);
kfree(fdt->open_fds);
kfree(fdt);
@@ -178,13 +176,12 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
fdt->open_fds = (fd_set *)data;
data += nr / BITS_PER_BYTE;
fdt->close_on_exec = (fd_set *)data;
- INIT_RCU_HEAD(&fdt->rcu);
fdt->next = NULL;
return fdt;
out_arr:
- free_fdarr(fdt);
+ free_fdmem(fdt->fd);
out_fdt:
kfree(fdt);
out:
@@ -214,9 +211,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
* caller and alloc_fdtable(). Cheaper to catch it here...
*/
if (unlikely(new_fdt->max_fds <= nr)) {
- free_fdarr(new_fdt);
- free_fdset(new_fdt);
- kfree(new_fdt);
+ __free_fdtable(new_fdt);
return -EMFILE;
}
/*
@@ -232,9 +227,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
free_fdtable(cur_fdt);
} else {
/* Somebody else expanded, so undo our attempt */
- free_fdarr(new_fdt);
- free_fdset(new_fdt);
- kfree(new_fdt);
+ __free_fdtable(new_fdt);
}
return 1;
}
@@ -312,7 +305,6 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
new_fdt->fd = &newf->fd_array[0];
- INIT_RCU_HEAD(&new_fdt->rcu);
new_fdt->next = NULL;
spin_lock(&oldf->file_lock);
@@ -325,11 +317,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
while (unlikely(open_files > new_fdt->max_fds)) {
spin_unlock(&oldf->file_lock);
- if (new_fdt != &newf->fdtab) {
- free_fdarr(new_fdt);
- free_fdset(new_fdt);
- kfree(new_fdt);
- }
+ if (new_fdt != &newf->fdtab)
+ __free_fdtable(new_fdt);
new_fdt = alloc_fdtable(open_files - 1);
if (!new_fdt) {
@@ -339,9 +328,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
/* beyond sysctl_nr_open; nothing to do */
if (unlikely(new_fdt->max_fds < open_files)) {
- free_fdarr(new_fdt);
- free_fdset(new_fdt);
- kfree(new_fdt);
+ __free_fdtable(new_fdt);
*errorp = -EMFILE;
goto out_release;
}
@@ -430,7 +417,6 @@ struct files_struct init_files = {
.fd = &init_files.fd_array[0],
.close_on_exec = (fd_set *)&init_files.close_on_exec_init,
.open_fds = (fd_set *)&init_files.open_fds_init,
- .rcu = RCU_HEAD_INIT,
},
.file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};
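
alloc_fdmem()/free_fdmem() above are an instance of the standard large-allocation fallback: try kmalloc() first (with __GFP_NOWARN so an expected failure stays quiet), fall back to vmalloc(), and let is_vmalloc_addr() select the matching free routine. In isolation:

    void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
    if (!buf)
            buf = vmalloc(size);   /* large/fragmented: virtually contiguous */
    /* ... use buf ... */
    if (is_vmalloc_addr(buf))
            vfree(buf);
    else
            kfree(buf);
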
diff --git a/fs/file_table.c b/fs/file_table.c
index 5c7d10ead4a..edecd36fed9 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -289,11 +289,20 @@ struct file *fget(unsigned int fd)
EXPORT_SYMBOL(fget);
/*
- * Lightweight file lookup - no refcnt increment if fd table isn't shared.
- * You can use this only if it is guranteed that the current task already
- * holds a refcnt to that file. That check has to be done at fget() only
- * and a flag is returned to be passed to the corresponding fput_light().
- * There must not be a cloning between an fget_light/fput_light pair.
+ * Lightweight file lookup - no refcnt increment if fd table isn't shared.
+ *
+ * You can use this instead of fget if you satisfy all of the following
+ * conditions:
+ * 1) You must call fput_light before exiting the syscall and returning control
+ * to userspace (i.e. you cannot remember the returned struct file * after
+ * returning to userspace).
+ * 2) You must not call filp_close on the returned struct file * in between
+ * calls to fget_light and fput_light.
+ * 3) You must not clone the current task in between the calls to fget_light
+ * and fput_light.
+ *
+ * The fput_needed flag returned by fget_light should be passed to the
+ * corresponding fput_light.
*/
struct file *fget_light(unsigned int fd, int *fput_needed)
{
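
A sketch of how the rules in the rewritten comment play out in a typical syscall body (sys_my_op is hypothetical):

    long sys_my_op(unsigned int fd)
    {
            int fput_needed;
            long err = -EBADF;
            struct file *file = fget_light(fd, &fput_needed);

            if (file) {
                    /* use file here: no clone, no filp_close, and no
                     * stashing the pointer past syscall return */
                    err = 0;
                    fput_light(file, fput_needed);
            }
            return err;
    }
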
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index 50ab5eecb99..881aa3d217f 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -63,7 +63,7 @@ extern void vxfs_put_fake_inode(struct inode *);
extern struct vxfs_inode_info * vxfs_blkiget(struct super_block *, u_long, ino_t);
extern struct vxfs_inode_info * vxfs_stiget(struct super_block *, ino_t);
extern struct inode * vxfs_iget(struct super_block *, ino_t);
-extern void vxfs_clear_inode(struct inode *);
+extern void vxfs_evict_inode(struct inode *);
/* vxfs_lookup.c */
extern const struct inode_operations vxfs_dir_inode_ops;
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 03a6ea5e99f..79d1b4ea13e 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -337,15 +337,17 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
}
/**
- * vxfs_clear_inode - remove inode from main memory
+ * vxfs_evict_inode - remove inode from main memory
* @ip: inode to discard.
*
* Description:
- * vxfs_clear_inode() is called on the final iput and frees the private
+ * vxfs_evict_inode() is called on the final iput and frees the private
* inode area.
*/
void
-vxfs_clear_inode(struct inode *ip)
+vxfs_evict_inode(struct inode *ip)
{
+ truncate_inode_pages(&ip->i_data, 0);
+ end_writeback(ip);
kmem_cache_free(vxfs_inode_cachep, ip->i_private);
}
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 1e8af939b3e..dc0c041e85c 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -61,7 +61,7 @@ static int vxfs_statfs(struct dentry *, struct kstatfs *);
static int vxfs_remount(struct super_block *, int *, char *);
static const struct super_operations vxfs_super_ops = {
- .clear_inode = vxfs_clear_inode,
+ .evict_inode = vxfs_evict_inode,
.put_super = vxfs_put_super,
.statfs = vxfs_statfs,
.remount_fs = vxfs_remount,
@@ -135,7 +135,7 @@ static int vxfs_remount(struct super_block *sb, int *flags, char *data)
}
/**
- * vxfs_read_super - read superblock into memory and initalize filesystem
+ * vxfs_read_super - read superblock into memory and initialize filesystem
* @sbp: VFS superblock (to fill)
* @dp: fs private mount data
* @silent: do not complain loudly when sth is wrong
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ea8592b9069..7d9d06ba184 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -26,63 +26,38 @@
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
+#include <linux/tracepoint.h>
#include "internal.h"
-#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
-
-/*
- * We don't actually have pdflush, but this one is exported though /proc...
- */
-int nr_pdflush_threads;
-
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
-struct wb_writeback_args {
+struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
enum writeback_sync_modes sync_mode;
unsigned int for_kupdate:1;
unsigned int range_cyclic:1;
unsigned int for_background:1;
- unsigned int sb_pinned:1;
-};
-/*
- * Work items for the bdi_writeback threads
- */
-struct bdi_work {
struct list_head list; /* pending work list */
- struct rcu_head rcu_head; /* for RCU free/clear of work */
-
- unsigned long seen; /* threads that have seen this work */
- atomic_t pending; /* number of threads still to do work */
-
- struct wb_writeback_args args; /* writeback arguments */
-
- unsigned long state; /* flag bits, see WS_* */
-};
-
-enum {
- WS_USED_B = 0,
- WS_ONSTACK_B,
+ struct completion *done; /* set if the caller waits */
};
-#define WS_USED (1 << WS_USED_B)
-#define WS_ONSTACK (1 << WS_ONSTACK_B)
+/*
+ * Include the creation of the trace points after defining the
+ * wb_writeback_work structure so that the definition remains local to this
+ * file.
+ */
+#define CREATE_TRACE_POINTS
+#include <trace/events/writeback.h>
-static inline bool bdi_work_on_stack(struct bdi_work *work)
-{
- return test_bit(WS_ONSTACK_B, &work->state);
-}
+#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
-static inline void bdi_work_init(struct bdi_work *work,
- struct wb_writeback_args *args)
-{
- INIT_RCU_HEAD(&work->rcu_head);
- work->args = *args;
- work->state = WS_USED;
-}
+/*
+ * We don't actually have pdflush, but this one is exported though /proc...
+ */
+int nr_pdflush_threads;
/**
* writeback_in_progress - determine whether there is writeback in progress
@@ -93,196 +68,84 @@ static inline void bdi_work_init(struct bdi_work *work,
*/
int writeback_in_progress(struct backing_dev_info *bdi)
{
- return !list_empty(&bdi->work_list);
-}
-
-static void bdi_work_clear(struct bdi_work *work)
-{
- clear_bit(WS_USED_B, &work->state);
- smp_mb__after_clear_bit();
- /*
- * work can have disappeared at this point. bit waitq functions
- * should be able to tolerate this, provided bdi_sched_wait does
- * not dereference it's pointer argument.
- */
- wake_up_bit(&work->state, WS_USED_B);
-}
-
-static void bdi_work_free(struct rcu_head *head)
-{
- struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
-
- if (!bdi_work_on_stack(work))
- kfree(work);
- else
- bdi_work_clear(work);
-}
-
-static void wb_work_complete(struct bdi_work *work)
-{
- const enum writeback_sync_modes sync_mode = work->args.sync_mode;
- int onstack = bdi_work_on_stack(work);
-
- /*
- * For allocated work, we can clear the done/seen bit right here.
- * For on-stack work, we need to postpone both the clear and free
- * to after the RCU grace period, since the stack could be invalidated
- * as soon as bdi_work_clear() has done the wakeup.
- */
- if (!onstack)
- bdi_work_clear(work);
- if (sync_mode == WB_SYNC_NONE || onstack)
- call_rcu(&work->rcu_head, bdi_work_free);
+ return test_bit(BDI_writeback_running, &bdi->state);
}
-static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
+static void bdi_queue_work(struct backing_dev_info *bdi,
+ struct wb_writeback_work *work)
{
- /*
- * The caller has retrieved the work arguments from this work,
- * drop our reference. If this is the last ref, delete and free it
- */
- if (atomic_dec_and_test(&work->pending)) {
- struct backing_dev_info *bdi = wb->bdi;
-
- spin_lock(&bdi->wb_lock);
- list_del_rcu(&work->list);
- spin_unlock(&bdi->wb_lock);
-
- wb_work_complete(work);
- }
-}
+ trace_writeback_queue(bdi, work);
-static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
-{
- work->seen = bdi->wb_mask;
- BUG_ON(!work->seen);
- atomic_set(&work->pending, bdi->wb_cnt);
- BUG_ON(!bdi->wb_cnt);
-
- /*
- * list_add_tail_rcu() contains the necessary barriers to
- * make sure the above stores are seen before the item is
- * noticed on the list
- */
- spin_lock(&bdi->wb_lock);
- list_add_tail_rcu(&work->list, &bdi->work_list);
- spin_unlock(&bdi->wb_lock);
-
- /*
- * If the default thread isn't there, make sure we add it. When
- * it gets created and wakes up, we'll run this work.
- */
- if (unlikely(list_empty_careful(&bdi->wb_list)))
+ spin_lock_bh(&bdi->wb_lock);
+ list_add_tail(&work->list, &bdi->work_list);
+ if (bdi->wb.task) {
+ wake_up_process(bdi->wb.task);
+ } else {
+ /*
+ * The bdi thread isn't there; wake up the forker thread, which
+ * will create and run it.
+ */
+ trace_writeback_nothread(bdi, work);
wake_up_process(default_backing_dev_info.wb.task);
- else {
- struct bdi_writeback *wb = &bdi->wb;
-
- if (wb->task)
- wake_up_process(wb->task);
}
+ spin_unlock_bh(&bdi->wb_lock);
}
-/*
- * Used for on-stack allocated work items. The caller needs to wait until
- * the wb threads have acked the work before it's safe to continue.
- */
-static void bdi_wait_on_work_clear(struct bdi_work *work)
-{
- wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
- TASK_UNINTERRUPTIBLE);
-}
-
-static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
- struct wb_writeback_args *args,
- int wait)
+static void
+__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+ bool range_cyclic, bool for_background)
{
- struct bdi_work *work;
+ struct wb_writeback_work *work;
/*
* This is WB_SYNC_NONE writeback, so if allocation fails just
* wakeup the thread for old dirty data writeback
*/
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- bdi_work_init(work, args);
- bdi_queue_work(bdi, work);
- if (wait)
- bdi_wait_on_work_clear(work);
- } else {
- struct bdi_writeback *wb = &bdi->wb;
-
- if (wb->task)
- wake_up_process(wb->task);
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ if (bdi->wb.task) {
+ trace_writeback_nowork(bdi);
+ wake_up_process(bdi->wb.task);
+ }
+ return;
}
+
+ work->sync_mode = WB_SYNC_NONE;
+ work->nr_pages = nr_pages;
+ work->range_cyclic = range_cyclic;
+ work->for_background = for_background;
+
+ bdi_queue_work(bdi, work);
}
/**
- * bdi_sync_writeback - start and wait for writeback
+ * bdi_start_writeback - start writeback
* @bdi: the backing device to write from
- * @sb: write inodes from this super_block
+ * @nr_pages: the number of pages to write
*
* Description:
- * This does WB_SYNC_ALL data integrity writeback and waits for the
- * IO to complete. Callers must hold the sb s_umount semaphore for
- * reading, to avoid having the super disappear before we are done.
+ * This does WB_SYNC_NONE opportunistic writeback. The IO is only
+ * started when this function returns; we make no guarantees on
+ * completion. Caller need not hold sb s_umount semaphore.
+ *
*/
-static void bdi_sync_writeback(struct backing_dev_info *bdi,
- struct super_block *sb)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
- struct wb_writeback_args args = {
- .sb = sb,
- .sync_mode = WB_SYNC_ALL,
- .nr_pages = LONG_MAX,
- .range_cyclic = 0,
- /*
- * Setting sb_pinned is not necessary for WB_SYNC_ALL, but
- * lets make it explicitly clear.
- */
- .sb_pinned = 1,
- };
- struct bdi_work work;
-
- bdi_work_init(&work, &args);
- work.state |= WS_ONSTACK;
-
- bdi_queue_work(bdi, &work);
- bdi_wait_on_work_clear(&work);
+ __bdi_start_writeback(bdi, nr_pages, true, false);
}
/**
- * bdi_start_writeback - start writeback
+ * bdi_start_background_writeback - start background writeback
* @bdi: the backing device to write from
- * @sb: write inodes from this super_block
- * @nr_pages: the number of pages to write
- * @sb_locked: caller already holds sb umount sem.
*
* Description:
- * This does WB_SYNC_NONE opportunistic writeback. The IO is only
+ * This does WB_SYNC_NONE background writeback. The IO is only
* started when this function returns; we make no guarantees on
- * completion. Caller specifies whether sb umount sem is held already or not.
- *
+ * completion. Caller need not hold sb s_umount semaphore.
*/
-void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
- long nr_pages, int sb_locked)
+void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
- struct wb_writeback_args args = {
- .sb = sb,
- .sync_mode = WB_SYNC_NONE,
- .nr_pages = nr_pages,
- .range_cyclic = 1,
- .sb_pinned = sb_locked,
- };
-
- /*
- * We treat @nr_pages=0 as the special case to do background writeback,
- * ie. to sync pages until the background dirty threshold is reached.
- */
- if (!nr_pages) {
- args.nr_pages = LONG_MAX;
- args.for_background = 1;
- }
-
- bdi_alloc_queue_work(bdi, &args, sb_locked);
+ __bdi_start_writeback(bdi, LONG_MAX, true, true);
}
/*
@@ -386,10 +249,18 @@ static void move_expired_inodes(struct list_head *delaying_queue,
/*
* Queue all expired dirty inodes for io, eldest first.
+ * Before
+ *         newly dirtied     b_dirty    b_io    b_more_io
+ *         =============>    gf         edc     BA
+ * After
+ *         newly dirtied     b_dirty    b_io    b_more_io
+ *         =============>    g          fBAedc
+ *                                           |
+ *                                           +--> dequeue for IO
*/
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
- list_splice_init(&wb->b_more_io, wb->b_io.prev);
+ list_splice_init(&wb->b_more_io, &wb->b_io);
move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}
@@ -499,63 +370,36 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
spin_lock(&inode_lock);
inode->i_state &= ~I_SYNC;
- if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
- if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
- /*
- * More pages get dirtied by a fast dirtier.
- */
- goto select_queue;
- } else if (inode->i_state & I_DIRTY) {
- /*
- * At least XFS will redirty the inode during the
- * writeback (delalloc) and on io completion (isize).
- */
- redirty_tail(inode);
- } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+ if (!(inode->i_state & I_FREEING)) {
+ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
/*
* We didn't write back all the pages. nfs_writepages()
- * sometimes bales out without doing anything. Redirty
- * the inode; Move it from b_io onto b_more_io/b_dirty.
- */
- /*
- * akpm: if the caller was the kupdate function we put
- * this inode at the head of b_dirty so it gets first
- * consideration. Otherwise, move it to the tail, for
- * the reasons described there. I'm not really sure
- * how much sense this makes. Presumably I had a good
- * reasons for doing it this way, and I'd rather not
- * muck with it at present.
+ * sometimes bales out without doing anything.
*/
- if (wbc->for_kupdate) {
+ inode->i_state |= I_DIRTY_PAGES;
+ if (wbc->nr_to_write <= 0) {
/*
- * For the kupdate function we move the inode
- * to b_more_io so it will get more writeout as
- * soon as the queue becomes uncongested.
+ * slice used up: queue for next turn
*/
- inode->i_state |= I_DIRTY_PAGES;
-select_queue:
- if (wbc->nr_to_write <= 0) {
- /*
- * slice used up: queue for next turn
- */
- requeue_io(inode);
- } else {
- /*
- * somehow blocked: retry later
- */
- redirty_tail(inode);
- }
+ requeue_io(inode);
} else {
/*
- * Otherwise fully redirty the inode so that
- * other inodes on this superblock will get some
- * writeout. Otherwise heavy writing to one
- * file would indefinitely suspend writeout of
- * all the other files.
+ * Writeback blocked by something other than
+ * congestion. Delay the inode for some time to
+ * avoid spinning on the CPU (100% iowait)
+ * retrying writeback of the dirty page/inode
+ * that cannot be performed immediately.
*/
- inode->i_state |= I_DIRTY_PAGES;
redirty_tail(inode);
}
+ } else if (inode->i_state & I_DIRTY) {
+ /*
+ * Filesystems can dirty the inode during writeback
+ * operations, such as delayed allocation during
+ * submission or metadata updates after data IO
+ * completion.
+ */
+ redirty_tail(inode);
} else if (atomic_read(&inode->i_count)) {
/*
* The inode is clean, inuse
@@ -572,75 +416,69 @@ select_queue:
return ret;
}
-static void unpin_sb_for_writeback(struct super_block *sb)
-{
- up_read(&sb->s_umount);
- put_super(sb);
-}
-
-enum sb_pin_state {
- SB_PINNED,
- SB_NOT_PINNED,
- SB_PIN_FAILED
-};
-
/*
- * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
+ * For background writeback the caller does not have the sb pinned
* before calling writeback. So make sure that we do pin it, so it doesn't
* go away while we are writing inodes from it.
*/
-static enum sb_pin_state pin_sb_for_writeback(struct writeback_control *wbc,
- struct super_block *sb)
+static bool pin_sb_for_writeback(struct super_block *sb)
{
- /*
- * Caller must already hold the ref for this
- */
- if (wbc->sync_mode == WB_SYNC_ALL || wbc->sb_pinned) {
- WARN_ON(!rwsem_is_locked(&sb->s_umount));
- return SB_NOT_PINNED;
- }
spin_lock(&sb_lock);
+ if (list_empty(&sb->s_instances)) {
+ spin_unlock(&sb_lock);
+ return false;
+ }
+
sb->s_count++;
+ spin_unlock(&sb_lock);
+
if (down_read_trylock(&sb->s_umount)) {
- if (sb->s_root) {
- spin_unlock(&sb_lock);
- return SB_PINNED;
- }
- /*
- * umounted, drop rwsem again and fall through to failure
- */
+ if (sb->s_root)
+ return true;
up_read(&sb->s_umount);
}
- sb->s_count--;
- spin_unlock(&sb_lock);
- return SB_PIN_FAILED;
+
+ put_super(sb);
+ return false;
}
/*
* Write a portion of b_io inodes which belong to @sb.
- * If @wbc->sb != NULL, then find and write all such
+ *
+ * If @only_this_sb is true, then find and write all such
* inodes. Otherwise write only ones which go sequentially
* in reverse order.
+ *
* Return 1, if the caller writeback routine should be
* interrupted. Otherwise return 0.
*/
-static int writeback_sb_inodes(struct super_block *sb,
- struct bdi_writeback *wb,
- struct writeback_control *wbc)
+static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
+ struct writeback_control *wbc, bool only_this_sb)
{
while (!list_empty(&wb->b_io)) {
long pages_skipped;
struct inode *inode = list_entry(wb->b_io.prev,
struct inode, i_list);
- if (wbc->sb && sb != inode->i_sb) {
- /* super block given and doesn't
- match, skip this inode */
- redirty_tail(inode);
- continue;
- }
- if (sb != inode->i_sb)
- /* finish with this superblock */
+
+ if (inode->i_sb != sb) {
+ if (only_this_sb) {
+ /*
+ * We only want to write back data for this
+ * superblock, move all inodes not belonging
+ * to it back onto the dirty list.
+ */
+ redirty_tail(inode);
+ continue;
+ }
+
+ /*
+ * The inode belongs to a different superblock.
+ * Bounce back to the caller to unpin this and
+ * pin the next superblock.
+ */
return 0;
+ }
+
if (inode->i_state & (I_NEW | I_WILL_FREE)) {
requeue_io(inode);
continue;
@@ -652,7 +490,7 @@ static int writeback_sb_inodes(struct super_block *sb,
if (inode_dirtied_after(inode, wbc->wb_start))
return 1;
- BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
+ BUG_ON(inode->i_state & I_FREEING);
__iget(inode);
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
@@ -678,12 +516,13 @@ static int writeback_sb_inodes(struct super_block *sb,
return 1;
}
-static void writeback_inodes_wb(struct bdi_writeback *wb,
- struct writeback_control *wbc)
+void writeback_inodes_wb(struct bdi_writeback *wb,
+ struct writeback_control *wbc)
{
int ret = 0;
- wbc->wb_start = jiffies; /* livelock avoidance */
+ if (!wbc->wb_start)
+ wbc->wb_start = jiffies; /* livelock avoidance */
spin_lock(&inode_lock);
if (!wbc->for_kupdate || list_empty(&wb->b_io))
queue_io(wb, wbc->older_than_this);
@@ -692,24 +531,14 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
struct inode *inode = list_entry(wb->b_io.prev,
struct inode, i_list);
struct super_block *sb = inode->i_sb;
- enum sb_pin_state state;
-
- if (wbc->sb && sb != wbc->sb) {
- /* super block given and doesn't
- match, skip this inode */
- redirty_tail(inode);
- continue;
- }
- state = pin_sb_for_writeback(wbc, sb);
- if (state == SB_PIN_FAILED) {
+ if (!pin_sb_for_writeback(sb)) {
requeue_io(inode);
continue;
}
- ret = writeback_sb_inodes(sb, wb, wbc);
+ ret = writeback_sb_inodes(sb, wb, wbc, false);
+ drop_super(sb);
- if (state == SB_PINNED)
- unpin_sb_for_writeback(sb);
if (ret)
break;
}
@@ -717,11 +546,16 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
/* Leave any unwritten inodes on b_io */
}
-void writeback_inodes_wbc(struct writeback_control *wbc)
+static void __writeback_inodes_sb(struct super_block *sb,
+ struct bdi_writeback *wb, struct writeback_control *wbc)
{
- struct backing_dev_info *bdi = wbc->bdi;
+ WARN_ON(!rwsem_is_locked(&sb->s_umount));
- writeback_inodes_wb(&bdi->wb, wbc);
+ spin_lock(&inode_lock);
+ if (!wbc->for_kupdate || list_empty(&wb->b_io))
+ queue_io(wb, wbc->older_than_this);
+ writeback_sb_inodes(sb, wb, wbc, true);
+ spin_unlock(&inode_lock);
}
/*
@@ -737,7 +571,7 @@ static inline bool over_bground_thresh(void)
{
unsigned long background_thresh, dirty_thresh;
- get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+ global_dirty_limits(&background_thresh, &dirty_thresh);
return (global_page_state(NR_FILE_DIRTY) +
global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
@@ -759,17 +593,14 @@ static inline bool over_bground_thresh(void)
* all dirty pages if they are all attached to "old" mappings.
*/
static long wb_writeback(struct bdi_writeback *wb,
- struct wb_writeback_args *args)
+ struct wb_writeback_work *work)
{
struct writeback_control wbc = {
- .bdi = wb->bdi,
- .sb = args->sb,
- .sync_mode = args->sync_mode,
+ .sync_mode = work->sync_mode,
.older_than_this = NULL,
- .for_kupdate = args->for_kupdate,
- .for_background = args->for_background,
- .range_cyclic = args->range_cyclic,
- .sb_pinned = args->sb_pinned,
+ .for_kupdate = work->for_kupdate,
+ .for_background = work->for_background,
+ .range_cyclic = work->range_cyclic,
};
unsigned long oldest_jif;
long wrote = 0;
@@ -785,25 +616,33 @@ static long wb_writeback(struct bdi_writeback *wb,
wbc.range_end = LLONG_MAX;
}
+ wbc.wb_start = jiffies; /* livelock avoidance */
for (;;) {
/*
* Stop writeback when nr_pages has been consumed
*/
- if (args->nr_pages <= 0)
+ if (work->nr_pages <= 0)
break;
/*
* For background writeout, stop when we are below the
* background dirty threshold
*/
- if (args->for_background && !over_bground_thresh())
+ if (work->for_background && !over_bground_thresh())
break;
wbc.more_io = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
wbc.pages_skipped = 0;
- writeback_inodes_wb(wb, &wbc);
- args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+
+ trace_wbc_writeback_start(&wbc, wb->bdi);
+ if (work->sb)
+ __writeback_inodes_sb(work->sb, wb, &wbc);
+ else
+ writeback_inodes_wb(wb, &wbc);
+ trace_wbc_writeback_written(&wbc, wb->bdi);
+
+ work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
/*
@@ -830,6 +669,7 @@ static long wb_writeback(struct bdi_writeback *wb,
if (!list_empty(&wb->b_more_io)) {
inode = list_entry(wb->b_more_io.prev,
struct inode, i_list);
+ trace_wbc_writeback_wait(&wbc, wb->bdi);
inode_wait_for_writeback(inode);
}
spin_unlock(&inode_lock);
@@ -839,31 +679,21 @@ static long wb_writeback(struct bdi_writeback *wb,
}
/*
- * Return the next bdi_work struct that hasn't been processed by this
- * wb thread yet. ->seen is initially set for each thread that exists
- * for this device, when a thread first notices a piece of work it
- * clears its bit. Depending on writeback type, the thread will notify
- * completion on either receiving the work (WB_SYNC_NONE) or after
- * it is done (WB_SYNC_ALL).
+ * Return the next wb_writeback_work struct that hasn't been processed yet.
*/
-static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
- struct bdi_writeback *wb)
+static struct wb_writeback_work *
+get_next_work_item(struct backing_dev_info *bdi)
{
- struct bdi_work *work, *ret = NULL;
-
- rcu_read_lock();
+ struct wb_writeback_work *work = NULL;
- list_for_each_entry_rcu(work, &bdi->work_list, list) {
- if (!test_bit(wb->nr, &work->seen))
- continue;
- clear_bit(wb->nr, &work->seen);
-
- ret = work;
- break;
+ spin_lock_bh(&bdi->wb_lock);
+ if (!list_empty(&bdi->work_list)) {
+ work = list_entry(bdi->work_list.next,
+ struct wb_writeback_work, list);
+ list_del_init(&work->list);
}
-
- rcu_read_unlock();
- return ret;
+ spin_unlock_bh(&bdi->wb_lock);
+ return work;
}
static long wb_check_old_data_flush(struct bdi_writeback *wb)
@@ -888,14 +718,14 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
if (nr_pages) {
- struct wb_writeback_args args = {
+ struct wb_writeback_work work = {
.nr_pages = nr_pages,
.sync_mode = WB_SYNC_NONE,
.for_kupdate = 1,
.range_cyclic = 1,
};
- return wb_writeback(wb, &args);
+ return wb_writeback(wb, &work);
}
return 0;
@@ -907,42 +737,37 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
struct backing_dev_info *bdi = wb->bdi;
- struct bdi_work *work;
+ struct wb_writeback_work *work;
long wrote = 0;
- while ((work = get_next_work_item(bdi, wb)) != NULL) {
- struct wb_writeback_args args = work->args;
- int post_clear;
-
+ set_bit(BDI_writeback_running, &wb->bdi->state);
+ while ((work = get_next_work_item(bdi)) != NULL) {
/*
* Override sync mode, in case we must wait for completion
+ * because this thread is exiting now.
*/
if (force_wait)
- work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
+ work->sync_mode = WB_SYNC_ALL;
- post_clear = WB_SYNC_ALL || args.sb_pinned;
-
- /*
- * If this isn't a data integrity operation, just notify
- * that we have seen this work and we are now starting it.
- */
- if (!post_clear)
- wb_clear_pending(wb, work);
+ trace_writeback_exec(bdi, work);
- wrote += wb_writeback(wb, &args);
+ wrote += wb_writeback(wb, work);
/*
- * This is a data integrity writeback, so only do the
- * notification when we have completed the work.
+ * Notify the caller of completion if this is a synchronous
+ * work item, otherwise just free it.
*/
- if (post_clear)
- wb_clear_pending(wb, work);
+ if (work->done)
+ complete(work->done);
+ else
+ kfree(work);
}
/*
* Check for periodic writeback, kupdated() style
*/
wrote += wb_check_old_data_flush(wb);
+ clear_bit(BDI_writeback_running, &wb->bdi->state);
return wrote;
}
@@ -951,84 +776,88 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
* Handle writeback of dirty data for the device backed by this bdi. Also
* wakes up periodically and does kupdated style flushing.
*/
-int bdi_writeback_task(struct bdi_writeback *wb)
+int bdi_writeback_thread(void *data)
{
- unsigned long last_active = jiffies;
- unsigned long wait_jiffies = -1UL;
+ struct bdi_writeback *wb = data;
+ struct backing_dev_info *bdi = wb->bdi;
long pages_written;
+ current->flags |= PF_FLUSHER | PF_SWAPWRITE;
+ set_freezable();
+ wb->last_active = jiffies;
+
+ /*
+ * Our parent may run at a different priority, just set us to normal
+ */
+ set_user_nice(current, 0);
+
+ trace_writeback_thread_start(bdi);
+
while (!kthread_should_stop()) {
+ /*
+ * Remove own delayed wake-up timer, since we are already awake
+ * and we'll take care of the periodic write-back.
+ */
+ del_timer(&wb->wakeup_timer);
+
pages_written = wb_do_writeback(wb, 0);
+ trace_writeback_pages_written(pages_written);
+
if (pages_written)
- last_active = jiffies;
- else if (wait_jiffies != -1UL) {
- unsigned long max_idle;
+ wb->last_active = jiffies;
- /*
- * Longest period of inactivity that we tolerate. If we
- * see dirty data again later, the task will get
- * recreated automatically.
- */
- max_idle = max(5UL * 60 * HZ, wait_jiffies);
- if (time_after(jiffies, max_idle + last_active))
- break;
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!list_empty(&bdi->work_list)) {
+ __set_current_state(TASK_RUNNING);
+ continue;
}
- if (dirty_writeback_interval) {
- wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
- schedule_timeout_interruptible(wait_jiffies);
- } else {
- set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty_careful(&wb->bdi->work_list) &&
- !kthread_should_stop())
- schedule();
- __set_current_state(TASK_RUNNING);
+ if (wb_has_dirty_io(wb) && dirty_writeback_interval)
+ schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
+ else {
+ /*
+ * We have nothing to do, so we can sleep without any
+ * timeout and save power. When work is queued or
+ * something is made dirty, we will be woken up.
+ */
+ schedule();
}
try_to_freeze();
}
+ /* Flush any work that raced with us exiting */
+ if (!list_empty(&bdi->work_list))
+ wb_do_writeback(wb, 1);
+
+ trace_writeback_thread_stop(bdi);
return 0;
}
+
/*
- * Schedule writeback for all backing devices. This does WB_SYNC_NONE
- * writeback, for integrity writeback see bdi_sync_writeback().
+ * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
+ * the whole world.
*/
-static void bdi_writeback_all(struct super_block *sb, long nr_pages)
+void wakeup_flusher_threads(long nr_pages)
{
- struct wb_writeback_args args = {
- .sb = sb,
- .nr_pages = nr_pages,
- .sync_mode = WB_SYNC_NONE,
- };
struct backing_dev_info *bdi;
- rcu_read_lock();
+ if (!nr_pages) {
+ nr_pages = global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS);
+ }
+ rcu_read_lock();
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
if (!bdi_has_dirty_io(bdi))
continue;
-
- bdi_alloc_queue_work(bdi, &args, 0);
+ __bdi_start_writeback(bdi, nr_pages, false, false);
}
-
rcu_read_unlock();
}
-/*
- * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
- * the whole world.
- */
-void wakeup_flusher_threads(long nr_pages)
-{
- if (nr_pages == 0)
- nr_pages = global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS);
- bdi_writeback_all(NULL, nr_pages);
-}
-
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -1081,6 +910,8 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
void __mark_inode_dirty(struct inode *inode, int flags)
{
struct super_block *sb = inode->i_sb;
+ struct backing_dev_info *bdi = NULL;
+ bool wakeup_bdi = false;
/*
* Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -1126,7 +957,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
if (hlist_unhashed(&inode->i_hash))
goto out;
}
- if (inode->i_state & (I_FREEING|I_CLEAR))
+ if (inode->i_state & I_FREEING)
goto out;
/*
@@ -1134,22 +965,31 @@ void __mark_inode_dirty(struct inode *inode, int flags)
* reposition it (that would break b_dirty time-ordering).
*/
if (!was_dirty) {
- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- struct backing_dev_info *bdi = wb->bdi;
-
- if (bdi_cap_writeback_dirty(bdi) &&
- !test_bit(BDI_registered, &bdi->state)) {
- WARN_ON(1);
- printk(KERN_ERR "bdi-%s not registered\n",
- bdi->name);
+ bdi = inode_to_bdi(inode);
+
+ if (bdi_cap_writeback_dirty(bdi)) {
+ WARN(!test_bit(BDI_registered, &bdi->state),
+ "bdi-%s not registered\n", bdi->name);
+
+ /*
+ * If this is the first dirty inode for this
+ * bdi, we have to wake-up the corresponding
+ * bdi thread to make sure background
+ * write-back happens later.
+ */
+ if (!wb_has_dirty_io(&bdi->wb))
+ wakeup_bdi = true;
}
inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &wb->b_dirty);
+ list_move(&inode->i_list, &bdi->wb.b_dirty);
}
}
out:
spin_unlock(&inode_lock);
+
+ if (wakeup_bdi)
+ bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);
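
Note the split in __mark_inode_dirty() above: the decision (wakeup_bdi) is made under inode_lock, but bdi_wakeup_thread_delayed() runs only after the unlock, keeping the bdi wakeup path out of the inode_lock critical section. Reduced to the pattern (needs_wakeup is a hypothetical condition):

    bool wakeup = false;

    spin_lock(&inode_lock);
    /* ... update state; record whether a wakeup is required ... */
    wakeup = needs_wakeup;
    spin_unlock(&inode_lock);

    if (wakeup)
            bdi_wakeup_thread_delayed(bdi); /* takes its own locks safely */
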
@@ -1192,7 +1032,7 @@ static void wait_sb_inodes(struct super_block *sb)
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
struct address_space *mapping;
- if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
continue;
mapping = inode->i_mapping;
if (mapping->nrpages == 0)
@@ -1220,18 +1060,6 @@ static void wait_sb_inodes(struct super_block *sb)
iput(old_inode);
}
-static void __writeback_inodes_sb(struct super_block *sb, int sb_locked)
-{
- unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
- unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
- long nr_to_write;
-
- nr_to_write = nr_dirty + nr_unstable +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused);
-
- bdi_start_writeback(sb->s_bdi, sb, nr_to_write, sb_locked);
-}
-
/**
* writeback_inodes_sb - writeback dirty inodes from given super_block
* @sb: the superblock
@@ -1243,21 +1071,24 @@ static void __writeback_inodes_sb(struct super_block *sb, int sb_locked)
*/
void writeback_inodes_sb(struct super_block *sb)
{
- __writeback_inodes_sb(sb, 0);
-}
-EXPORT_SYMBOL(writeback_inodes_sb);
+ unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+ unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct wb_writeback_work work = {
+ .sb = sb,
+ .sync_mode = WB_SYNC_NONE,
+ .done = &done,
+ };
-/**
- * writeback_inodes_sb_locked - writeback dirty inodes from given super_block
- * @sb: the superblock
- *
- * Like writeback_inodes_sb(), except the caller already holds the
- * sb umount sem.
- */
-void writeback_inodes_sb_locked(struct super_block *sb)
-{
- __writeback_inodes_sb(sb, 1);
+ WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+ work.nr_pages = nr_dirty + nr_unstable +
+ (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+
+ bdi_queue_work(sb->s_bdi, &work);
+ wait_for_completion(&done);
}
+EXPORT_SYMBOL(writeback_inodes_sb);
/**
* writeback_inodes_sb_if_idle - start writeback if none underway
@@ -1269,7 +1100,9 @@ void writeback_inodes_sb_locked(struct super_block *sb)
int writeback_inodes_sb_if_idle(struct super_block *sb)
{
if (!writeback_in_progress(sb->s_bdi)) {
+ down_read(&sb->s_umount);
writeback_inodes_sb(sb);
+ up_read(&sb->s_umount);
return 1;
} else
return 0;
@@ -1285,7 +1118,20 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
*/
void sync_inodes_sb(struct super_block *sb)
{
- bdi_sync_writeback(sb->s_bdi, sb);
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct wb_writeback_work work = {
+ .sb = sb,
+ .sync_mode = WB_SYNC_ALL,
+ .nr_pages = LONG_MAX,
+ .range_cyclic = 0,
+ .done = &done,
+ };
+
+ WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+ bdi_queue_work(sb->s_bdi, &work);
+ wait_for_completion(&done);
+
wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);
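
The rewritten sync_inodes_sb()/writeback_inodes_sb() show the two lifetimes a wb_writeback_work can have: asynchronous items are kzalloc()'d and later kfree()'d by the flusher thread, while synchronous callers keep the item on their own stack and block on a completion that wb_do_writeback() signals. The synchronous shape, in isolation:

    DECLARE_COMPLETION_ONSTACK(done);
    struct wb_writeback_work work = {
            .sb        = sb,
            .sync_mode = WB_SYNC_ALL,
            .nr_pages  = LONG_MAX,
            .done      = &done,   /* tells the consumer not to kfree() */
    };

    bdi_queue_work(sb->s_bdi, &work);
    wait_for_completion(&done);   /* stack item stays valid until here */
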
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index eee059052db..1ee40eb9a2c 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -106,12 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
fs->in_exec = 0;
rwlock_init(&fs->lock);
fs->umask = old->umask;
- read_lock(&old->lock);
- fs->root = old->root;
- path_get(&old->root);
- fs->pwd = old->pwd;
- path_get(&old->pwd);
- read_unlock(&old->lock);
+ get_fs_root_and_pwd(old, &fs->root, &fs->pwd);
}
return fs;
}
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index cc94bb9563f..3f6dfa98988 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -1,7 +1,6 @@
config FSCACHE
tristate "General filesystem local caching manager"
- select SLOW_WORK
help
This option enables a generic filesystem caching manager that can be
used by various network and other filesystems to cache data locally.
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index edd7434ab6e..f6aad48d38a 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -82,6 +82,14 @@ extern unsigned fscache_defer_lookup;
extern unsigned fscache_defer_create;
extern unsigned fscache_debug;
extern struct kobject *fscache_root;
+extern struct workqueue_struct *fscache_object_wq;
+extern struct workqueue_struct *fscache_op_wq;
+DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
+
+static inline bool fscache_object_congested(void)
+{
+ return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
+}
extern int fscache_wait_bit(void *);
extern int fscache_wait_bit_interruptible(void *);
@@ -313,17 +321,11 @@ void fscache_put_context(struct fscache_cookie *cookie, void *context)
#define dbgprintk(FMT, ...) \
printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
-/* make sure we maintain the format strings, even when debugging is disabled */
-static inline __attribute__((format(printf, 1, 2)))
-void _dbprintk(const char *fmt, ...)
-{
-}
-
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
-#define kjournal(FMT, ...) _dbprintk(FMT, ##__VA_ARGS__)
+#define kjournal(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#ifdef __KDEBUG
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
@@ -350,9 +352,9 @@ do { \
} while (0)
#else
-#define _enter(FMT, ...) _dbprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define _leave(FMT, ...) _dbprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define _debug(FMT, ...) _dbprintk(FMT, ##__VA_ARGS__)
+#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif
/*
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index add6bdb53f0..f9d856773f7 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
#include "internal.h"
MODULE_DESCRIPTION("FS Cache Manager");
@@ -40,22 +41,105 @@ MODULE_PARM_DESC(fscache_debug,
"FS-Cache debugging mask");
struct kobject *fscache_root;
+struct workqueue_struct *fscache_object_wq;
+struct workqueue_struct *fscache_op_wq;
+
+DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
+
+/* these values serve as lower bounds, will be adjusted in fscache_init() */
+static unsigned fscache_object_max_active = 4;
+static unsigned fscache_op_max_active = 2;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *fscache_sysctl_header;
+
+static int fscache_max_active_sysctl(struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct workqueue_struct **wqp = table->extra1;
+ unsigned int *datap = table->data;
+ int ret;
+
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (ret == 0)
+ workqueue_set_max_active(*wqp, *datap);
+ return ret;
+}
+
+ctl_table fscache_sysctls[] = {
+ {
+ .procname = "object_max_active",
+ .data = &fscache_object_max_active,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = fscache_max_active_sysctl,
+ .extra1 = &fscache_object_wq,
+ },
+ {
+ .procname = "operation_max_active",
+ .data = &fscache_op_max_active,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = fscache_max_active_sysctl,
+ .extra1 = &fscache_op_wq,
+ },
+ {}
+};
+
+ctl_table fscache_sysctls_root[] = {
+ {
+ .procname = "fscache",
+ .mode = 0555,
+ .child = fscache_sysctls,
+ },
+ {}
+};
+#endif
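A write to either tunable runs proc_dointvec() to parse the new integer, then immediately applies it to the live workqueue in the handler's tail. So `echo 128 > /proc/sys/fscache/object_max_active` reduces, in effect, to (sketch of the handler's behaviour, value illustrative):

	fscache_object_max_active = 128;                    /* proc_dointvec() */
	workqueue_set_max_active(fscache_object_wq, 128);   /* handler tail    */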
/*
* initialise the fs caching module
*/
static int __init fscache_init(void)
{
+ unsigned int nr_cpus = num_possible_cpus();
+ unsigned int cpu;
int ret;
- ret = slow_work_register_user(THIS_MODULE);
- if (ret < 0)
- goto error_slow_work;
+ fscache_object_max_active =
+ clamp_val(nr_cpus,
+ fscache_object_max_active, WQ_UNBOUND_MAX_ACTIVE);
+
+ ret = -ENOMEM;
+ fscache_object_wq = alloc_workqueue("fscache_object", WQ_UNBOUND,
+ fscache_object_max_active);
+ if (!fscache_object_wq)
+ goto error_object_wq;
+
+ fscache_op_max_active =
+ clamp_val(fscache_object_max_active / 2,
+ fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE);
+
+ ret = -ENOMEM;
+ fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND,
+ fscache_op_max_active);
+ if (!fscache_op_wq)
+ goto error_op_wq;
+
+ for_each_possible_cpu(cpu)
+ init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
ret = fscache_proc_init();
if (ret < 0)
goto error_proc;
+#ifdef CONFIG_SYSCTL
+ ret = -ENOMEM;
+ fscache_sysctl_header = register_sysctl_table(fscache_sysctls_root);
+ if (!fscache_sysctl_header)
+ goto error_sysctl;
+#endif
+
fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
sizeof(struct fscache_cookie),
0,
@@ -78,10 +162,16 @@ static int __init fscache_init(void)
error_kobj:
kmem_cache_destroy(fscache_cookie_jar);
error_cookie_jar:
+#ifdef CONFIG_SYSCTL
+ unregister_sysctl_table(fscache_sysctl_header);
+error_sysctl:
+#endif
fscache_proc_cleanup();
error_proc:
- slow_work_unregister_user(THIS_MODULE);
-error_slow_work:
+ destroy_workqueue(fscache_op_wq);
+error_op_wq:
+ destroy_workqueue(fscache_object_wq);
+error_object_wq:
return ret;
}
@@ -96,8 +186,12 @@ static void __exit fscache_exit(void)
kobject_put(fscache_root);
kmem_cache_destroy(fscache_cookie_jar);
+#ifdef CONFIG_SYSCTL
+ unregister_sysctl_table(fscache_sysctl_header);
+#endif
fscache_proc_cleanup();
- slow_work_unregister_user(THIS_MODULE);
+ destroy_workqueue(fscache_op_wq);
+ destroy_workqueue(fscache_object_wq);
printk(KERN_NOTICE "FS-Cache: Unloaded\n");
}
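To make the clamp_val() arithmetic in fscache_init() concrete (worked example, CPU counts illustrative): clamp_val(val, lo, hi) evaluates to max(lo, min(val, hi)), so the module defaults of 4 and 2 act as floors:

	/* on an 8-CPU machine: */
	fscache_object_max_active = clamp_val(8, 4, WQ_UNBOUND_MAX_ACTIVE);     /* 8 */
	fscache_op_max_active     = clamp_val(8 / 2, 2, WQ_UNBOUND_MAX_ACTIVE); /* 4 */
	/* on a 2-CPU machine the object floor wins: clamp_val(2, 4, ...) == 4 */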
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 4a8eb31c533..ebe29c58138 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -34,8 +34,8 @@ struct fscache_objlist_data {
#define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */
#define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */
#define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without events */
-#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with slow work */
-#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without slow work */
+#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with work */
+#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without work */
u8 buf[512]; /* key and aux data buffer */
};
@@ -231,12 +231,11 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
READS, NOREADS);
FILTER(obj->events & obj->event_mask,
EVENTS, NOEVENTS);
- FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW),
- WORK, NOWORK);
+ FILTER(work_busy(&obj->work), WORK, NOWORK);
}
seq_printf(m,
- "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ",
+ "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1x | ",
obj->debug_id,
obj->parent ? obj->parent->debug_id : -1,
fscache_object_states_short[obj->state],
@@ -249,7 +248,7 @@ static int fscache_objlist_show(struct seq_file *m, void *v)
obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
obj->events,
obj->flags,
- obj->work.flags);
+ work_busy(&obj->work));
no_cookie = true;
keylen = auxlen = 0;
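work_busy() returns a small bit mask rather than the old slow-work flags word, which is why the last format specifier shrinks from %1lx to %1x. Per the workqueue API introduced in this cycle (bit names assumed from <linux/workqueue.h>):

	unsigned int busy = work_busy(&obj->work);   /* 0..3 */
	/* WORK_BUSY_PENDING (0x1): queued but not yet executing  */
	/* WORK_BUSY_RUNNING (0x2): currently running on a worker */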
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 0b589a9b4ff..b6b897c550a 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -14,7 +14,6 @@
#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
-#include <linux/seq_file.h>
#include "internal.h"
const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
@@ -50,12 +49,8 @@ const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
[FSCACHE_OBJECT_DEAD] = "DEAD",
};
-static void fscache_object_slow_work_put_ref(struct slow_work *);
-static int fscache_object_slow_work_get_ref(struct slow_work *);
-static void fscache_object_slow_work_execute(struct slow_work *);
-#ifdef CONFIG_SLOW_WORK_DEBUG
-static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
-#endif
+static int fscache_get_object(struct fscache_object *);
+static void fscache_put_object(struct fscache_object *);
static void fscache_initialise_object(struct fscache_object *);
static void fscache_lookup_object(struct fscache_object *);
static void fscache_object_available(struct fscache_object *);
@@ -64,17 +59,6 @@ static void fscache_withdraw_object(struct fscache_object *);
static void fscache_enqueue_dependents(struct fscache_object *);
static void fscache_dequeue_object(struct fscache_object *);
-const struct slow_work_ops fscache_object_slow_work_ops = {
- .owner = THIS_MODULE,
- .get_ref = fscache_object_slow_work_get_ref,
- .put_ref = fscache_object_slow_work_put_ref,
- .execute = fscache_object_slow_work_execute,
-#ifdef CONFIG_SLOW_WORK_DEBUG
- .desc = fscache_object_slow_work_desc,
-#endif
-};
-EXPORT_SYMBOL(fscache_object_slow_work_ops);
-
/*
* we need to notify the parent when an op completes that we had outstanding
* upon it
@@ -345,7 +329,7 @@ unsupported_event:
/*
* execute an object
*/
-static void fscache_object_slow_work_execute(struct slow_work *work)
+void fscache_object_work_func(struct work_struct *work)
{
struct fscache_object *object =
container_of(work, struct fscache_object, work);
@@ -359,23 +343,9 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
if (object->events & object->event_mask)
fscache_enqueue_object(object);
clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ fscache_put_object(object);
}
-
-/*
- * describe an object for slow-work debugging
- */
-#ifdef CONFIG_SLOW_WORK_DEBUG
-static void fscache_object_slow_work_desc(struct slow_work *work,
- struct seq_file *m)
-{
- struct fscache_object *object =
- container_of(work, struct fscache_object, work);
-
- seq_printf(m, "FSC: OBJ%x: %s",
- object->debug_id,
- fscache_object_states_short[object->state]);
-}
-#endif
+EXPORT_SYMBOL(fscache_object_work_func);
/*
* initialise an object
@@ -393,7 +363,6 @@ static void fscache_initialise_object(struct fscache_object *object)
_enter("");
ASSERT(object->cookie != NULL);
ASSERT(object->cookie->parent != NULL);
- ASSERT(list_empty(&object->work.link));
if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) |
(1 << FSCACHE_OBJECT_EV_RELEASE) |
@@ -671,10 +640,8 @@ static void fscache_drop_object(struct fscache_object *object)
object->parent = NULL;
}
- /* this just shifts the object release to the slow work processor */
- fscache_stat(&fscache_n_cop_put_object);
- object->cache->ops->put_object(object);
- fscache_stat_d(&fscache_n_cop_put_object);
+ /* this just shifts the object release to the work processor */
+ fscache_put_object(object);
_leave("");
}
@@ -758,12 +725,10 @@ void fscache_withdrawing_object(struct fscache_cache *cache,
}
/*
- * allow the slow work item processor to get a ref on an object
+ * get a ref on an object
*/
-static int fscache_object_slow_work_get_ref(struct slow_work *work)
+static int fscache_get_object(struct fscache_object *object)
{
- struct fscache_object *object =
- container_of(work, struct fscache_object, work);
int ret;
fscache_stat(&fscache_n_cop_grab_object);
@@ -773,13 +738,10 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work)
}
/*
- * allow the slow work item processor to discard a ref on a work item
+ * discard a ref on a work item
*/
-static void fscache_object_slow_work_put_ref(struct slow_work *work)
+static void fscache_put_object(struct fscache_object *object)
{
- struct fscache_object *object =
- container_of(work, struct fscache_object, work);
-
fscache_stat(&fscache_n_cop_put_object);
object->cache->ops->put_object(object);
fscache_stat_d(&fscache_n_cop_put_object);
@@ -792,8 +754,48 @@ void fscache_enqueue_object(struct fscache_object *object)
{
_enter("{OBJ%x}", object->debug_id);
- slow_work_enqueue(&object->work);
+ if (fscache_get_object(object) >= 0) {
+ wait_queue_head_t *cong_wq =
+ &get_cpu_var(fscache_object_cong_wait);
+
+ if (queue_work(fscache_object_wq, &object->work)) {
+ if (fscache_object_congested())
+ wake_up(cong_wq);
+ } else
+ fscache_put_object(object);
+
+ put_cpu_var(fscache_object_cong_wait);
+ }
+}
+
+/**
+ * fscache_object_sleep_till_congested - Sleep until object wq is congested
+ * @timeoutp: Scheduler sleep timeout
+ *
+ * Allow an object handler to sleep until the object workqueue is congested.
+ *
+ * The caller must set up a wake up event before calling this and must have set
+ * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
+ * condition before calling this function as no test is made here.
+ *
+ * %true is returned if the object wq is congested, %false otherwise.
+ */
+bool fscache_object_sleep_till_congested(signed long *timeoutp)
+{
+ wait_queue_head_t *cong_wq = &__get_cpu_var(fscache_object_cong_wait);
+ DEFINE_WAIT(wait);
+
+ if (fscache_object_congested())
+ return true;
+
+ add_wait_queue_exclusive(cong_wq, &wait);
+ if (!fscache_object_congested())
+ *timeoutp = schedule_timeout(*timeoutp);
+ finish_wait(cong_wq, &wait);
+
+ return fscache_object_congested();
}
+EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
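A hedged sketch of the calling convention the comment above describes, modelled on how a cache backend (e.g. cachefiles) might use it — my_wq and my_condition() are illustrative names, not from the patch:

	signed long timeout = 60 * HZ;
	bool congested = false;
	DEFINE_WAIT(wait);

	do {
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_condition())     /* caller's own wake condition */
			break;
		/* returns true (and wakes early) if the object wq congests */
		congested = fscache_object_sleep_till_congested(&timeout);
	} while (timeout > 0 && !congested);
	finish_wait(&my_wq, &wait);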
/*
* enqueue the dependents of an object for metadata-type processing
@@ -819,9 +821,7 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
/* sort onto appropriate lists */
fscache_enqueue_object(dep);
- fscache_stat(&fscache_n_cop_put_object);
- dep->cache->ops->put_object(dep);
- fscache_stat_d(&fscache_n_cop_put_object);
+ fscache_put_object(dep);
if (!list_empty(&object->dependents))
cond_resched_lock(&object->lock);
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index f17cecafae4..b9f34eaede0 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -42,16 +42,12 @@ void fscache_enqueue_operation(struct fscache_operation *op)
fscache_stat(&fscache_n_op_enqueue);
switch (op->flags & FSCACHE_OP_TYPE) {
- case FSCACHE_OP_FAST:
- _debug("queue fast");
+ case FSCACHE_OP_ASYNC:
+ _debug("queue async");
atomic_inc(&op->usage);
- if (!schedule_work(&op->fast_work))
+ if (!queue_work(fscache_op_wq, &op->work))
fscache_put_operation(op);
break;
- case FSCACHE_OP_SLOW:
- _debug("queue slow");
- slow_work_enqueue(&op->slow_work);
- break;
case FSCACHE_OP_MYTHREAD:
_debug("queue for caller's attention");
break;
@@ -455,36 +451,13 @@ void fscache_operation_gc(struct work_struct *work)
}
/*
- * allow the slow work item processor to get a ref on an operation
- */
-static int fscache_op_get_ref(struct slow_work *work)
-{
- struct fscache_operation *op =
- container_of(work, struct fscache_operation, slow_work);
-
- atomic_inc(&op->usage);
- return 0;
-}
-
-/*
- * allow the slow work item processor to discard a ref on an operation
- */
-static void fscache_op_put_ref(struct slow_work *work)
-{
- struct fscache_operation *op =
- container_of(work, struct fscache_operation, slow_work);
-
- fscache_put_operation(op);
-}
-
-/*
- * execute an operation using the slow thread pool to provide processing context
- * - the caller holds a ref to this object, so we don't need to hold one
+ * execute an operation using fscache_op_wq to provide processing context -
+ * the caller holds a ref to this object, so we don't need to hold one
*/
-static void fscache_op_execute(struct slow_work *work)
+void fscache_op_work_func(struct work_struct *work)
{
struct fscache_operation *op =
- container_of(work, struct fscache_operation, slow_work);
+ container_of(work, struct fscache_operation, work);
unsigned long start;
_enter("{OBJ%x OP%x,%d}",
@@ -494,31 +467,7 @@ static void fscache_op_execute(struct slow_work *work)
start = jiffies;
op->processor(op);
fscache_hist(fscache_ops_histogram, start);
+ fscache_put_operation(op);
_leave("");
}
-
-/*
- * describe an operation for slow-work debugging
- */
-#ifdef CONFIG_SLOW_WORK_DEBUG
-static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
-{
- struct fscache_operation *op =
- container_of(work, struct fscache_operation, slow_work);
-
- seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
- op->object->debug_id, op->debug_id,
- op->name, op->state, op->flags);
-}
-#endif
-
-const struct slow_work_ops fscache_op_slow_work_ops = {
- .owner = THIS_MODULE,
- .get_ref = fscache_op_get_ref,
- .put_ref = fscache_op_put_ref,
- .execute = fscache_op_execute,
-#ifdef CONFIG_SLOW_WORK_DEBUG
- .desc = fscache_op_desc,
-#endif
-};
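Condensed, the conversion in this file replaces the slow-work ops table with a bare work_struct whose handler recovers the operation via container_of(), and moves the reference counting inline around queue_work(). The pieces below are assembled from the hunks above, not new API:

	INIT_WORK(&op->work, fscache_op_work_func);   /* at operation init  */

	atomic_inc(&op->usage);                       /* ref held by the wq */
	if (!queue_work(fscache_op_wq, &op->work))
		fscache_put_operation(op);            /* was already queued */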
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 47aefd376e5..41c441c2058 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -105,7 +105,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
page_busy:
/* we might want to wait here, but that could deadlock the allocator as
- * the slow-work threads writing to the cache may all end up sleeping
+ * the work threads writing to the cache may all end up sleeping
* on memory allocation */
fscache_stat(&fscache_n_store_vmscan_busy);
return false;
@@ -188,9 +188,8 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
return -ENOMEM;
}
- fscache_operation_init(op, NULL);
- fscache_operation_init_slow(op, fscache_attr_changed_op);
- op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+ fscache_operation_init(op, fscache_attr_changed_op, NULL);
+ op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
fscache_set_op_name(op, "Attr");
spin_lock(&cookie->lock);
@@ -218,24 +217,6 @@ nobufs:
EXPORT_SYMBOL(__fscache_attr_changed);
/*
- * handle secondary execution given to a retrieval op on behalf of the
- * cache
- */
-static void fscache_retrieval_work(struct work_struct *work)
-{
- struct fscache_retrieval *op =
- container_of(work, struct fscache_retrieval, op.fast_work);
- unsigned long start;
-
- _enter("{OP%x}", op->op.debug_id);
-
- start = jiffies;
- op->op.processor(&op->op);
- fscache_hist(fscache_ops_histogram, start);
- fscache_put_operation(&op->op);
-}
-
-/*
* release a retrieval op reference
*/
static void fscache_release_retrieval_op(struct fscache_operation *_op)
@@ -269,13 +250,12 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
return NULL;
}
- fscache_operation_init(&op->op, fscache_release_retrieval_op);
+ fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
op->mapping = mapping;
op->end_io_func = end_io_func;
op->context = context;
op->start_time = jiffies;
- INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
INIT_LIST_HEAD(&op->to_do);
fscache_set_op_name(&op->op, "Retr");
return op;
@@ -710,30 +690,26 @@ static void fscache_write_op(struct fscache_operation *_op)
goto superseded;
}
- if (page) {
- radix_tree_tag_set(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG);
- radix_tree_tag_clear(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG);
- }
+ radix_tree_tag_set(&cookie->stores, page->index,
+ FSCACHE_COOKIE_STORING_TAG);
+ radix_tree_tag_clear(&cookie->stores, page->index,
+ FSCACHE_COOKIE_PENDING_TAG);
spin_unlock(&cookie->stores_lock);
spin_unlock(&object->lock);
- if (page) {
- fscache_set_op_state(&op->op, "Store");
- fscache_stat(&fscache_n_store_pages);
- fscache_stat(&fscache_n_cop_write_page);
- ret = object->cache->ops->write_page(op, page);
- fscache_stat_d(&fscache_n_cop_write_page);
- fscache_set_op_state(&op->op, "EndWrite");
- fscache_end_page_write(object, page);
- if (ret < 0) {
- fscache_set_op_state(&op->op, "Abort");
- fscache_abort_object(object);
- } else {
- fscache_enqueue_operation(&op->op);
- }
+ fscache_set_op_state(&op->op, "Store");
+ fscache_stat(&fscache_n_store_pages);
+ fscache_stat(&fscache_n_cop_write_page);
+ ret = object->cache->ops->write_page(op, page);
+ fscache_stat_d(&fscache_n_cop_write_page);
+ fscache_set_op_state(&op->op, "EndWrite");
+ fscache_end_page_write(object, page);
+ if (ret < 0) {
+ fscache_set_op_state(&op->op, "Abort");
+ fscache_abort_object(object);
+ } else {
+ fscache_enqueue_operation(&op->op);
}
_leave("");
@@ -799,9 +775,9 @@ int __fscache_write_page(struct fscache_cookie *cookie,
if (!op)
goto nomem;
- fscache_operation_init(&op->op, fscache_release_write_op);
- fscache_operation_init_slow(&op->op, fscache_write_op);
- op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+ fscache_operation_init(&op->op, fscache_write_op,
+ fscache_release_write_op);
+ op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
fscache_set_op_name(&op->op, "Write1");
ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
@@ -856,7 +832,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
fscache_stat(&fscache_n_store_ops);
fscache_stat(&fscache_n_stores_ok);
- /* the slow work queue now carries its own ref on the object */
+ /* the work queue now carries its own ref on the object */
fscache_put_operation(&op->op);
_leave(" = 0");
return 0;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 9424796d663..69ad053ffd7 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -239,7 +239,6 @@ static u64 fuse_get_unique(struct fuse_conn *fc)
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
- req->in.h.unique = fuse_get_unique(fc);
req->in.h.len = sizeof(struct fuse_in_header) +
len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
list_add_tail(&req->list, &fc->pending);
@@ -261,6 +260,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
req = list_entry(fc->bg_queue.next, struct fuse_req, list);
list_del(&req->list);
fc->active_background++;
+ req->in.h.unique = fuse_get_unique(fc);
queue_request(fc, req);
}
}
@@ -398,6 +398,7 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
else if (fc->conn_error)
req->out.h.error = -ECONNREFUSED;
else {
+ req->in.h.unique = fuse_get_unique(fc);
queue_request(fc, req);
/* acquire extra reference, since request is still needed
after request_end() */
@@ -450,6 +451,23 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
+static int fuse_request_send_notify_reply(struct fuse_conn *fc,
+ struct fuse_req *req, u64 unique)
+{
+ int err = -ENODEV;
+
+ req->isreply = 0;
+ req->in.h.unique = unique;
+ spin_lock(&fc->lock);
+ if (fc->connected) {
+ queue_request(fc, req);
+ err = 0;
+ }
+ spin_unlock(&fc->lock);
+
+ return err;
+}
+
/*
* Called under fc->lock
*
@@ -535,13 +553,13 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
if (!cs->write) {
buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
} else {
- kunmap_atomic(cs->mapaddr, KM_USER0);
+ kunmap(buf->page);
buf->len = PAGE_SIZE - cs->len;
}
cs->currbuf = NULL;
cs->mapaddr = NULL;
} else if (cs->mapaddr) {
- kunmap_atomic(cs->mapaddr, KM_USER0);
+ kunmap(cs->pg);
if (cs->write) {
flush_dcache_page(cs->pg);
set_page_dirty_lock(cs->pg);
@@ -572,7 +590,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
BUG_ON(!cs->nr_segs);
cs->currbuf = buf;
- cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+ cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
cs->len = buf->len;
cs->buf = cs->mapaddr + buf->offset;
cs->pipebufs++;
@@ -592,7 +610,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
buf->len = 0;
cs->currbuf = buf;
- cs->mapaddr = kmap_atomic(page, KM_USER0);
+ cs->mapaddr = kmap(page);
cs->buf = cs->mapaddr;
cs->len = PAGE_SIZE;
cs->pipebufs++;
@@ -611,7 +629,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
return err;
BUG_ON(err != 1);
offset = cs->addr % PAGE_SIZE;
- cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
+ cs->mapaddr = kmap(cs->pg);
cs->buf = cs->mapaddr + offset;
cs->len = min(PAGE_SIZE - offset, cs->seglen);
cs->seglen -= cs->len;
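On the kmap conversions above: kmap_atomic() disables preemption for the life of the mapping, so it must not be held across anything that can sleep; the splice-capable copy path can fault while a buffer is mapped, hence the switch to plain, sleepable kmap(). In sketch form (era-appropriate KM_USER0 slot):

	/* atomic: must not sleep between map and unmap */
	void *p = kmap_atomic(page, KM_USER0);
	memcpy(p, buf, len);
	kunmap_atomic(p, KM_USER0);

	/* sleepable: safe to hold across copies that may fault */
	void *q = kmap(page);
	memcpy(q, buf, len);    /* the real copy may block on a fault */
	kunmap(page);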
@@ -1231,6 +1249,199 @@ err:
return err;
}
+static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_store_out outarg;
+ struct inode *inode;
+ struct address_space *mapping;
+ u64 nodeid;
+ int err;
+ pgoff_t index;
+ unsigned int offset;
+ unsigned int num;
+ loff_t file_size;
+ loff_t end;
+
+ err = -EINVAL;
+ if (size < sizeof(outarg))
+ goto out_finish;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ goto out_finish;
+
+ err = -EINVAL;
+ if (size - sizeof(outarg) != outarg.size)
+ goto out_finish;
+
+ nodeid = outarg.nodeid;
+
+ down_read(&fc->killsb);
+
+ err = -ENOENT;
+ if (!fc->sb)
+ goto out_up_killsb;
+
+ inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
+ if (!inode)
+ goto out_up_killsb;
+
+ mapping = inode->i_mapping;
+ index = outarg.offset >> PAGE_CACHE_SHIFT;
+ offset = outarg.offset & ~PAGE_CACHE_MASK;
+ file_size = i_size_read(inode);
+ end = outarg.offset + outarg.size;
+ if (end > file_size) {
+ file_size = end;
+ fuse_write_update_size(inode, file_size);
+ }
+
+ num = outarg.size;
+ while (num) {
+ struct page *page;
+ unsigned int this_num;
+
+ err = -ENOMEM;
+ page = find_or_create_page(mapping, index,
+ mapping_gfp_mask(mapping));
+ if (!page)
+ goto out_iput;
+
+ this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+ err = fuse_copy_page(cs, &page, offset, this_num, 0);
+ if (!err && offset == 0 && (num != 0 || file_size == end))
+ SetPageUptodate(page);
+ unlock_page(page);
+ page_cache_release(page);
+
+ if (err)
+ goto out_iput;
+
+ num -= this_num;
+ offset = 0;
+ index++;
+ }
+
+ err = 0;
+
+out_iput:
+ iput(inode);
+out_up_killsb:
+ up_read(&fc->killsb);
+out_finish:
+ fuse_copy_finish(cs);
+ return err;
+}
+
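A worked example of the index/offset split in fuse_notify_store() (illustrative values, 4 KiB pages assumed):

	/* outarg.offset = 0x1804, outarg.size = 0x900:               */
	index  = 0x1804 >> PAGE_CACHE_SHIFT;  /* page 1               */
	offset = 0x1804 & ~PAGE_CACHE_MASK;   /* 0x804 into that page */
	/* pass 1: this_num = min(0x900, 0x1000 - 0x804) = 0x7fc      */
	/* pass 2: offset = 0, index = 2, this_num = 0x104, num -> 0  */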
+static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
+{
+ int i;
+
+ for (i = 0; i < req->num_pages; i++) {
+ struct page *page = req->pages[i];
+ page_cache_release(page);
+ }
+}
+
+static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+ struct fuse_notify_retrieve_out *outarg)
+{
+ int err;
+ struct address_space *mapping = inode->i_mapping;
+ struct fuse_req *req;
+ pgoff_t index;
+ loff_t file_size;
+ unsigned int num;
+ unsigned int offset;
+ size_t total_len = 0;
+
+ req = fuse_get_req(fc);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ offset = outarg->offset & ~PAGE_CACHE_MASK;
+
+ req->in.h.opcode = FUSE_NOTIFY_REPLY;
+ req->in.h.nodeid = outarg->nodeid;
+ req->in.numargs = 2;
+ req->in.argpages = 1;
+ req->page_offset = offset;
+ req->end = fuse_retrieve_end;
+
+ index = outarg->offset >> PAGE_CACHE_SHIFT;
+ file_size = i_size_read(inode);
+ num = outarg->size;
+ if (outarg->offset > file_size)
+ num = 0;
+ else if (outarg->offset + num > file_size)
+ num = file_size - outarg->offset;
+
+ while (num) {
+ struct page *page;
+ unsigned int this_num;
+
+ page = find_get_page(mapping, index);
+ if (!page)
+ break;
+
+ this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
+ req->pages[req->num_pages] = page;
+ req->num_pages++;
+
+ num -= this_num;
+ total_len += this_num;
+ offset = 0;
+ index++;
+ }
+ req->misc.retrieve_in.offset = outarg->offset;
+ req->misc.retrieve_in.size = total_len;
+ req->in.args[0].size = sizeof(req->misc.retrieve_in);
+ req->in.args[0].value = &req->misc.retrieve_in;
+ req->in.args[1].size = total_len;
+
+ err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
+ if (err)
+ fuse_retrieve_end(fc, req);
+
+ return err;
+}
+
+static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
+ struct fuse_copy_state *cs)
+{
+ struct fuse_notify_retrieve_out outarg;
+ struct inode *inode;
+ int err;
+
+ err = -EINVAL;
+ if (size != sizeof(outarg))
+ goto copy_finish;
+
+ err = fuse_copy_one(cs, &outarg, sizeof(outarg));
+ if (err)
+ goto copy_finish;
+
+ fuse_copy_finish(cs);
+
+ down_read(&fc->killsb);
+ err = -ENOENT;
+ if (fc->sb) {
+ u64 nodeid = outarg.nodeid;
+
+ inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
+ if (inode) {
+ err = fuse_retrieve(fc, inode, &outarg);
+ iput(inode);
+ }
+ }
+ up_read(&fc->killsb);
+
+ return err;
+
+copy_finish:
+ fuse_copy_finish(cs);
+ return err;
+}
+
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
unsigned int size, struct fuse_copy_state *cs)
{
@@ -1244,6 +1455,12 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
case FUSE_NOTIFY_INVAL_ENTRY:
return fuse_notify_inval_entry(fc, size, cs);
+ case FUSE_NOTIFY_STORE:
+ return fuse_notify_store(fc, size, cs);
+
+ case FUSE_NOTIFY_RETRIEVE:
+ return fuse_notify_retrieve(fc, size, cs);
+
default:
fuse_copy_finish(cs);
return -EINVAL;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 3cdc5f78a40..c9627c95482 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1016,7 +1016,7 @@ static int fuse_permission(struct inode *inode, int mask)
exist. So if permissions are revoked this won't be
noticed immediately, only after the attribute
timeout has expired */
- } else if (mask & MAY_ACCESS) {
+ } else if (mask & (MAY_ACCESS | MAY_CHDIR)) {
err = fuse_access(inode, mask);
} else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) {
if (!(inode->i_mode & S_IXUGO)) {
@@ -1270,21 +1270,18 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
if (!fuse_allow_task(fc, current))
return -EACCES;
- if (fc->flags & FUSE_DEFAULT_PERMISSIONS) {
- err = inode_change_ok(inode, attr);
- if (err)
- return err;
- }
+ if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
+ attr->ia_valid |= ATTR_FORCE;
+
+ err = inode_change_ok(inode, attr);
+ if (err)
+ return err;
if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc)
return 0;
- if (attr->ia_valid & ATTR_SIZE) {
- err = inode_newsize_ok(inode, attr->ia_size);
- if (err)
- return err;
+ if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;
- }
req = fuse_get_req(fc);
if (IS_ERR(req))
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ada0adeb3bb..147c1f71bdb 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -706,7 +706,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
return 0;
}
-static void fuse_write_update_size(struct inode *inode, loff_t pos)
+void fuse_write_update_size(struct inode *inode, loff_t pos)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 8f309f04064..57d4a3a0f10 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -272,6 +272,7 @@ struct fuse_req {
struct fuse_write_in in;
struct fuse_write_out out;
} write;
+ struct fuse_notify_retrieve_in retrieve_in;
struct fuse_lk_in lk_in;
} misc;
@@ -748,4 +749,6 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
unsigned fuse_file_poll(struct file *file, poll_table *wait);
int fuse_dev_release(struct inode *inode, struct file *file);
+void fuse_write_update_size(struct inode *inode, loff_t pos);
+
#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index ec14d19ce50..da9e6e11374 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -122,8 +122,10 @@ void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
fuse_request_send_noreply(fc, req);
}
-static void fuse_clear_inode(struct inode *inode)
+static void fuse_evict_inode(struct inode *inode)
{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
if (inode->i_sb->s_flags & MS_ACTIVE) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -736,7 +738,7 @@ static const struct export_operations fuse_export_operations = {
static const struct super_operations fuse_super_operations = {
.alloc_inode = fuse_alloc_inode,
.destroy_inode = fuse_destroy_inode,
- .clear_inode = fuse_clear_inode,
+ .evict_inode = fuse_evict_inode,
.drop_inode = generic_delete_inode,
.remount_fs = fuse_remount_fs,
.put_super = fuse_put_super,
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index a47b4310711..cc966552214 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -7,7 +7,6 @@ config GFS2_FS
select IP_SCTP if DLM_SCTP
select FS_POSIX_ACL
select CRC32
- select SLOW_WORK
select QUOTACTL
help
A cluster filesystem.
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9f8b52500d6..194fe16d841 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -136,10 +136,7 @@ static int gfs2_writeback_writepage(struct page *page,
if (ret <= 0)
return ret;
- ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
- if (ret == -EAGAIN)
- ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
- return ret;
+ return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}
/**
@@ -637,9 +634,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
}
}
- error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
- if (error)
- goto out_unlock;
+ alloc_required = gfs2_write_alloc_required(ip, pos, len);
if (alloc_required || gfs2_is_jdata(ip))
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
@@ -702,12 +697,12 @@ out:
page_cache_release(page);
/*
- * XXX(hch): the call below should probably be replaced with
+ * XXX(truncate): the call below should probably be replaced with
* a call to the gfs2-specific truncate blocks helper to actually
* release disk blocks..
*/
if (pos + len > ip->i_inode.i_size)
- simple_setsize(&ip->i_inode, ip->i_inode.i_size);
+ truncate_setsize(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
gfs2_trans_end(sdp);
out_trans_fail:
@@ -1047,9 +1042,9 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
if (rv != 1)
goto out; /* dio not valid, fall back to buffered i/o */
- rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
- iov, offset, nr_segs,
- gfs2_get_block_direct, NULL);
+ rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ offset, nr_segs, gfs2_get_block_direct,
+ NULL, NULL, 0);
out:
gfs2_glock_dq_m(1, &gh);
gfs2_holder_uninit(&gh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 4a48c0f4b40..6f482809d1a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1040,7 +1040,8 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
goto out;
if (gfs2_is_stuffed(ip)) {
- u64 dsize = size + sizeof(struct gfs2_inode);
+ u64 dsize = size + sizeof(struct gfs2_dinode);
+ ip->i_disksize = size;
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
@@ -1243,13 +1244,12 @@ int gfs2_file_dealloc(struct gfs2_inode *ip)
* @ip: the file being written to
* @offset: the offset to write to
* @len: the number of bytes being written
- * @alloc_required: set to 1 if an alloc is required, 0 otherwise
*
- * Returns: errno
+ * Returns: 1 if an alloc is required, 0 otherwise
*/
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
- unsigned int len, int *alloc_required)
+ unsigned int len)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head bh;
@@ -1257,26 +1257,23 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
u64 lblock, lblock_stop, size;
u64 end_of_file;
- *alloc_required = 0;
-
if (!len)
return 0;
if (gfs2_is_stuffed(ip)) {
if (offset + len >
sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
- *alloc_required = 1;
+ return 1;
return 0;
}
- *alloc_required = 1;
shift = sdp->sd_sb.sb_bsize_shift;
BUG_ON(gfs2_is_dir(ip));
end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift;
lblock = offset >> shift;
lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
if (lblock_stop > end_of_file)
- return 0;
+ return 1;
size = (lblock_stop - lblock) << shift;
do {
@@ -1284,12 +1281,11 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
bh.b_size = size;
gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
if (!buffer_mapped(&bh))
- return 0;
+ return 1;
size -= bh.b_size;
lblock += (bh.b_size >> ip->i_inode.i_blkbits);
} while(size > 0);
- *alloc_required = 0;
return 0;
}
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index c983177e05a..a20a5213135 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -52,6 +52,6 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size);
int gfs2_truncatei_resume(struct gfs2_inode *ip);
int gfs2_file_dealloc(struct gfs2_inode *ip);
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
- unsigned int len, int *alloc_required);
+ unsigned int len);
#endif /* __BMAP_DOT_H__ */
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 8295c5b5d4a..b9dd88a78dd 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -392,7 +392,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
unsigned totlen = be16_to_cpu(dent->de_rec_len);
if (gfs2_dirent_sentinel(dent))
- actual = GFS2_DIRENT_SIZE(0);
+ actual = 0;
if (totlen - actual >= required)
return 1;
return 0;
@@ -955,7 +955,12 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
/* Change the pointers.
Don't bother distinguishing stuffed from non-stuffed.
This code is complicated enough already. */
- lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL);
+ lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS);
+ if (!lp) {
+ error = -ENOMEM;
+ goto fail_brelse;
+ }
+
/* Change the pointers */
for (x = 0; x < half_len; x++)
lp[x] = cpu_to_be64(bn);
@@ -1063,7 +1068,9 @@ static int dir_double_exhash(struct gfs2_inode *dip)
/* Allocate both the "from" and "to" buffers in one big chunk */
- buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL);
+ buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS);
+ if (!buf)
+ return -ENOMEM;
for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) {
error = gfs2_dir_read_data(dip, (char *)buf,
@@ -1231,6 +1238,25 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
return 0;
}
+static void *gfs2_alloc_sort_buffer(unsigned size)
+{
+ void *ptr = NULL;
+
+ if (size < KMALLOC_MAX_SIZE)
+ ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
+ if (!ptr)
+ ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
+ return ptr;
+}
+
+static void gfs2_free_sort_buffer(void *ptr)
+{
+ if (is_vmalloc_addr(ptr))
+ vfree(ptr);
+ else
+ kfree(ptr);
+}
+
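The helper pair above exists because a directory's sort array can exceed what kmalloc() will reliably provide, so the allocation quietly (__GFP_NOWARN) tries the slab first and falls back to vmalloc space, with the free side dispatching on is_vmalloc_addr(). Hypothetical usage (nents is an assumed entry count):

	void *buf = gfs2_alloc_sort_buffer(nents * sizeof(void *));
	if (!buf)
		return -ENOMEM;
	/* ... fill and sort ... */
	gfs2_free_sort_buffer(buf);   /* kfree() or vfree() as appropriate */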
static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir, int *copied, unsigned *depth,
u64 leaf_no)
@@ -1271,7 +1297,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
* 99 is the maximum number of entries that can fit in a single
* leaf block.
*/
- larr = vmalloc((leaves + entries + 99) * sizeof(void *));
+ larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
if (!larr)
goto out;
darr = (const struct gfs2_dirent **)(larr + leaves);
@@ -1282,7 +1308,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
do {
error = get_leaf(ip, lfn, &bh);
if (error)
- goto out_kfree;
+ goto out_free;
lf = (struct gfs2_leaf *)bh->b_data;
lfn = be64_to_cpu(lf->lf_next);
if (lf->lf_entries) {
@@ -1291,7 +1317,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
gfs2_dirent_gather, NULL, &g);
error = PTR_ERR(dent);
if (IS_ERR(dent))
- goto out_kfree;
+ goto out_free;
if (entries2 != g.offset) {
fs_warn(sdp, "Number of entries corrupt in dir "
"leaf %llu, entries2 (%u) != "
@@ -1300,7 +1326,7 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
entries2, g.offset);
error = -EIO;
- goto out_kfree;
+ goto out_free;
}
error = 0;
larr[leaf++] = bh;
@@ -1312,10 +1338,10 @@ static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
BUG_ON(entries2 != entries);
error = do_filldir_main(ip, offset, opaque, filldir, darr,
entries, copied);
-out_kfree:
+out_free:
for(i = 0; i < leaf; i++)
brelse(larr[i]);
- vfree(larr);
+ gfs2_free_sort_buffer(larr);
out:
return error;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index ed9a94f0ef1..4edd662c823 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -351,7 +351,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long last_index;
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
- int alloc_required = 0;
struct gfs2_holder gh;
struct gfs2_alloc *al;
int ret;
@@ -364,8 +363,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
- ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required);
- if (ret || !alloc_required)
+ if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
goto out_unlock;
ret = -ENOMEM;
al = gfs2_alloc_get(ip);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index ddcdbf49353..9adf8f924e0 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -328,6 +328,30 @@ static void gfs2_holder_wake(struct gfs2_holder *gh)
}
/**
+ * do_error - Something unexpected has happened during a lock request
+ *
+ */
+
+static inline void do_error(struct gfs2_glock *gl, const int ret)
+{
+ struct gfs2_holder *gh, *tmp;
+
+ list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+ if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+ continue;
+ if (ret & LM_OUT_ERROR)
+ gh->gh_error = -EIO;
+ else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
+ gh->gh_error = GLR_TRYFAILED;
+ else
+ continue;
+ list_del_init(&gh->gh_list);
+ trace_gfs2_glock_queue(gh, 0);
+ gfs2_holder_wake(gh);
+ }
+}
+
+/**
* do_promote - promote as many requests as possible on the current queue
* @gl: The glock
*
@@ -375,36 +399,13 @@ restart:
}
if (gh->gh_list.prev == &gl->gl_holders)
return 1;
+ do_error(gl, 0);
break;
}
return 0;
}
/**
- * do_error - Something unexpected has happened during a lock request
- *
- */
-
-static inline void do_error(struct gfs2_glock *gl, const int ret)
-{
- struct gfs2_holder *gh, *tmp;
-
- list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
- if (test_bit(HIF_HOLDER, &gh->gh_iflags))
- continue;
- if (ret & LM_OUT_ERROR)
- gh->gh_error = -EIO;
- else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
- gh->gh_error = GLR_TRYFAILED;
- else
- continue;
- list_del_init(&gh->gh_list);
- trace_gfs2_glock_queue(gh, 0);
- gfs2_holder_wake(gh);
- }
-}
-
-/**
* find_first_waiter - find the first gh that's waiting for the glock
* @gl: the glock
*/
@@ -1062,6 +1063,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
spin_lock(&gl->gl_spin);
add_to_queue(gh);
+ if ((LM_FLAG_NOEXP & gh->gh_flags) &&
+ test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
run_queue(gl, 1);
spin_unlock(&gl->gl_spin);
@@ -1319,6 +1323,36 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
}
/**
+ * gfs2_should_freeze - Figure out if glock should be frozen
+ * @gl: The glock in question
+ *
+ * Glocks are not frozen if (a) the result of the dlm operation is
+ * an error, (b) the locking operation was an unlock operation or
+ * (c) there is a "noexp" flagged request anywhere in the queue
+ *
+ * Returns: 1 if freezing should occur, 0 otherwise
+ */
+
+static int gfs2_should_freeze(const struct gfs2_glock *gl)
+{
+ const struct gfs2_holder *gh;
+
+ if (gl->gl_reply & ~LM_OUT_ST_MASK)
+ return 0;
+ if (gl->gl_target == LM_ST_UNLOCKED)
+ return 0;
+
+ list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+ if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+ continue;
+ if (LM_FLAG_NOEXP & gh->gh_flags)
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
* gfs2_glock_complete - Callback used by locking
* @gl: Pointer to the glock
* @ret: The return value from the dlm
@@ -1328,18 +1362,17 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+
gl->gl_reply = ret;
+
if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
- struct gfs2_holder *gh;
spin_lock(&gl->gl_spin);
- gh = find_first_waiter(gl);
- if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) &&
- (gl->gl_target != LM_ST_UNLOCKED)) ||
- ((ret & ~LM_OUT_ST_MASK) != 0))
+ if (gfs2_should_freeze(gl)) {
set_bit(GLF_FROZEN, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
- if (test_bit(GLF_FROZEN, &gl->gl_flags))
+ spin_unlock(&gl->gl_spin);
return;
+ }
+ spin_unlock(&gl->gl_spin);
}
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gfs2_glock_hold(gl);
@@ -1348,7 +1381,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
}
-static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
+static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
struct gfs2_glock *gl;
int may_demote;
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index b5d7363b22d..fdbf4b366fa 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -12,7 +12,6 @@
#include <linux/fs.h>
#include <linux/workqueue.h>
-#include <linux/slow-work.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
@@ -383,7 +382,7 @@ struct gfs2_journal_extent {
struct gfs2_jdesc {
struct list_head jd_list;
struct list_head extent_list;
- struct slow_work jd_work;
+ struct work_struct jd_work;
struct inode *jd_inode;
unsigned long jd_flags;
#define JDF_RECOVERY 1
@@ -460,6 +459,7 @@ enum {
SDF_NOBARRIERS = 3,
SDF_NORECOVERY = 4,
SDF_DEMOTE = 5,
+ SDF_NOJOURNALID = 6,
};
#define GFS2_FSNAME_LEN 256
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index b5612cbb62a..08140f185a3 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -84,7 +84,7 @@ static int iget_skip_test(struct inode *inode, void *opaque)
struct gfs2_skip_data *data = opaque;
if (ip->i_no_addr == data->no_addr) {
- if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
+ if (inode->i_state & (I_FREEING|I_WILL_FREE)){
data->skipped = 1;
return 0;
}
@@ -169,7 +169,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
{
struct inode *inode;
struct gfs2_inode *ip;
- struct gfs2_glock *io_gl;
+ struct gfs2_glock *io_gl = NULL;
int error;
inode = gfs2_iget(sb, no_addr);
@@ -198,6 +198,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
ip->i_iopen_gh.gh_gl->gl_object = ip;
gfs2_glock_put(io_gl);
+ io_gl = NULL;
if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
goto gfs2_nfsbypass;
@@ -228,7 +229,8 @@ gfs2_nfsbypass:
fail_glock:
gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
- gfs2_glock_put(io_gl);
+ if (io_gl)
+ gfs2_glock_put(io_gl);
fail_put:
if (inode->i_state & I_NEW)
ip->i_gl->gl_object = NULL;
@@ -256,7 +258,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
{
struct gfs2_sbd *sdp;
struct gfs2_inode *ip;
- struct gfs2_glock *io_gl;
+ struct gfs2_glock *io_gl = NULL;
int error;
struct gfs2_holder gh;
struct inode *inode;
@@ -293,6 +295,7 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
ip->i_iopen_gh.gh_gl->gl_object = ip;
gfs2_glock_put(io_gl);
+ io_gl = NULL;
inode->i_mode = DT2IF(DT_UNKNOWN);
@@ -319,7 +322,8 @@ void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
fail_glock:
gfs2_glock_dq(&ip->i_iopen_gh);
fail_iopen:
- gfs2_glock_put(io_gl);
+ if (io_gl)
+ gfs2_glock_put(io_gl);
fail_put:
ip->i_gl->gl_object = NULL;
gfs2_glock_put(ip->i_gl);
@@ -987,18 +991,29 @@ fail:
static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
+ struct inode *inode = &ip->i_inode;
struct buffer_head *dibh;
int error;
error = gfs2_meta_inode_buffer(ip, &dibh);
- if (!error) {
- error = inode_setattr(&ip->i_inode, attr);
- gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
- gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- gfs2_dinode_out(ip, dibh->b_data);
- brelse(dibh);
+ if (error)
+ return error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
}
- return error;
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
+ gfs2_assert_warn(GFS2_SB(inode), !error);
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+ gfs2_dinode_out(ip, dibh->b_data);
+ brelse(dibh);
+ return 0;
}
/**
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 6a857e24f94..cde1248a622 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -595,7 +595,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
goto skip_barrier;
get_bh(bh);
- submit_bh(WRITE_SYNC | (1 << BIO_RW_BARRIER) | (1 << BIO_RW_META), bh);
+ submit_bh(WRITE_BARRIER | REQ_META, bh);
wait_on_buffer(bh);
if (buffer_eopnotsupp(bh)) {
clear_buffer_eopnotsupp(bh);
@@ -605,7 +605,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
lock_buffer(bh);
skip_barrier:
get_bh(bh);
- submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh);
+ submit_bh(WRITE_SYNC | REQ_META, bh);
wait_on_buffer(bh);
}
if (!buffer_uptodate(bh))
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index fb2a5f93b7c..b1e9630eb46 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/gfs2_ondisk.h>
#include <asm/atomic.h>
-#include <linux/slow-work.h>
#include "gfs2.h"
#include "incore.h"
@@ -24,6 +23,7 @@
#include "util.h"
#include "glock.h"
#include "quota.h"
+#include "recovery.h"
static struct shrinker qd_shrinker = {
.shrink = gfs2_shrink_qd_memory,
@@ -138,9 +138,11 @@ static int __init init_gfs2_fs(void)
if (error)
goto fail_unregister;
- error = slow_work_register_user(THIS_MODULE);
- if (error)
- goto fail_slow;
+ error = -ENOMEM;
+ gfs_recovery_wq = alloc_workqueue("gfs_recovery",
+ WQ_NON_REENTRANT | WQ_RESCUER, 0);
+ if (!gfs_recovery_wq)
+ goto fail_wq;
gfs2_register_debugfs();
@@ -148,7 +150,7 @@ static int __init init_gfs2_fs(void)
return 0;
-fail_slow:
+fail_wq:
unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
unregister_filesystem(&gfs2_fs_type);
@@ -190,7 +192,7 @@ static void __exit exit_gfs2_fs(void)
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
- slow_work_unregister_user(THIS_MODULE);
+ destroy_workqueue(gfs_recovery_wq);
kmem_cache_destroy(gfs2_quotad_cachep);
kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 18176d0b75d..f3b071f921a 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -36,8 +36,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
{
struct buffer_head *bh, *head;
int nr_underway = 0;
- int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE));
+ int write_op = REQ_META |
+ (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
}
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
- submit_bh(READ_SYNC | (1 << BIO_RW_META), bh);
+ submit_bh(READ_SYNC | REQ_META, bh);
if (!(flags & DIO_WAIT))
return 0;
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
if (!buffer_locked(first_bh))
- ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh);
+ ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
dblock++;
extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 3593b3a7290..4d4b1e8ac64 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -17,7 +17,6 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/slow-work.h>
#include <linux/quotaops.h>
#include "gfs2.h"
@@ -76,7 +75,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
sb->s_fs_info = sdp;
sdp->sd_vfs = sb;
-
+ set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);
init_waitqueue_head(&sdp->sd_glock_wait);
@@ -275,7 +274,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
- submit_bio(READ_SYNC | (1 << BIO_RW_META), bio);
+ submit_bio(READ_SYNC | REQ_META, bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {
@@ -673,7 +672,7 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
break;
INIT_LIST_HEAD(&jd->extent_list);
- slow_work_init(&jd->jd_work, &gfs2_recover_ops);
+ INIT_WORK(&jd->jd_work, gfs2_recover_func);
jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
if (!jd->jd_inode)
@@ -782,7 +781,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
if (sdp->sd_lockstruct.ls_first) {
unsigned int x;
for (x = 0; x < sdp->sd_journals; x++) {
- error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x));
+ error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x),
+ true);
if (error) {
fs_err(sdp, "error recovering journal %u: %d\n",
x, error);
@@ -792,7 +792,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
gfs2_others_may_mount(sdp);
} else if (!sdp->sd_args.ar_spectator) {
- error = gfs2_recover_journal(sdp->sd_jdesc);
+ error = gfs2_recover_journal(sdp->sd_jdesc, true);
if (error) {
fs_err(sdp, "error recovering my journal: %d\n", error);
goto fail_jinode_gh;
@@ -1050,7 +1050,8 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
ret = match_int(&tmp[0], &option);
if (ret || option < 0)
goto hostdata_error;
- ls->ls_jid = option;
+ if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
+ ls->ls_jid = option;
break;
case Opt_id:
/* Obsolete, but left for backward compat purposes */
@@ -1102,6 +1103,24 @@ void gfs2_lm_unmount(struct gfs2_sbd *sdp)
lm->lm_unmount(sdp);
}
+static int gfs2_journalid_wait(void *word)
+{
+ if (signal_pending(current))
+ return -EINTR;
+ schedule();
+ return 0;
+}
+
+static int wait_on_journal(struct gfs2_sbd *sdp)
+{
+ if (sdp->sd_args.ar_spectator)
+ return 0;
+ if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+ return 0;
+
+ return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE);
+}
+
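The wake-up half of this wait is not in this hunk; the setter that learns the journal ID (in the lock-module/sysfs path — location hedged) completes the standard wait-bit pairing roughly as:

	sdp->sd_lockstruct.ls_jid = jid;             /* journal ID arrives */
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_clear_bit();                   /* order bit vs. wake */
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);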
void gfs2_online_uevent(struct gfs2_sbd *sdp)
{
struct super_block *sb = sdp->sd_vfs;
@@ -1194,6 +1213,10 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
if (error)
goto fail_locking;
+ error = wait_on_journal(sdp);
+ if (error)
+ goto fail_sb;
+
error = init_inodes(sdp, DO);
if (error)
goto fail_sb;
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 98cdd05f331..1009be2c973 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -1072,7 +1072,7 @@ int gfs2_permission(struct inode *inode, int mask)
}
/*
- * XXX: should be changed to have proper ordering by opencoding simple_setsize
+ * XXX(truncate): the truncate_setsize calls should be moved to the end.
*/
static int setattr_size(struct inode *inode, struct iattr *attr)
{
@@ -1084,10 +1084,8 @@ static int setattr_size(struct inode *inode, struct iattr *attr)
error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
if (error)
return error;
- error = simple_setsize(inode, attr->ia_size);
+ truncate_setsize(inode, attr->ia_size);
gfs2_trans_end(sdp);
- if (error)
- return error;
}
error = gfs2_truncatei(ip, attr->ia_size);
@@ -1136,8 +1134,16 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (error)
goto out_end_trans;
- error = inode_setattr(inode, attr);
- gfs2_assert_warn(sdp, !error);
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ int error;
+
+ error = vmtruncate(inode, attr->ia_size);
+ gfs2_assert_warn(sdp, !error);
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 49667d68769..1bc6b5695e6 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -77,7 +77,7 @@ static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);
-int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
struct gfs2_quota_data *qd;
struct gfs2_sbd *sdp;
@@ -694,10 +694,8 @@ get_a_page:
if (!buffer_mapped(bh))
goto unlock_out;
/* If it's a newly allocated disk block for quota, zero it */
- if (buffer_new(bh)) {
- memset(bh->b_data, 0, bh->b_size);
- set_buffer_uptodate(bh);
- }
+ if (buffer_new(bh))
+ zero_user(page, pos - blocksize, bh->b_size);
}
if (PageUptodate(page))
@@ -723,7 +721,7 @@ get_a_page:
/* If quota straddles page boundary, we need to update the rest of the
* quota at the beginning of the next page */
- if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
+ if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
ptr = ptr + nbytes;
nbytes = sizeof(struct gfs2_quota) - nbytes;
offset = 0;
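Worked example for the corrected straddle test (illustrative numbers; sizeof(struct gfs2_quota) taken as 0x60, 4 KiB pages):

	/* offset = 0xfd0 in the page: 0xfd0 + 0x60 = 0x1030 > 0x1000,   */
	/* so the record straddles; nbytes = 0x1000 - 0xfd0 = 0x30 bytes */
	/* land here and the remaining 0x30 go to offset 0 of the next   */
	/* page. The old test (offset != 0) also fired for records that  */
	/* fit entirely within a later page, spilling a bogus tail write. */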
@@ -789,15 +787,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
goto out;
for (x = 0; x < num_qd; x++) {
- int alloc_required;
-
offset = qd2offset(qda[x]);
- error = gfs2_write_alloc_required(ip, offset,
- sizeof(struct gfs2_quota),
- &alloc_required);
- if (error)
- goto out_gunlock;
- if (alloc_required)
+ if (gfs2_write_alloc_required(ip, offset,
+ sizeof(struct gfs2_quota)))
nalloc++;
}
@@ -1457,10 +1449,10 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
switch (sdp->sd_args.ar_quota) {
case GFS2_QUOTA_ON:
- fqs->qs_flags |= (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
+ fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
/*FALLTHRU*/
case GFS2_QUOTA_ACCOUNT:
- fqs->qs_flags |= (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
+ fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
break;
case GFS2_QUOTA_OFF:
break;
@@ -1506,7 +1498,7 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
fdq->d_version = FS_DQUOT_VERSION;
- fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+ fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
fdq->d_id = id;
fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
@@ -1541,12 +1533,12 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
switch(type) {
case USRQUOTA:
type = QUOTA_USER;
- if (fdq->d_flags != XFS_USER_QUOTA)
+ if (fdq->d_flags != FS_USER_QUOTA)
return -EINVAL;
break;
case GRPQUOTA:
type = QUOTA_GROUP;
- if (fdq->d_flags != XFS_GROUP_QUOTA)
+ if (fdq->d_flags != FS_GROUP_QUOTA)
return -EINVAL;
break;
default:
@@ -1586,10 +1578,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
goto out_i;
offset = qd2offset(qd);
- error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
- &alloc_required);
- if (error)
- goto out_i;
+ alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
if (alloc_required) {
al = gfs2_alloc_get(ip);
if (al == NULL)
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 195f60c8bd1..e7d236ca48b 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -51,7 +51,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
return ret;
}
-extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
+extern int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask);
extern const struct quotactl_ops gfs2_quotactl_ops;
#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 4b9bece3d43..f7f89a94a5a 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -14,7 +14,6 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/slow-work.h>
#include "gfs2.h"
#include "incore.h"
@@ -28,6 +27,8 @@
#include "util.h"
#include "dir.h"
+struct workqueue_struct *gfs_recovery_wq;
+
int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
struct buffer_head **bh)
{
@@ -443,23 +444,7 @@ static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
}
-static int gfs2_recover_get_ref(struct slow_work *work)
-{
- struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work);
- if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags))
- return -EBUSY;
- return 0;
-}
-
-static void gfs2_recover_put_ref(struct slow_work *work)
-{
- struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work);
- clear_bit(JDF_RECOVERY, &jd->jd_flags);
- smp_mb__after_clear_bit();
- wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
-}
-
-static void gfs2_recover_work(struct slow_work *work)
+void gfs2_recover_func(struct work_struct *work)
{
struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work);
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
@@ -578,7 +563,7 @@ static void gfs2_recover_work(struct slow_work *work)
gfs2_glock_dq_uninit(&j_gh);
fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
- return;
+ goto done;
fail_gunlock_tr:
gfs2_glock_dq_uninit(&t_gh);
@@ -590,32 +575,35 @@ fail_gunlock_j:
}
fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done");
-
fail:
gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
+done:
+ clear_bit(JDF_RECOVERY, &jd->jd_flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
}
-struct slow_work_ops gfs2_recover_ops = {
- .owner = THIS_MODULE,
- .get_ref = gfs2_recover_get_ref,
- .put_ref = gfs2_recover_put_ref,
- .execute = gfs2_recover_work,
-};
-
-
static int gfs2_recovery_wait(void *word)
{
schedule();
return 0;
}
-int gfs2_recover_journal(struct gfs2_jdesc *jd)
+int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
{
int rv;
- rv = slow_work_enqueue(&jd->jd_work);
- if (rv)
- return rv;
- wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait, TASK_UNINTERRUPTIBLE);
+
+ if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags))
+ return -EBUSY;
+
+ /* we have JDF_RECOVERY, queue should always succeed */
+ rv = queue_work(gfs_recovery_wq, &jd->jd_work);
+ BUG_ON(!rv);
+
+ if (wait)
+ wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait,
+ TASK_UNINTERRUPTIBLE);
+
return 0;
}
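
The conversion above is the standard slow-work-to-workqueue recipe of this release: the exclusivity that slow-work's get_ref() used to provide becomes a test_and_set_bit() on a status flag, and callers that need synchronous behaviour sleep on that same bit until the work item clears it. Reduced to its essentials, and assuming the workqueue and INIT_WORK() setup happen at init time, the pattern is (MY_BUSY, struct my_obj and my_wq are placeholders):

static void my_work_func(struct work_struct *work)
{
        struct my_obj *obj = container_of(work, struct my_obj, work);

        /* ... the deferred processing ... */

        clear_bit(MY_BUSY, &obj->flags);
        smp_mb__after_clear_bit();      /* make the clear visible to waiters */
        wake_up_bit(&obj->flags, MY_BUSY);
}

static int my_wait(void *word)
{
        schedule();
        return 0;
}

int my_run(struct my_obj *obj, bool wait)
{
        if (test_and_set_bit(MY_BUSY, &obj->flags))
                return -EBUSY;          /* already queued or running */
        queue_work(my_wq, &obj->work);
        if (wait)
                wait_on_bit(&obj->flags, MY_BUSY, my_wait,
                            TASK_UNINTERRUPTIBLE);
        return 0;
}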
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 1616ac22569..2226136c764 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -12,6 +12,8 @@
#include "incore.h"
+extern struct workqueue_struct *gfs_recovery_wq;
+
static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk)
{
if (++*blk == sdp->sd_jdesc->jd_blocks)
@@ -27,8 +29,8 @@ extern void gfs2_revoke_clean(struct gfs2_sbd *sdp);
extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head);
-extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd);
-extern struct slow_work_ops gfs2_recover_ops;
+extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
+extern void gfs2_recover_func(struct work_struct *work);
#endif /* __RECOVERY_DOT_H__ */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 4d1aad38f1b..77cb9f830ee 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -342,8 +342,6 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
- int ar;
- int error;
if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
(ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
@@ -352,13 +350,12 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
}
jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
- error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
- if (!error && ar) {
+ if (gfs2_write_alloc_required(ip, 0, ip->i_disksize)) {
gfs2_consist_inode(ip);
- error = -EIO;
+ return -EIO;
}
- return error;
+ return 0;
}
/**
@@ -1191,7 +1188,7 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
* node for later deallocation.
*/
-static void gfs2_drop_inode(struct inode *inode)
+static int gfs2_drop_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
@@ -1200,26 +1197,7 @@ static void gfs2_drop_inode(struct inode *inode)
if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
clear_nlink(inode);
}
- generic_drop_inode(inode);
-}
-
-/**
- * gfs2_clear_inode - Deallocate an inode when VFS is done with it
- * @inode: The VFS inode
- *
- */
-
-static void gfs2_clear_inode(struct inode *inode)
-{
- struct gfs2_inode *ip = GFS2_I(inode);
-
- ip->i_gl->gl_object = NULL;
- gfs2_glock_put(ip->i_gl);
- ip->i_gl = NULL;
- if (ip->i_iopen_gh.gh_gl) {
- ip->i_iopen_gh.gh_gl->gl_object = NULL;
- gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- }
+ return generic_drop_inode(inode);
}
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
@@ -1347,13 +1325,16 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
* is safe, just less efficient.
*/
-static void gfs2_delete_inode(struct inode *inode)
+static void gfs2_evict_inode(struct inode *inode)
{
struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int error;
+ if (inode->i_nlink)
+ goto out;
+
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
@@ -1407,10 +1388,18 @@ out_unlock:
gfs2_holder_uninit(&ip->i_iopen_gh);
gfs2_glock_dq_uninit(&gh);
if (error && error != GLR_TRYFAILED && error != -EROFS)
- fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
+ fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out:
truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
+ end_writeback(inode);
+
+ ip->i_gl->gl_object = NULL;
+ gfs2_glock_put(ip->i_gl);
+ ip->i_gl = NULL;
+ if (ip->i_iopen_gh.gh_gl) {
+ ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+ }
}
static struct inode *gfs2_alloc_inode(struct super_block *sb)
@@ -1434,14 +1423,13 @@ const struct super_operations gfs2_super_ops = {
.alloc_inode = gfs2_alloc_inode,
.destroy_inode = gfs2_destroy_inode,
.write_inode = gfs2_write_inode,
- .delete_inode = gfs2_delete_inode,
+ .evict_inode = gfs2_evict_inode,
.put_super = gfs2_put_super,
.sync_fs = gfs2_sync_fs,
.freeze_fs = gfs2_freeze,
.unfreeze_fs = gfs2_unfreeze,
.statfs = gfs2_statfs,
.remount_fs = gfs2_remount_fs,
- .clear_inode = gfs2_clear_inode,
.drop_inode = gfs2_drop_inode,
.show_options = gfs2_show_options,
};
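
This super_operations update is one instance of the tree-wide change in this merge: ->delete_inode (which ran only for unlinked inodes) and ->clear_inode (final teardown for every inode) collapse into a single ->evict_inode that runs unconditionally and checks i_nlink itself. The rough shape every converted filesystem in this diff ends up with; myfs_* is a placeholder and the commented helper is hypothetical:

static void myfs_evict_inode(struct inode *inode)
{
        if (!inode->i_nlink) {
                /* former ->delete_inode work, e.g. a hypothetical
                 * myfs_free_disk_blocks(inode); */
        }
        truncate_inode_pages(&inode->i_data, 0);
        end_writeback(inode);           /* replaces clear_inode() */
        /* former ->clear_inode work: drop fs-private references */
}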
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 37f5393e68e..ccacffd2faa 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -25,6 +25,7 @@
#include "quota.h"
#include "util.h"
#include "glops.h"
+#include "recovery.h"
struct gfs2_attr {
struct attribute attr;
@@ -325,6 +326,30 @@ static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
return sprintf(buf, "%d\n", ls->ls_first);
}
+static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+{
+ unsigned first;
+ int rv;
+
+ rv = sscanf(buf, "%u", &first);
+ if (rv != 1 || first > 1)
+ return -EINVAL;
+ spin_lock(&sdp->sd_jindex_spin);
+ rv = -EBUSY;
+ if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
+ goto out;
+ rv = -EINVAL;
+ if (sdp->sd_args.ar_spectator)
+ goto out;
+ if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+ goto out;
+ sdp->sd_lockstruct.ls_first = first;
+ rv = 0;
+out:
+ spin_unlock(&sdp->sd_jindex_spin);
+ return rv ? rv : len;
+}
+
static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
@@ -352,7 +377,7 @@ static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
if (jd->jd_jid != jid)
continue;
- rv = slow_work_enqueue(&jd->jd_work);
+ rv = gfs2_recover_journal(jd, false);
break;
}
out:
@@ -377,14 +402,41 @@ static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
return sprintf(buf, "%u\n", sdp->sd_lockstruct.ls_jid);
}
+static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+{
+ unsigned jid;
+ int rv;
+
+ rv = sscanf(buf, "%u", &jid);
+ if (rv != 1)
+ return -EINVAL;
+
+ spin_lock(&sdp->sd_jindex_spin);
+ rv = -EINVAL;
+ if (sdp->sd_args.ar_spectator)
+ goto out;
+ if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
+ goto out;
+ rv = -EBUSY;
+ if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
+ goto out;
+ sdp->sd_lockstruct.ls_jid = jid;
+ smp_mb__after_clear_bit();
+ wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
+ rv = 0;
+out:
+ spin_unlock(&sdp->sd_jindex_spin);
+ return rv ? rv : len;
+}
+
#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
-GDLM_ATTR(jid, 0444, jid_show, NULL);
-GDLM_ATTR(first, 0444, lkfirst_show, NULL);
+GDLM_ATTR(jid, 0644, jid_show, jid_store);
+GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0600, NULL, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
@@ -564,7 +616,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
- if (!sdp->sd_args.ar_spectator)
+ if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
add_uevent_var(env, "JOURNALID=%u", sdp->sd_lockstruct.ls_jid);
if (gfs2_uuid_valid(uuid))
add_uevent_var(env, "UUID=%pUB", uuid);
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 82f93da00d1..776af6eb4bc 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1296,6 +1296,7 @@ fail:
int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
{
+ struct inode *inode = &ip->i_inode;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_ea_location el;
struct buffer_head *dibh;
@@ -1321,14 +1322,25 @@ int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
return error;
error = gfs2_meta_inode_buffer(ip, &dibh);
- if (!error) {
- error = inode_setattr(&ip->i_inode, attr);
- gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
- gfs2_trans_add_bh(ip->i_gl, dibh, 1);
- gfs2_dinode_out(ip, dibh->b_data);
- brelse(dibh);
+ if (error)
+ goto out_trans_end;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ int error;
+
+ error = vmtruncate(inode, attr->ia_size);
+ gfs2_assert_warn(GFS2_SB(inode), !error);
}
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
+ gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+ gfs2_dinode_out(ip, dibh->b_data);
+ brelse(dibh);
+
+out_trans_end:
gfs2_trans_end(sdp);
return error;
}
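
The hunk above, like the hfs, hfsplus, hpfs, hostfs and hugetlbfs hunks further down, is part of the removal of inode_setattr() in this release: each caller now performs the size change explicitly via vmtruncate() and applies the remaining attributes itself. The canonical replacement sequence, with error unwinding left to each filesystem, is roughly:

        /* after inode_change_ok(inode, attr) has succeeded: */
        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size != i_size_read(inode)) {
                error = vmtruncate(inode, attr->ia_size);
                if (error)
                        return error;   /* or jump to the fs's unwind label */
        }
        setattr_copy(inode, attr);      /* uid/gid/mode/timestamps */
        mark_inode_dirty(inode);
        return 0;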
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index fe35e3b626c..4f55651aaa5 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -193,7 +193,7 @@ extern int hfs_inode_setattr(struct dentry *, struct iattr *);
extern void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
__be32 log_size, __be32 phys_size, u32 clump_size);
extern struct inode *hfs_iget(struct super_block *, struct hfs_cat_key *, hfs_cat_rec *);
-extern void hfs_clear_inode(struct inode *);
+extern void hfs_evict_inode(struct inode *);
extern void hfs_delete_inode(struct inode *);
/* attr.c */
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 14f5cb1b9fd..397b7adc7ce 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -39,10 +39,19 @@ static int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
+ int ret;
+
*pagep = NULL;
- return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
@@ -112,9 +121,24 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+ ssize_t ret;
- return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, hfs_get_block, NULL);
+
+ /*
+ * In case of error, an extending write may have instantiated a few
+ * blocks outside i_size. Trim these off again.
+ */
+ if (unlikely((rw & WRITE) && ret < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
+
+ return ret;
}
static int hfs_writepages(struct address_space *mapping,
@@ -507,8 +531,10 @@ out:
return NULL;
}
-void hfs_clear_inode(struct inode *inode)
+void hfs_evict_inode(struct inode *inode)
{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
iput(HFS_I(inode)->rsrc_inode);
@@ -588,13 +614,43 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
attr->ia_mode = inode->i_mode & ~S_IWUGO;
attr->ia_mode &= S_ISDIR(inode->i_mode) ? ~hsb->s_dir_umask: ~hsb->s_file_umask;
}
- error = inode_setattr(inode, attr);
- if (error)
- return error;
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
return 0;
}
+static int hfs_file_fsync(struct file *filp, int datasync)
+{
+ struct inode *inode = filp->f_mapping->host;
+ struct super_block *sb;
+ int ret, err;
+
+ /* sync the inode to buffers */
+ ret = write_inode_now(inode, 0);
+
+ /* sync the superblock to buffers */
+ sb = inode->i_sb;
+ if (sb->s_dirt) {
+ lock_super(sb);
+ sb->s_dirt = 0;
+ if (!(sb->s_flags & MS_RDONLY))
+ hfs_mdb_commit(sb);
+ unlock_super(sb);
+ }
+ /* .. finally sync the buffers to disk */
+ err = sync_blockdev(sb->s_bdev);
+ if (!ret)
+ ret = err;
+ return ret;
+}
static const struct file_operations hfs_file_operations = {
.llseek = generic_file_llseek,
@@ -604,7 +660,7 @@ static const struct file_operations hfs_file_operations = {
.aio_write = generic_file_aio_write,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
- .fsync = file_fsync,
+ .fsync = hfs_file_fsync,
.open = hfs_file_open,
.release = hfs_file_release,
};
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 0a81eb7111f..34235d4bf08 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -181,7 +181,7 @@ static const struct super_operations hfs_super_operations = {
.alloc_inode = hfs_alloc_inode,
.destroy_inode = hfs_destroy_inode,
.write_inode = hfs_write_inode,
- .clear_inode = hfs_clear_inode,
+ .evict_inode = hfs_evict_inode,
.put_super = hfs_put_super,
.write_super = hfs_write_super,
.sync_fs = hfs_sync_fs,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 6505c30ad96..dc856be3c2b 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -351,6 +351,7 @@ int hfsplus_show_options(struct seq_file *, struct vfsmount *);
/* super.c */
struct inode *hfsplus_iget(struct super_block *, unsigned long);
+int hfsplus_sync_fs(struct super_block *sb, int wait);
/* tables.c */
extern u16 hfsplus_case_fold_table[];
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 9bbb82924a2..c5a979d62c6 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -31,10 +31,19 @@ static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
+ int ret;
+
*pagep = NULL;
- return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
hfsplus_get_block,
&HFSPLUS_I(mapping->host).phys_size);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
@@ -105,9 +114,24 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
+ ssize_t ret;
- return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, hfsplus_get_block, NULL);
+
+ /*
+ * In case of error, an extending write may have instantiated a few
+ * blocks outside i_size. Trim these off again.
+ */
+ if (unlikely((rw & WRITE) && ret < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
+
+ return ret;
}
static int hfsplus_writepages(struct address_space *mapping,
@@ -266,9 +290,56 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
return 0;
}
+static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ error = inode_change_ok(inode, attr);
+ if (error)
+ return error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
+static int hfsplus_file_fsync(struct file *filp, int datasync)
+{
+ struct inode *inode = filp->f_mapping->host;
+ struct super_block *sb;
+ int ret, err;
+
+ /* sync the inode to buffers */
+ ret = write_inode_now(inode, 0);
+
+ /* sync the superblock to buffers */
+ sb = inode->i_sb;
+ if (sb->s_dirt) {
+ if (!(sb->s_flags & MS_RDONLY))
+ hfsplus_sync_fs(sb, 1);
+ else
+ sb->s_dirt = 0;
+ }
+
+ /* .. finally sync the buffers to disk */
+ err = sync_blockdev(sb->s_bdev);
+ if (!ret)
+ ret = err;
+ return ret;
+}
+
static const struct inode_operations hfsplus_file_inode_operations = {
.lookup = hfsplus_file_lookup,
.truncate = hfsplus_file_truncate,
+ .setattr = hfsplus_setattr,
.setxattr = hfsplus_setxattr,
.getxattr = hfsplus_getxattr,
.listxattr = hfsplus_listxattr,
@@ -282,7 +353,7 @@ static const struct file_operations hfsplus_file_operations = {
.aio_write = generic_file_aio_write,
.mmap = generic_file_mmap,
.splice_read = generic_file_splice_read,
- .fsync = file_fsync,
+ .fsync = hfsplus_file_fsync,
.open = hfsplus_file_open,
.release = hfsplus_file_release,
.unlocked_ioctl = hfsplus_ioctl,
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 74b473a8ef9..3b55c050c74 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -145,16 +145,18 @@ static int hfsplus_write_inode(struct inode *inode,
return ret;
}
-static void hfsplus_clear_inode(struct inode *inode)
+static void hfsplus_evict_inode(struct inode *inode)
{
- dprint(DBG_INODE, "hfsplus_clear_inode: %lu\n", inode->i_ino);
+ dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
if (HFSPLUS_IS_RSRC(inode)) {
HFSPLUS_I(HFSPLUS_I(inode).rsrc_inode).rsrc_inode = NULL;
iput(HFSPLUS_I(inode).rsrc_inode);
}
}
-static int hfsplus_sync_fs(struct super_block *sb, int wait)
+int hfsplus_sync_fs(struct super_block *sb, int wait)
{
struct hfsplus_vh *vhdr = HFSPLUS_SB(sb).s_vhdr;
@@ -293,7 +295,7 @@ static const struct super_operations hfsplus_sops = {
.alloc_inode = hfsplus_alloc_inode,
.destroy_inode = hfsplus_destroy_inode,
.write_inode = hfsplus_write_inode,
- .clear_inode = hfsplus_clear_inode,
+ .evict_inode = hfsplus_evict_inode,
.put_super = hfsplus_put_super,
.write_super = hfsplus_write_super,
.sync_fs = hfsplus_sync_fs,
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 2f34f8f2134..6bbd75c5589 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -53,18 +53,28 @@ struct hostfs_iattr {
struct timespec ia_ctime;
};
-extern int stat_file(const char *path, unsigned long long *inode_out,
- int *mode_out, int *nlink_out, int *uid_out, int *gid_out,
- unsigned long long *size_out, struct timespec *atime_out,
- struct timespec *mtime_out, struct timespec *ctime_out,
- int *blksize_out, unsigned long long *blocks_out, int fd);
+struct hostfs_stat {
+ unsigned long long ino;
+ unsigned int mode;
+ unsigned int nlink;
+ unsigned int uid;
+ unsigned int gid;
+ unsigned long long size;
+ struct timespec atime, mtime, ctime;
+ unsigned int blksize;
+ unsigned long long blocks;
+ unsigned int maj;
+ unsigned int min;
+};
+
+extern int stat_file(const char *path, struct hostfs_stat *p, int fd);
extern int access_file(char *path, int r, int w, int x);
extern int open_file(char *path, int r, int w, int append);
-extern int file_type(const char *path, int *maj, int *min);
extern void *open_dir(char *path, int *err_out);
extern char *read_dir(void *stream, unsigned long long *pos,
unsigned long long *ino_out, int *len_out);
extern void close_file(void *stream);
+extern int replace_file(int oldfd, int fd);
extern void close_dir(void *stream);
extern int read_file(int fd, unsigned long long *offset, char *buf, int len);
extern int write_file(int fd, unsigned long long *offset, const char *buf,
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 87ac1891a18..dd1e55535a4 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -14,12 +14,12 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
+#include <linux/namei.h>
#include "hostfs.h"
#include "init.h"
#include "kern.h"
struct hostfs_inode_info {
- char *host_filename;
int fd;
fmode_t mode;
struct inode vfs_inode;
@@ -49,7 +49,7 @@ static int append = 0;
static const struct inode_operations hostfs_iops;
static const struct inode_operations hostfs_dir_iops;
-static const struct address_space_operations hostfs_link_aops;
+static const struct inode_operations hostfs_link_iops;
#ifndef MODULE
static int __init hostfs_args(char *options, int *add)
@@ -90,71 +90,58 @@ __uml_setup("hostfs=", hostfs_args,
);
#endif
-static char *dentry_name(struct dentry *dentry, int extra)
+static char *__dentry_name(struct dentry *dentry, char *name)
{
- struct dentry *parent;
- char *root, *name;
- int len;
-
- len = 0;
- parent = dentry;
- while (parent->d_parent != parent) {
- len += parent->d_name.len + 1;
- parent = parent->d_parent;
- }
+ char *p = __dentry_path(dentry, name, PATH_MAX);
+ char *root;
+ size_t len;
- root = HOSTFS_I(parent->d_inode)->host_filename;
- len += strlen(root);
- name = kmalloc(len + extra + 1, GFP_KERNEL);
- if (name == NULL)
- return NULL;
+ spin_unlock(&dcache_lock);
- name[len] = '\0';
- parent = dentry;
- while (parent->d_parent != parent) {
- len -= parent->d_name.len + 1;
- name[len] = '/';
- strncpy(&name[len + 1], parent->d_name.name,
- parent->d_name.len);
- parent = parent->d_parent;
+ root = dentry->d_sb->s_fs_info;
+ len = strlen(root);
+ if (IS_ERR(p)) {
+ __putname(name);
+ return NULL;
+ }
+ strlcpy(name, root, PATH_MAX);
+ if (len > p - name) {
+ __putname(name);
+ return NULL;
+ }
+ if (p > name + len) {
+ char *s = name + len;
+ while ((*s++ = *p++) != '\0')
+ ;
}
- strncpy(name, root, strlen(root));
return name;
}
-static char *inode_name(struct inode *ino, int extra)
+static char *dentry_name(struct dentry *dentry)
{
- struct dentry *dentry;
+ char *name = __getname();
+ if (!name)
+ return NULL;
- dentry = list_entry(ino->i_dentry.next, struct dentry, d_alias);
- return dentry_name(dentry, extra);
+ spin_lock(&dcache_lock);
+ return __dentry_name(dentry, name); /* will unlock */
}
-static int read_name(struct inode *ino, char *name)
+static char *inode_name(struct inode *ino)
{
- /*
- * The non-int inode fields are copied into ints by stat_file and
- * then copied into the inode because passing the actual pointers
- * in and having them treated as int * breaks on big-endian machines
- */
- int err;
- int i_mode, i_nlink, i_blksize;
- unsigned long long i_size;
- unsigned long long i_ino;
- unsigned long long i_blocks;
-
- err = stat_file(name, &i_ino, &i_mode, &i_nlink, &ino->i_uid,
- &ino->i_gid, &i_size, &ino->i_atime, &ino->i_mtime,
- &ino->i_ctime, &i_blksize, &i_blocks, -1);
- if (err)
- return err;
+ struct dentry *dentry;
+ char *name = __getname();
+ if (!name)
+ return NULL;
- ino->i_ino = i_ino;
- ino->i_mode = i_mode;
- ino->i_nlink = i_nlink;
- ino->i_size = i_size;
- ino->i_blocks = i_blocks;
- return 0;
+ spin_lock(&dcache_lock);
+ if (list_empty(&ino->i_dentry)) {
+ spin_unlock(&dcache_lock);
+ __putname(name);
+ return NULL;
+ }
+ dentry = list_first_entry(&ino->i_dentry, struct dentry, d_alias);
+ return __dentry_name(dentry, name); /* will unlock */
}
static char *follow_link(char *link)
@@ -205,53 +192,11 @@ static char *follow_link(char *link)
return ERR_PTR(n);
}
-static int hostfs_read_inode(struct inode *ino)
-{
- char *name;
- int err = 0;
-
- /*
- * Unfortunately, we are called from iget() when we don't have a dentry
- * allocated yet.
- */
- if (list_empty(&ino->i_dentry))
- goto out;
-
- err = -ENOMEM;
- name = inode_name(ino, 0);
- if (name == NULL)
- goto out;
-
- if (file_type(name, NULL, NULL) == OS_TYPE_SYMLINK) {
- name = follow_link(name);
- if (IS_ERR(name)) {
- err = PTR_ERR(name);
- goto out;
- }
- }
-
- err = read_name(ino, name);
- kfree(name);
- out:
- return err;
-}
-
static struct inode *hostfs_iget(struct super_block *sb)
{
- struct inode *inode;
- long ret;
-
- inode = iget_locked(sb, 0);
+ struct inode *inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
- if (inode->i_state & I_NEW) {
- ret = hostfs_read_inode(inode);
- if (ret < 0) {
- iget_failed(inode);
- return ERR_PTR(ret);
- }
- unlock_new_inode(inode);
- }
return inode;
}
@@ -269,7 +214,7 @@ int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
long long f_files;
long long f_ffree;
- err = do_statfs(HOSTFS_I(dentry->d_sb->s_root->d_inode)->host_filename,
+ err = do_statfs(dentry->d_sb->s_fs_info,
&sf->f_bsize, &f_blocks, &f_bfree, &f_bavail, &f_files,
&f_ffree, &sf->f_fsid, sizeof(sf->f_fsid),
&sf->f_namelen, sf->f_spare);
@@ -288,47 +233,32 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
{
struct hostfs_inode_info *hi;
- hi = kmalloc(sizeof(*hi), GFP_KERNEL);
+ hi = kzalloc(sizeof(*hi), GFP_KERNEL);
if (hi == NULL)
return NULL;
-
- *hi = ((struct hostfs_inode_info) { .host_filename = NULL,
- .fd = -1,
- .mode = 0 });
+ hi->fd = -1;
inode_init_once(&hi->vfs_inode);
return &hi->vfs_inode;
}
-static void hostfs_delete_inode(struct inode *inode)
+static void hostfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
if (HOSTFS_I(inode)->fd != -1) {
close_file(&HOSTFS_I(inode)->fd);
HOSTFS_I(inode)->fd = -1;
}
- clear_inode(inode);
}
static void hostfs_destroy_inode(struct inode *inode)
{
- kfree(HOSTFS_I(inode)->host_filename);
-
- /*
- * XXX: This should not happen, probably. The check is here for
- * additional safety.
- */
- if (HOSTFS_I(inode)->fd != -1) {
- close_file(&HOSTFS_I(inode)->fd);
- printk(KERN_DEBUG "Closing host fd in .destroy_inode\n");
- }
-
kfree(HOSTFS_I(inode));
}
static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
- struct inode *root = vfs->mnt_sb->s_root->d_inode;
- const char *root_path = HOSTFS_I(root)->host_filename;
+ const char *root_path = vfs->mnt_sb->s_fs_info;
size_t offset = strlen(root_ino) + 1;
if (strlen(root_path) > offset)
@@ -339,9 +269,8 @@ static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
static const struct super_operations hostfs_sbops = {
.alloc_inode = hostfs_alloc_inode,
- .drop_inode = generic_delete_inode,
- .delete_inode = hostfs_delete_inode,
.destroy_inode = hostfs_destroy_inode,
+ .evict_inode = hostfs_evict_inode,
.statfs = hostfs_statfs,
.show_options = hostfs_show_options,
};
@@ -353,11 +282,11 @@ int hostfs_readdir(struct file *file, void *ent, filldir_t filldir)
unsigned long long next, ino;
int error, len;
- name = dentry_name(file->f_path.dentry, 0);
+ name = dentry_name(file->f_path.dentry);
if (name == NULL)
return -ENOMEM;
dir = open_dir(name, &error);
- kfree(name);
+ __putname(name);
if (dir == NULL)
return -error;
next = file->f_pos;
@@ -373,40 +302,59 @@ int hostfs_readdir(struct file *file, void *ent, filldir_t filldir)
int hostfs_file_open(struct inode *ino, struct file *file)
{
+ static DEFINE_MUTEX(open_mutex);
char *name;
fmode_t mode = 0;
+ int err;
int r = 0, w = 0, fd;
mode = file->f_mode & (FMODE_READ | FMODE_WRITE);
if ((mode & HOSTFS_I(ino)->mode) == mode)
return 0;
- /*
- * The file may already have been opened, but with the wrong access,
- * so this resets things and reopens the file with the new access.
- */
- if (HOSTFS_I(ino)->fd != -1) {
- close_file(&HOSTFS_I(ino)->fd);
- HOSTFS_I(ino)->fd = -1;
- }
+ mode |= HOSTFS_I(ino)->mode;
- HOSTFS_I(ino)->mode |= mode;
- if (HOSTFS_I(ino)->mode & FMODE_READ)
+retry:
+ if (mode & FMODE_READ)
r = 1;
- if (HOSTFS_I(ino)->mode & FMODE_WRITE)
+ if (mode & FMODE_WRITE)
w = 1;
if (w)
r = 1;
- name = dentry_name(file->f_path.dentry, 0);
+ name = dentry_name(file->f_path.dentry);
if (name == NULL)
return -ENOMEM;
fd = open_file(name, r, w, append);
- kfree(name);
+ __putname(name);
if (fd < 0)
return fd;
- FILE_HOSTFS_I(file)->fd = fd;
+
+ mutex_lock(&open_mutex);
+ /* somebody else had handled it first? */
+ if ((mode & HOSTFS_I(ino)->mode) == mode) {
+ mutex_unlock(&open_mutex);
+ return 0;
+ }
+ if ((mode | HOSTFS_I(ino)->mode) != mode) {
+ mode |= HOSTFS_I(ino)->mode;
+ mutex_unlock(&open_mutex);
+ close_file(&fd);
+ goto retry;
+ }
+ if (HOSTFS_I(ino)->fd == -1) {
+ HOSTFS_I(ino)->fd = fd;
+ } else {
+ err = replace_file(fd, HOSTFS_I(ino)->fd);
+ close_file(&fd);
+ if (err < 0) {
+ mutex_unlock(&open_mutex);
+ return err;
+ }
+ }
+ HOSTFS_I(ino)->mode = mode;
+ mutex_unlock(&open_mutex);
return 0;
}
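
The retry logic above leans on replace_file(), added to hostfs_user.c later in this diff as a thin dup2() wrapper: dup2() atomically rebinds the already-published descriptor number to the freshly opened, wider-access file, so concurrent users of HOSTFS_I(ino)->fd never observe a closed or stale descriptor. A standalone host-side illustration of that property (a sketch, not hostfs code; "data" is an arbitrary file):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int ro = open("data", O_RDONLY);        /* number already handed out */
        int rw = open("data", O_RDWR);          /* reopen with wider access */

        if (ro < 0 || rw < 0)
                return 1;
        if (dup2(rw, ro) < 0)                   /* atomically rebind 'ro' */
                return 1;
        close(rw);
        return write(ro, "x", 1) == 1 ? 0 : 1;  /* old number is now writable */
}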
@@ -544,54 +492,50 @@ static const struct address_space_operations hostfs_aops = {
.write_end = hostfs_write_end,
};
-static int init_inode(struct inode *inode, struct dentry *dentry)
+static int read_name(struct inode *ino, char *name)
{
- char *name;
- int type, err = -ENOMEM;
- int maj, min;
- dev_t rdev = 0;
+ dev_t rdev;
+ struct hostfs_stat st;
+ int err = stat_file(name, &st, -1);
+ if (err)
+ return err;
- if (dentry) {
- name = dentry_name(dentry, 0);
- if (name == NULL)
- goto out;
- type = file_type(name, &maj, &min);
- /* Reencode maj and min with the kernel encoding.*/
- rdev = MKDEV(maj, min);
- kfree(name);
- }
- else type = OS_TYPE_DIR;
+ /* Reencode maj and min with the kernel encoding. */
+ rdev = MKDEV(st.maj, st.min);
- err = 0;
- if (type == OS_TYPE_SYMLINK)
- inode->i_op = &page_symlink_inode_operations;
- else if (type == OS_TYPE_DIR)
- inode->i_op = &hostfs_dir_iops;
- else inode->i_op = &hostfs_iops;
-
- if (type == OS_TYPE_DIR) inode->i_fop = &hostfs_dir_fops;
- else inode->i_fop = &hostfs_file_fops;
-
- if (type == OS_TYPE_SYMLINK)
- inode->i_mapping->a_ops = &hostfs_link_aops;
- else inode->i_mapping->a_ops = &hostfs_aops;
-
- switch (type) {
- case OS_TYPE_CHARDEV:
- init_special_inode(inode, S_IFCHR, rdev);
+ switch (st.mode & S_IFMT) {
+ case S_IFLNK:
+ ino->i_op = &hostfs_link_iops;
break;
- case OS_TYPE_BLOCKDEV:
- init_special_inode(inode, S_IFBLK, rdev);
+ case S_IFDIR:
+ ino->i_op = &hostfs_dir_iops;
+ ino->i_fop = &hostfs_dir_fops;
break;
- case OS_TYPE_FIFO:
- init_special_inode(inode, S_IFIFO, 0);
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ init_special_inode(ino, st.mode & S_IFMT, rdev);
+ ino->i_op = &hostfs_iops;
break;
- case OS_TYPE_SOCK:
- init_special_inode(inode, S_IFSOCK, 0);
- break;
- }
- out:
- return err;
+
+ default:
+ ino->i_op = &hostfs_iops;
+ ino->i_fop = &hostfs_file_fops;
+ ino->i_mapping->a_ops = &hostfs_aops;
+ }
+
+ ino->i_ino = st.ino;
+ ino->i_mode = st.mode;
+ ino->i_nlink = st.nlink;
+ ino->i_uid = st.uid;
+ ino->i_gid = st.gid;
+ ino->i_atime = st.atime;
+ ino->i_mtime = st.mtime;
+ ino->i_ctime = st.ctime;
+ ino->i_size = st.size;
+ ino->i_blocks = st.blocks;
+ return 0;
}
int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
@@ -607,12 +551,8 @@ int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
goto out;
}
- error = init_inode(inode, dentry);
- if (error)
- goto out_put;
-
error = -ENOMEM;
- name = dentry_name(dentry, 0);
+ name = dentry_name(dentry);
if (name == NULL)
goto out_put;
@@ -622,9 +562,10 @@ int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
mode & S_IROTH, mode & S_IWOTH, mode & S_IXOTH);
if (fd < 0)
error = fd;
- else error = read_name(inode, name);
+ else
+ error = read_name(inode, name);
- kfree(name);
+ __putname(name);
if (error)
goto out_put;
@@ -652,17 +593,14 @@ struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
goto out;
}
- err = init_inode(inode, dentry);
- if (err)
- goto out_put;
-
err = -ENOMEM;
- name = dentry_name(dentry, 0);
+ name = dentry_name(dentry);
if (name == NULL)
goto out_put;
err = read_name(inode, name);
- kfree(name);
+
+ __putname(name);
if (err == -ENOENT) {
iput(inode);
inode = NULL;
@@ -680,36 +618,21 @@ struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
return ERR_PTR(err);
}
-static char *inode_dentry_name(struct inode *ino, struct dentry *dentry)
-{
- char *file;
- int len;
-
- file = inode_name(ino, dentry->d_name.len + 1);
- if (file == NULL)
- return NULL;
- strcat(file, "/");
- len = strlen(file);
- strncat(file, dentry->d_name.name, dentry->d_name.len);
- file[len + dentry->d_name.len] = '\0';
- return file;
-}
-
int hostfs_link(struct dentry *to, struct inode *ino, struct dentry *from)
{
char *from_name, *to_name;
int err;
- if ((from_name = inode_dentry_name(ino, from)) == NULL)
+ if ((from_name = dentry_name(from)) == NULL)
return -ENOMEM;
- to_name = dentry_name(to, 0);
+ to_name = dentry_name(to);
if (to_name == NULL) {
- kfree(from_name);
+ __putname(from_name);
return -ENOMEM;
}
err = link_file(to_name, from_name);
- kfree(from_name);
- kfree(to_name);
+ __putname(from_name);
+ __putname(to_name);
return err;
}
@@ -718,13 +641,14 @@ int hostfs_unlink(struct inode *ino, struct dentry *dentry)
char *file;
int err;
- if ((file = inode_dentry_name(ino, dentry)) == NULL)
- return -ENOMEM;
if (append)
return -EPERM;
+ if ((file = dentry_name(dentry)) == NULL)
+ return -ENOMEM;
+
err = unlink_file(file);
- kfree(file);
+ __putname(file);
return err;
}
@@ -733,10 +657,10 @@ int hostfs_symlink(struct inode *ino, struct dentry *dentry, const char *to)
char *file;
int err;
- if ((file = inode_dentry_name(ino, dentry)) == NULL)
+ if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = make_symlink(file, to);
- kfree(file);
+ __putname(file);
return err;
}
@@ -745,10 +669,10 @@ int hostfs_mkdir(struct inode *ino, struct dentry *dentry, int mode)
char *file;
int err;
- if ((file = inode_dentry_name(ino, dentry)) == NULL)
+ if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = do_mkdir(file, mode);
- kfree(file);
+ __putname(file);
return err;
}
@@ -757,10 +681,10 @@ int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
char *file;
int err;
- if ((file = inode_dentry_name(ino, dentry)) == NULL)
+ if ((file = dentry_name(dentry)) == NULL)
return -ENOMEM;
err = do_rmdir(file);
- kfree(file);
+ __putname(file);
return err;
}
@@ -776,22 +700,20 @@ int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
goto out;
}
- err = init_inode(inode, dentry);
- if (err)
- goto out_put;
-
err = -ENOMEM;
- name = dentry_name(dentry, 0);
+ name = dentry_name(dentry);
if (name == NULL)
goto out_put;
init_special_inode(inode, mode, dev);
err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
if (err)
goto out_free;
err = read_name(inode, name);
- kfree(name);
+ __putname(name);
if (err)
goto out_put;
@@ -799,7 +721,7 @@ int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
return 0;
out_free:
- kfree(name);
+ __putname(name);
out_put:
iput(inode);
out:
@@ -812,15 +734,15 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from,
char *from_name, *to_name;
int err;
- if ((from_name = inode_dentry_name(from_ino, from)) == NULL)
+ if ((from_name = dentry_name(from)) == NULL)
return -ENOMEM;
- if ((to_name = inode_dentry_name(to_ino, to)) == NULL) {
- kfree(from_name);
+ if ((to_name = dentry_name(to)) == NULL) {
+ __putname(from_name);
return -ENOMEM;
}
err = rename_file(from_name, to_name);
- kfree(from_name);
- kfree(to_name);
+ __putname(from_name);
+ __putname(to_name);
return err;
}
@@ -832,7 +754,7 @@ int hostfs_permission(struct inode *ino, int desired)
if (desired & MAY_READ) r = 1;
if (desired & MAY_WRITE) w = 1;
if (desired & MAY_EXEC) x = 1;
- name = inode_name(ino, 0);
+ name = inode_name(ino);
if (name == NULL)
return -ENOMEM;
@@ -841,7 +763,7 @@ int hostfs_permission(struct inode *ino, int desired)
err = 0;
else
err = access_file(name, r, w, x);
- kfree(name);
+ __putname(name);
if (!err)
err = generic_permission(ino, desired, NULL);
return err;
@@ -849,13 +771,14 @@ int hostfs_permission(struct inode *ino, int desired)
int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
{
+ struct inode *inode = dentry->d_inode;
struct hostfs_iattr attrs;
char *name;
int err;
- int fd = HOSTFS_I(dentry->d_inode)->fd;
+ int fd = HOSTFS_I(inode)->fd;
- err = inode_change_ok(dentry->d_inode, attr);
+ err = inode_change_ok(inode, attr);
if (err)
return err;
@@ -897,15 +820,26 @@ int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & ATTR_MTIME_SET) {
attrs.ia_valid |= HOSTFS_ATTR_MTIME_SET;
}
- name = dentry_name(dentry, 0);
+ name = dentry_name(dentry);
if (name == NULL)
return -ENOMEM;
err = set_attr(name, &attrs, fd);
- kfree(name);
+ __putname(name);
if (err)
return err;
- return inode_setattr(dentry->d_inode, attr);
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ int error;
+
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
}
static const struct inode_operations hostfs_iops = {
@@ -935,32 +869,41 @@ static const struct inode_operations hostfs_dir_iops = {
.setattr = hostfs_setattr,
};
-int hostfs_link_readpage(struct file *file, struct page *page)
-{
- char *buffer, *name;
- int err;
-
- buffer = kmap(page);
- name = inode_name(page->mapping->host, 0);
- if (name == NULL)
- return -ENOMEM;
- err = hostfs_do_readlink(name, buffer, PAGE_CACHE_SIZE);
- kfree(name);
- if (err == PAGE_CACHE_SIZE)
- err = -E2BIG;
- else if (err > 0) {
- flush_dcache_page(page);
- SetPageUptodate(page);
- if (PageError(page)) ClearPageError(page);
- err = 0;
+static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ char *link = __getname();
+ if (link) {
+ char *path = dentry_name(dentry);
+ int err = -ENOMEM;
+ if (path) {
+ err = hostfs_do_readlink(path, link, PATH_MAX);
+ if (err == PATH_MAX)
+ err = -E2BIG;
+ __putname(path);
+ }
+ if (err < 0) {
+ __putname(link);
+ link = ERR_PTR(err);
+ }
+ } else {
+ link = ERR_PTR(-ENOMEM);
}
- kunmap(page);
- unlock_page(page);
- return err;
+
+ nd_set_link(nd, link);
+ return NULL;
}
-static const struct address_space_operations hostfs_link_aops = {
- .readpage = hostfs_link_readpage,
+static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+{
+ char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ __putname(s);
+}
+
+static const struct inode_operations hostfs_link_iops = {
+ .readlink = generic_readlink,
+ .follow_link = hostfs_follow_link,
+ .put_link = hostfs_put_link,
};
static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
@@ -980,49 +923,41 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
req_root = "";
err = -ENOMEM;
- host_root_path = kmalloc(strlen(root_ino) + 1
- + strlen(req_root) + 1, GFP_KERNEL);
+ sb->s_fs_info = host_root_path =
+ kmalloc(strlen(root_ino) + strlen(req_root) + 2, GFP_KERNEL);
if (host_root_path == NULL)
goto out;
sprintf(host_root_path, "%s/%s", root_ino, req_root);
- root_inode = hostfs_iget(sb);
- if (IS_ERR(root_inode)) {
- err = PTR_ERR(root_inode);
- goto out_free;
- }
+ root_inode = new_inode(sb);
+ if (!root_inode)
+ goto out;
- err = init_inode(root_inode, NULL);
+ err = read_name(root_inode, host_root_path);
if (err)
goto out_put;
- HOSTFS_I(root_inode)->host_filename = host_root_path;
- /*
- * Avoid that in the error path, iput(root_inode) frees again
- * host_root_path through hostfs_destroy_inode!
- */
- host_root_path = NULL;
+ if (S_ISLNK(root_inode->i_mode)) {
+ char *name = follow_link(host_root_path);
+ if (IS_ERR(name)) {
+ err = PTR_ERR(name);
+ } else {
+ err = read_name(root_inode, name);
+ kfree(name);
+ }
+ if (err)
+ goto out_put;
+ }
err = -ENOMEM;
sb->s_root = d_alloc_root(root_inode);
if (sb->s_root == NULL)
goto out_put;
- err = hostfs_read_inode(root_inode);
- if (err) {
- /* No iput in this case because the dput does that for us */
- dput(sb->s_root);
- sb->s_root = NULL;
- goto out;
- }
-
return 0;
out_put:
iput(root_inode);
-out_free:
- kfree(host_root_path);
out:
return err;
}
@@ -1034,11 +969,17 @@ static int hostfs_read_sb(struct file_system_type *type,
return get_sb_nodev(type, flags, data, hostfs_fill_sb_common, mnt);
}
+static void hostfs_kill_sb(struct super_block *s)
+{
+ kill_anon_super(s);
+ kfree(s->s_fs_info);
+}
+
static struct file_system_type hostfs_type = {
.owner = THIS_MODULE,
.name = "hostfs",
.get_sb = hostfs_read_sb,
- .kill_sb = kill_anon_super,
+ .kill_sb = hostfs_kill_sb,
.fs_flags = 0,
};
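
hostfs_kill_sb pairs with the s_fs_info allocation in hostfs_fill_sb_common above: now that the host root path lives in the superblock rather than in an inode, it must be freed after generic teardown, since statfs and show_options may dereference s_fs_info until the superblock is gone. The same wrapper pattern suits any filesystem with private per-sb data (sketch; myfs_kill_sb is a placeholder):

static void myfs_kill_sb(struct super_block *s)
{
        void *private = s->s_fs_info;

        kill_anon_super(s);     /* flush dentries and inodes first */
        kfree(private);         /* only now is s_fs_info unreferenced */
}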
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index b79424f9328..6777aa06ce2 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -19,11 +19,27 @@
#include "user.h"
#include <utime.h>
-int stat_file(const char *path, unsigned long long *inode_out, int *mode_out,
- int *nlink_out, int *uid_out, int *gid_out,
- unsigned long long *size_out, struct timespec *atime_out,
- struct timespec *mtime_out, struct timespec *ctime_out,
- int *blksize_out, unsigned long long *blocks_out, int fd)
+static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
+{
+ p->ino = buf->st_ino;
+ p->mode = buf->st_mode;
+ p->nlink = buf->st_nlink;
+ p->uid = buf->st_uid;
+ p->gid = buf->st_gid;
+ p->size = buf->st_size;
+ p->atime.tv_sec = buf->st_atime;
+ p->atime.tv_nsec = 0;
+ p->ctime.tv_sec = buf->st_ctime;
+ p->ctime.tv_nsec = 0;
+ p->mtime.tv_sec = buf->st_mtime;
+ p->mtime.tv_nsec = 0;
+ p->blksize = buf->st_blksize;
+ p->blocks = buf->st_blocks;
+ p->maj = os_major(buf->st_rdev);
+ p->min = os_minor(buf->st_rdev);
+}
+
+int stat_file(const char *path, struct hostfs_stat *p, int fd)
{
struct stat64 buf;
@@ -33,68 +49,10 @@ int stat_file(const char *path, unsigned long long *inode_out, int *mode_out,
} else if (lstat64(path, &buf) < 0) {
return -errno;
}
-
- if (inode_out != NULL)
- *inode_out = buf.st_ino;
- if (mode_out != NULL)
- *mode_out = buf.st_mode;
- if (nlink_out != NULL)
- *nlink_out = buf.st_nlink;
- if (uid_out != NULL)
- *uid_out = buf.st_uid;
- if (gid_out != NULL)
- *gid_out = buf.st_gid;
- if (size_out != NULL)
- *size_out = buf.st_size;
- if (atime_out != NULL) {
- atime_out->tv_sec = buf.st_atime;
- atime_out->tv_nsec = 0;
- }
- if (mtime_out != NULL) {
- mtime_out->tv_sec = buf.st_mtime;
- mtime_out->tv_nsec = 0;
- }
- if (ctime_out != NULL) {
- ctime_out->tv_sec = buf.st_ctime;
- ctime_out->tv_nsec = 0;
- }
- if (blksize_out != NULL)
- *blksize_out = buf.st_blksize;
- if (blocks_out != NULL)
- *blocks_out = buf.st_blocks;
+ stat64_to_hostfs(&buf, p);
return 0;
}
-int file_type(const char *path, int *maj, int *min)
-{
- struct stat64 buf;
-
- if (lstat64(path, &buf) < 0)
- return -errno;
- /*
- * We cannot pass rdev as is because glibc and the kernel disagree
- * about its definition.
- */
- if (maj != NULL)
- *maj = major(buf.st_rdev);
- if (min != NULL)
- *min = minor(buf.st_rdev);
-
- if (S_ISDIR(buf.st_mode))
- return OS_TYPE_DIR;
- else if (S_ISLNK(buf.st_mode))
- return OS_TYPE_SYMLINK;
- else if (S_ISCHR(buf.st_mode))
- return OS_TYPE_CHARDEV;
- else if (S_ISBLK(buf.st_mode))
- return OS_TYPE_BLOCKDEV;
- else if (S_ISFIFO(buf.st_mode))
- return OS_TYPE_FIFO;
- else if (S_ISSOCK(buf.st_mode))
- return OS_TYPE_SOCK;
- else return OS_TYPE_FILE;
-}
-
int access_file(char *path, int r, int w, int x)
{
int mode = 0;
@@ -202,6 +160,11 @@ int fsync_file(int fd, int datasync)
return 0;
}
+int replace_file(int oldfd, int fd)
+{
+ return dup2(oldfd, fd);
+}
+
void close_file(void *stream)
{
close(*((int *) stream));
@@ -235,8 +198,8 @@ int file_create(char *name, int ur, int uw, int ux, int gr,
int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
{
+ struct hostfs_stat st;
struct timeval times[2];
- struct timespec atime_ts, mtime_ts;
int err, ma;
if (attrs->ia_valid & HOSTFS_ATTR_MODE) {
@@ -279,15 +242,14 @@ int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
*/
ma = (HOSTFS_ATTR_ATIME_SET | HOSTFS_ATTR_MTIME_SET);
if (attrs->ia_valid & ma) {
- err = stat_file(file, NULL, NULL, NULL, NULL, NULL, NULL,
- &atime_ts, &mtime_ts, NULL, NULL, NULL, fd);
+ err = stat_file(file, &st, fd);
if (err != 0)
return err;
- times[0].tv_sec = atime_ts.tv_sec;
- times[0].tv_usec = atime_ts.tv_nsec / 1000;
- times[1].tv_sec = mtime_ts.tv_sec;
- times[1].tv_usec = mtime_ts.tv_nsec / 1000;
+ times[0].tv_sec = st.atime.tv_sec;
+ times[0].tv_usec = st.atime.tv_nsec / 1000;
+ times[1].tv_sec = st.mtime.tv_sec;
+ times[1].tv_usec = st.mtime.tv_nsec / 1000;
if (attrs->ia_valid & HOSTFS_ATTR_ATIME_SET) {
times[0].tv_sec = attrs->ia_atime.tv_sec;
@@ -308,9 +270,9 @@ int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
/* Note: ctime is not handled */
if (attrs->ia_valid & (HOSTFS_ATTR_ATIME | HOSTFS_ATTR_MTIME)) {
- err = stat_file(file, NULL, NULL, NULL, NULL, NULL, NULL,
- &attrs->ia_atime, &attrs->ia_mtime, NULL,
- NULL, NULL, fd);
+ err = stat_file(file, &st, fd);
+ attrs->ia_atime = st.atime;
+ attrs->ia_mtime = st.mtime;
if (err != 0)
return err;
}
@@ -361,7 +323,7 @@ int do_mknod(const char *file, int mode, unsigned int major, unsigned int minor)
{
int err;
- err = mknod(file, mode, makedev(major, minor));
+ err = mknod(file, mode, os_makedev(major, minor));
if (err)
return -errno;
return 0;
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index a9ae9bfa752..c0340887c7e 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -97,10 +97,19 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
+ int ret;
+
*pagep = NULL;
- return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
hpfs_get_block,
&hpfs_i(mapping->host)->mmu_private);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 75f9d432485..b59eac0232a 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -281,7 +281,7 @@ void hpfs_write_inode(struct inode *);
void hpfs_write_inode_nolock(struct inode *);
int hpfs_setattr(struct dentry *, struct iattr *);
void hpfs_write_if_changed(struct inode *);
-void hpfs_delete_inode(struct inode *);
+void hpfs_evict_inode(struct inode *);
/* map.c */
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 1042a9bc97f..56f0da1cfd1 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -277,9 +277,15 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
if (error)
goto out_unlock;
- error = inode_setattr(inode, attr);
- if (error)
- goto out_unlock;
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ goto out_unlock;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
hpfs_write_inode(inode);
@@ -296,11 +302,13 @@ void hpfs_write_if_changed(struct inode *inode)
hpfs_write_inode(inode);
}
-void hpfs_delete_inode(struct inode *inode)
+void hpfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- lock_kernel();
- hpfs_remove_fnode(inode->i_sb, inode->i_ino);
- unlock_kernel();
- clear_inode(inode);
+ end_writeback(inode);
+ if (!inode->i_nlink) {
+ lock_kernel();
+ hpfs_remove_fnode(inode->i_sb, inode->i_ino);
+ unlock_kernel();
+ }
}
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index aa53842c599..2607010be2f 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -450,7 +450,7 @@ static const struct super_operations hpfs_sops =
{
.alloc_inode = hpfs_alloc_inode,
.destroy_inode = hpfs_destroy_inode,
- .delete_inode = hpfs_delete_inode,
+ .evict_inode = hpfs_evict_inode,
.put_super = hpfs_put_super,
.statfs = hpfs_statfs,
.remount_fs = hpfs_remount_fs,
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 826c3f9d29a..7b027720d82 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/types.h>
+#include <linux/pid_namespace.h>
#include <asm/uaccess.h>
#include "os.h"
@@ -623,12 +624,11 @@ static struct inode *hppfs_alloc_inode(struct super_block *sb)
return &hi->vfs_inode;
}
-void hppfs_delete_inode(struct inode *ino)
+void hppfs_evict_inode(struct inode *ino)
{
+ end_writeback(ino);
dput(HPPFS_I(ino)->proc_dentry);
mntput(ino->i_sb->s_fs_info);
-
- clear_inode(ino);
}
static void hppfs_destroy_inode(struct inode *inode)
@@ -639,7 +639,7 @@ static void hppfs_destroy_inode(struct inode *inode)
static const struct super_operations hppfs_sbops = {
.alloc_inode = hppfs_alloc_inode,
.destroy_inode = hppfs_destroy_inode,
- .delete_inode = hppfs_delete_inode,
+ .evict_inode = hppfs_evict_inode,
.statfs = hppfs_statfs,
};
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a4e9a7ec369..6e5bd42f386 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -371,27 +371,10 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
hugetlb_unreserve_pages(inode, start, freed);
}
-static void hugetlbfs_delete_inode(struct inode *inode)
+static void hugetlbfs_evict_inode(struct inode *inode)
{
truncate_hugepages(inode, 0);
- clear_inode(inode);
-}
-
-static void hugetlbfs_forget_inode(struct inode *inode) __releases(inode_lock)
-{
- if (generic_detach_inode(inode)) {
- truncate_hugepages(inode, 0);
- clear_inode(inode);
- destroy_inode(inode);
- }
-}
-
-static void hugetlbfs_drop_inode(struct inode *inode)
-{
- if (!inode->i_nlink)
- generic_delete_inode(inode);
- else
- hugetlbfs_forget_inode(inode);
+ end_writeback(inode);
}
static inline void
@@ -448,19 +431,20 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
error = inode_change_ok(inode, attr);
if (error)
- goto out;
+ return error;
if (ia_valid & ATTR_SIZE) {
error = -EINVAL;
- if (!(attr->ia_size & ~huge_page_mask(h)))
- error = hugetlb_vmtruncate(inode, attr->ia_size);
+ if (attr->ia_size & ~huge_page_mask(h))
+ return -EINVAL;
+ error = hugetlb_vmtruncate(inode, attr->ia_size);
if (error)
- goto out;
- attr->ia_valid &= ~ATTR_SIZE;
+ return error;
}
- error = inode_setattr(inode, attr);
-out:
- return error;
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
}
static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
@@ -712,9 +696,8 @@ static const struct inode_operations hugetlbfs_inode_operations = {
static const struct super_operations hugetlbfs_ops = {
.alloc_inode = hugetlbfs_alloc_inode,
.destroy_inode = hugetlbfs_destroy_inode,
+ .evict_inode = hugetlbfs_evict_inode,
.statfs = hugetlbfs_statfs,
- .delete_inode = hugetlbfs_delete_inode,
- .drop_inode = hugetlbfs_drop_inode,
.put_super = hugetlbfs_put_super,
.show_options = generic_show_options,
};
diff --git a/fs/inode.c b/fs/inode.c
index 2bee20ae3d6..86464332e59 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -20,7 +20,6 @@
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
-#include <linux/inotify.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
@@ -264,12 +263,8 @@ void inode_init_once(struct inode *inode)
INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
i_size_ordered_init(inode);
-#ifdef CONFIG_INOTIFY
- INIT_LIST_HEAD(&inode->inotify_watches);
- mutex_init(&inode->inotify_mutex);
-#endif
#ifdef CONFIG_FSNOTIFY
- INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
+ INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);
@@ -294,32 +289,34 @@ void __iget(struct inode *inode)
inodes_stat.nr_unused--;
}
-/**
- * clear_inode - clear an inode
- * @inode: inode to clear
- *
- * This is called by the filesystem to tell us
- * that the inode is no longer useful. We just
- * terminate it with extreme prejudice.
- */
-void clear_inode(struct inode *inode)
+void end_writeback(struct inode *inode)
{
might_sleep();
- invalidate_inode_buffers(inode);
-
BUG_ON(inode->i_data.nrpages);
+ BUG_ON(!list_empty(&inode->i_data.private_list));
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
inode_sync_wait(inode);
- if (inode->i_sb->s_op->clear_inode)
- inode->i_sb->s_op->clear_inode(inode);
+ inode->i_state = I_FREEING | I_CLEAR;
+}
+EXPORT_SYMBOL(end_writeback);
+
+static void evict(struct inode *inode)
+{
+ const struct super_operations *op = inode->i_sb->s_op;
+
+ if (op->evict_inode) {
+ op->evict_inode(inode);
+ } else {
+ if (inode->i_data.nrpages)
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+ }
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
bd_forget(inode);
if (S_ISCHR(inode->i_mode) && inode->i_cdev)
cd_forget(inode);
- inode->i_state = I_CLEAR;
}
-EXPORT_SYMBOL(clear_inode);
/*
* dispose_list - dispose of the contents of a local list
@@ -338,9 +335,7 @@ static void dispose_list(struct list_head *head)
inode = list_first_entry(head, struct inode, i_list);
list_del(&inode->i_list);
- if (inode->i_data.nrpages)
- truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
+ evict(inode);
spin_lock(&inode_lock);
hlist_del_init(&inode->i_hash);
@@ -413,7 +408,6 @@ int invalidate_inodes(struct super_block *sb)
down_write(&iprune_sem);
spin_lock(&inode_lock);
- inotify_unmount_inodes(&sb->s_inodes);
fsnotify_unmount_inodes(&sb->s_inodes);
busy = invalidate_list(&sb->s_inodes, &throw_away);
spin_unlock(&inode_lock);
@@ -512,7 +506,7 @@ static void prune_icache(int nr_to_scan)
* This function is passed the number of inodes to scan, and it returns the
* total number of remaining possibly-reclaimable inodes.
*/
-static int shrink_icache_memory(int nr, gfp_t gfp_mask)
+static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
if (nr) {
/*
@@ -553,7 +547,7 @@ repeat:
continue;
if (!test(inode, data))
continue;
- if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+ if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
__wait_on_freeing_inode(inode);
goto repeat;
}
@@ -578,7 +572,7 @@ repeat:
continue;
if (inode->i_sb != sb)
continue;
- if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+ if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
__wait_on_freeing_inode(inode);
goto repeat;
}
@@ -840,7 +834,7 @@ EXPORT_SYMBOL(iunique);
struct inode *igrab(struct inode *inode)
{
spin_lock(&inode_lock);
- if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)))
+ if (!(inode->i_state & (I_FREEING|I_WILL_FREE)))
__iget(inode);
else
/*
@@ -1089,7 +1083,7 @@ int insert_inode_locked(struct inode *inode)
continue;
if (old->i_sb != sb)
continue;
- if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+ if (old->i_state & (I_FREEING|I_WILL_FREE))
continue;
break;
}
@@ -1128,7 +1122,7 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
continue;
if (!test(old, data))
continue;
- if (old->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+ if (old->i_state & (I_FREEING|I_WILL_FREE))
continue;
break;
}
@@ -1180,69 +1174,51 @@ void remove_inode_hash(struct inode *inode)
}
EXPORT_SYMBOL(remove_inode_hash);
+int generic_delete_inode(struct inode *inode)
+{
+ return 1;
+}
+EXPORT_SYMBOL(generic_delete_inode);
+
/*
- * Tell the filesystem that this inode is no longer of any interest and should
- * be completely destroyed.
- *
- * We leave the inode in the inode hash table until *after* the filesystem's
- * ->delete_inode completes. This ensures that an iget (such as nfsd might
- * instigate) will always find up-to-date information either in the hash or on
- * disk.
- *
- * I_FREEING is set so that no-one will take a new reference to the inode while
- * it is being deleted.
+ * Normal UNIX filesystem behaviour: delete the
+ * inode when the usage count drops to zero, and
+ * i_nlink is zero.
*/
-void generic_delete_inode(struct inode *inode)
+int generic_drop_inode(struct inode *inode)
{
- const struct super_operations *op = inode->i_sb->s_op;
-
- list_del_init(&inode->i_list);
- list_del_init(&inode->i_sb_list);
- WARN_ON(inode->i_state & I_NEW);
- inode->i_state |= I_FREEING;
- inodes_stat.nr_inodes--;
- spin_unlock(&inode_lock);
-
- if (op->delete_inode) {
- void (*delete)(struct inode *) = op->delete_inode;
- /* Filesystems implementing their own
- * s_op->delete_inode are required to call
- * truncate_inode_pages and clear_inode()
- * internally */
- delete(inode);
- } else {
- truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
- }
- spin_lock(&inode_lock);
- hlist_del_init(&inode->i_hash);
- spin_unlock(&inode_lock);
- wake_up_inode(inode);
- BUG_ON(inode->i_state != I_CLEAR);
- destroy_inode(inode);
+ return !inode->i_nlink || hlist_unhashed(&inode->i_hash);
}
-EXPORT_SYMBOL(generic_delete_inode);
+EXPORT_SYMBOL_GPL(generic_drop_inode);
-/**
- * generic_detach_inode - remove inode from inode lists
- * @inode: inode to remove
- *
- * Remove inode from inode lists, write it if it's dirty. This is just an
- * internal VFS helper exported for hugetlbfs. Do not use!
+/*
+ * Called when we're dropping the last reference
+ * to an inode.
*
- * Returns 1 if inode should be completely destroyed.
+ * Call the FS "drop_inode()" function, defaulting to
+ * the legacy UNIX filesystem behaviour. If it tells
+ * us to evict inode, do so. Otherwise, retain inode
+ * in cache if fs is alive, sync and evict if fs is
+ * shutting down.
*/
-int generic_detach_inode(struct inode *inode)
+static void iput_final(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
+ const struct super_operations *op = inode->i_sb->s_op;
+ int drop;
- if (!hlist_unhashed(&inode->i_hash)) {
+ if (op && op->drop_inode)
+ drop = op->drop_inode(inode);
+ else
+ drop = generic_drop_inode(inode);
+
+ if (!drop) {
if (!(inode->i_state & (I_DIRTY|I_SYNC)))
list_move(&inode->i_list, &inode_unused);
inodes_stat.nr_unused++;
if (sb->s_flags & MS_ACTIVE) {
spin_unlock(&inode_lock);
- return 0;
+ return;
}
WARN_ON(inode->i_state & I_NEW);
inode->i_state |= I_WILL_FREE;
@@ -1260,56 +1236,15 @@ int generic_detach_inode(struct inode *inode)
inode->i_state |= I_FREEING;
inodes_stat.nr_inodes--;
spin_unlock(&inode_lock);
- return 1;
-}
-EXPORT_SYMBOL_GPL(generic_detach_inode);
-
-static void generic_forget_inode(struct inode *inode)
-{
- if (!generic_detach_inode(inode))
- return;
- if (inode->i_data.nrpages)
- truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
+ evict(inode);
+ spin_lock(&inode_lock);
+ hlist_del_init(&inode->i_hash);
+ spin_unlock(&inode_lock);
wake_up_inode(inode);
+ BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
destroy_inode(inode);
}
-/*
- * Normal UNIX filesystem behaviour: delete the
- * inode when the usage count drops to zero, and
- * i_nlink is zero.
- */
-void generic_drop_inode(struct inode *inode)
-{
- if (!inode->i_nlink)
- generic_delete_inode(inode);
- else
- generic_forget_inode(inode);
-}
-EXPORT_SYMBOL_GPL(generic_drop_inode);
-
-/*
- * Called when we're dropping the last reference
- * to an inode.
- *
- * Call the FS "drop()" function, defaulting to
- * the legacy UNIX filesystem behaviour..
- *
- * NOTE! NOTE! NOTE! We're called with the inode lock
- * held, and the drop function is supposed to release
- * the lock!
- */
-static inline void iput_final(struct inode *inode)
-{
- const struct super_operations *op = inode->i_sb->s_op;
- void (*drop)(struct inode *) = generic_drop_inode;
-
- if (op && op->drop_inode)
- drop = op->drop_inode;
- drop(inode);
-}
-
/**
* iput - put an inode
* @inode: inode to put
@@ -1322,7 +1257,7 @@ static inline void iput_final(struct inode *inode)
void iput(struct inode *inode)
{
if (inode) {
- BUG_ON(inode->i_state == I_CLEAR);
+ BUG_ON(inode->i_state & I_CLEAR);
if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
iput_final(inode);
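
A note on the new contract, for readers following the fs/inode.c hunks above: ->drop_inode() is now a pure predicate called under inode_lock; it returns non-zero to have iput_final() evict the inode immediately, and zero to leave it cached while the superblock stays MS_ACTIVE. A minimal sketch of a filesystem opting out of inode caching; example_sops is an illustrative name, not part of this patch:

static const struct super_operations example_sops = {
	/* generic_delete_inode() now simply returns 1: always evict */
	.drop_inode	= generic_delete_inode,
};

Leaving .drop_inode NULL selects generic_drop_inode(), which evicts only when i_nlink is zero or the inode has been unhashed.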
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 2d140a71386..f855ea4fc88 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -29,7 +29,6 @@
* @arg: command-specific argument for ioctl
*
* Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
- * invokes filesystem specific ->ioctl method. If neither method exists,
* returns -ENOTTY.
*
* Returns 0 on success, -errno on error.
@@ -39,21 +38,12 @@ static long vfs_ioctl(struct file *filp, unsigned int cmd,
{
int error = -ENOTTY;
- if (!filp->f_op)
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
goto out;
- if (filp->f_op->unlocked_ioctl) {
- error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
- if (error == -ENOIOCTLCMD)
- error = -EINVAL;
- goto out;
- } else if (filp->f_op->ioctl) {
- lock_kernel();
- error = filp->f_op->ioctl(filp->f_path.dentry->d_inode,
- filp, cmd, arg);
- unlock_kernel();
- }
-
+ error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
+ if (error == -ENOIOCTLCMD)
+ error = -EINVAL;
out:
return error;
}
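
With the BKL-wrapped ->ioctl fallback gone, vfs_ioctl() only ever calls ->unlocked_ioctl, so any remaining driver must provide one and do its own locking. A hedged sketch of the shape such a handler takes; example_unlocked_ioctl, example_fops and EXAMPLE_IOC_RESET are illustrative names, not from this patch:

#include <linux/fs.h>

static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
				   unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_IOC_RESET:		/* hypothetical command number */
		/* take driver-private locks here; the BKL is no longer held */
		return 0;
	default:
		return -ENOIOCTLCMD;	/* vfs_ioctl() maps this to -EINVAL */
	}
}

static const struct file_operations example_fops = {
	.unlocked_ioctl	= example_unlocked_ioctl,
};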
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 6b4dcd4f294..5a44811b502 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -722,7 +722,12 @@ root_found:
}
s->s_magic = ISOFS_SUPER_MAGIC;
- s->s_maxbytes = 0xffffffff; /* We can handle files up to 4 GB */
+
+ /*
+ * With multi-extent files, file size is only limited by the maximum
+ * size of a file system, which is 8 TB.
+ */
+ s->s_maxbytes = 0x80000000000LL;
/*
* The CDROM is read-only, has no nodes (devices) on it, and since
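
For reference, the new limit is exact: 0x80000000000 = 2^43 bytes = 8 * 2^40 bytes, i.e. 8 TiB (the "8 TB" of the comment). If a compile-time check were wanted, a sketch (BUILD_BUG_ON must sit in function scope):

	/* illustrative only: the new isofs s_maxbytes is 2^43 bytes */
	BUILD_BUG_ON(0x80000000000LL != (1LL << 43));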
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 93d1e47647b..f19ce94693d 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1281,13 +1281,9 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
int journal_check_available_features (journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
- journal_superblock_t *sb;
-
if (!compat && !ro && !incompat)
return 1;
- sb = journal->j_superblock;
-
/* We can support any known requested features iff the
* superblock is in version 2. Otherwise we fail to support any
* extended sb features. */
@@ -1481,7 +1477,6 @@ int journal_flush(journal_t *journal)
int journal_wipe(journal_t *journal, int write)
{
- journal_superblock_t *sb;
int err = 0;
J_ASSERT (!(journal->j_flags & JFS_LOADED));
@@ -1490,8 +1485,6 @@ int journal_wipe(journal_t *journal, int write)
if (err)
return err;
- sb = journal->j_superblock;
-
if (!journal->j_tail)
goto no_recovery;
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 54c9bc9e1b1..81051dafebf 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -283,12 +283,9 @@ int journal_recover(journal_t *journal)
int journal_skip_recovery(journal_t *journal)
{
int err;
- journal_superblock_t * sb;
-
struct recovery_info info;
memset (&info, 0, sizeof(info));
- sb = journal->j_superblock;
err = do_one_pass(journal, &info, PASS_SCAN);
@@ -297,7 +294,8 @@ int journal_skip_recovery(journal_t *journal)
++journal->j_transaction_sequence;
} else {
#ifdef CONFIG_JBD_DEBUG
- int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
+ int dropped = info.end_transaction -
+ be32_to_cpu(journal->j_superblock->s_sequence);
#endif
jbd_debug(1,
"JBD: ignoring %d transaction%s from the journal.\n",
@@ -321,11 +319,6 @@ static int do_one_pass(journal_t *journal,
unsigned int sequence;
int blocktype;
- /* Precompute the maximum metadata descriptors in a descriptor block */
- int MAX_BLOCKS_PER_DESC;
- MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
- / sizeof(journal_block_tag_t));
-
/*
* First thing is to establish what we expect to find in the log
* (in terms of transaction IDs), and where (in terms of log
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 076d1cc44f9..1c23a0f4e8a 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -118,13 +118,13 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
void __jbd2_log_wait_for_space(journal_t *journal)
{
int nblocks, space_left;
- assert_spin_locked(&journal->j_state_lock);
+ /* assert_spin_locked(&journal->j_state_lock); */
nblocks = jbd_space_needed(journal);
while (__jbd2_log_space_left(journal) < nblocks) {
if (journal->j_flags & JBD2_ABORT)
return;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
mutex_lock(&journal->j_checkpoint_mutex);
/*
@@ -138,7 +138,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
* filesystem, so abort the journal and leave a stack
* trace for forensic evidence.
*/
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
nblocks = jbd_space_needed(journal);
space_left = __jbd2_log_space_left(journal);
@@ -149,7 +149,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
if (journal->j_committing_transaction)
tid = journal->j_committing_transaction->t_tid;
spin_unlock(&journal->j_list_lock);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
if (chkpt) {
jbd2_log_do_checkpoint(journal);
} else if (jbd2_cleanup_journal_tail(journal) == 0) {
@@ -167,7 +167,7 @@ void __jbd2_log_wait_for_space(journal_t *journal)
WARN_ON(1);
jbd2_journal_abort(journal, 0);
}
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
} else {
spin_unlock(&journal->j_list_lock);
}
@@ -474,7 +474,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
* next transaction ID we will write, and where it will
* start. */
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
transaction = journal->j_checkpoint_transactions;
if (transaction) {
@@ -496,7 +496,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
/* If the oldest pinned transaction is at the tail of the log
already then there's not much we can do right now. */
if (journal->j_tail_sequence == first_tid) {
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return 1;
}
@@ -516,7 +516,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
journal->j_free += freed;
journal->j_tail_sequence = first_tid;
journal->j_tail = blocknr;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
/*
* If there is an external journal, we need to make sure that
@@ -775,7 +775,7 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact
J_ASSERT(transaction->t_log_list == NULL);
J_ASSERT(transaction->t_checkpoint_list == NULL);
J_ASSERT(transaction->t_checkpoint_io_list == NULL);
- J_ASSERT(transaction->t_updates == 0);
+ J_ASSERT(atomic_read(&transaction->t_updates) == 0);
J_ASSERT(journal->j_committing_transaction != transaction);
J_ASSERT(journal->j_running_transaction != transaction);
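
The j_state_lock churn in this and the following jbd2 files is one mechanical conversion: the lock changes from spinlock_t to rwlock_t, so paths that only sample journal state take it shared and may run concurrently, while paths that modify state take it exclusive. A self-contained sketch of the pattern, with illustrative names (not patch code):

#include <linux/spinlock.h>

struct example_state {
	rwlock_t	lock;
	unsigned long	flags;
};

static void example_state_init(struct example_state *s)
{
	rwlock_init(&s->lock);		/* was: spin_lock_init() */
	s->flags = 0;
}

static unsigned long example_state_peek(struct example_state *s)
{
	unsigned long flags;

	read_lock(&s->lock);		/* readers no longer exclude each other */
	flags = s->flags;
	read_unlock(&s->lock);
	return flags;
}

static void example_state_set(struct example_state *s, unsigned long bit)
{
	write_lock(&s->lock);		/* writers still exclude everyone */
	s->flags |= bit;
	write_unlock(&s->lock);
}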
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 75716d3d2be..f52e5e8049f 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -150,11 +150,11 @@ static int journal_submit_commit_record(journal_t *journal,
*/
if (ret == -EOPNOTSUPP && barrier_done) {
printk(KERN_WARNING
- "JBD: barrier-based sync failed on %s - "
- "disabling barriers\n", journal->j_devname);
- spin_lock(&journal->j_state_lock);
+ "JBD2: Disabling barriers on %s, "
+ "not supported by device\n", journal->j_devname);
+ write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_BARRIER;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
/* And try again, without the barrier */
lock_buffer(bh);
@@ -180,11 +180,11 @@ retry:
wait_on_buffer(bh);
if (buffer_eopnotsupp(bh) && (journal->j_flags & JBD2_BARRIER)) {
printk(KERN_WARNING
- "JBD2: wait_on_commit_record: sync failed on %s - "
- "disabling barriers\n", journal->j_devname);
- spin_lock(&journal->j_state_lock);
+ "JBD2: %s: disabling barries on %s - not supported "
+ "by device\n", __func__, journal->j_devname);
+ write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_BARRIER;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
lock_buffer(bh);
clear_buffer_dirty(bh);
@@ -400,7 +400,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
jbd_debug(1, "JBD: starting commit of transaction %d\n",
commit_transaction->t_tid);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;
/*
@@ -417,23 +417,23 @@ void jbd2_journal_commit_transaction(journal_t *journal)
stats.run.rs_locked);
spin_lock(&commit_transaction->t_handle_lock);
- while (commit_transaction->t_updates) {
+ while (atomic_read(&commit_transaction->t_updates)) {
DEFINE_WAIT(wait);
prepare_to_wait(&journal->j_wait_updates, &wait,
TASK_UNINTERRUPTIBLE);
- if (commit_transaction->t_updates) {
+ if (atomic_read(&commit_transaction->t_updates)) {
spin_unlock(&commit_transaction->t_handle_lock);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
schedule();
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
spin_lock(&commit_transaction->t_handle_lock);
}
finish_wait(&journal->j_wait_updates, &wait);
}
spin_unlock(&commit_transaction->t_handle_lock);
- J_ASSERT (commit_transaction->t_outstanding_credits <=
+ J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
journal->j_max_transaction_buffers);
/*
@@ -497,7 +497,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
start_time = ktime_get();
commit_transaction->t_log_start = journal->j_head;
wake_up(&journal->j_wait_transaction_locked);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
jbd_debug (3, "JBD: commit phase 2\n");
@@ -519,19 +519,20 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* transaction! Now comes the tricky part: we need to write out
* metadata. Loop over the transaction's entire buffer list:
*/
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
commit_transaction->t_state = T_COMMIT;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
trace_jbd2_commit_logging(journal, commit_transaction);
stats.run.rs_logging = jiffies;
stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
stats.run.rs_logging);
- stats.run.rs_blocks = commit_transaction->t_outstanding_credits;
+ stats.run.rs_blocks =
+ atomic_read(&commit_transaction->t_outstanding_credits);
stats.run.rs_blocks_logged = 0;
J_ASSERT(commit_transaction->t_nr_buffers <=
- commit_transaction->t_outstanding_credits);
+ atomic_read(&commit_transaction->t_outstanding_credits));
err = 0;
descriptor = NULL;
@@ -616,7 +617,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* the free space in the log, but this counter is changed
* by jbd2_journal_next_log_block() also.
*/
- commit_transaction->t_outstanding_credits--;
+ atomic_dec(&commit_transaction->t_outstanding_credits);
/* Bump b_count to prevent truncate from stumbling over
the shadowed buffer! @@@ This can go if we ever get
@@ -977,7 +978,7 @@ restart_loop:
* __jbd2_journal_drop_transaction(). Otherwise we could race with
* other checkpointing code processing the transaction...
*/
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
/*
* Now recheck if some buffers did not get attached to the transaction
@@ -985,7 +986,7 @@ restart_loop:
*/
if (commit_transaction->t_forget) {
spin_unlock(&journal->j_list_lock);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
goto restart_loop;
}
@@ -1003,7 +1004,8 @@ restart_loop:
* File the transaction statistics
*/
stats.ts_tid = commit_transaction->t_tid;
- stats.run.rs_handle_count = commit_transaction->t_handle_count;
+ stats.run.rs_handle_count =
+ atomic_read(&commit_transaction->t_handle_count);
trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
commit_transaction->t_tid, &stats.run);
@@ -1037,7 +1039,7 @@ restart_loop:
journal->j_average_commit_time*3) / 4;
else
journal->j_average_commit_time = commit_time;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
if (commit_transaction->t_checkpoint_list == NULL &&
commit_transaction->t_checkpoint_io_list == NULL) {
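
The commit.c hunks also show the patch's second mechanical conversion: t_updates, t_outstanding_credits and t_handle_count become atomic_t so they can be adjusted without taking t_handle_lock. A hedged sketch of the idiom with illustrative names; the reserve/release pairing mirrors what start_this_handle() and jbd2_journal_stop() do:

#include <linux/wait.h>
#include <asm/atomic.h>

static atomic_t updates = ATOMIC_INIT(0);
static atomic_t credits = ATOMIC_INIT(0);

static int example_reserve(int nblocks, int limit)
{
	/* add and observe the new total in a single atomic step */
	if (atomic_add_return(nblocks, &credits) > limit) {
		atomic_sub(nblocks, &credits);	/* back out the overcommit */
		return -EAGAIN;
	}
	atomic_inc(&updates);		/* was: t_updates++ under t_handle_lock */
	return 0;
}

static void example_release(int nblocks, wait_queue_head_t *wq)
{
	atomic_sub(nblocks, &credits);
	if (atomic_dec_and_test(&updates))	/* last updater wakes waiters */
		wake_up(wq);
}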
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index bc2ff593276..ad5866aaf0f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -41,6 +41,7 @@
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/vmalloc.h>
+#include <linux/backing-dev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/jbd2.h>
@@ -48,8 +49,6 @@
#include <asm/uaccess.h>
#include <asm/page.h>
-EXPORT_SYMBOL(jbd2_journal_start);
-EXPORT_SYMBOL(jbd2_journal_restart);
EXPORT_SYMBOL(jbd2_journal_extend);
EXPORT_SYMBOL(jbd2_journal_stop);
EXPORT_SYMBOL(jbd2_journal_lock_updates);
@@ -143,7 +142,7 @@ static int kjournald2(void *arg)
/*
* And now, wait forever for commit wakeup events.
*/
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
loop:
if (journal->j_flags & JBD2_UNMOUNT)
@@ -154,10 +153,10 @@ loop:
if (journal->j_commit_sequence != journal->j_commit_request) {
jbd_debug(1, "OK, requests differ\n");
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
jbd2_journal_commit_transaction(journal);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
goto loop;
}
@@ -169,9 +168,9 @@ loop:
* be already stopped.
*/
jbd_debug(1, "Now suspending kjournald2\n");
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
refrigerator();
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
} else {
/*
* We assume on resume that commits are already there,
@@ -191,9 +190,9 @@ loop:
if (journal->j_flags & JBD2_UNMOUNT)
should_sleep = 0;
if (should_sleep) {
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
schedule();
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
}
finish_wait(&journal->j_wait_commit, &wait);
}
@@ -211,7 +210,7 @@ loop:
goto loop;
end_loop:
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
@@ -234,16 +233,16 @@ static int jbd2_journal_start_thread(journal_t *journal)
static void journal_kill_thread(journal_t *journal)
{
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_flags |= JBD2_UNMOUNT;
while (journal->j_task) {
wake_up(&journal->j_wait_commit);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_done_commit, journal->j_task == NULL);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
}
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
/*
@@ -297,7 +296,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct page *new_page;
unsigned int new_offset;
struct buffer_head *bh_in = jh2bh(jh_in);
- struct jbd2_buffer_trigger_type *triggers;
journal_t *journal = transaction->t_journal;
/*
@@ -311,7 +309,17 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
*/
J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
- new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+retry_alloc:
+ new_bh = alloc_buffer_head(GFP_NOFS);
+ if (!new_bh) {
+ /*
+ * Failure is not an option, but __GFP_NOFAIL is going
+ * away; so we retry ourselves here.
+ */
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_alloc;
+ }
+
/* keep subsequent assertions sane */
new_bh->b_state = 0;
init_buffer(new_bh, NULL, NULL);
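
The retry_alloc loop just added is the open-coded replacement for __GFP_NOFAIL that this patch standardises on. In isolation the idiom looks like this (a sketch; HZ/50 is roughly a 20 ms back-off):

#include <linux/buffer_head.h>
#include <linux/backing-dev.h>

static struct buffer_head *example_alloc_bh_nofail(void)
{
	struct buffer_head *bh;

	for (;;) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (bh)
			return bh;
		/* allocator under pressure: wait for writeback to make progress */
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	}
}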
@@ -328,21 +336,21 @@ repeat:
done_copy_out = 1;
new_page = virt_to_page(jh_in->b_frozen_data);
new_offset = offset_in_page(jh_in->b_frozen_data);
- triggers = jh_in->b_frozen_triggers;
} else {
new_page = jh2bh(jh_in)->b_page;
new_offset = offset_in_page(jh2bh(jh_in)->b_data);
- triggers = jh_in->b_triggers;
}
mapped_data = kmap_atomic(new_page, KM_USER0);
/*
- * Fire any commit trigger. Do this before checking for escaping,
- * as the trigger may modify the magic offset. If a copy-out
- * happens afterwards, it will have the correct data in the buffer.
+ * Fire the data-frozen trigger if the data wasn't already frozen. Do this
+ * before checking for escaping, as the trigger may modify the magic
+ * offset. If a copy-out happens afterwards, it will have the correct
+ * data in the buffer.
*/
- jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset,
- triggers);
+ if (!done_copy_out)
+ jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
+ jh_in->b_triggers);
/*
* Check for escaping
@@ -443,7 +451,7 @@ int __jbd2_log_space_left(journal_t *journal)
{
int left = journal->j_free;
- assert_spin_locked(&journal->j_state_lock);
+ /* assert_spin_locked(&journal->j_state_lock); */
/*
* Be pessimistic here about the number of those free blocks which
@@ -488,9 +496,9 @@ int jbd2_log_start_commit(journal_t *journal, tid_t tid)
{
int ret;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
ret = __jbd2_log_start_commit(journal, tid);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return ret;
}
@@ -509,7 +517,7 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
transaction_t *transaction = NULL;
tid_t tid;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
if (journal->j_running_transaction && !current->journal_info) {
transaction = journal->j_running_transaction;
__jbd2_log_start_commit(journal, transaction->t_tid);
@@ -517,12 +525,12 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
transaction = journal->j_committing_transaction;
if (!transaction) {
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
return 0; /* Nothing to retry */
}
tid = transaction->t_tid;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
jbd2_log_wait_commit(journal, tid);
return 1;
}
@@ -536,7 +544,7 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
{
int ret = 0;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (journal->j_running_transaction) {
tid_t tid = journal->j_running_transaction->t_tid;
@@ -555,7 +563,7 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
*ptid = journal->j_committing_transaction->t_tid;
ret = 1;
}
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return ret;
}
@@ -567,26 +575,24 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
{
int err = 0;
+ read_lock(&journal->j_state_lock);
#ifdef CONFIG_JBD2_DEBUG
- spin_lock(&journal->j_state_lock);
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_EMERG
"%s: error: j_commit_request=%d, tid=%d\n",
__func__, journal->j_commit_request, tid);
}
- spin_unlock(&journal->j_state_lock);
#endif
- spin_lock(&journal->j_state_lock);
while (tid_gt(tid, journal->j_commit_sequence)) {
jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
tid, journal->j_commit_sequence);
wake_up(&journal->j_wait_commit);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_done_commit,
!tid_gt(tid, journal->j_commit_sequence));
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
}
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
if (unlikely(is_journal_aborted(journal))) {
printk(KERN_EMERG "journal commit I/O error\n");
@@ -603,7 +609,7 @@ int jbd2_journal_next_log_block(journal_t *journal, unsigned long long *retp)
{
unsigned long blocknr;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
J_ASSERT(journal->j_free > 1);
blocknr = journal->j_head;
@@ -611,7 +617,7 @@ int jbd2_journal_next_log_block(journal_t *journal, unsigned long long *retp)
journal->j_free--;
if (journal->j_head == journal->j_last)
journal->j_head = journal->j_first;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return jbd2_journal_bmap(journal, blocknr, retp);
}
@@ -831,7 +837,7 @@ static journal_t * journal_init_common (void)
mutex_init(&journal->j_checkpoint_mutex);
spin_lock_init(&journal->j_revoke_lock);
spin_lock_init(&journal->j_list_lock);
- spin_lock_init(&journal->j_state_lock);
+ rwlock_init(&journal->j_state_lock);
journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
journal->j_min_batch_time = 0;
@@ -1097,14 +1103,14 @@ void jbd2_journal_update_superblock(journal_t *journal, int wait)
set_buffer_uptodate(bh);
}
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n",
journal->j_tail, journal->j_tail_sequence, journal->j_errno);
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
sb->s_start = cpu_to_be32(journal->j_tail);
sb->s_errno = cpu_to_be32(journal->j_errno);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
@@ -1125,12 +1131,12 @@ out:
* any future commit will have to be careful to update the
* superblock again to re-record the true start of the log. */
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (sb->s_start)
journal->j_flags &= ~JBD2_FLUSHED;
else
journal->j_flags |= JBD2_FLUSHED;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
/*
@@ -1392,13 +1398,9 @@ int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
unsigned long ro, unsigned long incompat)
{
- journal_superblock_t *sb;
-
if (!compat && !ro && !incompat)
return 1;
- sb = journal->j_superblock;
-
/* We can support any known requested features iff the
* superblock is in version 2. Otherwise we fail to support any
* extended sb features. */
@@ -1546,7 +1548,7 @@ int jbd2_journal_flush(journal_t *journal)
transaction_t *transaction = NULL;
unsigned long old_tail;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
/* Force everything buffered to the log... */
if (journal->j_running_transaction) {
@@ -1559,10 +1561,10 @@ int jbd2_journal_flush(journal_t *journal)
if (transaction) {
tid_t tid = transaction->t_tid;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
jbd2_log_wait_commit(journal, tid);
} else {
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
/* ...and flush everything in the log out to disk. */
@@ -1586,12 +1588,12 @@ int jbd2_journal_flush(journal_t *journal)
* the magic code for a fully-recovered superblock. Any future
* commits of data to the journal will restore the current
* s_start value. */
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
old_tail = journal->j_tail;
journal->j_tail = 0;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
jbd2_journal_update_superblock(journal, 1);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_tail = old_tail;
J_ASSERT(!journal->j_running_transaction);
@@ -1599,7 +1601,7 @@ int jbd2_journal_flush(journal_t *journal)
J_ASSERT(!journal->j_checkpoint_transactions);
J_ASSERT(journal->j_head == journal->j_tail);
J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return 0;
}
@@ -1618,7 +1620,6 @@ int jbd2_journal_flush(journal_t *journal)
int jbd2_journal_wipe(journal_t *journal, int write)
{
- journal_superblock_t *sb;
int err = 0;
J_ASSERT (!(journal->j_flags & JBD2_LOADED));
@@ -1627,8 +1628,6 @@ int jbd2_journal_wipe(journal_t *journal, int write)
if (err)
return err;
- sb = journal->j_superblock;
-
if (!journal->j_tail)
goto no_recovery;
@@ -1666,12 +1665,12 @@ void __jbd2_journal_abort_hard(journal_t *journal)
printk(KERN_ERR "Aborting journal on device %s.\n",
journal->j_devname);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_flags |= JBD2_ABORT;
transaction = journal->j_running_transaction;
if (transaction)
__jbd2_log_start_commit(journal, transaction->t_tid);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
/* Soft abort: record the abort error status in the journal superblock,
@@ -1756,12 +1755,12 @@ int jbd2_journal_errno(journal_t *journal)
{
int err;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
if (journal->j_flags & JBD2_ABORT)
err = -EROFS;
else
err = journal->j_errno;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
return err;
}
@@ -1776,12 +1775,12 @@ int jbd2_journal_clear_err(journal_t *journal)
{
int err = 0;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (journal->j_flags & JBD2_ABORT)
err = -EROFS;
else
journal->j_errno = 0;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return err;
}
@@ -1794,10 +1793,10 @@ int jbd2_journal_clear_err(journal_t *journal)
*/
void jbd2_journal_ack_err(journal_t *journal)
{
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
if (journal->j_errno)
journal->j_flags |= JBD2_ACK_ERR;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
int jbd2_journal_blocks_per_page(struct inode *inode)
@@ -2202,8 +2201,6 @@ void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode)
void jbd2_journal_release_jbd_inode(journal_t *journal,
struct jbd2_inode *jinode)
{
- int writeout = 0;
-
if (!journal)
return;
restart:
@@ -2220,9 +2217,6 @@ restart:
goto restart;
}
- /* Do we need to wait for data writeback? */
- if (journal->j_committing_transaction == jinode->i_transaction)
- writeout = 1;
if (jinode->i_transaction) {
list_del(&jinode->i_list);
jinode->i_transaction = NULL;
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 049281b7cb8..2bc4d5f116f 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -285,12 +285,10 @@ int jbd2_journal_recover(journal_t *journal)
int jbd2_journal_skip_recovery(journal_t *journal)
{
int err;
- journal_superblock_t * sb;
struct recovery_info info;
memset (&info, 0, sizeof(info));
- sb = journal->j_superblock;
err = do_one_pass(journal, &info, PASS_SCAN);
@@ -299,7 +297,8 @@ int jbd2_journal_skip_recovery(journal_t *journal)
++journal->j_transaction_sequence;
} else {
#ifdef CONFIG_JBD2_DEBUG
- int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
+ int dropped = info.end_transaction -
+ be32_to_cpu(journal->j_superblock->s_sequence);
#endif
jbd_debug(1,
"JBD: ignoring %d transaction%s from the journal.\n",
@@ -365,11 +364,6 @@ static int do_one_pass(journal_t *journal,
int tag_bytes = journal_tag_bytes(journal);
__u32 crc32_sum = ~0; /* Transactional Checksums */
- /* Precompute the maximum metadata descriptors in a descriptor block */
- int MAX_BLOCKS_PER_DESC;
- MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
- / tag_bytes);
-
/*
* First thing is to establish what we expect to find in the log
* (in terms of transaction IDs), and where (in terms of log
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index e214d68620a..f3479d6e0a8 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -26,6 +26,8 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
+#include <linux/backing-dev.h>
+#include <linux/module.h>
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
@@ -53,6 +55,9 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
transaction->t_tid = journal->j_transaction_sequence++;
transaction->t_expires = jiffies + journal->j_commit_interval;
spin_lock_init(&transaction->t_handle_lock);
+ atomic_set(&transaction->t_updates, 0);
+ atomic_set(&transaction->t_outstanding_credits, 0);
+ atomic_set(&transaction->t_handle_count, 0);
INIT_LIST_HEAD(&transaction->t_inode_list);
INIT_LIST_HEAD(&transaction->t_private_list);
@@ -77,71 +82,106 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
*/
/*
+ * Update transaction's maximum wait time, if debugging is enabled.
+ *
+ * In order for t_max_wait to be reliable, it must be protected by a
+ * lock. But doing so will mean that start_this_handle() can not be
+ * run in parallel on SMP systems, which limits our scalability. So
+ * unless debugging is enabled, we no longer update t_max_wait, which
+ * means that maximum wait time reported by the jbd2_run_stats
+ * tracepoint will always be zero.
+ */
+static inline void update_t_max_wait(transaction_t *transaction)
+{
+#ifdef CONFIG_JBD2_DEBUG
+ unsigned long ts = jiffies;
+
+ if (jbd2_journal_enable_debug &&
+ time_after(transaction->t_start, ts)) {
+ ts = jbd2_time_diff(ts, transaction->t_start);
+ spin_lock(&transaction->t_handle_lock);
+ if (ts > transaction->t_max_wait)
+ transaction->t_max_wait = ts;
+ spin_unlock(&transaction->t_handle_lock);
+ }
+#endif
+}
+
+/*
* start_this_handle: Given a handle, deal with any locking or stalling
* needed to make sure that there is enough journal space for the handle
* to begin. Attach the handle to a transaction and set up the
* transaction's buffer credits.
*/
-static int start_this_handle(journal_t *journal, handle_t *handle)
+static int start_this_handle(journal_t *journal, handle_t *handle,
+ int gfp_mask)
{
transaction_t *transaction;
int needed;
int nblocks = handle->h_buffer_credits;
transaction_t *new_transaction = NULL;
- int ret = 0;
- unsigned long ts = jiffies;
if (nblocks > journal->j_max_transaction_buffers) {
printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
current->comm, nblocks,
journal->j_max_transaction_buffers);
- ret = -ENOSPC;
- goto out;
+ return -ENOSPC;
}
alloc_transaction:
if (!journal->j_running_transaction) {
- new_transaction = kzalloc(sizeof(*new_transaction),
- GFP_NOFS|__GFP_NOFAIL);
+ new_transaction = kzalloc(sizeof(*new_transaction), gfp_mask);
if (!new_transaction) {
- ret = -ENOMEM;
- goto out;
+ /*
+ * If __GFP_FS is not present, we may be called
+ * from inside the fs writeback layer, so we
+ * MUST NOT fail. Since
+ * __GFP_NOFAIL is going away, we will arrange
+ * to retry the allocation ourselves.
+ */
+ if ((gfp_mask & __GFP_FS) == 0) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto alloc_transaction;
+ }
+ return -ENOMEM;
}
}
jbd_debug(3, "New handle %p going live.\n", handle);
-repeat:
-
/*
* We need to hold j_state_lock until t_updates has been incremented,
* for proper journal barrier handling
*/
- spin_lock(&journal->j_state_lock);
-repeat_locked:
+repeat:
+ read_lock(&journal->j_state_lock);
if (is_journal_aborted(journal) ||
(journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
- spin_unlock(&journal->j_state_lock);
- ret = -EROFS;
- goto out;
+ read_unlock(&journal->j_state_lock);
+ kfree(new_transaction);
+ return -EROFS;
}
/* Wait on the journal's transaction barrier if necessary */
if (journal->j_barrier_count) {
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_transaction_locked,
journal->j_barrier_count == 0);
goto repeat;
}
if (!journal->j_running_transaction) {
- if (!new_transaction) {
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
+ if (!new_transaction)
goto alloc_transaction;
+ write_lock(&journal->j_state_lock);
+ if (!journal->j_running_transaction) {
+ jbd2_get_transaction(journal, new_transaction);
+ new_transaction = NULL;
}
- jbd2_get_transaction(journal, new_transaction);
- new_transaction = NULL;
+ write_unlock(&journal->j_state_lock);
+ goto repeat;
}
transaction = journal->j_running_transaction;
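
start_this_handle() now follows a check/allocate/recheck discipline: sample j_state_lock shared, allocate the candidate transaction with no lock held, then retake the lock exclusive and test again before installing, because another CPU may have started a transaction in the meantime. A condensed paraphrase of the logic above (error paths elided; the real function also bumps t_updates before dropping the read lock so the returned transaction cannot vanish):

static transaction_t *ensure_running_transaction(journal_t *journal)
{
	transaction_t *new_transaction = NULL, *t;

repeat:
	read_lock(&journal->j_state_lock);
	t = journal->j_running_transaction;
	read_unlock(&journal->j_state_lock);
	if (t) {
		kfree(new_transaction);	/* speculative allocation unused */
		return t;
	}
	if (!new_transaction) {
		/* allocate with no lock held */
		new_transaction = kzalloc(sizeof(*new_transaction), GFP_NOFS);
		if (!new_transaction)
			return NULL;
	}
	write_lock(&journal->j_state_lock);
	if (!journal->j_running_transaction) {	/* recheck: we may have raced */
		jbd2_get_transaction(journal, new_transaction);
		new_transaction = NULL;
	}
	write_unlock(&journal->j_state_lock);
	goto repeat;
}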
@@ -155,7 +195,7 @@ repeat_locked:
prepare_to_wait(&journal->j_wait_transaction_locked,
&wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
schedule();
finish_wait(&journal->j_wait_transaction_locked, &wait);
goto repeat;
@@ -166,8 +206,8 @@ repeat_locked:
* buffers requested by this operation, we need to stall pending a log
* checkpoint to free some more log space.
*/
- spin_lock(&transaction->t_handle_lock);
- needed = transaction->t_outstanding_credits + nblocks;
+ needed = atomic_add_return(nblocks,
+ &transaction->t_outstanding_credits);
if (needed > journal->j_max_transaction_buffers) {
/*
@@ -178,11 +218,11 @@ repeat_locked:
DEFINE_WAIT(wait);
jbd_debug(2, "Handle %p starting new commit...\n", handle);
- spin_unlock(&transaction->t_handle_lock);
+ atomic_sub(nblocks, &transaction->t_outstanding_credits);
prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
TASK_UNINTERRUPTIBLE);
__jbd2_log_start_commit(journal, transaction->t_tid);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
schedule();
finish_wait(&journal->j_wait_transaction_locked, &wait);
goto repeat;
@@ -215,35 +255,31 @@ repeat_locked:
*/
if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
- spin_unlock(&transaction->t_handle_lock);
- __jbd2_log_wait_for_space(journal);
- goto repeat_locked;
+ atomic_sub(nblocks, &transaction->t_outstanding_credits);
+ read_unlock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
+ if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
+ __jbd2_log_wait_for_space(journal);
+ write_unlock(&journal->j_state_lock);
+ goto repeat;
}
/* OK, account for the buffers that this operation expects to
- * use and add the handle to the running transaction. */
-
- if (time_after(transaction->t_start, ts)) {
- ts = jbd2_time_diff(ts, transaction->t_start);
- if (ts > transaction->t_max_wait)
- transaction->t_max_wait = ts;
- }
-
+ * use and add the handle to the running transaction.
+ */
+ update_t_max_wait(transaction);
handle->h_transaction = transaction;
- transaction->t_outstanding_credits += nblocks;
- transaction->t_updates++;
- transaction->t_handle_count++;
+ atomic_inc(&transaction->t_updates);
+ atomic_inc(&transaction->t_handle_count);
jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
- handle, nblocks, transaction->t_outstanding_credits,
+ handle, nblocks,
+ atomic_read(&transaction->t_outstanding_credits),
__jbd2_log_space_left(journal));
- spin_unlock(&transaction->t_handle_lock);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
lock_map_acquire(&handle->h_lockdep_map);
-out:
- if (unlikely(new_transaction)) /* It's usually NULL */
- kfree(new_transaction);
- return ret;
+ kfree(new_transaction);
+ return 0;
}
static struct lock_class_key jbd2_handle_key;
@@ -278,7 +314,7 @@ static handle_t *new_handle(int nblocks)
*
* Return a pointer to a newly allocated handle, or an ERR_PTR() value on failure
*/
-handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
+handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
{
handle_t *handle = journal_current_handle();
int err;
@@ -298,7 +334,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
current->journal_info = handle;
- err = start_this_handle(journal, handle);
+ err = start_this_handle(journal, handle, gfp_mask);
if (err < 0) {
jbd2_free_handle(handle);
current->journal_info = NULL;
@@ -308,6 +344,15 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
out:
return handle;
}
+EXPORT_SYMBOL(jbd2__journal_start);
+
+
+handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
+{
+ return jbd2__journal_start(journal, nblocks, GFP_NOFS);
+}
+EXPORT_SYMBOL(jbd2_journal_start);
+
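
For orientation, usage after this split: the old entry point keeps its GFP_NOFS behaviour, while callers that must not recurse into the filesystem or the block layer can pass their own mask. Illustrative calls (failure follows the ERR_PTR() convention):

	handle_t *handle;

	/* unchanged default: allocations may not re-enter the filesystem */
	handle = jbd2_journal_start(journal, nblocks);

	/* new: the caller picks the allocation context explicitly */
	handle = jbd2__journal_start(journal, nblocks, GFP_NOIO);
	if (IS_ERR(handle))
		return PTR_ERR(handle);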
/**
* int jbd2_journal_extend() - extend buffer credits.
@@ -342,7 +387,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
result = 1;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
/* Don't extend a locked-down transaction! */
if (handle->h_transaction->t_state != T_RUNNING) {
@@ -352,7 +397,7 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
}
spin_lock(&transaction->t_handle_lock);
- wanted = transaction->t_outstanding_credits + nblocks;
+ wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;
if (wanted > journal->j_max_transaction_buffers) {
jbd_debug(3, "denied handle %p %d blocks: "
@@ -367,14 +412,14 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
}
handle->h_buffer_credits += nblocks;
- transaction->t_outstanding_credits += nblocks;
+ atomic_add(nblocks, &transaction->t_outstanding_credits);
result = 0;
jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
spin_unlock(&transaction->t_handle_lock);
error_out:
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
out:
return result;
}
@@ -394,8 +439,7 @@ out:
* transaction capable of guaranteeing the requested number of
* credits.
*/
-
-int jbd2_journal_restart(handle_t *handle, int nblocks)
+int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
@@ -410,29 +454,35 @@ int jbd2_journal_restart(handle_t *handle, int nblocks)
* First unlink the handle from its current transaction, and start the
* commit on that.
*/
- J_ASSERT(transaction->t_updates > 0);
+ J_ASSERT(atomic_read(&transaction->t_updates) > 0);
J_ASSERT(journal_current_handle() == handle);
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
spin_lock(&transaction->t_handle_lock);
- transaction->t_outstanding_credits -= handle->h_buffer_credits;
- transaction->t_updates--;
-
- if (!transaction->t_updates)
+ atomic_sub(handle->h_buffer_credits,
+ &transaction->t_outstanding_credits);
+ if (atomic_dec_and_test(&transaction->t_updates))
wake_up(&journal->j_wait_updates);
spin_unlock(&transaction->t_handle_lock);
jbd_debug(2, "restarting handle %p\n", handle);
__jbd2_log_start_commit(journal, transaction->t_tid);
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
lock_map_release(&handle->h_lockdep_map);
handle->h_buffer_credits = nblocks;
- ret = start_this_handle(journal, handle);
+ ret = start_this_handle(journal, handle, gfp_mask);
return ret;
}
+EXPORT_SYMBOL(jbd2__journal_restart);
+int jbd2_journal_restart(handle_t *handle, int nblocks)
+{
+ return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
+}
+EXPORT_SYMBOL(jbd2_journal_restart);
+
/**
* void jbd2_journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
@@ -447,7 +497,7 @@ void jbd2_journal_lock_updates(journal_t *journal)
{
DEFINE_WAIT(wait);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
++journal->j_barrier_count;
/* Wait until there are no running updates */
@@ -458,19 +508,19 @@ void jbd2_journal_lock_updates(journal_t *journal)
break;
spin_lock(&transaction->t_handle_lock);
- if (!transaction->t_updates) {
+ if (!atomic_read(&transaction->t_updates)) {
spin_unlock(&transaction->t_handle_lock);
break;
}
prepare_to_wait(&journal->j_wait_updates, &wait,
TASK_UNINTERRUPTIBLE);
spin_unlock(&transaction->t_handle_lock);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
schedule();
finish_wait(&journal->j_wait_updates, &wait);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
}
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
/*
* We have now established a barrier against other normal updates, but
@@ -494,9 +544,9 @@ void jbd2_journal_unlock_updates (journal_t *journal)
J_ASSERT(journal->j_barrier_count != 0);
mutex_unlock(&journal->j_barrier);
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
--journal->j_barrier_count;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
wake_up(&journal->j_wait_transaction_locked);
}
@@ -725,6 +775,9 @@ done:
page = jh2bh(jh)->b_page;
offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
source = kmap_atomic(page, KM_USER0);
+ /* Fire data frozen trigger just before we copy the data */
+ jbd2_buffer_frozen_trigger(jh, source + offset,
+ jh->b_triggers);
memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
kunmap_atomic(source, KM_USER0);
@@ -963,15 +1016,15 @@ void jbd2_journal_set_triggers(struct buffer_head *bh,
jh->b_triggers = type;
}
-void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
+void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
struct jbd2_buffer_trigger_type *triggers)
{
struct buffer_head *bh = jh2bh(jh);
- if (!triggers || !triggers->t_commit)
+ if (!triggers || !triggers->t_frozen)
return;
- triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
+ triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}
void jbd2_buffer_abort_trigger(struct journal_head *jh,
@@ -1235,7 +1288,8 @@ int jbd2_journal_stop(handle_t *handle)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
- int err;
+ int err, wait_for_commit = 0;
+ tid_t tid;
pid_t pid;
J_ASSERT(journal_current_handle() == handle);
@@ -1243,7 +1297,7 @@ int jbd2_journal_stop(handle_t *handle)
if (is_handle_aborted(handle))
err = -EIO;
else {
- J_ASSERT(transaction->t_updates > 0);
+ J_ASSERT(atomic_read(&transaction->t_updates) > 0);
err = 0;
}
@@ -1288,9 +1342,9 @@ int jbd2_journal_stop(handle_t *handle)
journal->j_last_sync_writer = pid;
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
commit_time = journal->j_average_commit_time;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
trans_time = ktime_to_ns(ktime_sub(ktime_get(),
transaction->t_start_time));
@@ -1311,14 +1365,8 @@ int jbd2_journal_stop(handle_t *handle)
if (handle->h_sync)
transaction->t_synchronous_commit = 1;
current->journal_info = NULL;
- spin_lock(&transaction->t_handle_lock);
- transaction->t_outstanding_credits -= handle->h_buffer_credits;
- transaction->t_updates--;
- if (!transaction->t_updates) {
- wake_up(&journal->j_wait_updates);
- if (journal->j_barrier_count)
- wake_up(&journal->j_wait_transaction_locked);
- }
+ atomic_sub(handle->h_buffer_credits,
+ &transaction->t_outstanding_credits);
/*
* If the handle is marked SYNC, we need to set another commit
@@ -1327,15 +1375,13 @@ int jbd2_journal_stop(handle_t *handle)
* transaction is too old now.
*/
if (handle->h_sync ||
- transaction->t_outstanding_credits >
- journal->j_max_transaction_buffers ||
- time_after_eq(jiffies, transaction->t_expires)) {
+ (atomic_read(&transaction->t_outstanding_credits) >
+ journal->j_max_transaction_buffers) ||
+ time_after_eq(jiffies, transaction->t_expires)) {
/* Do this even for aborted journals: an abort still
* completes the commit thread, it just doesn't write
* anything to disk. */
- tid_t tid = transaction->t_tid;
- spin_unlock(&transaction->t_handle_lock);
jbd_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
@@ -1346,11 +1392,25 @@ int jbd2_journal_stop(handle_t *handle)
* to wait for the commit to complete.
*/
if (handle->h_sync && !(current->flags & PF_MEMALLOC))
- err = jbd2_log_wait_commit(journal, tid);
- } else {
- spin_unlock(&transaction->t_handle_lock);
+ wait_for_commit = 1;
}
+ /*
+ * Once we drop t_updates, if it goes to zero the transaction
+ * could start committing on us and eventually disappear. So
+ * once we do this, we must not dereference transaction
+ * pointer again.
+ */
+ tid = transaction->t_tid;
+ if (atomic_dec_and_test(&transaction->t_updates)) {
+ wake_up(&journal->j_wait_updates);
+ if (journal->j_barrier_count)
+ wake_up(&journal->j_wait_transaction_locked);
+ }
+
+ if (wait_for_commit)
+ err = jbd2_log_wait_commit(journal, tid);
+
lock_map_release(&handle->h_lockdep_map);
jbd2_free_handle(handle);
@@ -1716,7 +1776,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
goto zap_buffer_unlocked;
/* OK, we have data buffer in journaled mode */
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
@@ -1769,7 +1829,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return ret;
} else {
/* There is no currently-running transaction. So the
@@ -1783,7 +1843,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return ret;
} else {
/* The orphan record's transaction has
@@ -1807,7 +1867,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
jbd2_journal_put_journal_head(jh);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
return 0;
} else {
/* Good, the buffer belongs to the running transaction.
@@ -1826,7 +1886,7 @@ zap_buffer:
zap_buffer_no_jh:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
clear_buffer_dirty(bh);
J_ASSERT_BH(bh, !buffer_jbddirty(bh));
@@ -2133,9 +2193,9 @@ int jbd2_journal_begin_ordered_truncate(journal_t *journal,
/* Locks are here just to force reading of recent values, it is
* enough that the transaction was not committing before we started
* a transaction adding the inode to orphan list */
- spin_lock(&journal->j_state_lock);
+ read_lock(&journal->j_state_lock);
commit_trans = journal->j_committing_transaction;
- spin_unlock(&journal->j_state_lock);
+ read_unlock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
inode_trans = jinode->i_transaction;
spin_unlock(&journal->j_list_lock);
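
One lifetime rule in the jbd2_journal_stop() rework deserves spelling out: the moment t_updates is dropped, the transaction may commit and be freed, so anything needed afterwards, here the tid, must be sampled first. The shape of that code path, condensed:

	tid_t tid = transaction->t_tid;	/* sample while the handle still pins it */

	if (atomic_dec_and_test(&transaction->t_updates)) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}
	/* 'transaction' must not be dereferenced past this point */

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);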
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index a33aab6b5e6..54a92fd02bb 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -234,8 +234,9 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
if (inode->i_mode != mode) {
struct iattr attr;
- attr.ia_valid = ATTR_MODE;
+ attr.ia_valid = ATTR_MODE | ATTR_CTIME;
attr.ia_mode = mode;
+ attr.ia_ctime = CURRENT_TIME_SEC;
rc = jffs2_do_setattr(inode, &attr);
if (rc < 0)
return rc;
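
The acl.c fix corrects a POSIX detail: a mode change is an attribute change, so ctime must advance with it. The pattern generalises to any iattr-based setattr helper; a sketch, with do_setattr_helper standing in for the filesystem-specific call:

	struct iattr attr;

	attr.ia_valid = ATTR_MODE | ATTR_CTIME;	/* declare both changes */
	attr.ia_mode  = mode;
	attr.ia_ctime = CURRENT_TIME_SEC;
	rc = do_setattr_helper(inode, &attr);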
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 55f1dde2fa8..404111b016c 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index c5e1450d79f..a906f538d11 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index f0294410868..617a1e5694c 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -2,11 +2,12 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
- * Created by Arjan van de Ven <arjanv@redhat.com>
- *
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
* Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
* University of Szeged, Hungary
*
+ * Created by Arjan van de Ven <arjan@infradead.org>
+ *
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index 7d1d72faa77..e471a9106fd 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -3,6 +3,7 @@
*
* Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
* University of Szeged, Hungary
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c
index cd02acafde8..ed25ae7c98e 100644
--- a/fs/jffs2/compr_lzo.c
+++ b/fs/jffs2/compr_lzo.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2007 Nokia Corporation. All rights reserved.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by Richard Purdie <rpurdie@openedhand.com>
*
diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
index 546d1538d07..9696ad9ef5f 100644
--- a/fs/jffs2/compr_rtime.c
+++ b/fs/jffs2/compr_rtime.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by Arjan van de Ven <arjanv@redhat.com>
*
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index 170d289ac78..a12b4f76337 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by Arjan van de Ven <arjanv@redhat.com>
*
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index b46661a4275..97fc45de6f8 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index ec353841392..e0b76c87a91 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
index a113ecc3baf..c4f8eef5ca6 100644
--- a/fs/jffs2/debug.h
+++ b/fs/jffs2/debug.h
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 7aa4417e085..ed78a3cf3cb 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
@@ -222,16 +223,17 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
jffs2_free_raw_inode(ri);
- d_instantiate(dentry, inode);
D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
inode->i_ino, inode->i_mode, inode->i_nlink,
f->inocache->pino_nlink, inode->i_mapping->nrpages));
+
+ d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
return 0;
fail:
- make_bad_inode(inode);
- iput(inode);
+ iget_failed(inode);
jffs2_free_raw_inode(ri);
return ret;
}
@@ -360,8 +362,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
/* Eeek. Wave bye bye */
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return PTR_ERR(fn);
+ ret = PTR_ERR(fn);
+ goto fail;
}
/* We use f->target field to store the target path. */
@@ -370,8 +372,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
memcpy(f->target, target, targetlen + 1);
@@ -386,30 +388,24 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
jffs2_complete_reservation(c);
ret = jffs2_init_security(inode, dir_i);
- if (ret) {
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
+
ret = jffs2_init_acl_post(inode);
- if (ret) {
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
- if (ret) {
- /* Eep. */
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
rd = jffs2_alloc_raw_dirent();
if (!rd) {
/* Argh. Now we treat it like a normal delete */
jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
dir_f = JFFS2_INODE_INFO(dir_i);
@@ -437,8 +433,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
mutex_unlock(&dir_f->sem);
- jffs2_clear_inode(inode);
- return PTR_ERR(fd);
+ ret = PTR_ERR(fd);
+ goto fail;
}
dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
@@ -453,7 +449,12 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
return 0;
+
+ fail:
+ iget_failed(inode);
+ return ret;
}
@@ -519,8 +520,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
/* Eeek. Wave bye bye */
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return PTR_ERR(fn);
+ ret = PTR_ERR(fn);
+ goto fail;
}
/* No data here. Only a metadata node, which will be
obsoleted by the first data write
@@ -531,30 +532,24 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
jffs2_complete_reservation(c);
ret = jffs2_init_security(inode, dir_i);
- if (ret) {
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
+
ret = jffs2_init_acl_post(inode);
- if (ret) {
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
- if (ret) {
- /* Eep. */
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
rd = jffs2_alloc_raw_dirent();
if (!rd) {
/* Argh. Now we treat it like a normal delete */
jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
dir_f = JFFS2_INODE_INFO(dir_i);
@@ -582,8 +577,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
mutex_unlock(&dir_f->sem);
- jffs2_clear_inode(inode);
- return PTR_ERR(fd);
+ ret = PTR_ERR(fd);
+ goto fail;
}
dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
@@ -599,7 +594,12 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
+ unlock_new_inode(inode);
return 0;
+
+ fail:
+ iget_failed(inode);
+ return ret;
}
static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
@@ -693,8 +693,8 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
/* Eeek. Wave bye bye */
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return PTR_ERR(fn);
+ ret = PTR_ERR(fn);
+ goto fail;
}
/* No data here. Only a metadata node, which will be
obsoleted by the first data write
@@ -705,30 +705,24 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
jffs2_complete_reservation(c);
ret = jffs2_init_security(inode, dir_i);
- if (ret) {
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
+
ret = jffs2_init_acl_post(inode);
- if (ret) {
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
- if (ret) {
- /* Eep. */
- jffs2_clear_inode(inode);
- return ret;
- }
+ if (ret)
+ goto fail;
rd = jffs2_alloc_raw_dirent();
if (!rd) {
/* Argh. Now we treat it like a normal delete */
jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
dir_f = JFFS2_INODE_INFO(dir_i);
@@ -759,8 +753,8 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
mutex_unlock(&dir_f->sem);
- jffs2_clear_inode(inode);
- return PTR_ERR(fd);
+ ret = PTR_ERR(fd);
+ goto fail;
}
dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
@@ -775,8 +769,12 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
-
+ unlock_new_inode(inode);
return 0;
+
+ fail:
+ iget_failed(inode);
+ return ret;
}
static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 6286ad9b00f..abac961f617 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 81349702443..1c0a08d711a 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 8bc2c80ab15..6b2964a1985 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
@@ -169,13 +170,13 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
- /* We have to do the simple_setsize() without f->sem held, since
+ /* We have to do the truncate_setsize() without f->sem held, since
some pages may be locked and waiting for it in readpage().
We are protected from a simultaneous write() extending i_size
back past iattr->ia_size, because do_truncate() holds the
generic inode semaphore. */
if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
- simple_setsize(inode, iattr->ia_size);
+ truncate_setsize(inode, iattr->ia_size);
inode->i_blocks = (inode->i_size + 511) >> 9;
}
@@ -225,7 +226,7 @@ int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
}
-void jffs2_clear_inode (struct inode *inode)
+void jffs2_evict_inode (struct inode *inode)
{
/* We can forget about this inode for now - drop all
* the nodelists associated with it, etc.
@@ -233,7 +234,9 @@ void jffs2_clear_inode (struct inode *inode)
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
+ D1(printk(KERN_DEBUG "jffs2_evict_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
jffs2_do_clear_inode(c, f);
}
@@ -465,7 +468,12 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i
inode->i_blocks = 0;
inode->i_size = 0;
- insert_inode_hash(inode);
+ if (insert_inode_locked(inode) < 0) {
+ make_bad_inode(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+ return ERR_PTR(-EINVAL);
+ }
return inode;
}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index f5e96bd656e..846a7945249 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c
index 9d41f43e47b..859a598af02 100644
--- a/fs/jffs2/ioctl.c
+++ b/fs/jffs2/ioctl.c
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h
index c6923da9826..2e4a86763c0 100644
--- a/fs/jffs2/jffs2_fs_i.h
+++ b/fs/jffs2/jffs2_fs_i.h
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 85ef6dbb1be..6784bc89add 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -2,6 +2,7 @@
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index a881a42f19e..523a9169105 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -24,7 +24,6 @@
#ifdef __ECOS
#include "os-ecos.h"
#else
-#include <linux/mtd/compatmac.h> /* For compatibility with older kernels */
#include "os-linux.h"
#endif
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 4791aacf308..00bae7cc2e4 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -171,7 +171,7 @@ extern const struct inode_operations jffs2_symlink_inode_operations;
int jffs2_setattr (struct dentry *, struct iattr *);
int jffs2_do_setattr (struct inode *, struct iattr *);
struct inode *jffs2_iget(struct super_block *, unsigned long);
-void jffs2_clear_inode (struct inode *);
+void jffs2_evict_inode (struct inode *);
void jffs2_dirty_inode(struct inode *inode);
struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
struct jffs2_raw_inode *ri);
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 511e2d609d1..662bba09950 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -135,7 +135,7 @@ static const struct super_operations jffs2_super_operations =
.write_super = jffs2_write_super,
.statfs = jffs2_statfs,
.remount_fs = jffs2_remount_fs,
- .clear_inode = jffs2_clear_inode,
+ .evict_inode = jffs2_evict_inode,
.dirty_inode = jffs2_dirty_inode,
.sync_fs = jffs2_sync_fs,
};
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index a2d58c96f1b..9b572ca40a4 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -588,7 +588,7 @@ static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *re
void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
- /* It's called from jffs2_clear_inode() on inode removing.
+ /* It's called from jffs2_evict_inode() on inode removing.
When an inode with XATTR is removed, those XATTRs must be removed. */
struct jffs2_xattr_ref *ref, *_ref;
@@ -626,7 +626,7 @@ void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
- /* success of check_xattr_ref_inode() means taht inode (ic) dose not have
+ * success of check_xattr_ref_inode() means that inode (ic) does not have
* duplicate name/value pairs. If duplicate name/value pair would be found,
* one will be removed.
*/
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 127263cc865..c5ce6c1d1ff 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
@@ -107,11 +108,18 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
return rc;
}
- rc = inode_setattr(inode, iattr);
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(inode)) {
+ rc = vmtruncate(inode, iattr->ia_size);
+ if (rc)
+ return rc;
+ }
- if (!rc && (iattr->ia_valid & ATTR_MODE))
- rc = jfs_acl_chmod(inode);
+ setattr_copy(inode, iattr);
+ mark_inode_dirty(inode);
+ if (iattr->ia_valid & ATTR_MODE)
+ rc = jfs_acl_chmod(inode);
return rc;
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index ed9ba6fe04f..9978803ceed 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -145,31 +145,32 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
return 0;
}
-void jfs_delete_inode(struct inode *inode)
+void jfs_evict_inode(struct inode *inode)
{
- jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
+ jfs_info("In jfs_evict_inode, inode = 0x%p", inode);
- if (!is_bad_inode(inode))
+ if (!inode->i_nlink && !is_bad_inode(inode)) {
dquot_initialize(inode);
- if (!is_bad_inode(inode) &&
- (JFS_IP(inode)->fileset == FILESYSTEM_I)) {
- truncate_inode_pages(&inode->i_data, 0);
+ if (JFS_IP(inode)->fileset == FILESYSTEM_I) {
+ truncate_inode_pages(&inode->i_data, 0);
- if (test_cflag(COMMIT_Freewmap, inode))
- jfs_free_zero_link(inode);
+ if (test_cflag(COMMIT_Freewmap, inode))
+ jfs_free_zero_link(inode);
- diFree(inode);
+ diFree(inode);
- /*
- * Free the inode from the quota allocation.
- */
- dquot_initialize(inode);
- dquot_free_inode(inode);
- dquot_drop(inode);
+ /*
+ * Free the inode from the quota allocation.
+ */
+ dquot_initialize(inode);
+ dquot_free_inode(inode);
+ }
+ } else {
+ truncate_inode_pages(&inode->i_data, 0);
}
-
- clear_inode(inode);
+ end_writeback(inode);
+ dquot_drop(inode);
}
void jfs_dirty_inode(struct inode *inode)
@@ -303,8 +304,17 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ int ret;
+
+ ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
jfs_get_block);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
@@ -317,9 +327,24 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
- return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, jfs_get_block, NULL);
+
+ /*
+ * In case of error, an extending write may have instantiated a few
+ * blocks outside i_size. Trim these off again.
+ */
+ if (unlikely((rw & WRITE) && ret < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
+
+ return ret;
}
const struct address_space_operations jfs_aops = {
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 11042b1f44b..155e91eff07 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -27,7 +27,7 @@ extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long);
extern struct inode *jfs_iget(struct super_block *, unsigned long);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_write_inode(struct inode *, struct writeback_control *);
-extern void jfs_delete_inode(struct inode *);
+extern void jfs_evict_inode(struct inode *);
extern void jfs_dirty_inode(struct inode *);
extern void jfs_truncate(struct inode *);
extern void jfs_truncate_nolock(struct inode *, loff_t);
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index b38f96bef82..ec8c3e4baca 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -132,11 +132,6 @@ static void jfs_destroy_inode(struct inode *inode)
kmem_cache_free(jfs_inode_cachep, ji);
}
-static void jfs_clear_inode(struct inode *inode)
-{
- dquot_drop(inode);
-}
-
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
@@ -765,8 +760,7 @@ static const struct super_operations jfs_super_operations = {
.destroy_inode = jfs_destroy_inode,
.dirty_inode = jfs_dirty_inode,
.write_inode = jfs_write_inode,
- .delete_inode = jfs_delete_inode,
- .clear_inode = jfs_clear_inode,
+ .evict_inode = jfs_evict_inode,
.put_super = jfs_put_super,
.sync_fs = jfs_sync_fs,
.freeze_fs = jfs_freeze,
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index fa96bbb2634..2d7f165d0f1 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -86,46 +86,25 @@ struct ea_buffer {
#define EA_MALLOC 0x0008
+static int is_known_namespace(const char *name)
+{
+ if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
+ strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+ strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+ return false;
+
+ return true;
+}
+
/*
* These three routines are used to recognize on-disk extended attributes
* that are in a recognized namespace. If the attribute is not recognized,
* "os2." is prepended to the name
*/
-static inline int is_os2_xattr(struct jfs_ea *ea)
+static int is_os2_xattr(struct jfs_ea *ea)
{
- /*
- * Check for "system."
- */
- if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
- !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
- return false;
- /*
- * Check for "user."
- */
- if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
- !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
- return false;
- /*
- * Check for "security."
- */
- if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) &&
- !strncmp(ea->name, XATTR_SECURITY_PREFIX,
- XATTR_SECURITY_PREFIX_LEN))
- return false;
- /*
- * Check for "trusted."
- */
- if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) &&
- !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
- return false;
- /*
- * Add any other valid namespace prefixes here
- */
-
- /*
- * We assume it's OS/2's flat namespace
- */
- return true;
+ return !is_known_namespace(ea->name);
}
static inline int name_size(struct jfs_ea *ea)
@@ -764,13 +743,23 @@ static int can_set_xattr(struct inode *inode, const char *name,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return can_set_system_xattr(inode, name, value, value_len);
+ if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) {
+ /*
+ * This makes sure that we aren't trying to set an
+ * attribute in a different namespace by prefixing it
+ * with "os2."
+ */
+ if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN))
+ return -EOPNOTSUPP;
+ return 0;
+ }
+
/*
* Don't allow setting an attribute in an unknown namespace.
*/
if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
- strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
- strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))
+ strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
return -EOPNOTSUPP;
return 0;
@@ -952,19 +941,8 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
int xattr_size;
ssize_t size;
int namelen = strlen(name);
- char *os2name = NULL;
char *value;
- if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
- os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
- GFP_KERNEL);
- if (!os2name)
- return -ENOMEM;
- strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
- name = os2name;
- namelen -= XATTR_OS2_PREFIX_LEN;
- }
-
down_read(&JFS_IP(inode)->xattr_sem);
xattr_size = ea_get(inode, &ea_buf, 0);
@@ -1002,8 +980,6 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
out:
up_read(&JFS_IP(inode)->xattr_sem);
- kfree(os2name);
-
return size;
}
@@ -1012,6 +988,19 @@ ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
{
int err;
+ if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
+ /*
+ * skip past "os2." prefix
+ */
+ name += XATTR_OS2_PREFIX_LEN;
+ /*
+ * Don't allow retrieving properly prefixed attributes
+ * by prepending them with "os2."
+ */
+ if (is_known_namespace(name))
+ return -EOPNOTSUPP;
+ }
+
err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);
return err;
diff --git a/fs/libfs.c b/fs/libfs.c
index 09e1016eb77..0a9da95317f 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -327,77 +327,35 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
}
/**
- * simple_setsize - handle core mm and vfs requirements for file size change
- * @inode: inode
- * @newsize: new file size
- *
- * Returns 0 on success, -error on failure.
- *
- * simple_setsize must be called with inode_mutex held.
- *
- * simple_setsize will check that the requested new size is OK (see
- * inode_newsize_ok), and then will perform the necessary i_size update
- * and pagecache truncation (if necessary). It will be typically be called
- * from the filesystem's setattr function when ATTR_SIZE is passed in.
- *
- * The inode itself must have correct permissions and attributes to allow
- * i_size to be changed, this function then just checks that the new size
- * requested is valid.
- *
- * In the case of simple in-memory filesystems with inodes stored solely
- * in the inode cache, and file data in the pagecache, nothing more needs
- * to be done to satisfy a truncate request. Filesystems with on-disk
- * blocks for example will need to free them in the case of truncate, in
- * that case it may be easier not to use simple_setsize (but each of its
- * components will likely be required at some point to update pagecache
- * and inode etc).
- */
-int simple_setsize(struct inode *inode, loff_t newsize)
-{
- loff_t oldsize;
- int error;
-
- error = inode_newsize_ok(inode, newsize);
- if (error)
- return error;
-
- oldsize = inode->i_size;
- i_size_write(inode, newsize);
- truncate_pagecache(inode, oldsize, newsize);
-
- return error;
-}
-EXPORT_SYMBOL(simple_setsize);
-
-/**
- * simple_setattr - setattr for simple in-memory filesystem
+ * simple_setattr - setattr for simple filesystem
* @dentry: dentry
* @iattr: iattr structure
*
* Returns 0 on success, -error on failure.
*
- * simple_setattr implements setattr for an in-memory filesystem which
- * does not store its own file data or metadata (eg. uses the page cache
- * and inode cache as its data store).
+ * simple_setattr is a simple ->setattr implementation without a proper
+ * implementation of size changes.
+ *
+ * It can either be used for in-memory filesystems or special files
+ * on simple regular filesystems. Anything that needs to change on-disk
+ * or wire state on size changes needs its own setattr method.
*/
int simple_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = dentry->d_inode;
int error;
+ WARN_ON_ONCE(inode->i_op->truncate);
+
error = inode_change_ok(inode, iattr);
if (error)
return error;
- if (iattr->ia_valid & ATTR_SIZE) {
- error = simple_setsize(inode, iattr->ia_size);
- if (error)
- return error;
- }
-
- generic_setattr(inode, iattr);
-
- return error;
+ if (iattr->ia_valid & ATTR_SIZE)
+ truncate_setsize(inode, iattr->ia_size);
+ setattr_copy(inode, iattr);
+ mark_inode_dirty(inode);
+ return 0;
}
EXPORT_SYMBOL(simple_setattr);
@@ -489,7 +447,8 @@ int simple_write_end(struct file *file, struct address_space *mapping,
* unique inode values later for this filesystem, then you must take care
* to pass it an appropriate max_reserved value to avoid collisions.
*/
-int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files)
+int simple_fill_super(struct super_block *s, unsigned long magic,
+ struct tree_descr *files)
{
struct inode *inode;
struct dentry *root;
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 72d1893ddd3..9777eb5b552 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -434,8 +434,11 @@ static int __logfs_create(struct inode *dir, struct dentry *dentry,
int ret;
ta = kzalloc(sizeof(*ta), GFP_KERNEL);
- if (!ta)
+ if (!ta) {
+ inode->i_nlink--;
+ iput(inode);
return -ENOMEM;
+ }
ta->state = CREATE_1;
ta->ino = inode->i_ino;
@@ -821,7 +824,7 @@ const struct inode_operations logfs_dir_iops = {
};
const struct file_operations logfs_dir_fops = {
.fsync = logfs_fsync,
- .ioctl = logfs_ioctl,
+ .unlocked_ioctl = logfs_ioctl,
.readdir = logfs_readdir,
.read = generic_read_dir,
};
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index abe1cafbd4c..e86376b87af 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -181,9 +181,9 @@ static int logfs_releasepage(struct page *page, gfp_t only_xfs_uses_this)
}
-int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = file->f_path.dentry->d_inode;
struct logfs_inode *li = logfs_inode(inode);
unsigned int oldflags, flags;
int err;
@@ -232,15 +232,19 @@ static int logfs_setattr(struct dentry *dentry, struct iattr *attr)
struct inode *inode = dentry->d_inode;
int err = 0;
- if (attr->ia_valid & ATTR_SIZE)
+ err = inode_change_ok(inode, attr);
+ if (err)
+ return err;
+
+ if (attr->ia_valid & ATTR_SIZE) {
err = logfs_truncate(inode, attr->ia_size);
- attr->ia_valid &= ~ATTR_SIZE;
+ if (err)
+ return err;
+ }
- if (!err)
- err = inode_change_ok(inode, attr);
- if (!err)
- err = inode_setattr(inode, attr);
- return err;
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
}
const struct inode_operations logfs_reg_iops = {
@@ -251,7 +255,7 @@ const struct file_operations logfs_reg_fops = {
.aio_read = generic_file_aio_read,
.aio_write = generic_file_aio_write,
.fsync = logfs_fsync,
- .ioctl = logfs_ioctl,
+ .unlocked_ioctl = logfs_ioctl,
.llseek = generic_file_llseek,
.mmap = generic_file_readonly_mmap,
.open = generic_file_open,
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index f602e230e16..d8c71ece098 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -235,33 +235,21 @@ static struct inode *logfs_alloc_inode(struct super_block *sb)
* purpose is to create a new inode that will not trigger the warning if such
* an inode is still in use. An ugly hack, no doubt. Suggestions for
* improvement are welcome.
+ *
+ * AV: that's what ->put_super() is for...
*/
struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino)
{
struct inode *inode;
- inode = logfs_alloc_inode(sb);
+ inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
inode->i_mode = S_IFREG;
inode->i_ino = ino;
- inode->i_sb = sb;
-
- /* This is a blatant copy of alloc_inode code. We'd need alloc_inode
- * to be nonstatic, alas. */
- {
- struct address_space * const mapping = &inode->i_data;
-
- mapping->a_ops = &logfs_reg_aops;
- mapping->host = inode;
- mapping->flags = 0;
- mapping_set_gfp_mask(mapping, GFP_NOFS);
- mapping->assoc_mapping = NULL;
- mapping->backing_dev_info = &default_backing_dev_info;
- inode->i_mapping = mapping;
- inode->i_nlink = 1;
- }
+ inode->i_data.a_ops = &logfs_reg_aops;
+ mapping_set_gfp_mask(&inode->i_data, GFP_NOFS);
return inode;
}
@@ -277,7 +265,7 @@ struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino)
err = logfs_read_inode(inode);
if (err) {
- destroy_meta_inode(inode);
+ iput(inode);
return ERR_PTR(err);
}
logfs_inode_setops(inode);
@@ -298,18 +286,8 @@ static int logfs_write_inode(struct inode *inode, struct writeback_control *wbc)
return ret;
}
-void destroy_meta_inode(struct inode *inode)
-{
- if (inode) {
- if (inode->i_data.nrpages)
- truncate_inode_pages(&inode->i_data, 0);
- logfs_clear_inode(inode);
- kmem_cache_free(logfs_inode_cache, logfs_inode(inode));
- }
-}
-
/* called with inode_lock held */
-static void logfs_drop_inode(struct inode *inode)
+static int logfs_drop_inode(struct inode *inode)
{
struct logfs_super *super = logfs_super(inode->i_sb);
struct logfs_inode *li = logfs_inode(inode);
@@ -317,7 +295,7 @@ static void logfs_drop_inode(struct inode *inode)
spin_lock(&logfs_inode_lock);
list_move(&li->li_freeing_list, &super->s_freeing_list);
spin_unlock(&logfs_inode_lock);
- generic_drop_inode(inode);
+ return generic_drop_inode(inode);
}
static void logfs_set_ino_generation(struct super_block *sb,
@@ -384,12 +362,21 @@ static int logfs_sync_fs(struct super_block *sb, int wait)
return 0;
}
+static void logfs_put_super(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ /* kill the meta-inodes */
+ iput(super->s_master_inode);
+ iput(super->s_segfile_inode);
+ iput(super->s_mapping_inode);
+}
+
const struct super_operations logfs_super_operations = {
.alloc_inode = logfs_alloc_inode,
- .clear_inode = logfs_clear_inode,
- .delete_inode = logfs_delete_inode,
.destroy_inode = logfs_destroy_inode,
+ .evict_inode = logfs_evict_inode,
.drop_inode = logfs_drop_inode,
+ .put_super = logfs_put_super,
.write_inode = logfs_write_inode,
.statfs = logfs_statfs,
.sync_fs = logfs_sync_fs,
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 4b0e0616b35..f46ee8b0e13 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -889,8 +889,6 @@ void logfs_cleanup_journal(struct super_block *sb)
struct logfs_super *super = logfs_super(sb);
btree_grim_visitor32(&super->s_reserved_segments, 0, NULL);
- destroy_meta_inode(super->s_master_inode);
- super->s_master_inode = NULL;
kfree(super->s_compressed_je);
kfree(super->s_je);
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index c838c4d7211..b8786264d24 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -504,8 +504,7 @@ extern const struct inode_operations logfs_reg_iops;
extern const struct file_operations logfs_reg_fops;
extern const struct address_space_operations logfs_reg_aops;
int logfs_readpage(struct file *file, struct page *page);
-int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg);
+long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int logfs_fsync(struct file *file, int datasync);
/* gc.c */
@@ -525,13 +524,11 @@ struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino);
struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino);
int logfs_init_inode_cache(void);
void logfs_destroy_inode_cache(void);
-void destroy_meta_inode(struct inode *inode);
void logfs_set_blocks(struct inode *inode, u64 no);
/* these logically belong into inode.c but actually reside in readwrite.c */
int logfs_read_inode(struct inode *inode);
int __logfs_write_inode(struct inode *inode, long flags);
-void logfs_delete_inode(struct inode *inode);
-void logfs_clear_inode(struct inode *inode);
+void logfs_evict_inode(struct inode *inode);
/* journal.c */
void logfs_write_anchor(struct super_block *sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 0718d112a1a..6127baf0e18 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1972,31 +1972,6 @@ static struct page *inode_to_page(struct inode *inode)
return page;
}
-/* Cheaper version of write_inode. All changes are concealed in
- * aliases, which are moved back. No write to the medium happens.
- */
-void logfs_clear_inode(struct inode *inode)
-{
- struct super_block *sb = inode->i_sb;
- struct logfs_inode *li = logfs_inode(inode);
- struct logfs_block *block = li->li_block;
- struct page *page;
-
- /* Only deleted files may be dirty at this point */
- BUG_ON(inode->i_state & I_DIRTY && inode->i_nlink);
- if (!block)
- return;
- if ((logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN)) {
- block->ops->free_block(inode->i_sb, block);
- return;
- }
-
- BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
- page = inode_to_page(inode);
- BUG_ON(!page); /* FIXME: Use emergency page */
- logfs_put_write_page(page);
-}
-
static int do_write_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
@@ -2164,18 +2139,40 @@ static int do_delete_inode(struct inode *inode)
* ZOMBIE inodes have already been deleted before and should remain dead,
* if it weren't for valid checking. No need to kill them again here.
*/
-void logfs_delete_inode(struct inode *inode)
+void logfs_evict_inode(struct inode *inode)
{
+ struct super_block *sb = inode->i_sb;
struct logfs_inode *li = logfs_inode(inode);
+ struct logfs_block *block = li->li_block;
+ struct page *page;
- if (!(li->li_flags & LOGFS_IF_ZOMBIE)) {
- li->li_flags |= LOGFS_IF_ZOMBIE;
- if (i_size_read(inode) > 0)
- logfs_truncate(inode, 0);
- do_delete_inode(inode);
+ if (!inode->i_nlink) {
+ if (!(li->li_flags & LOGFS_IF_ZOMBIE)) {
+ li->li_flags |= LOGFS_IF_ZOMBIE;
+ if (i_size_read(inode) > 0)
+ logfs_truncate(inode, 0);
+ do_delete_inode(inode);
+ }
}
truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
+ end_writeback(inode);
+
+ /* Cheaper version of write_inode. All changes are concealed in
+ * aliases, which are moved back. No write to the medium happens.
+ */
+ /* Only deleted files may be dirty at this point */
+ BUG_ON(inode->i_state & I_DIRTY && inode->i_nlink);
+ if (!block)
+ return;
+ if ((logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN)) {
+ block->ops->free_block(inode->i_sb, block);
+ return;
+ }
+
+ BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
+ page = inode_to_page(inode);
+ BUG_ON(!page); /* FIXME: Use emergency page */
+ logfs_put_write_page(page);
}
void btree_write_block(struct logfs_block *block)
@@ -2272,7 +2269,6 @@ void logfs_cleanup_rw(struct super_block *sb)
{
struct logfs_super *super = logfs_super(sb);
- destroy_meta_inode(super->s_segfile_inode);
logfs_mempool_destroy(super->s_block_pool);
logfs_mempool_destroy(super->s_shadow_pool);
}
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index a9657afb70a..9d518735325 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -929,5 +929,4 @@ void logfs_cleanup_areas(struct super_block *sb)
for_each_area(i)
free_area(super->s_area[i]);
free_area(super->s_journal_area);
- destroy_meta_inode(super->s_mapping_inode);
}
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index d651e10a1e9..5336155c5d8 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -342,24 +342,27 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt)
goto fail;
}
+ /* at that point we know that ->put_super() will be called */
super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
if (!super->s_erase_page)
- goto fail;
+ return -ENOMEM;
memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE);
/* FIXME: check for read-only mounts */
err = logfs_make_writeable(sb);
- if (err)
- goto fail1;
+ if (err) {
+ __free_page(super->s_erase_page);
+ return err;
+ }
log_super("LogFS: Finished mounting\n");
simple_set_mnt(mnt, sb);
return 0;
-fail1:
- __free_page(super->s_erase_page);
fail:
- iput(logfs_super(sb)->s_master_inode);
+ iput(super->s_master_inode);
+ iput(super->s_segfile_inode);
+ iput(super->s_mapping_inode);
return -EIO;
}
@@ -580,10 +583,14 @@ int logfs_get_sb_device(struct file_system_type *type, int flags,
sb->s_flags |= MS_ACTIVE;
err = logfs_get_sb_final(sb, mnt);
if (err)
- goto err1;
- return 0;
+ deactivate_locked_super(sb);
+ return err;
err1:
+ /* no ->s_root, no ->put_super() */
+ iput(super->s_master_inode);
+ iput(super->s_segfile_inode);
+ iput(super->s_mapping_inode);
deactivate_locked_super(sb);
return err;
err0:
diff --git a/fs/mbcache.c b/fs/mbcache.c
index ec88ff3d04a..cf4e6cdfd15 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -79,15 +79,11 @@ EXPORT_SYMBOL(mb_cache_entry_find_next);
struct mb_cache {
struct list_head c_cache_list;
const char *c_name;
- struct mb_cache_op c_op;
atomic_t c_entry_count;
int c_bucket_bits;
-#ifndef MB_CACHE_INDEXES_COUNT
- int c_indexes_count;
-#endif
- struct kmem_cache *c_entry_cache;
+ struct kmem_cache *c_entry_cache;
struct list_head *c_block_hash;
- struct list_head *c_indexes_hash[0];
+ struct list_head *c_index_hash;
};
@@ -101,21 +97,11 @@ static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
-static inline int
-mb_cache_indexes(struct mb_cache *cache)
-{
-#ifdef MB_CACHE_INDEXES_COUNT
- return MB_CACHE_INDEXES_COUNT;
-#else
- return cache->c_indexes_count;
-#endif
-}
-
/*
* What the mbcache registers as to get shrunk dynamically.
*/
-static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
static struct shrinker mb_cache_shrinker = {
.shrink = mb_cache_shrink_fn,
@@ -132,12 +118,9 @@ __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
- int n;
-
if (__mb_cache_entry_is_hashed(ce)) {
list_del_init(&ce->e_block_list);
- for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
- list_del(&ce->e_indexes[n].o_list);
+ list_del(&ce->e_index.o_list);
}
}
@@ -148,16 +131,8 @@ __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
struct mb_cache *cache = ce->e_cache;
mb_assert(!(ce->e_used || ce->e_queued));
- if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
- /* free failed -- put back on the lru list
- for freeing later. */
- spin_lock(&mb_cache_spinlock);
- list_add(&ce->e_lru_list, &mb_cache_lru_list);
- spin_unlock(&mb_cache_spinlock);
- } else {
- kmem_cache_free(cache->c_entry_cache, ce);
- atomic_dec(&cache->c_entry_count);
- }
+ kmem_cache_free(cache->c_entry_cache, ce);
+ atomic_dec(&cache->c_entry_count);
}
@@ -191,31 +166,22 @@ forget:
* This function is called by the kernel memory management when memory
* gets low.
*
+ * @shrink: (ignored)
* @nr_to_scan: Number of objects to scan
* @gfp_mask: (ignored)
*
* Returns the number of objects which are present in the cache.
*/
static int
-mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
+mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
LIST_HEAD(free_list);
- struct list_head *l, *ltmp;
+ struct mb_cache *cache;
+ struct mb_cache_entry *entry, *tmp;
int count = 0;
- spin_lock(&mb_cache_spinlock);
- list_for_each(l, &mb_cache_list) {
- struct mb_cache *cache =
- list_entry(l, struct mb_cache, c_cache_list);
- mb_debug("cache %s (%d)", cache->c_name,
- atomic_read(&cache->c_entry_count));
- count += atomic_read(&cache->c_entry_count);
- }
mb_debug("trying to free %d entries", nr_to_scan);
- if (nr_to_scan == 0) {
- spin_unlock(&mb_cache_spinlock);
- goto out;
- }
+ spin_lock(&mb_cache_spinlock);
while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
struct mb_cache_entry *ce =
list_entry(mb_cache_lru_list.next,
@@ -223,12 +189,15 @@ mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
list_move_tail(&ce->e_lru_list, &free_list);
__mb_cache_entry_unhash(ce);
}
+ list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+ mb_debug("cache %s (%d)", cache->c_name,
+ atomic_read(&cache->c_entry_count));
+ count += atomic_read(&cache->c_entry_count);
+ }
spin_unlock(&mb_cache_spinlock);
- list_for_each_safe(l, ltmp, &free_list) {
- __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
- e_lru_list), gfp_mask);
+ list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+ __mb_cache_entry_forget(entry, gfp_mask);
}
-out:
return (count / 100) * sysctl_vfs_cache_pressure;
}
@@ -242,72 +211,49 @@ out:
* memory was available.
*
* @name: name of the cache (informal)
- * @cache_op: contains the callback called when freeing a cache entry
- * @entry_size: The size of a cache entry, including
- * struct mb_cache_entry
- * @indexes_count: number of additional indexes in the cache. Must equal
- * MB_CACHE_INDEXES_COUNT if the number of indexes is
- * hardwired.
* @bucket_bits: log2(number of hash buckets)
*/
struct mb_cache *
-mb_cache_create(const char *name, struct mb_cache_op *cache_op,
- size_t entry_size, int indexes_count, int bucket_bits)
+mb_cache_create(const char *name, int bucket_bits)
{
- int m=0, n, bucket_count = 1 << bucket_bits;
+ int n, bucket_count = 1 << bucket_bits;
struct mb_cache *cache = NULL;
- if(entry_size < sizeof(struct mb_cache_entry) +
- indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
- return NULL;
-
- cache = kmalloc(sizeof(struct mb_cache) +
- indexes_count * sizeof(struct list_head), GFP_KERNEL);
+ cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
if (!cache)
- goto fail;
+ return NULL;
cache->c_name = name;
- cache->c_op.free = NULL;
- if (cache_op)
- cache->c_op.free = cache_op->free;
atomic_set(&cache->c_entry_count, 0);
cache->c_bucket_bits = bucket_bits;
-#ifdef MB_CACHE_INDEXES_COUNT
- mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
-#else
- cache->c_indexes_count = indexes_count;
-#endif
cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
GFP_KERNEL);
if (!cache->c_block_hash)
goto fail;
for (n=0; n<bucket_count; n++)
INIT_LIST_HEAD(&cache->c_block_hash[n]);
- for (m=0; m<indexes_count; m++) {
- cache->c_indexes_hash[m] = kmalloc(bucket_count *
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!cache->c_indexes_hash[m])
- goto fail;
- for (n=0; n<bucket_count; n++)
- INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
- }
- cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
+ cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
+ GFP_KERNEL);
+ if (!cache->c_index_hash)
+ goto fail;
+ for (n=0; n<bucket_count; n++)
+ INIT_LIST_HEAD(&cache->c_index_hash[n]);
+ cache->c_entry_cache = kmem_cache_create(name,
+ sizeof(struct mb_cache_entry), 0,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
if (!cache->c_entry_cache)
- goto fail;
+ goto fail2;
spin_lock(&mb_cache_spinlock);
list_add(&cache->c_cache_list, &mb_cache_list);
spin_unlock(&mb_cache_spinlock);
return cache;
+fail2:
+ kfree(cache->c_index_hash);
+
fail:
- if (cache) {
- while (--m >= 0)
- kfree(cache->c_indexes_hash[m]);
- kfree(cache->c_block_hash);
- kfree(cache);
- }
+ kfree(cache->c_block_hash);
+ kfree(cache);
return NULL;
}
@@ -356,7 +302,6 @@ mb_cache_destroy(struct mb_cache *cache)
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
- int n;
spin_lock(&mb_cache_spinlock);
list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
@@ -383,8 +328,7 @@ mb_cache_destroy(struct mb_cache *cache)
kmem_cache_destroy(cache->c_entry_cache);
- for (n=0; n < mb_cache_indexes(cache); n++)
- kfree(cache->c_indexes_hash[n]);
+ kfree(cache->c_index_hash);
kfree(cache->c_block_hash);
kfree(cache);
}
@@ -428,17 +372,16 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
*
* @bdev: device the cache entry belongs to
* @block: block number
- * @keys: array of additional keys. There must be indexes_count entries
- * in the array (as specified when creating the cache).
+ * @key: lookup key
*/
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
- sector_t block, unsigned int keys[])
+ sector_t block, unsigned int key)
{
struct mb_cache *cache = ce->e_cache;
unsigned int bucket;
struct list_head *l;
- int error = -EBUSY, n;
+ int error = -EBUSY;
bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
cache->c_bucket_bits);
@@ -453,12 +396,9 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
ce->e_bdev = bdev;
ce->e_block = block;
list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
- for (n=0; n<mb_cache_indexes(cache); n++) {
- ce->e_indexes[n].o_key = keys[n];
- bucket = hash_long(keys[n], cache->c_bucket_bits);
- list_add(&ce->e_indexes[n].o_list,
- &cache->c_indexes_hash[n][bucket]);
- }
+ ce->e_index.o_key = key;
+ bucket = hash_long(key, cache->c_bucket_bits);
+ list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
error = 0;
out:
spin_unlock(&mb_cache_spinlock);
@@ -554,13 +494,12 @@ cleanup:
static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
- int index, struct block_device *bdev, unsigned int key)
+ struct block_device *bdev, unsigned int key)
{
while (l != head) {
struct mb_cache_entry *ce =
- list_entry(l, struct mb_cache_entry,
- e_indexes[index].o_list);
- if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
+ list_entry(l, struct mb_cache_entry, e_index.o_list);
+ if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
DEFINE_WAIT(wait);
if (!list_empty(&ce->e_lru_list))
@@ -602,23 +541,20 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
* returned cache entry is locked for shared access ("multiple readers").
*
* @cache: the cache to search
- * @index: the number of the additonal index to search (0<=index<indexes_count)
* @bdev: the device the cache entry should belong to
* @key: the key in the index
*/
struct mb_cache_entry *
-mb_cache_entry_find_first(struct mb_cache *cache, int index,
- struct block_device *bdev, unsigned int key)
+mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
+ unsigned int key)
{
unsigned int bucket = hash_long(key, cache->c_bucket_bits);
struct list_head *l;
struct mb_cache_entry *ce;
- mb_assert(index < mb_cache_indexes(cache));
spin_lock(&mb_cache_spinlock);
- l = cache->c_indexes_hash[index][bucket].next;
- ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
- index, bdev, key);
+ l = cache->c_index_hash[bucket].next;
+ ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
spin_unlock(&mb_cache_spinlock);
return ce;
}
@@ -639,12 +575,11 @@ mb_cache_entry_find_first(struct mb_cache *cache, int index,
* }
*
* @prev: The previous match
- * @index: the number of the additonal index to search (0<=index<indexes_count)
* @bdev: the device the cache entry should belong to
* @key: the key in the index
*/
struct mb_cache_entry *
-mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
+mb_cache_entry_find_next(struct mb_cache_entry *prev,
struct block_device *bdev, unsigned int key)
{
struct mb_cache *cache = prev->e_cache;
@@ -652,11 +587,9 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
struct list_head *l;
struct mb_cache_entry *ce;
- mb_assert(index < mb_cache_indexes(cache));
spin_lock(&mb_cache_spinlock);
- l = prev->e_indexes[index].o_list.next;
- ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
- index, bdev, key);
+ l = prev->e_index.o_list.next;
+ ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
__mb_cache_entry_release_unlock(prev);
return ce;
}
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 482779fe4e7..3f32bcb0d9b 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -200,13 +200,13 @@ void minix_free_inode(struct inode * inode)
ino = inode->i_ino;
if (ino < 1 || ino > sbi->s_ninodes) {
printk("minix_free_inode: inode 0 or nonexistent inode\n");
- goto out;
+ return;
}
bit = ino & ((1<<k) - 1);
ino >>= k;
if (ino >= sbi->s_imap_blocks) {
printk("minix_free_inode: nonexistent imap in superblock\n");
- goto out;
+ return;
}
minix_clear_inode(inode); /* clear on-disk copy */
@@ -217,8 +217,6 @@ void minix_free_inode(struct inode * inode)
printk("minix_free_inode: bit %lu already cleared\n", bit);
spin_unlock(&bitmap_lock);
mark_buffer_dirty(bh);
- out:
- clear_inode(inode); /* clear in-memory copy */
}
struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 91969589131..085a9262c69 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -75,10 +75,6 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n)
if (!IS_ERR(page))
kmap(page);
return page;
-
-fail:
- dir_put_page(page);
- return ERR_PTR(-EIO);
}
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
@@ -275,8 +271,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode)
got_it:
pos = page_offset(page) + p - (char *)page_address(page);
- err = __minix_write_begin(NULL, page->mapping, pos, sbi->s_dirsize,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
if (err)
goto out_unlock;
memcpy (namx, name, namelen);
@@ -301,8 +296,7 @@ out_unlock:
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
- struct address_space *mapping = page->mapping;
- struct inode *inode = (struct inode*)mapping->host;
+ struct inode *inode = page->mapping->host;
char *kaddr = page_address(page);
loff_t pos = page_offset(page) + (char*)de - kaddr;
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
@@ -310,8 +304,7 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
int err;
lock_page(page);
- err = __minix_write_begin(NULL, mapping, pos, len,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = minix_prepare_chunk(page, pos, len);
if (err == 0) {
if (sbi->s_version == MINIX_V3)
((minix3_dirent *) de)->inode = 0;
@@ -329,16 +322,14 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
int minix_make_empty(struct inode *inode, struct inode *dir)
{
- struct address_space *mapping = inode->i_mapping;
- struct page *page = grab_cache_page(mapping, 0);
+ struct page *page = grab_cache_page(inode->i_mapping, 0);
struct minix_sb_info *sbi = minix_sb(inode->i_sb);
char *kaddr;
int err;
if (!page)
return -ENOMEM;
- err = __minix_write_begin(NULL, mapping, 0, 2 * sbi->s_dirsize,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
if (err) {
unlock_page(page);
goto fail;
@@ -429,8 +420,7 @@ not_empty:
void minix_set_link(struct minix_dir_entry *de, struct page *page,
struct inode *inode)
{
- struct address_space *mapping = page->mapping;
- struct inode *dir = mapping->host;
+ struct inode *dir = page->mapping->host;
struct minix_sb_info *sbi = minix_sb(dir->i_sb);
loff_t pos = page_offset(page) +
(char *)de-(char*)page_address(page);
@@ -438,8 +428,7 @@ void minix_set_link(struct minix_dir_entry *de, struct page *page,
lock_page(page);
- err = __minix_write_begin(NULL, mapping, pos, sbi->s_dirsize,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
if (err == 0) {
if (sbi->s_version == MINIX_V3)
((minix3_dirent *) de)->inode = inode->i_ino;
diff --git a/fs/minix/file.c b/fs/minix/file.c
index d5320ff23fa..4493ce695ab 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -23,7 +23,29 @@ const struct file_operations minix_file_operations = {
.splice_read = generic_file_splice_read,
};
+static int minix_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ error = inode_change_ok(inode, attr);
+ if (error)
+ return error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
const struct inode_operations minix_file_inode_operations = {
.truncate = minix_truncate,
+ .setattr = minix_setattr,
.getattr = minix_getattr,
};
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 756f8c93780..e39d6bf2e8f 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -24,12 +24,17 @@ static int minix_write_inode(struct inode *inode,
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
static int minix_remount (struct super_block * sb, int * flags, char * data);
-static void minix_delete_inode(struct inode *inode)
+static void minix_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- inode->i_size = 0;
- minix_truncate(inode);
- minix_free_inode(inode);
+ if (!inode->i_nlink) {
+ inode->i_size = 0;
+ minix_truncate(inode);
+ }
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
+ if (!inode->i_nlink)
+ minix_free_inode(inode);
}
static void minix_put_super(struct super_block *sb)
@@ -96,7 +101,7 @@ static const struct super_operations minix_sops = {
.alloc_inode = minix_alloc_inode,
.destroy_inode = minix_destroy_inode,
.write_inode = minix_write_inode,
- .delete_inode = minix_delete_inode,
+ .evict_inode = minix_evict_inode,
.put_super = minix_put_super,
.statfs = minix_statfs,
.remount_fs = minix_remount,
@@ -357,20 +362,26 @@ static int minix_readpage(struct file *file, struct page *page)
return block_read_full_page(page,minix_get_block);
}
-int __minix_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
- return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- minix_get_block);
+ return __block_write_begin(page, pos, len, minix_get_block);
}
static int minix_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- *pagep = NULL;
- return __minix_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
+ int ret;
+
+ ret = block_write_begin(mapping, pos, len, flags, pagep,
+ minix_get_block);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t minix_bmap(struct address_space *mapping, sector_t block)
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index 111f34ee9e3..407b1c84911 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -53,9 +53,7 @@ extern int minix_new_block(struct inode * inode);
extern void minix_free_block(struct inode *inode, unsigned long block);
extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
-extern int __minix_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
+extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
extern void V1_minix_truncate(struct inode *);
extern void V2_minix_truncate(struct inode *);
diff --git a/fs/namei.c b/fs/namei.c
index 868d0cb9d47..17ea76bf2fb 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -282,8 +282,7 @@ int inode_permission(struct inode *inode, int mask)
if (retval)
return retval;
- return security_inode_permission(inode,
- mask & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND));
+ return security_inode_permission(inode, mask);
}
/**
@@ -484,13 +483,8 @@ ok:
static __always_inline void set_root(struct nameidata *nd)
{
- if (!nd->root.mnt) {
- struct fs_struct *fs = current->fs;
- read_lock(&fs->lock);
- nd->root = fs->root;
- path_get(&nd->root);
- read_unlock(&fs->lock);
- }
+ if (!nd->root.mnt)
+ get_fs_root(current->fs, &nd->root);
}
static int link_path_walk(const char *, struct nameidata *);
@@ -1016,11 +1010,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, struct namei
nd->path = nd->root;
path_get(&nd->root);
} else if (dfd == AT_FDCWD) {
- struct fs_struct *fs = current->fs;
- read_lock(&fs->lock);
- nd->path = fs->pwd;
- path_get(&fs->pwd);
- read_unlock(&fs->lock);
+ get_fs_pwd(current->fs, &nd->path);
} else {
struct dentry *dentry;
@@ -1484,8 +1474,7 @@ static int handle_truncate(struct path *path)
*/
error = locks_verify_locked(inode);
if (!error)
- error = security_path_truncate(path, 0,
- ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
+ error = security_path_truncate(path);
if (!error) {
error = do_truncate(path->dentry, 0,
ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
@@ -2635,7 +2624,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
{
int error;
int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
- const char *old_name;
+ const unsigned char *old_name;
if (old_dentry->d_inode == new_dentry->d_inode)
return 0;
diff --git a/fs/namespace.c b/fs/namespace.c
index 88058de59c7..2e10cb19c5b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -29,6 +29,7 @@
#include <linux/log2.h>
#include <linux/idr.h>
#include <linux/fs_struct.h>
+#include <linux/fsnotify.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
@@ -150,6 +151,9 @@ struct vfsmount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_share);
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
+#ifdef CONFIG_FSNOTIFY
+ INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
+#endif
#ifdef CONFIG_SMP
mnt->mnt_writers = alloc_percpu(int);
if (!mnt->mnt_writers)
@@ -610,6 +614,7 @@ static inline void __mntput(struct vfsmount *mnt)
* provides barriers, so count_mnt_writers() below is safe. AV
*/
WARN_ON(count_mnt_writers(mnt));
+ fsnotify_vfsmount_delete(mnt);
dput(mnt->mnt_root);
free_vfsmnt(mnt);
deactivate_super(sb);
@@ -783,7 +788,6 @@ static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{ MNT_NOATIME, ",noatime" },
{ MNT_NODIRATIME, ",nodiratime" },
{ MNT_RELATIME, ",relatime" },
- { MNT_STRICTATIME, ",strictatime" },
{ 0, NULL }
};
const struct proc_fs_info *fs_infop;
@@ -1984,7 +1988,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
if (flags & MS_RDONLY)
mnt_flags |= MNT_READONLY;
- flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
+ flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
MS_STRICTATIME);
@@ -2208,10 +2212,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
goto out1;
}
- read_lock(&current->fs->lock);
- root = current->fs->root;
- path_get(&current->fs->root);
- read_unlock(&current->fs->lock);
+ get_fs_root(current->fs, &root);
down_write(&namespace_sem);
mutex_lock(&old.dentry->d_inode->i_mutex);
error = -EINVAL;
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index fa338515402..b4de38cf49f 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -43,7 +43,7 @@
#define NCP_DEFAULT_TIME_OUT 10
#define NCP_DEFAULT_RETRY_COUNT 20
-static void ncp_delete_inode(struct inode *);
+static void ncp_evict_inode(struct inode *);
static void ncp_put_super(struct super_block *);
static int ncp_statfs(struct dentry *, struct kstatfs *);
static int ncp_show_options(struct seq_file *, struct vfsmount *);
@@ -100,7 +100,7 @@ static const struct super_operations ncp_sops =
.alloc_inode = ncp_alloc_inode,
.destroy_inode = ncp_destroy_inode,
.drop_inode = generic_delete_inode,
- .delete_inode = ncp_delete_inode,
+ .evict_inode = ncp_evict_inode,
.put_super = ncp_put_super,
.statfs = ncp_statfs,
.remount_fs = ncp_remount,
@@ -282,19 +282,19 @@ ncp_iget(struct super_block *sb, struct ncp_entry_info *info)
}
static void
-ncp_delete_inode(struct inode *inode)
+ncp_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
if (S_ISDIR(inode->i_mode)) {
- DDPRINTK("ncp_delete_inode: put directory %ld\n", inode->i_ino);
+ DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino);
}
if (ncp_make_closed(inode) != 0) {
/* We can't do anything but complain. */
- printk(KERN_ERR "ncp_delete_inode: could not close\n");
+ printk(KERN_ERR "ncp_evict_inode: could not close\n");
}
- clear_inode(inode);
}
static void ncp_stop_tasks(struct ncp_server *server) {
@@ -728,8 +728,8 @@ out_fput:
out_bdi:
/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
*
- * The previously used put_filp(ncp_filp); was bogous, since
- * it doesn't proper unlocking.
+ * The previously used put_filp(ncp_filp); was bogus, since
+ * it doesn't perform proper unlocking.
*/
fput(ncp_filp);
out:
@@ -924,9 +924,8 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
tmpattr.ia_valid = ATTR_MODE;
tmpattr.ia_mode = attr->ia_mode;
- result = inode_setattr(inode, &tmpattr);
- if (result)
- goto out;
+ setattr_copy(inode, &tmpattr);
+ mark_inode_dirty(inode);
}
}
#endif
@@ -954,15 +953,12 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
result = ncp_make_closed(inode);
if (result)
goto out;
- {
- struct iattr tmpattr;
-
- tmpattr.ia_valid = ATTR_SIZE;
- tmpattr.ia_size = attr->ia_size;
-
- result = inode_setattr(inode, &tmpattr);
+
+ if (attr->ia_size != i_size_read(inode)) {
+ result = vmtruncate(inode, attr->ia_size);
if (result)
goto out;
+ mark_inode_dirty(inode);
}
}
if ((attr->ia_valid & ATTR_CTIME) != 0) {
@@ -1002,8 +998,12 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
NCP_FINFO(inode)->nwattr = info.attributes;
#endif
}
- if (!result)
- result = inode_setattr(inode, attr);
+ if (result)
+ goto out;
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
out:
unlock_kernel();
return result;
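The ncp_delete_inode() to ncp_evict_inode() rename above tracks the VFS inode-teardown rework: ->evict_inode now covers both the delete and plain-eviction paths, and the filesystem calls end_writeback() itself instead of the VFS calling clear_inode(). A minimal skeleton of the new contract, with illustrative names rather than ncpfs code:

#include <linux/fs.h>
#include <linux/mm.h>

static void example_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);	/* drop the page cache */
	end_writeback(inode);		/* was the tail of clear_inode() */
	/* filesystem-specific teardown follows, e.g. closing the
	 * server-side file as ncp_make_closed() does above */
}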
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 023c03d0207..84a8cfc4e38 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -20,7 +20,6 @@
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
-#include <linux/smp_lock.h>
#include <linux/ncp_fs.h>
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index a43d07e7b92..26a510a7be0 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -61,8 +61,8 @@ config NFS_V3_ACL
If unsure, say N.
config NFS_V4
- bool "NFS client support for NFS version 4 (EXPERIMENTAL)"
- depends on NFS_FS && EXPERIMENTAL
+ bool "NFS client support for NFS version 4"
+ depends on NFS_FS
select RPCSEC_GSS_KRB5
help
This option enables support for version 4 of the NFS protocol
@@ -72,16 +72,16 @@ config NFS_V4
space programs which can be found in the Linux nfs-utils package,
available from http://linux-nfs.org/.
- If unsure, say N.
+ If unsure, say Y.
config NFS_V4_1
- bool "NFS client support for NFSv4.1 (DEVELOPER ONLY)"
+ bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
depends on NFS_V4 && EXPERIMENTAL
help
This option enables support for minor version 1 of the NFSv4 protocol
(draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
- Unless you're an NFS developer, say N.
+ If unsure, say N.
config ROOT_NFS
bool "Root file system on NFS"
@@ -100,3 +100,20 @@ config NFS_FSCACHE
help
Say Y here if you want NFS data to be cached locally on disc through
the general filesystem cache manager
+
+config NFS_USE_LEGACY_DNS
+ bool "Use the legacy NFS DNS resolver"
+ depends on NFS_V4
+ help
+ The kernel now provides a method for translating a host name into an
+ IP address. Select Y here if you would rather use your own DNS
+ resolver script.
+
+ If unsure, say N.
+
+config NFS_USE_KERNEL_DNS
+ bool
+ depends on NFS_V4 && !NFS_USE_LEGACY_DNS
+ select DNS_RESOLVER
+ select KEYS
+ default y
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 36dfdae9512..e17b49e2eab 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -45,7 +45,7 @@ unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
#define NFS_CALLBACK_MAXPORTNR (65535U)
-static int param_set_portnr(const char *val, struct kernel_param *kp)
+static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
unsigned long num;
int ret;
@@ -58,11 +58,10 @@ static int param_set_portnr(const char *val, struct kernel_param *kp)
*((unsigned int *)kp->arg) = num;
return 0;
}
-
-static int param_get_portnr(char *buffer, struct kernel_param *kp)
-{
- return param_get_uint(buffer, kp);
-}
+static struct kernel_param_ops param_ops_portnr = {
+ .set = param_set_portnr,
+ .get = param_get_uint,
+};
#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
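This hunk converts the callback port parameter from a get/set function pair to the kernel_param_ops interface; module_param_named(name, var, type, perm) expands to references to param_ops_<type> and param_check_<type>, which is why the hunk defines an ops structure and a __param_check macro for the made-up "portnr" type. A sketch of the same pattern with hypothetical names (my_port, param_ops_myport):

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static unsigned int my_port = 2049;

static int param_set_myport(const char *val, const struct kernel_param *kp)
{
	unsigned long num;

	/* reject garbage and out-of-range values */
	if (!val || strict_strtoul(val, 0, &num) || num > 65535)
		return -EINVAL;
	*(unsigned int *)kp->arg = num;
	return 0;
}

static struct kernel_param_ops param_ops_myport = {
	.set = param_set_myport,
	.get = param_get_uint,		/* reuse the stock getter */
};
#define param_check_myport(name, p) __param_check(name, p, unsigned int)

module_param_named(port, my_port, myport, 0644);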
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index a08770a7e85..930d10fecda 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -37,8 +37,8 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
if (inode == NULL)
goto out_putclient;
nfsi = NFS_I(inode);
- down_read(&nfsi->rwsem);
- delegation = nfsi->delegation;
+ rcu_read_lock();
+ delegation = rcu_dereference(nfsi->delegation);
if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
goto out_iput;
res->size = i_size_read(inode);
@@ -53,7 +53,7 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
args->bitmap[1];
res->status = 0;
out_iput:
- up_read(&nfsi->rwsem);
+ rcu_read_unlock();
iput(inode);
out_putclient:
nfs_put_client(clp);
@@ -62,16 +62,6 @@ out:
return res->status;
}
-static int (*nfs_validate_delegation_stateid(struct nfs_client *clp))(struct nfs_delegation *, const nfs4_stateid *)
-{
-#if defined(CONFIG_NFS_V4_1)
- if (clp->cl_minorversion > 0)
- return nfs41_validate_delegation_stateid;
-#endif
- return nfs4_validate_delegation_stateid;
-}
-
-
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
{
struct nfs_client *clp;
@@ -92,8 +82,7 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
inode = nfs_delegation_find_inode(clp, &args->fh);
if (inode != NULL) {
/* Set up a helper thread to actually return the delegation */
- switch (nfs_async_inode_return_delegation(inode, &args->stateid,
- nfs_validate_delegation_stateid(clp))) {
+ switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
case 0:
res = 0;
break;
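The getattr callback above stops taking nfsi->rwsem and instead inspects the delegation under an RCU read-side critical section. The generic shape of that pattern, sketched with made-up types (the writer side must publish with rcu_assign_pointer() and free only after a grace period):

#include <linux/rcupdate.h>

struct thing { int type; };
static struct thing *global_thing;	/* written with rcu_assign_pointer() */

static int read_thing_type(void)
{
	struct thing *t;
	int type = -1;

	rcu_read_lock();
	t = rcu_dereference(global_thing);	/* may be NULL */
	if (t)
		type = t->type;	/* only safe inside the read section */
	rcu_read_unlock();
	return type;
}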
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 7ec9b34a59f..4e7df2adb21 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -150,6 +150,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_minorversion = cl_init->minorversion;
+ clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
#endif
cred = rpc_lookup_machine_cred();
if (!IS_ERR(cred))
@@ -178,7 +179,7 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp)
clp->cl_session = NULL;
}
- clp->cl_call_sync = _nfs4_call_sync;
+ clp->cl_mvops = nfs_v4_minor_ops[0];
#endif /* CONFIG_NFS_V4_1 */
}
@@ -188,7 +189,7 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp)
static void nfs4_destroy_callback(struct nfs_client *clp)
{
if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down(clp->cl_minorversion);
+ nfs_callback_down(clp->cl_mvops->minor_version);
}
static void nfs4_shutdown_client(struct nfs_client *clp)
@@ -1126,7 +1127,7 @@ static int nfs4_init_callback(struct nfs_client *clp)
return error;
}
- error = nfs_callback_up(clp->cl_minorversion,
+ error = nfs_callback_up(clp->cl_mvops->minor_version,
clp->cl_rpcclient->cl_xprt);
if (error < 0) {
dprintk("%s: failed to start callback. Error = %d\n",
@@ -1143,10 +1144,8 @@ static int nfs4_init_callback(struct nfs_client *clp)
*/
static int nfs4_init_client_minor_version(struct nfs_client *clp)
{
- clp->cl_call_sync = _nfs4_call_sync;
-
#if defined(CONFIG_NFS_V4_1)
- if (clp->cl_minorversion) {
+ if (clp->cl_mvops->minor_version) {
struct nfs4_session *session = NULL;
/*
* Create the session and mark it expired.
@@ -1158,7 +1157,13 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
return -ENOMEM;
clp->cl_session = session;
- clp->cl_call_sync = _nfs4_call_sync_session;
+ /*
+ * The create session reply races with the server back
+ * channel probe. Mark the client NFS_CS_SESSION_INITING
+ * so that the client back channel can find the
+ * nfs_client struct
+ */
+ clp->cl_cons_state = NFS_CS_SESSION_INITING;
}
#endif /* CONFIG_NFS_V4_1 */
@@ -1286,6 +1291,55 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
#endif /* CONFIG_NFS_V4_1 */
}
+static int nfs4_server_common_setup(struct nfs_server *server,
+ struct nfs_fh *mntfh)
+{
+ struct nfs_fattr *fattr;
+ int error;
+
+ BUG_ON(!server->nfs_client);
+ BUG_ON(!server->nfs_client->rpc_ops);
+ BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ return -ENOMEM;
+
+ /* We must ensure the session is initialised first */
+ error = nfs4_init_session(server);
+ if (error < 0)
+ goto out;
+
+ /* Probe the root fh to retrieve its FSID and filehandle */
+ error = nfs4_get_rootfh(server, mntfh);
+ if (error < 0)
+ goto out;
+
+ dprintk("Server FSID: %llx:%llx\n",
+ (unsigned long long) server->fsid.major,
+ (unsigned long long) server->fsid.minor);
+ dprintk("Mount FH: %d\n", mntfh->size);
+
+ nfs4_session_set_rwsize(server);
+
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
+ if (error < 0)
+ goto out;
+
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
+ spin_lock(&nfs_client_lock);
+ list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
+ list_add_tail(&server->master_link, &nfs_volume_list);
+ spin_unlock(&nfs_client_lock);
+
+ server->mount_time = jiffies;
+out:
+ nfs_free_fattr(fattr);
+ return error;
+}
+
/*
* Create a version 4 volume record
*/
@@ -1346,7 +1400,6 @@ error:
struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
struct nfs_fh *mntfh)
{
- struct nfs_fattr *fattr;
struct nfs_server *server;
int error;
@@ -1356,55 +1409,19 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
if (!server)
return ERR_PTR(-ENOMEM);
- error = -ENOMEM;
- fattr = nfs_alloc_fattr();
- if (fattr == NULL)
- goto error;
-
/* set up the general RPC client */
error = nfs4_init_server(server, data);
if (error < 0)
goto error;
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
- error = nfs4_init_session(server);
- if (error < 0)
- goto error;
-
- /* Probe the root fh to retrieve its FSID */
- error = nfs4_get_rootfh(server, mntfh);
- if (error < 0)
- goto error;
-
- dprintk("Server FSID: %llx:%llx\n",
- (unsigned long long) server->fsid.major,
- (unsigned long long) server->fsid.minor);
- dprintk("Mount FH: %d\n", mntfh->size);
-
- nfs4_session_set_rwsize(server);
-
- error = nfs_probe_fsinfo(server, mntfh, fattr);
+ error = nfs4_server_common_setup(server, mntfh);
if (error < 0)
goto error;
- if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
- server->namelen = NFS4_MAXNAMLEN;
-
- spin_lock(&nfs_client_lock);
- list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
- list_add_tail(&server->master_link, &nfs_volume_list);
- spin_unlock(&nfs_client_lock);
-
- server->mount_time = jiffies;
dprintk("<-- nfs4_create_server() = %p\n", server);
- nfs_free_fattr(fattr);
return server;
error:
- nfs_free_fattr(fattr);
nfs_free_server(server);
dprintk("<-- nfs4_create_server() = error %d\n", error);
return ERR_PTR(error);
@@ -1418,7 +1435,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
{
struct nfs_client *parent_client;
struct nfs_server *server, *parent_server;
- struct nfs_fattr *fattr;
int error;
dprintk("--> nfs4_create_referral_server()\n");
@@ -1427,11 +1443,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (!server)
return ERR_PTR(-ENOMEM);
- error = -ENOMEM;
- fattr = nfs_alloc_fattr();
- if (fattr == NULL)
- goto error;
-
parent_server = NFS_SB(data->sb);
parent_client = parent_server->nfs_client;
@@ -1448,7 +1459,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
data->authflavor,
parent_server->client->cl_xprt->prot,
parent_server->client->cl_timeout,
- parent_client->cl_minorversion);
+ parent_client->cl_mvops->minor_version);
if (error < 0)
goto error;
@@ -1456,40 +1467,14 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
if (error < 0)
goto error;
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
- /* Probe the root fh to retrieve its FSID and filehandle */
- error = nfs4_get_rootfh(server, mntfh);
- if (error < 0)
- goto error;
-
- /* probe the filesystem info for this server filesystem */
- error = nfs_probe_fsinfo(server, mntfh, fattr);
+ error = nfs4_server_common_setup(server, mntfh);
if (error < 0)
goto error;
- if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
- server->namelen = NFS4_MAXNAMLEN;
-
- dprintk("Referral FSID: %llx:%llx\n",
- (unsigned long long) server->fsid.major,
- (unsigned long long) server->fsid.minor);
-
- spin_lock(&nfs_client_lock);
- list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
- list_add_tail(&server->master_link, &nfs_volume_list);
- spin_unlock(&nfs_client_lock);
-
- server->mount_time = jiffies;
-
- nfs_free_fattr(fattr);
dprintk("<-- nfs_create_referral_server() = %p\n", server);
return server;
error:
- nfs_free_fattr(fattr);
nfs_free_server(server);
dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
return ERR_PTR(error);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 30163454397..b9c3c43cea1 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -268,14 +268,6 @@ out:
return status;
}
-/* Sync all data to disk upon delegation return */
-static void nfs_msync_inode(struct inode *inode)
-{
- filemap_fdatawrite(inode->i_mapping);
- nfs_wb_all(inode);
- filemap_fdatawait(inode->i_mapping);
-}
-
/*
* Basic procedure for returning a delegation to the server
*/
@@ -367,7 +359,7 @@ int nfs_inode_return_delegation(struct inode *inode)
delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
spin_unlock(&clp->cl_lock);
if (delegation != NULL) {
- nfs_msync_inode(inode);
+ nfs_wb_all(inode);
err = __nfs_inode_return_delegation(inode, delegation, 1);
}
}
@@ -471,9 +463,7 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
/*
* Asynchronous delegation recall!
*/
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
- int (*validate_stateid)(struct nfs_delegation *delegation,
- const nfs4_stateid *stateid))
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_delegation *delegation;
@@ -481,7 +471,7 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
- if (!validate_stateid(delegation, stateid)) {
+ if (!clp->cl_mvops->validate_stateid(delegation, stateid)) {
rcu_read_unlock();
return -ENOENT;
}
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 69e7b814012..2026304bda1 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -34,9 +34,7 @@ enum {
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
int nfs_inode_return_delegation(struct inode *inode);
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
- int (*validate_stateid)(struct nfs_delegation *delegation,
- const nfs4_stateid *stateid));
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_return_delegation_noreclaim(struct inode *inode);
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 782b431ef91..29539ceeb74 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1652,16 +1652,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
- /*
- * ... prune child dentries and writebacks if needed.
- */
- if (atomic_read(&old_dentry->d_count) > 1) {
- if (S_ISREG(old_inode->i_mode))
- nfs_wb_all(old_inode);
- shrink_dcache_parent(old_dentry);
- }
nfs_inode_return_delegation(old_inode);
-
if (new_inode != NULL)
nfs_inode_return_delegation(new_inode);
@@ -1710,7 +1701,7 @@ static void nfs_access_free_list(struct list_head *head)
}
}
-int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
LIST_HEAD(head);
struct nfs_inode *nfsi;
@@ -1953,7 +1944,7 @@ int nfs_permission(struct inode *inode, int mask)
if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
goto out;
/* Is this sys_access() ? */
- if (mask & MAY_ACCESS)
+ if (mask & (MAY_ACCESS | MAY_CHDIR))
goto force_lookup;
switch (inode->i_mode & S_IFMT) {
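nfs_access_cache_shrinker() grows a struct shrinker * argument here to match the updated shrinker callback signature. A sketch of registering a shrinker against that signature, with illustrative names and counts:

#include <linux/mm.h>

static int example_shrink(struct shrinker *shrink, int nr_to_scan,
			  gfp_t gfp_mask)
{
	if (nr_to_scan == 0)
		return 42;	/* nr_to_scan == 0 means "report count" */
	/* otherwise free up to nr_to_scan cached objects ... */
	return 42 - nr_to_scan;	/* objects remaining (illustrative) */
}

static struct shrinker example_shrinker = {
	.shrink = example_shrink,
	.seeks = DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) at init time,
 * unregister_shrinker(&example_shrinker) on teardown */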
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index ad4cd31d605..064a8096167 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -69,6 +69,7 @@ struct nfs_direct_req {
/* I/O parameters */
struct nfs_open_context *ctx; /* file open context info */
+ struct nfs_lock_context *l_ctx; /* Lock context info */
struct kiocb * iocb; /* controlling i/o request */
struct inode * inode; /* target file of i/o */
@@ -160,6 +161,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
INIT_LIST_HEAD(&dreq->rewrite_list);
dreq->iocb = NULL;
dreq->ctx = NULL;
+ dreq->l_ctx = NULL;
spin_lock_init(&dreq->lock);
atomic_set(&dreq->io_count, 0);
dreq->count = 0;
@@ -173,6 +175,8 @@ static void nfs_direct_req_free(struct kref *kref)
{
struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
+ if (dreq->l_ctx != NULL)
+ nfs_put_lock_context(dreq->l_ctx);
if (dreq->ctx != NULL)
put_nfs_open_context(dreq->ctx);
kmem_cache_free(nfs_direct_cachep, dreq);
@@ -336,6 +340,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.context = ctx;
+ data->args.lock_context = dreq->l_ctx;
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
@@ -416,24 +421,28 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
- ssize_t result = 0;
+ ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct nfs_direct_req *dreq;
dreq = nfs_direct_req_alloc();
- if (!dreq)
- return -ENOMEM;
+ if (dreq == NULL)
+ goto out;
dreq->inode = inode;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
+ dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
+ if (dreq->l_ctx == NULL)
+ goto out_release;
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
if (!result)
result = nfs_direct_wait(dreq);
+out_release:
nfs_direct_req_release(dreq);
-
+out:
return result;
}
@@ -574,6 +583,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->args.offset = 0;
data->args.count = 0;
data->args.context = dreq->ctx;
+ data->args.lock_context = dreq->l_ctx;
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
@@ -761,6 +771,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.context = ctx;
+ data->args.lock_context = dreq->l_ctx;
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
@@ -845,7 +856,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos,
size_t count)
{
- ssize_t result = 0;
+ ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct nfs_direct_req *dreq;
size_t wsize = NFS_SERVER(inode)->wsize;
@@ -853,7 +864,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
dreq = nfs_direct_req_alloc();
if (!dreq)
- return -ENOMEM;
+ goto out;
nfs_alloc_commit_data(dreq);
if (dreq->commit_data == NULL || count < wsize)
@@ -861,14 +872,18 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
dreq->inode = inode;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
+ dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
+ if (dreq->l_ctx == NULL)
+ goto out_release;
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
if (!result)
result = nfs_direct_wait(dreq);
+out_release:
nfs_direct_req_release(dreq);
-
+out:
return result;
}
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 76fd235d002..dba50a5625d 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -6,6 +6,29 @@
* Resolves DNS hostnames into valid ip addresses
*/
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/dns_resolver.h>
+
+ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
+ struct sockaddr *sa, size_t salen)
+{
+ ssize_t ret;
+ char *ip_addr = NULL;
+ int ip_len;
+
+ ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL);
+ if (ip_len > 0)
+ ret = rpc_pton(ip_addr, ip_len, sa, salen);
+ else
+ ret = -ESRCH;
+ kfree(ip_addr);
+ return ret;
+}
+
+#else
+
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/kmod.h>
@@ -346,3 +369,4 @@ void nfs_dns_resolver_destroy(void)
nfs_cache_unregister(&nfs_dns_resolve);
}
+#endif
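The kernel-DNS variant above queries the upcall-backed resolver with dns_query() and parses the returned text address via rpc_pton(). A hypothetical caller, showing the calling convention implied by the code (positive sockaddr length on success, -ESRCH when the query fails, zero if the reply cannot be parsed):

#include <linux/socket.h>
#include <linux/string.h>
#include "dns_resolve.h"	/* for nfs_dns_resolve_name() */

static int example_lookup(void)
{
	struct sockaddr_storage addr;
	char name[] = "server.example.com";	/* hypothetical host */
	ssize_t len;

	len = nfs_dns_resolve_name(name, strlen(name),
				   (struct sockaddr *)&addr, sizeof(addr));
	if (len <= 0)
		return len ? (int)len : -EINVAL;
	/* addr now holds an AF_INET/AF_INET6 address of length len */
	return 0;
}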
diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h
index a3f0938babf..199bb5543a9 100644
--- a/fs/nfs/dns_resolve.h
+++ b/fs/nfs/dns_resolve.h
@@ -6,8 +6,20 @@
#define NFS_DNS_HOSTNAME_MAXLEN (128)
+
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+static inline int nfs_dns_resolver_init(void)
+{
+ return 0;
+}
+
+static inline void nfs_dns_resolver_destroy(void)
+{}
+#else
extern int nfs_dns_resolver_init(void);
extern void nfs_dns_resolver_destroy(void);
+#endif
+
extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
struct sockaddr *sa, size_t salen);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 36a5e74f51b..2d141a74ae8 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -27,6 +27,7 @@
#include <linux/pagemap.h>
#include <linux/aio.h>
#include <linux/gfp.h>
+#include <linux/swap.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -202,37 +203,11 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
}
/*
- * Helper for nfs_file_flush() and nfs_file_fsync()
- *
- * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
- * disk, but it retrieves and clears ctx->error after synching, despite
- * the two being set at the same time in nfs_context_set_write_error().
- * This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occurred, and hence cause it to
- * fall back to doing a synchronous write.
- */
-static int nfs_do_fsync(struct nfs_open_context *ctx, struct inode *inode)
-{
- int have_error, status;
- int ret = 0;
-
- have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
- status = nfs_wb_all(inode);
- have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
- if (have_error)
- ret = xchg(&ctx->error, 0);
- if (!ret)
- ret = status;
- return ret;
-}
-
-/*
* Flush all dirty pages, and check for write errors.
*/
static int
nfs_file_flush(struct file *file, fl_owner_t id)
{
- struct nfs_open_context *ctx = nfs_file_open_context(file);
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
@@ -245,7 +220,7 @@ nfs_file_flush(struct file *file, fl_owner_t id)
return 0;
/* Flush writes to the server and return any errors */
- return nfs_do_fsync(ctx, inode);
+ return vfs_fsync(file, 0);
}
static ssize_t
@@ -320,6 +295,13 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
* Flush any dirty pages for this process, and check for write errors.
* The return status from this call provides a reliable indication of
* whether any write errors occurred for this process.
+ *
+ * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
+ * disk, but it retrieves and clears ctx->error after synching, despite
+ * the two being set at the same time in nfs_context_set_write_error().
+ * This is because the former is used to notify the _next_ call to
+ * nfs_file_write() that a write error occurred, and hence cause it to
+ * fall back to doing a synchronous write.
*/
static int
nfs_file_fsync(struct file *file, int datasync)
@@ -327,13 +309,23 @@ nfs_file_fsync(struct file *file, int datasync)
struct dentry *dentry = file->f_path.dentry;
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct inode *inode = dentry->d_inode;
+ int have_error, status;
+ int ret = 0;
+
dprintk("NFS: fsync file(%s/%s) datasync %d\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
datasync);
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
- return nfs_do_fsync(ctx, inode);
+ have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ status = nfs_commit_inode(inode, FLUSH_SYNC);
+ have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ if (have_error)
+ ret = xchg(&ctx->error, 0);
+ if (!ret)
+ ret = status;
+ return ret;
}
/*
@@ -493,11 +485,19 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
*/
static int nfs_release_page(struct page *page, gfp_t gfp)
{
+ struct address_space *mapping = page->mapping;
+
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
/* Only do I/O if gfp is a superset of GFP_KERNEL */
- if ((gfp & GFP_KERNEL) == GFP_KERNEL)
- nfs_wb_page(page->mapping->host, page);
+ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+ int how = FLUSH_SYNC;
+
+ /* Don't let kswapd deadlock waiting for OOM RPC calls */
+ if (current_is_kswapd())
+ how = 0;
+ nfs_commit_inode(mapping->host, how);
+ }
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
return 0;
@@ -639,7 +639,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
/* Return error values for O_DSYNC and IS_SYNC() */
if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
- int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
+ int err = vfs_fsync(iocb->ki_filp, 0);
if (err < 0)
result = err;
}
@@ -675,7 +675,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
written = ret;
if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
- int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
+ int err = vfs_fsync(filp, 0);
if (err < 0)
ret = err;
}
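With nfs_do_fsync() gone, the flush and O_DSYNC write paths above all funnel through vfs_fsync(), which lands back in nfs_file_fsync(). Roughly what that generic entry point does in this era, sketched under the assumption of the two-argument ->fsync signature shown above:

#include <linux/fs.h>

static int example_vfs_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret, err;

	if (!file->f_op || !file->f_op->fsync)
		return -EINVAL;
	ret = filemap_write_and_wait(file->f_mapping);	/* flush dirty pages */
	mutex_lock(&inode->i_mutex);
	err = file->f_op->fsync(file, datasync);	/* nfs_file_fsync() here */
	if (!ret)
		ret = err;
	mutex_unlock(&inode->i_mutex);
	return ret;
}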
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 7428f7d6273..a70e446e160 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -146,7 +146,7 @@ int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
goto out;
}
- if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_MODE)
+ if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_TYPE)
|| !S_ISDIR(fsinfo.fattr->mode)) {
printk(KERN_ERR "nfs4_get_rootfh:"
" getroot encountered non-directory\n");
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 099b3518fee..7d2d6c72aa7 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -98,7 +98,7 @@ u64 nfs_compat_user_ino64(u64 fileid)
return ino;
}
-void nfs_clear_inode(struct inode *inode)
+static void nfs_clear_inode(struct inode *inode)
{
/*
* The following should never happen...
@@ -110,6 +110,13 @@ void nfs_clear_inode(struct inode *inode)
nfs_fscache_release_inode_cookie(inode);
}
+void nfs_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+ nfs_clear_inode(inode);
+}
+
/**
* nfs_sync_mapping - helper to flush all mmapped dirty data to disk
*/
@@ -413,10 +420,8 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
/* Write all dirty data */
- if (S_ISREG(inode->i_mode)) {
- filemap_write_and_wait(inode->i_mapping);
+ if (S_ISREG(inode->i_mode))
nfs_wb_all(inode);
- }
fattr = nfs_alloc_fattr();
if (fattr == NULL)
@@ -530,6 +535,68 @@ out:
return err;
}
+static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
+{
+ atomic_set(&l_ctx->count, 1);
+ l_ctx->lockowner = current->files;
+ l_ctx->pid = current->tgid;
+ INIT_LIST_HEAD(&l_ctx->list);
+}
+
+static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
+{
+ struct nfs_lock_context *pos;
+
+ list_for_each_entry(pos, &ctx->lock_context.list, list) {
+ if (pos->lockowner != current->files)
+ continue;
+ if (pos->pid != current->tgid)
+ continue;
+ atomic_inc(&pos->count);
+ return pos;
+ }
+ return NULL;
+}
+
+struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
+{
+ struct nfs_lock_context *res, *new = NULL;
+ struct inode *inode = ctx->path.dentry->d_inode;
+
+ spin_lock(&inode->i_lock);
+ res = __nfs_find_lock_context(ctx);
+ if (res == NULL) {
+ spin_unlock(&inode->i_lock);
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (new == NULL)
+ return NULL;
+ nfs_init_lock_context(new);
+ spin_lock(&inode->i_lock);
+ res = __nfs_find_lock_context(ctx);
+ if (res == NULL) {
+ list_add_tail(&new->list, &ctx->lock_context.list);
+ new->open_context = ctx;
+ res = new;
+ new = NULL;
+ }
+ }
+ spin_unlock(&inode->i_lock);
+ kfree(new);
+ return res;
+}
+
+void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
+{
+ struct nfs_open_context *ctx = l_ctx->open_context;
+ struct inode *inode = ctx->path.dentry->d_inode;
+
+ if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
+ return;
+ list_del(&l_ctx->list);
+ spin_unlock(&inode->i_lock);
+ kfree(l_ctx);
+}
+
/**
* nfs_close_context - Common close_context() routine NFSv2/v3
* @ctx: pointer to context
@@ -566,11 +633,11 @@ static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct
path_get(&ctx->path);
ctx->cred = get_rpccred(cred);
ctx->state = NULL;
- ctx->lockowner = current->files;
ctx->flags = 0;
ctx->error = 0;
ctx->dir_cookie = 0;
- atomic_set(&ctx->count, 1);
+ nfs_init_lock_context(&ctx->lock_context);
+ ctx->lock_context.open_context = ctx;
}
return ctx;
}
@@ -578,7 +645,7 @@ static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct
struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
{
if (ctx != NULL)
- atomic_inc(&ctx->count);
+ atomic_inc(&ctx->lock_context.count);
return ctx;
}
@@ -586,7 +653,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
{
struct inode *inode = ctx->path.dentry->d_inode;
- if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
+ if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
return;
list_del(&ctx->list);
spin_unlock(&inode->i_lock);
@@ -1338,8 +1405,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
* to open() calls that passed nfs_atomic_lookup, but failed to call
* nfs_open().
*/
-void nfs4_clear_inode(struct inode *inode)
+void nfs4_evict_inode(struct inode *inode)
{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
/* If we are holding a delegation, return it! */
nfs_inode_return_delegation_noreclaim(inode);
/* First call standard NFS clear_inode() code */
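nfs_get_lock_context() above uses the classic optimistic-allocation dance: search under the lock, drop the lock to kmalloc() (which may sleep), then retake the lock and search again before inserting, because another thread may have added the entry in the window. The same pattern in a generic sketch with made-up types, refcounting omitted:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item { struct list_head list; int key; };

static struct item *get_item(spinlock_t *lock, struct list_head *head, int key)
{
	struct item *pos, *new = NULL;

	spin_lock(lock);
	list_for_each_entry(pos, head, list)
		if (pos->key == key)
			goto out;		/* found on the first pass */
	spin_unlock(lock);

	new = kmalloc(sizeof(*new), GFP_KERNEL);	/* may sleep */
	if (!new)
		return NULL;
	new->key = key;

	spin_lock(lock);
	list_for_each_entry(pos, head, list)
		if (pos->key == key) {	/* raced: someone else inserted */
			kfree(new);
			goto out;
		}
	list_add_tail(&new->list, head);
	pos = new;
out:
	spin_unlock(lock);
	return pos;
}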
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index d8bd619e386..c961bc92c10 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -205,16 +205,17 @@ extern struct rpc_procinfo nfs4_procedures[];
void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
/* dir.c */
-extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
+extern int nfs_access_cache_shrinker(struct shrinker *shrink,
+ int nr_to_scan, gfp_t gfp_mask);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_destroy_inode(struct inode *);
extern int nfs_write_inode(struct inode *, struct writeback_control *);
-extern void nfs_clear_inode(struct inode *);
+extern void nfs_evict_inode(struct inode *);
#ifdef CONFIG_NFS_V4
-extern void nfs4_clear_inode(struct inode *);
+extern void nfs4_evict_inode(struct inode *);
#endif
void nfs_zap_acl_cache(struct inode *inode);
extern int nfs_wait_bit_killable(void *word);
@@ -369,10 +370,9 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
* Helper for restarting RPC calls in the possible presence of NFSv4.1
* sessions.
*/
-static inline void nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
+static inline int nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
{
if (nfs4_has_session(clp))
- rpc_restart_call_prepare(task);
- else
- rpc_restart_call(task);
+ return rpc_restart_call_prepare(task);
+ return rpc_restart_call(task);
}
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 81cf1425791..db8846a0e82 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -233,7 +233,7 @@ nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs
static int
nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
u32 offset = (u32)args->offset;
u32 count = args->count;
@@ -393,8 +393,7 @@ nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *arg
static int
nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
{
- struct rpc_task *task = req->rq_task;
- struct rpc_auth *auth = task->tk_msg.rpc_cred->cr_auth;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
u32 count = args->count;
@@ -575,7 +574,7 @@ nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
static int
nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
p = xdr_encode_fhandle(p, args->fh);
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 75dcfc7da36..9769704f8ce 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -330,7 +330,7 @@ nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *arg
static int
nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
u32 count = args->count;
@@ -471,7 +471,7 @@ nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args)
static int
nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
u32 count = args->count;
@@ -675,7 +675,7 @@ static int
nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p,
struct nfs3_getaclargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
p = xdr_encode_fhandle(p, args->fh);
@@ -802,7 +802,7 @@ nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
static int
nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
unsigned int replen;
p = xdr_encode_fhandle(p, args->fh);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c538c6106e1..311e15cc8af 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -45,10 +45,29 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
NFS4CLNT_SESSION_RESET,
- NFS4CLNT_SESSION_DRAINING,
NFS4CLNT_RECALL_SLOT,
};
+enum nfs4_session_state {
+ NFS4_SESSION_INITING,
+ NFS4_SESSION_DRAINING,
+};
+
+struct nfs4_minor_version_ops {
+ u32 minor_version;
+
+ int (*call_sync)(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+ int (*validate_stateid)(struct nfs_delegation *,
+ const nfs4_stateid *);
+ const struct nfs4_state_recovery_ops *reboot_recovery_ops;
+ const struct nfs4_state_recovery_ops *nograce_recovery_ops;
+ const struct nfs4_state_maintenance_ops *state_renewal_ops;
+};
+
/*
* struct rpc_sequence ensures that RPC calls are sent in the exact
* order that they appear on the list.
@@ -89,7 +108,6 @@ struct nfs_unique_id {
*/
struct nfs4_state_owner {
struct nfs_unique_id so_owner_id;
- struct nfs_client *so_client;
struct nfs_server *so_server;
struct rb_node so_client_node;
@@ -99,7 +117,6 @@ struct nfs4_state_owner {
atomic_t so_count;
unsigned long so_flags;
struct list_head so_states;
- struct list_head so_delegations;
struct nfs_seqid_counter so_seqid;
struct rpc_sequence so_sequence;
};
@@ -125,10 +142,20 @@ enum {
* LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
*/
+struct nfs4_lock_owner {
+ unsigned int lo_type;
+#define NFS4_ANY_LOCK_TYPE (0U)
+#define NFS4_FLOCK_LOCK_TYPE (1U << 0)
+#define NFS4_POSIX_LOCK_TYPE (1U << 1)
+ union {
+ fl_owner_t posix_owner;
+ pid_t flock_owner;
+ } lo_u;
+};
+
struct nfs4_lock_state {
struct list_head ls_locks; /* Other lock stateids */
struct nfs4_state * ls_state; /* Pointer to open state */
- fl_owner_t ls_owner; /* POSIX lock owner */
#define NFS_LOCK_INITIALIZED 1
int ls_flags;
struct nfs_seqid_counter ls_seqid;
@@ -136,6 +163,7 @@ struct nfs4_lock_state {
struct nfs_unique_id ls_id;
nfs4_stateid ls_stateid;
atomic_t ls_count;
+ struct nfs4_lock_owner ls_owner;
};
/* bits for nfs4_state->flags */
@@ -219,11 +247,15 @@ extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nam
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);
+extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
-extern struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[];
-extern struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[];
#if defined(CONFIG_NFS_V4_1)
-extern int nfs4_setup_sequence(struct nfs_client *clp,
+static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
+{
+ return server->nfs_client->cl_session;
+}
+
+extern int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
int cache_reply, struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
@@ -234,7 +266,12 @@ extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
#else /* CONFIG_NFS_v4_1 */
-static inline int nfs4_setup_sequence(struct nfs_client *clp,
+static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
+{
+ return NULL;
+}
+
+static inline int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
int cache_reply, struct rpc_task *task)
{
@@ -247,7 +284,7 @@ static inline int nfs4_init_session(struct nfs_server *server)
}
#endif /* CONFIG_NFS_V4_1 */
-extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
+extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
@@ -284,7 +321,7 @@ extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
extern void nfs41_handle_recall_slot(struct nfs_client *clp);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
+extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t);
extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
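The new nfs4_minor_version_ops vtable replaces the per-client cl_call_sync pointer and the parallel recovery-ops arrays with a single table indexed by minor version. A sketch of how it is plausibly populated and dispatched, assembled from the names visible in this diff rather than quoted from nfs4proc.c (recovery/renewal fields omitted):

static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version		= 0,
	.call_sync		= _nfs4_call_sync,
	.validate_stateid	= nfs4_validate_delegation_stateid,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
	.minor_version		= 1,
	.call_sync		= _nfs4_call_sync_session,
	.validate_stateid	= nfs41_validate_delegation_stateid,
};
#endif

const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
};

/* dispatch: clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
 * later:    clp->cl_mvops->call_sync(server, msg, args, res, cache_reply); */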
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 70015dd60a9..7ffbb98ddec 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -303,15 +303,19 @@ do_state_recovery:
}
-static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
+static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
- struct nfs_client *clp = server->nfs_client;
spin_lock(&clp->cl_lock);
if (time_before(clp->cl_last_renewal,timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
}
+static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
+{
+ do_renew_lease(server->nfs_client, timestamp);
+}
+
#if defined(CONFIG_NFS_V4_1)
/*
@@ -356,7 +360,7 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
{
struct rpc_task *task;
- if (!test_bit(NFS4CLNT_SESSION_DRAINING, &ses->clp->cl_state)) {
+ if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
if (task)
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
@@ -370,12 +374,11 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
complete(&ses->complete);
}
-static void nfs41_sequence_free_slot(const struct nfs_client *clp,
- struct nfs4_sequence_res *res)
+static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
struct nfs4_slot_table *tbl;
- tbl = &clp->cl_session->fc_slot_table;
+ tbl = &res->sr_session->fc_slot_table;
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
/* just wake up the next guy waiting since
* we may have not consumed a slot after all */
@@ -385,18 +388,17 @@ static void nfs41_sequence_free_slot(const struct nfs_client *clp,
spin_lock(&tbl->slot_tbl_lock);
nfs4_free_slot(tbl, res->sr_slotid);
- nfs41_check_drain_session_complete(clp->cl_session);
+ nfs41_check_drain_session_complete(res->sr_session);
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
}
-static void nfs41_sequence_done(struct nfs_client *clp,
- struct nfs4_sequence_res *res,
- int rpc_status)
+static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
unsigned long timestamp;
struct nfs4_slot_table *tbl;
struct nfs4_slot *slot;
+ struct nfs_client *clp;
/*
* sr_status remains 1 if an RPC level error occurred. The server
@@ -411,25 +413,51 @@ static void nfs41_sequence_done(struct nfs_client *clp,
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
goto out;
+ tbl = &res->sr_session->fc_slot_table;
+ slot = tbl->slots + res->sr_slotid;
+
/* Check the SEQUENCE operation status */
- if (res->sr_status == 0) {
- tbl = &clp->cl_session->fc_slot_table;
- slot = tbl->slots + res->sr_slotid;
+ switch (res->sr_status) {
+ case 0:
/* Update the slot's sequence and clientid lease timer */
++slot->seq_nr;
timestamp = res->sr_renewal_time;
- spin_lock(&clp->cl_lock);
- if (time_before(clp->cl_last_renewal, timestamp))
- clp->cl_last_renewal = timestamp;
- spin_unlock(&clp->cl_lock);
+ clp = res->sr_session->clp;
+ do_renew_lease(clp, timestamp);
/* Check sequence flags */
if (atomic_read(&clp->cl_count) > 1)
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+ break;
+ case -NFS4ERR_DELAY:
+ /* The server detected a resend of the RPC call and
+ * returned NFS4ERR_DELAY as per Section 2.10.6.2
+ * of RFC5661.
+ */
+ dprintk("%s: slot=%d seq=%d: Operation in progress\n",
+ __func__, res->sr_slotid, slot->seq_nr);
+ goto out_retry;
+ default:
+ /* Just update the slot sequence no. */
+ ++slot->seq_nr;
}
out:
/* The session may be reset by one of the error handlers. */
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
- nfs41_sequence_free_slot(clp, res);
+ nfs41_sequence_free_slot(res);
+ return 1;
+out_retry:
+ if (!rpc_restart_call(task))
+ goto out;
+ rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ return 0;
+}
+
+static int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
+{
+ if (res->sr_session == NULL)
+ return 1;
+ return nfs41_sequence_done(task, res);
}
/*
@@ -480,12 +508,11 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
return 0;
- memset(res, 0, sizeof(*res));
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
- if (test_bit(NFS4CLNT_SESSION_DRAINING, &session->clp->cl_state) &&
+ if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
/*
* The state manager will wait until the slot table is empty.
@@ -525,6 +552,7 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
res->sr_session = session;
res->sr_slotid = slotid;
res->sr_renewal_time = jiffies;
+ res->sr_status_flags = 0;
/*
* sr_status is only set in decode_sequence, and so will remain
* set to 1 if an rpc level failure occurs.
@@ -533,33 +561,33 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
return 0;
}
-int nfs4_setup_sequence(struct nfs_client *clp,
+int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply,
struct rpc_task *task)
{
+ struct nfs4_session *session = nfs4_get_session(server);
int ret = 0;
+ if (session == NULL) {
+ args->sa_session = NULL;
+ res->sr_session = NULL;
+ goto out;
+ }
+
dprintk("--> %s clp %p session %p sr_slotid %d\n",
- __func__, clp, clp->cl_session, res->sr_slotid);
+ __func__, session->clp, session, res->sr_slotid);
- if (!nfs4_has_session(clp))
- goto out;
- ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
+ ret = nfs41_setup_sequence(session, args, res, cache_reply,
task);
- if (ret && ret != -EAGAIN) {
- /* terminate rpc task */
- task->tk_status = ret;
- task->tk_action = NULL;
- }
out:
dprintk("<-- %s status=%d\n", __func__, ret);
return ret;
}
struct nfs41_call_sync_data {
- struct nfs_client *clp;
+ const struct nfs_server *seq_server;
struct nfs4_sequence_args *seq_args;
struct nfs4_sequence_res *seq_res;
int cache_reply;
@@ -569,9 +597,9 @@ static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
- dprintk("--> %s data->clp->cl_session %p\n", __func__,
- data->clp->cl_session);
- if (nfs4_setup_sequence(data->clp, data->seq_args,
+ dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
+
+ if (nfs4_setup_sequence(data->seq_server, data->seq_args,
data->seq_res, data->cache_reply, task))
return;
rpc_call_start(task);
@@ -587,7 +615,7 @@ static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
struct nfs41_call_sync_data *data = calldata;
- nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
+ nfs41_sequence_done(task, data->seq_res);
}
struct rpc_call_ops nfs41_call_sync_ops = {
@@ -600,8 +628,7 @@ struct rpc_call_ops nfs41_call_priv_sync_ops = {
.rpc_call_done = nfs41_call_sync_done,
};
-static int nfs4_call_sync_sequence(struct nfs_client *clp,
- struct rpc_clnt *clnt,
+static int nfs4_call_sync_sequence(struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -611,13 +638,13 @@ static int nfs4_call_sync_sequence(struct nfs_client *clp,
int ret;
struct rpc_task *task;
struct nfs41_call_sync_data data = {
- .clp = clp,
+ .seq_server = server,
.seq_args = args,
.seq_res = res,
.cache_reply = cache_reply,
};
struct rpc_task_setup task_setup = {
- .rpc_client = clnt,
+ .rpc_client = server->client,
.rpc_message = msg,
.callback_ops = &nfs41_call_sync_ops,
.callback_data = &data
@@ -642,10 +669,15 @@ int _nfs4_call_sync_session(struct nfs_server *server,
struct nfs4_sequence_res *res,
int cache_reply)
{
- return nfs4_call_sync_sequence(server->nfs_client, server->client,
- msg, args, res, cache_reply, 0);
+ return nfs4_call_sync_sequence(server, msg, args, res, cache_reply, 0);
}
+#else
+static int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
+{
+ return 1;
+}
#endif /* CONFIG_NFS_V4_1 */
int _nfs4_call_sync(struct nfs_server *server,
@@ -659,18 +691,9 @@ int _nfs4_call_sync(struct nfs_server *server,
}
#define nfs4_call_sync(server, msg, args, res, cache_reply) \
- (server)->nfs_client->cl_call_sync((server), (msg), &(args)->seq_args, \
+ (server)->nfs_client->cl_mvops->call_sync((server), (msg), &(args)->seq_args, \
&(res)->seq_res, (cache_reply))
-static void nfs4_sequence_done(const struct nfs_server *server,
- struct nfs4_sequence_res *res, int rpc_status)
-{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(server->nfs_client))
- nfs41_sequence_done(server->nfs_client, res, rpc_status);
-#endif /* CONFIG_NFS_V4_1 */
-}
-
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
@@ -745,19 +768,14 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
- if (flags & O_EXCL) {
- if (nfs4_has_persistent_session(server->nfs_client)) {
- /* GUARDED */
- p->o_arg.u.attrs = &p->attrs;
- memcpy(&p->attrs, attrs, sizeof(p->attrs));
- } else { /* EXCLUSIVE4_1 */
- u32 *s = (u32 *) p->o_arg.u.verifier.data;
- s[0] = jiffies;
- s[1] = current->pid;
- }
- } else if (flags & O_CREAT) {
+ if (flags & O_CREAT) {
+ u32 *s;
+
p->o_arg.u.attrs = &p->attrs;
memcpy(&p->attrs, attrs, sizeof(p->attrs));
+ s = (u32 *) p->o_arg.u.verifier.data;
+ s[0] = jiffies;
+ s[1] = current->pid;
}
p->c_arg.fh = &p->o_res.fh;
p->c_arg.stateid = &p->o_res.stateid;
@@ -1255,8 +1273,6 @@ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
- if (RPC_ASSASSINATED(task))
- return;
if (data->rpc_status == 0) {
memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
sizeof(data->o_res.stateid.data));
@@ -1356,13 +1372,13 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
}
/* Update sequence id. */
data->o_arg.id = sp->so_owner_id.id;
- data->o_arg.clientid = sp->so_client->cl_clientid;
+ data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
}
data->timestamp = jiffies;
- if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
+ if (nfs4_setup_sequence(data->o_arg.server,
&data->o_arg.seq_args,
&data->o_res.seq_res, 1, task))
return;
@@ -1385,11 +1401,9 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
data->rpc_status = task->tk_status;
- nfs4_sequence_done(data->o_arg.server, &data->o_res.seq_res,
- task->tk_status);
-
- if (RPC_ASSASSINATED(task))
+ if (!nfs4_sequence_done(task, &data->o_res.seq_res))
return;
+
if (task->tk_status == 0) {
switch (data->o_res.f_attr->mode & S_IFMT) {
case S_IFREG:
@@ -1773,7 +1787,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
/* Use that stateid */
} else if (state != NULL) {
- nfs4_copy_stateid(&arg.stateid, state, current->files);
+ nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
@@ -1838,8 +1852,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
- nfs4_sequence_done(server, &calldata->res.seq_res, task->tk_status);
- if (RPC_ASSASSINATED(task))
+ if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
@@ -1903,7 +1916,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
nfs_fattr_init(calldata->res.fattr);
calldata->timestamp = jiffies;
- if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
+ if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
&calldata->arg.seq_args, &calldata->res.seq_res,
1, task))
return;
@@ -2648,7 +2661,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
struct nfs_removeres *res = task->tk_msg.rpc_resp;
- nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
+ if (!nfs4_sequence_done(task, &res->seq_res))
+ return 0;
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(dir, &res->cinfo);
@@ -3093,7 +3107,8 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
dprintk("--> %s\n", __func__);
- nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
nfs_restart_rpc(task, server->nfs_client);
@@ -3116,8 +3131,8 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
- nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
- task->tk_status);
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
@@ -3145,8 +3160,9 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
- nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
- task->tk_status);
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
+
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
@@ -3196,10 +3212,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
nfs4_schedule_state_recovery(clp);
return;
}
- spin_lock(&clp->cl_lock);
- if (time_before(clp->cl_last_renewal,timestamp))
- clp->cl_last_renewal = timestamp;
- spin_unlock(&clp->cl_lock);
+ do_renew_lease(clp, timestamp);
}
static const struct rpc_call_ops nfs4_renew_ops = {
@@ -3240,10 +3253,7 @@ int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
if (status < 0)
return status;
- spin_lock(&clp->cl_lock);
- if (time_before(clp->cl_last_renewal,now))
- clp->cl_last_renewal = now;
- spin_unlock(&clp->cl_lock);
+ do_renew_lease(clp, now);
return 0;
}
@@ -3464,9 +3474,11 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
}
static int
-_nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs_client *clp, struct nfs4_state *state)
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
- if (!clp || task->tk_status >= 0)
+ struct nfs_client *clp = server->nfs_client;
+
+ if (task->tk_status >= 0)
return 0;
switch(task->tk_status) {
case -NFS4ERR_ADMIN_REVOKED:
@@ -3498,8 +3510,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
- if (server)
- nfs_inc_server_stats(server, NFSIOS_DELAY);
+ nfs_inc_server_stats(server, NFSIOS_DELAY);
case -NFS4ERR_GRACE:
case -EKEYEXPIRED:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
@@ -3520,12 +3531,6 @@ do_state_recovery:
return -EAGAIN;
}
-static int
-nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
-{
- return _nfs4_async_handle_error(task, server, server->nfs_client, state);
-}
-
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
unsigned short port, struct rpc_cred *cred,
struct nfs4_setclientid_res *res)
@@ -3641,8 +3646,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
- nfs4_sequence_done(data->res.server, &data->res.seq_res,
- task->tk_status);
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return;
switch (task->tk_status) {
case -NFS4ERR_STALE_STATEID:
@@ -3672,7 +3677,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
d_data = (struct nfs4_delegreturndata *)data;
- if (nfs4_setup_sequence(d_data->res.server->nfs_client,
+ if (nfs4_setup_sequence(d_data->res.server,
&d_data->args.seq_args,
&d_data->res.seq_res, 1, task))
return;
@@ -3892,9 +3897,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
- nfs4_sequence_done(calldata->server, &calldata->res.seq_res,
- task->tk_status);
- if (RPC_ASSASSINATED(task))
+ if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
switch (task->tk_status) {
case 0:
@@ -3927,7 +3930,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
return;
}
calldata->timestamp = jiffies;
- if (nfs4_setup_sequence(calldata->server->nfs_client,
+ if (nfs4_setup_sequence(calldata->server,
&calldata->arg.seq_args,
&calldata->res.seq_res, 1, task))
return;
@@ -4082,7 +4085,8 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
- if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args,
+ if (nfs4_setup_sequence(data->server,
+ &data->arg.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
@@ -4101,12 +4105,10 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
dprintk("%s: begin!\n", __func__);
- nfs4_sequence_done(data->server, &data->res.seq_res,
- task->tk_status);
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return;
data->rpc_status = task->tk_status;
- if (RPC_ASSASSINATED(task))
- goto out;
if (data->arg.new_lock_owner != 0) {
if (data->rpc_status == 0)
nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
@@ -4424,6 +4426,34 @@ out:
return err;
}
+static void nfs4_release_lockowner_release(void *calldata)
+{
+ kfree(calldata);
+}
+
+const struct rpc_call_ops nfs4_release_lockowner_ops = {
+ .rpc_release = nfs4_release_lockowner_release,
+};
+
+void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
+{
+ struct nfs_server *server = lsp->ls_state->owner->so_server;
+ struct nfs_release_lockowner_args *args;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
+ };
+
+ if (server->nfs_client->cl_mvops->minor_version != 0)
+ return;
+ args = kmalloc(sizeof(*args), GFP_NOFS);
+ if (!args)
+ return;
+ args->lock_owner.clientid = server->nfs_client->cl_clientid;
+ args->lock_owner.id = lsp->ls_id.id;
+ msg.rpc_argp = args;
+ rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
+}
+
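nfs4_release_lockowner() is fire-and-forget: ownership of the kmalloc'd args passes to the async RPC, and the .rpc_release callback frees it, so the caller neither waits nor cleans up. A user-space sketch of that handoff, assuming pthreads; every name below is illustrative:

	#include <pthread.h>
	#include <stdlib.h>

	struct lockowner_args { unsigned long long clientid, id; };

	static void *release_lockowner_task(void *calldata)
	{
		struct lockowner_args *args = calldata;
		/* ... send the RELEASE_LOCKOWNER message using *args ... */
		free(args);		/* analog of .rpc_release freeing calldata */
		return NULL;
	}

	static void release_lockowner(unsigned long long clientid,
				      unsigned long long id)
	{
		struct lockowner_args *args = malloc(sizeof(*args));
		pthread_t t;

		if (!args)
			return;		/* best effort, like the kernel version */
		args->clientid = clientid;
		args->id = id;
		if (pthread_create(&t, NULL, release_lockowner_task, args)) {
			free(args);
			return;
		}
		pthread_detach(t);	/* nobody waits for completion */
	}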
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
@@ -4611,7 +4641,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
(struct nfs4_get_lease_time_data *)calldata;
dprintk("--> %s\n", __func__);
- nfs41_sequence_done(data->clp, &data->res->lr_seq_res, task->tk_status);
+ if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
+ return;
switch (task->tk_status) {
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
@@ -4805,13 +4836,6 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
if (!session)
return NULL;
- /*
- * The create session reply races with the server back
- * channel probe. Mark the client NFS_CS_SESSION_INITING
- * so that the client back channel can find the
- * nfs_client struct
- */
- clp->cl_cons_state = NFS_CS_SESSION_INITING;
init_completion(&session->complete);
tbl = &session->fc_slot_table;
@@ -4824,6 +4848,8 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+ session->session_state = 1<<NFS4_SESSION_INITING;
+
session->clp = clp;
return session;
}
@@ -5040,6 +5066,10 @@ int nfs4_init_session(struct nfs_server *server)
if (!nfs4_has_session(clp))
return 0;
+ session = clp->cl_session;
+ if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
+ return 0;
+
rsize = server->rsize;
if (rsize == 0)
rsize = NFS_MAX_FILE_IO_SIZE;
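The NFS4_SESSION_INITING bit set in nfs4_alloc_session() and consumed here via test_and_clear_bit() acts as a run-once latch: only the first caller of nfs4_init_session() performs the negotiation. A runnable user-space analog, assuming C11 <stdatomic.h>; the names are illustrative:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint session_state = 1u << 0;	/* INITING set at alloc */

	static int session_init_once(void)
	{
		/* atomically clear the bit; only the first caller sees it set */
		if (!(atomic_fetch_and(&session_state, ~(1u << 0)) & (1u << 0)))
			return 0;	/* already initialized: nothing to do */
		printf("negotiating session parameters\n");
		return 1;
	}

	int main(void)
	{
		session_init_once();	/* performs the work */
		session_init_once();	/* no-op */
		return 0;
	}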
@@ -5047,7 +5077,6 @@ int nfs4_init_session(struct nfs_server *server)
if (wsize == 0)
wsize = NFS_MAX_FILE_IO_SIZE;
- session = clp->cl_session;
session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
@@ -5060,69 +5089,70 @@ int nfs4_init_session(struct nfs_server *server)
/*
* Renew the cl_session lease.
*/
-static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
-{
+struct nfs4_sequence_data {
+ struct nfs_client *clp;
struct nfs4_sequence_args args;
struct nfs4_sequence_res res;
-
- struct rpc_message msg = {
- .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
- .rpc_argp = &args,
- .rpc_resp = &res,
- .rpc_cred = cred,
- };
-
- args.sa_cache_this = 0;
-
- return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args,
- &res, args.sa_cache_this, 1);
-}
+};
static void nfs41_sequence_release(void *data)
{
- struct nfs_client *clp = (struct nfs_client *)data;
+ struct nfs4_sequence_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
if (atomic_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
+ kfree(calldata);
+}
+
+static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
+{
+ switch(task->tk_status) {
+ case -NFS4ERR_DELAY:
+ case -EKEYEXPIRED:
+ rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ return -EAGAIN;
+ default:
+ nfs4_schedule_state_recovery(clp);
+ }
+ return 0;
}
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
- struct nfs_client *clp = (struct nfs_client *)data;
+ struct nfs4_sequence_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
- nfs41_sequence_done(clp, task->tk_msg.rpc_resp, task->tk_status);
+ if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
+ return;
if (task->tk_status < 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (atomic_read(&clp->cl_count) == 1)
goto out;
- if (_nfs4_async_handle_error(task, NULL, clp, NULL)
- == -EAGAIN) {
- nfs_restart_rpc(task, clp);
+ if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
return;
}
}
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
- kfree(task->tk_msg.rpc_argp);
- kfree(task->tk_msg.rpc_resp);
-
dprintk("<-- %s\n", __func__);
}
static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
{
- struct nfs_client *clp;
+ struct nfs4_sequence_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
struct nfs4_sequence_args *args;
struct nfs4_sequence_res *res;
- clp = (struct nfs_client *)data;
args = task->tk_msg.rpc_argp;
res = task->tk_msg.rpc_resp;
- if (nfs4_setup_sequence(clp, args, res, 0, task))
+ if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task))
return;
rpc_call_start(task);
}
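The hunk above replaces two separately kmalloc'd args/res buffers (previously freed piecemeal in the completion callback) with one nfs4_sequence_data that bundles the client reference, arguments, and results, so .rpc_release can reclaim everything with a single kfree(). A sketch of the consolidated shape; member types are simplified stand-ins:

	#include <stdlib.h>

	struct sequence_args { int sa_cache_this; };
	struct sequence_res  { int sr_slotid; };

	struct sequence_data {
		void *clp;			/* stand-in for struct nfs_client * */
		struct sequence_args args;
		struct sequence_res res;
	};

	static void sequence_release(void *data)
	{
		free(data);	/* one free replaces kfree(args) + kfree(res) */
	}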
@@ -5133,32 +5163,67 @@ static const struct rpc_call_ops nfs41_sequence_ops = {
.rpc_release = nfs41_sequence_release,
};
-static int nfs41_proc_async_sequence(struct nfs_client *clp,
- struct rpc_cred *cred)
+static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
- struct nfs4_sequence_args *args;
- struct nfs4_sequence_res *res;
+ struct nfs4_sequence_data *calldata;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
.rpc_cred = cred,
};
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs41_sequence_ops,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
+ };
if (!atomic_inc_not_zero(&clp->cl_count))
- return -EIO;
- args = kzalloc(sizeof(*args), GFP_NOFS);
- res = kzalloc(sizeof(*res), GFP_NOFS);
- if (!args || !res) {
- kfree(args);
- kfree(res);
+ return ERR_PTR(-EIO);
+ calldata = kmalloc(sizeof(*calldata), GFP_NOFS);
+ if (calldata == NULL) {
nfs_put_client(clp);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
- res->sr_slotid = NFS4_MAX_SLOT_TABLE;
- msg.rpc_argp = args;
- msg.rpc_resp = res;
+ calldata->res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+ msg.rpc_argp = &calldata->args;
+ msg.rpc_resp = &calldata->res;
+ calldata->clp = clp;
+ task_setup_data.callback_data = calldata;
- return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
- &nfs41_sequence_ops, (void *)clp);
+ return rpc_run_task(&task_setup_data);
+}
+
+static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct rpc_task *task;
+ int ret = 0;
+
+ task = _nfs41_proc_sequence(clp, cred);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else
+ rpc_put_task(task);
+ dprintk("<-- %s status=%d\n", __func__, ret);
+ return ret;
+}
+
+static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct rpc_task *task;
+ int ret;
+
+ task = _nfs41_proc_sequence(clp, cred);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+ }
+ ret = rpc_wait_for_completion_task(task);
+ if (!ret)
+ ret = task->tk_status;
+ rpc_put_task(task);
+out:
+ dprintk("<-- %s status=%d\n", __func__, ret);
+ return ret;
}
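Both wrappers above layer over one task constructor: _nfs41_proc_sequence() builds and launches the RPC via rpc_run_task(), the async caller just drops its task reference, and the sync caller waits with rpc_wait_for_completion_task() before harvesting tk_status. A user-space sketch of the synchronous layering, assuming pthreads; run_sequence_task() is an assumed async constructor, not a real API:

	#include <pthread.h>
	#include <stdlib.h>
	#include <errno.h>

	struct task { pthread_t tid; int status; };

	extern struct task *run_sequence_task(void);	/* assumed constructor */

	static int proc_sequence_sync(void)
	{
		struct task *t = run_sequence_task();
		int ret;

		if (!t)
			return -EIO;
		pthread_join(t->tid, NULL);	/* rpc_wait_for_completion_task() analog */
		ret = t->status;		/* task->tk_status analog */
		free(t);			/* rpc_put_task() analog */
		return ret;
	}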
struct nfs4_reclaim_complete_data {
@@ -5172,13 +5237,31 @@ static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
struct nfs4_reclaim_complete_data *calldata = data;
rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
- if (nfs4_setup_sequence(calldata->clp, &calldata->arg.seq_args,
+ if (nfs41_setup_sequence(calldata->clp->cl_session,
+ &calldata->arg.seq_args,
&calldata->res.seq_res, 0, task))
return;
rpc_call_start(task);
}
+static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
+{
+ switch(task->tk_status) {
+ case 0:
+ case -NFS4ERR_COMPLETE_ALREADY:
+ case -NFS4ERR_WRONG_CRED: /* What to do here? */
+ break;
+ case -NFS4ERR_DELAY:
+ case -EKEYEXPIRED:
+ rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ return -EAGAIN;
+ default:
+ nfs4_schedule_state_recovery(clp);
+ }
+ return 0;
+}
+
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
struct nfs4_reclaim_complete_data *calldata = data;
@@ -5186,32 +5269,13 @@ static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
struct nfs4_sequence_res *res = &calldata->res.seq_res;
dprintk("--> %s\n", __func__);
- nfs41_sequence_done(clp, res, task->tk_status);
- switch (task->tk_status) {
- case 0:
- case -NFS4ERR_COMPLETE_ALREADY:
- break;
- case -NFS4ERR_BADSESSION:
- case -NFS4ERR_DEADSESSION:
- /*
- * Handle the session error, but do not retry the operation, as
- * we have no way of telling whether the clientid had to be
- * reset before we got our reply. If reset, a new wave of
- * reclaim operations will follow, containing their own reclaim
- * complete. We don't want our retry to get on the way of
- * recovery by incorrectly indicating to the server that we're
- * done reclaiming state since the process had to be restarted.
- */
- _nfs4_async_handle_error(task, NULL, clp, NULL);
- break;
- default:
- if (_nfs4_async_handle_error(
- task, NULL, clp, NULL) == -EAGAIN) {
- rpc_restart_call_prepare(task);
- return;
- }
- }
+ if (!nfs41_sequence_done(task, res))
+ return;
+ if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
dprintk("<-- %s\n", __func__);
}
@@ -5325,28 +5389,30 @@ struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
};
#endif
-/*
- * Per minor version reboot and network partition recovery ops
- */
-
-struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[] = {
- &nfs40_reboot_recovery_ops,
-#if defined(CONFIG_NFS_V4_1)
- &nfs41_reboot_recovery_ops,
-#endif
+static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
+ .minor_version = 0,
+ .call_sync = _nfs4_call_sync,
+ .validate_stateid = nfs4_validate_delegation_stateid,
+ .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
+ .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
+ .state_renewal_ops = &nfs40_state_renewal_ops,
};
-struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[] = {
- &nfs40_nograce_recovery_ops,
#if defined(CONFIG_NFS_V4_1)
- &nfs41_nograce_recovery_ops,
-#endif
+static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
+ .minor_version = 1,
+ .call_sync = _nfs4_call_sync_session,
+ .validate_stateid = nfs41_validate_delegation_stateid,
+ .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
+ .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
+ .state_renewal_ops = &nfs41_state_renewal_ops,
};
+#endif
-struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[] = {
- &nfs40_state_renewal_ops,
+const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+ [0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
- &nfs41_state_renewal_ops,
+ [1] = &nfs_v4_1_minor_ops,
#endif
};
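This hunk collapses three parallel arrays indexed by cl_minorversion into a single ops vector per minor version, looked up once and cached as cl_mvops. A runnable sketch of that shape with designated initializers; the ops members here are illustrative, not the kernel's:

	#include <stdio.h>

	struct minor_version_ops {
		unsigned int minor_version;
		const char *(*renewal_name)(void);
	};

	static const char *v40_renew(void) { return "RENEW"; }
	static const char *v41_renew(void) { return "SEQUENCE"; }

	static const struct minor_version_ops v4_0_ops = { 0, v40_renew };
	static const struct minor_version_ops v4_1_ops = { 1, v41_renew };

	static const struct minor_version_ops *minor_ops[] = {
		[0] = &v4_0_ops,
		[1] = &v4_1_ops,
	};

	int main(void)
	{
		const struct minor_version_ops *mvops = minor_ops[1]; /* cl_mvops */

		printf("v4.%u renews with %s\n", mvops->minor_version,
		       mvops->renewal_name());
		return 0;
	}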
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index d87f10327b7..72b6c580af1 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -54,14 +54,14 @@
void
nfs4_renew_state(struct work_struct *work)
{
- struct nfs4_state_maintenance_ops *ops;
+ const struct nfs4_state_maintenance_ops *ops;
struct nfs_client *clp =
container_of(work, struct nfs_client, cl_renewd.work);
struct rpc_cred *cred;
long lease;
unsigned long last, now;
- ops = nfs4_state_renewal_ops[clp->cl_minorversion];
+ ops = clp->cl_mvops->state_renewal_ops;
dprintk("%s: start\n", __func__);
/* Are there any active superblocks? */
if (list_empty(&clp->cl_superblocks))
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 34acf5926fd..3e2f19b04c0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -145,7 +145,9 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
struct nfs4_session *ses = clp->cl_session;
int max_slots;
- if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
+ if (ses == NULL)
+ return;
+ if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
spin_lock(&ses->fc_slot_table.slot_tbl_lock);
max_slots = ses->fc_slot_table.max_slots;
while (max_slots--) {
@@ -167,7 +169,7 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
struct nfs4_slot_table *tbl = &ses->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
- set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
+ set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
if (tbl->highest_used_slotid != -1) {
INIT_COMPLETION(ses->complete);
spin_unlock(&tbl->slot_tbl_lock);
@@ -371,7 +373,6 @@ nfs4_alloc_state_owner(void)
return NULL;
spin_lock_init(&sp->so_lock);
INIT_LIST_HEAD(&sp->so_states);
- INIT_LIST_HEAD(&sp->so_delegations);
rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
sp->so_seqid.sequence = &sp->so_sequence;
spin_lock_init(&sp->so_sequence.lock);
@@ -384,7 +385,7 @@ static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
if (!RB_EMPTY_NODE(&sp->so_client_node)) {
- struct nfs_client *clp = sp->so_client;
+ struct nfs_client *clp = sp->so_server->nfs_client;
spin_lock(&clp->cl_lock);
rb_erase(&sp->so_client_node, &clp->cl_state_owners);
@@ -406,7 +407,6 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
new = nfs4_alloc_state_owner();
if (new == NULL)
return NULL;
- new->so_client = clp;
new->so_server = server;
new->so_cred = cred;
spin_lock(&clp->cl_lock);
@@ -423,7 +423,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
- struct nfs_client *clp = sp->so_client;
+ struct nfs_client *clp = sp->so_server->nfs_client;
struct rpc_cred *cred = sp->so_cred;
if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
@@ -602,12 +602,21 @@ void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
* that is compatible with current->files
*/
static struct nfs4_lock_state *
-__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
struct nfs4_lock_state *pos;
list_for_each_entry(pos, &state->lock_states, ls_locks) {
- if (pos->ls_owner != fl_owner)
+ if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
continue;
+ switch (pos->ls_owner.lo_type) {
+ case NFS4_POSIX_LOCK_TYPE:
+ if (pos->ls_owner.lo_u.posix_owner != fl_owner)
+ continue;
+ break;
+ case NFS4_FLOCK_LOCK_TYPE:
+ if (pos->ls_owner.lo_u.flock_owner != fl_pid)
+ continue;
+ }
atomic_inc(&pos->ls_count);
return pos;
}
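Lock state lookup is now keyed by an (owner type, owner) pair: POSIX lock owners compare by the opaque fl_owner, flock owners by fl_pid, and NFS4_ANY_LOCK_TYPE matches either. A runnable sketch of the two-flavour match; the enum and type names below are illustrative:

	#include <stdio.h>
	#include <sys/types.h>

	enum { LOCK_ANY, LOCK_POSIX, LOCK_FLOCK };

	struct lock_owner {
		int lo_type;
		union {
			void *posix_owner;
			pid_t flock_owner;
		} lo_u;
	};

	static int owner_matches(const struct lock_owner *lo, int type,
				 void *owner, pid_t pid)
	{
		if (type != LOCK_ANY && lo->lo_type != type)
			return 0;
		switch (lo->lo_type) {
		case LOCK_POSIX:
			return lo->lo_u.posix_owner == owner;
		case LOCK_FLOCK:
			return lo->lo_u.flock_owner == pid;
		}
		return 0;
	}

	int main(void)
	{
		int x;
		struct lock_owner lo = { LOCK_POSIX, { .posix_owner = &x } };

		printf("%d\n", owner_matches(&lo, LOCK_ANY, &x, 0));	/* 1 */
		printf("%d\n", owner_matches(&lo, LOCK_FLOCK, &x, 0));	/* 0 */
		return 0;
	}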
@@ -619,10 +628,10 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
* exists, return an uninitialized one.
*
*/
-static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
struct nfs4_lock_state *lsp;
- struct nfs_client *clp = state->owner->so_client;
+ struct nfs_client *clp = state->owner->so_server->nfs_client;
lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
if (lsp == NULL)
@@ -633,7 +642,18 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
lsp->ls_seqid.sequence = &lsp->ls_sequence;
atomic_set(&lsp->ls_count, 1);
lsp->ls_state = state;
- lsp->ls_owner = fl_owner;
+ lsp->ls_owner.lo_type = type;
+ switch (lsp->ls_owner.lo_type) {
+ case NFS4_FLOCK_LOCK_TYPE:
+ lsp->ls_owner.lo_u.flock_owner = fl_pid;
+ break;
+ case NFS4_POSIX_LOCK_TYPE:
+ lsp->ls_owner.lo_u.posix_owner = fl_owner;
+ break;
+ default:
+ kfree(lsp);
+ return NULL;
+ }
spin_lock(&clp->cl_lock);
nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
spin_unlock(&clp->cl_lock);
@@ -643,7 +663,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
- struct nfs_client *clp = lsp->ls_state->owner->so_client;
+ struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
spin_lock(&clp->cl_lock);
nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
@@ -657,13 +677,13 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
* exists, return an uninitialized one.
*
*/
-static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
+static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
{
struct nfs4_lock_state *lsp, *new = NULL;
for(;;) {
spin_lock(&state->state_lock);
- lsp = __nfs4_find_lock_state(state, owner);
+ lsp = __nfs4_find_lock_state(state, owner, pid, type);
if (lsp != NULL)
break;
if (new != NULL) {
@@ -674,7 +694,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
break;
}
spin_unlock(&state->state_lock);
- new = nfs4_alloc_lock_state(state, owner);
+ new = nfs4_alloc_lock_state(state, owner, pid, type);
if (new == NULL)
return NULL;
}
@@ -701,6 +721,8 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
if (list_empty(&state->lock_states))
clear_bit(LK_STATE_IN_USE, &state->flags);
spin_unlock(&state->state_lock);
+ if (lsp->ls_flags & NFS_LOCK_INITIALIZED)
+ nfs4_release_lockowner(lsp);
nfs4_free_lock_state(lsp);
}
@@ -728,7 +750,12 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
if (fl->fl_ops != NULL)
return 0;
- lsp = nfs4_get_lock_state(state, fl->fl_owner);
+ if (fl->fl_flags & FL_POSIX)
+ lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
+ else if (fl->fl_flags & FL_FLOCK)
+ lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE);
+ else
+ return -EINVAL;
if (lsp == NULL)
return -ENOMEM;
fl->fl_u.nfs4_fl.owner = lsp;
@@ -740,7 +767,7 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
* Byte-range lock aware utility to initialize the stateid of read/write
* requests.
*/
-void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
+void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
{
struct nfs4_lock_state *lsp;
int seq;
@@ -753,7 +780,7 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
return;
spin_lock(&state->state_lock);
- lsp = __nfs4_find_lock_state(state, fl_owner);
+ lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
spin_unlock(&state->state_lock);
@@ -1041,11 +1068,11 @@ restart:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
- nfs4_state_mark_reclaim_nograce(sp->so_client, state);
+ nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
break;
case -NFS4ERR_EXPIRED:
case -NFS4ERR_NO_GRACE:
- nfs4_state_mark_reclaim_nograce(sp->so_client, state);
+ nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
@@ -1120,8 +1147,7 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
return;
- nfs4_reclaim_complete(clp,
- nfs4_reboot_recovery_ops[clp->cl_minorversion]);
+ nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
@@ -1211,8 +1237,8 @@ restart:
static int nfs4_check_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
- struct nfs4_state_maintenance_ops *ops =
- nfs4_state_renewal_ops[clp->cl_minorversion];
+ const struct nfs4_state_maintenance_ops *ops =
+ clp->cl_mvops->state_renewal_ops;
int status = -NFS4ERR_EXPIRED;
/* Is the client already known to have an expired lease? */
@@ -1235,8 +1261,8 @@ out:
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
- struct nfs4_state_recovery_ops *ops =
- nfs4_reboot_recovery_ops[clp->cl_minorversion];
+ const struct nfs4_state_recovery_ops *ops =
+ clp->cl_mvops->reboot_recovery_ops;
int status = -ENOENT;
cred = ops->get_clid_cred(clp);
@@ -1444,7 +1470,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* First recover reboot state... */
if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
status = nfs4_do_reclaim(clp,
- nfs4_reboot_recovery_ops[clp->cl_minorversion]);
+ clp->cl_mvops->reboot_recovery_ops);
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
continue;
@@ -1458,7 +1484,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
status = nfs4_do_reclaim(clp,
- nfs4_nograce_recovery_ops[clp->cl_minorversion]);
+ clp->cl_mvops->nograce_recovery_ops);
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 6bdef28efa3..08ef9129113 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -202,14 +202,17 @@ static int nfs4_stat_to_errno(int);
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
+#define encode_lockowner_maxsz (7)
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
- 1 + encode_stateid_maxsz + 8)
+ 1 + encode_stateid_maxsz + 1 + \
+ encode_lockowner_maxsz)
#define decode_lock_denied_maxsz \
(8 + decode_lockowner_maxsz)
#define decode_lock_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
-#define encode_lockt_maxsz (op_encode_hdr_maxsz + 12)
+#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
+ encode_lockowner_maxsz)
#define decode_lockt_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
#define encode_locku_maxsz (op_encode_hdr_maxsz + 3 + \
@@ -217,6 +220,11 @@ static int nfs4_stat_to_errno(int);
4)
#define decode_locku_maxsz (op_decode_hdr_maxsz + \
decode_stateid_maxsz)
+#define encode_release_lockowner_maxsz \
+ (op_encode_hdr_maxsz + \
+ encode_lockowner_maxsz)
+#define decode_release_lockowner_maxsz \
+ (op_decode_hdr_maxsz)
#define encode_access_maxsz (op_encode_hdr_maxsz + 1)
#define decode_access_maxsz (op_decode_hdr_maxsz + 2)
#define encode_symlink_maxsz (op_encode_hdr_maxsz + \
@@ -471,6 +479,12 @@ static int nfs4_stat_to_errno(int);
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_locku_maxsz)
+#define NFS4_enc_release_lockowner_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_lockowner_maxsz)
+#define NFS4_dec_release_lockowner_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_lockowner_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -744,7 +758,7 @@ static void encode_compound_hdr(struct xdr_stream *xdr,
struct compound_hdr *hdr)
{
__be32 *p;
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
/* initialize running count of expected bytes in reply.
* NOTE: the replied tag SHOULD be the same as the one sent,
@@ -862,8 +876,8 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
*p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
- *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
+ *p++ = cpu_to_be32(iap->ia_atime.tv_sec);
+ *p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
}
else if (iap->ia_valid & ATTR_ATIME) {
bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
@@ -1042,6 +1056,17 @@ static inline uint64_t nfs4_lock_length(struct file_lock *fl)
return fl->fl_end - fl->fl_start + 1;
}
+static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 28);
+ p = xdr_encode_hyper(p, lowner->clientid);
+ *p++ = cpu_to_be32(16);
+ p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+ xdr_encode_hyper(p, lowner->id);
+}
+
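The 28 bytes reserved by encode_lockowner() break down as an 8-byte clientid hyper, a 4-byte opaque length of 16, the literal "lock id:" prefix (8 bytes), and the 8-byte owner id. A runnable user-space check of that arithmetic using plain XDR big-endian encoding; no kernel interfaces are assumed:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint8_t *encode_hyper(uint8_t *p, uint64_t v)
	{
		uint32_t hi = htonl((uint32_t)(v >> 32)), lo = htonl((uint32_t)v);

		memcpy(p, &hi, 4);
		memcpy(p + 4, &lo, 4);
		return p + 8;
	}

	int main(void)
	{
		uint8_t buf[28], *p = buf;
		uint32_t len = htonl(16);

		p = encode_hyper(p, 0x1122334455667788ULL);	/* clientid */
		memcpy(p, &len, 4); p += 4;			/* opaque length */
		memcpy(p, "lock id:", 8); p += 8;		/* fixed prefix */
		p = encode_hyper(p, 42);			/* lock owner id */
		printf("encoded %td bytes\n", p - buf);		/* prints 28 */
		return 0;
	}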
/*
* opcode,type,reclaim,offset,length,new_lock_owner = 32
* open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40
@@ -1058,14 +1083,11 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
*p = cpu_to_be32(args->new_lock_owner);
if (args->new_lock_owner){
- p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+32);
+ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
*p++ = cpu_to_be32(args->open_seqid->sequence->counter);
p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE);
*p++ = cpu_to_be32(args->lock_seqid->sequence->counter);
- p = xdr_encode_hyper(p, args->lock_owner.clientid);
- *p++ = cpu_to_be32(16);
- p = xdr_encode_opaque_fixed(p, "lock id:", 8);
- xdr_encode_hyper(p, args->lock_owner.id);
+ encode_lockowner(xdr, &args->lock_owner);
}
else {
p = reserve_space(xdr, NFS4_STATEID_SIZE+4);
@@ -1080,15 +1102,12 @@ static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *ar
{
__be32 *p;
- p = reserve_space(xdr, 52);
+ p = reserve_space(xdr, 24);
*p++ = cpu_to_be32(OP_LOCKT);
*p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
p = xdr_encode_hyper(p, args->fl->fl_start);
p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
- p = xdr_encode_hyper(p, args->lock_owner.clientid);
- *p++ = cpu_to_be32(16);
- p = xdr_encode_opaque_fixed(p, "lock id:", 8);
- xdr_encode_hyper(p, args->lock_owner.id);
+ encode_lockowner(xdr, &args->lock_owner);
hdr->nops++;
hdr->replen += decode_lockt_maxsz;
}
@@ -1108,6 +1127,17 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
hdr->replen += decode_locku_maxsz;
}
+static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(OP_RELEASE_LOCKOWNER);
+ encode_lockowner(xdr, lowner);
+ hdr->nops++;
+ hdr->replen += decode_release_lockowner_maxsz;
+}
+
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
{
int len = name->len;
@@ -1172,7 +1202,7 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
break;
default:
clp = arg->server->nfs_client;
- if (clp->cl_minorversion > 0) {
+ if (clp->cl_mvops->minor_version > 0) {
if (nfs4_has_persistent_session(clp)) {
*p = cpu_to_be32(NFS4_CREATE_GUARDED);
encode_attrs(xdr, arg->u.attrs, arg->server);
@@ -1324,14 +1354,14 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
hdr->replen += decode_putrootfh_maxsz;
}
-static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
+static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx)
{
nfs4_stateid stateid;
__be32 *p;
p = reserve_space(xdr, NFS4_STATEID_SIZE);
if (ctx->state != NULL) {
- nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner);
+ nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid);
xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
} else
xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
@@ -1344,7 +1374,7 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args,
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_READ);
- encode_stateid(xdr, args->context);
+ encode_stateid(xdr, args->context, args->lock_context);
p = reserve_space(xdr, 12);
p = xdr_encode_hyper(p, args->offset);
@@ -1523,7 +1553,7 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_WRITE);
- encode_stateid(xdr, args->context);
+ encode_stateid(xdr, args->context, args->lock_context);
p = reserve_space(xdr, 16);
p = xdr_encode_hyper(p, args->offset);
@@ -1704,7 +1734,7 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
{
#if defined(CONFIG_NFS_V4_1)
if (args->sa_session)
- return args->sa_session->clp->cl_minorversion;
+ return args->sa_session->clp->cl_mvops->minor_version;
#endif /* CONFIG_NFS_V4_1 */
return 0;
}
@@ -2048,6 +2078,20 @@ static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_
return 0;
}
+static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = 0,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_release_lockowner(&xdr, &args->lock_owner, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
/*
* Encode a READLINK request
*/
@@ -2395,7 +2439,7 @@ static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .minorversion = args->client->cl_minorversion,
+ .minorversion = args->client->cl_mvops->minor_version,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
@@ -2413,7 +2457,7 @@ static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .minorversion = args->client->cl_minorversion,
+ .minorversion = args->client->cl_mvops->minor_version,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
@@ -2431,7 +2475,7 @@ static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .minorversion = session->clp->cl_minorversion,
+ .minorversion = session->clp->cl_mvops->minor_version,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
@@ -3973,6 +4017,11 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
return status;
}
+static int decode_release_lockowner(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_RELEASE_LOCKOWNER);
+}
+
static int decode_lookup(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_LOOKUP);
@@ -5259,6 +5308,19 @@ out:
return status;
}
+static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_release_lockowner(&xdr);
+ return status;
+}
+
/*
* Decode READLINK response
*/
@@ -5866,6 +5928,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(GETACL, enc_getacl, dec_getacl),
PROC(SETACL, enc_setacl, dec_setacl),
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
+ PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
#if defined(CONFIG_NFS_V4_1)
PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
PROC(CREATE_SESSION, enc_create_session, dec_create_session),
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 6bd19d843af..df101d9f546 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -105,7 +105,7 @@ static char nfs_root_name[256] __initdata = "";
static __be32 servaddr __initdata = 0;
/* Name of directory to mount */
-static char nfs_export_path[NFS_MAXPATHLEN] __initdata = { 0, };
+static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = { 0, };
/* NFS-related data */
static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */
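The nfs_export_path fix above is a classic off-by-one: a path of NFS_MAXPATHLEN characters needs one extra byte for the terminating NUL, otherwise the longest legal path is silently truncated. A tiny runnable illustration with a shrunken limit:

	#include <string.h>

	#define MAXPATHLEN_DEMO 4

	static char path_bad[MAXPATHLEN_DEMO];		/* fits 3 chars + NUL */
	static char path_good[MAXPATHLEN_DEMO + 1];	/* fits 4 chars + NUL */

	int main(void)
	{
		strncpy(path_good, "abcd", sizeof(path_good) - 1); /* "abcd" */
		strncpy(path_bad, "abcd", sizeof(path_bad) - 1);   /* "abc" */
		return 0;
	}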
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index a3654e57b58..919490232e1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -79,6 +79,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
req->wb_pgbase = offset;
req->wb_bytes = count;
req->wb_context = get_nfs_open_context(ctx);
+ req->wb_lock_context = nfs_get_lock_context(ctx);
kref_init(&req->wb_kref);
return req;
}
@@ -141,11 +142,16 @@ void nfs_clear_request(struct nfs_page *req)
{
struct page *page = req->wb_page;
struct nfs_open_context *ctx = req->wb_context;
+ struct nfs_lock_context *l_ctx = req->wb_lock_context;
if (page != NULL) {
page_cache_release(page);
req->wb_page = NULL;
}
+ if (l_ctx != NULL) {
+ nfs_put_lock_context(l_ctx);
+ req->wb_lock_context = NULL;
+ }
if (ctx != NULL) {
put_nfs_open_context(ctx);
req->wb_context = NULL;
@@ -235,7 +241,7 @@ static int nfs_can_coalesce_requests(struct nfs_page *prev,
{
if (req->wb_context->cred != prev->wb_context->cred)
return 0;
- if (req->wb_context->lockowner != prev->wb_context->lockowner)
+ if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
return 0;
if (req->wb_context->state != prev->wb_context->state)
return 0;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6e2b06e6ca7..87adc274424 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -190,6 +190,7 @@ static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
data->args.pages = data->pagevec;
data->args.count = count;
data->args.context = get_nfs_open_context(req->wb_context);
+ data->args.lock_context = req->wb_lock_context;
data->res.fattr = &data->fattr;
data->res.count = count;
@@ -410,7 +411,7 @@ void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
- if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
+ if (nfs4_setup_sequence(NFS_SERVER(data->inode),
&data->args.seq_args, &data->res.seq_res,
0, task))
return;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 04214fc5c30..ee26316ad1f 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -270,7 +270,7 @@ static const struct super_operations nfs_sops = {
.write_inode = nfs_write_inode,
.put_super = nfs_put_super,
.statfs = nfs_statfs,
- .clear_inode = nfs_clear_inode,
+ .evict_inode = nfs_evict_inode,
.umount_begin = nfs_umount_begin,
.show_options = nfs_show_options,
.show_stats = nfs_show_stats,
@@ -340,7 +340,7 @@ static const struct super_operations nfs4_sops = {
.write_inode = nfs_write_inode,
.put_super = nfs_put_super,
.statfs = nfs_statfs,
- .clear_inode = nfs4_clear_inode,
+ .evict_inode = nfs4_evict_inode,
.umount_begin = nfs_umount_begin,
.show_options = nfs_show_options,
.show_stats = nfs_show_stats,
@@ -546,6 +546,9 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
{
struct sockaddr *sap = (struct sockaddr *)&nfss->mountd_address;
+ if (nfss->flags & NFS_MOUNT_LEGACY_INTERFACE)
+ return;
+
switch (sap->sa_family) {
case AF_INET: {
struct sockaddr_in *sin = (struct sockaddr_in *)sap;
@@ -570,6 +573,22 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
nfs_show_mountd_netid(m, nfss, showdefaults);
}
+#ifdef CONFIG_NFS_V4
+static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ struct nfs_client *clp = nfss->nfs_client;
+
+ seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
+ seq_printf(m, ",minorversion=%u", clp->cl_minorversion);
+}
+#else
+static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+}
+#endif
+
/*
* Describe the mount options in force on this server representation
*/
@@ -631,11 +650,9 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
if (version != 4)
nfs_show_mountd_options(m, nfss, showdefaults);
+ else
+ nfs_show_nfsv4_options(m, nfss, showdefaults);
-#ifdef CONFIG_NFS_V4
- if (clp->rpc_ops->version == 4)
- seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
-#endif
if (nfss->options & NFS_OPTION_FSCACHE)
seq_printf(m, ",fsc");
}
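The new nfs_show_nfsv4_options() pair demonstrates the kernel's usual config-gating idiom: when CONFIG_NFS_V4 is off, an empty stub keeps the call site free of #ifdef clutter. A runnable illustration — build with and without -DCONFIG_FEATURE_DEMO to compare; the macro name is illustrative:

	#include <stdio.h>

	#ifdef CONFIG_FEATURE_DEMO
	static void show_feature_options(void)
	{
		printf(",clientaddr=192.0.2.1,minorversion=1\n");
	}
	#else
	static void show_feature_options(void)
	{
		/* feature compiled out: nothing to print */
	}
	#endif

	int main(void)
	{
		printf("mount options");
		show_feature_options();	/* unconditional call site */
		return 0;
	}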
@@ -1766,6 +1783,7 @@ static int nfs_validate_mount_data(void *options,
* can deal with.
*/
args->flags = data->flags & NFS_MOUNT_FLAGMASK;
+ args->flags |= NFS_MOUNT_LEGACY_INTERFACE;
args->rsize = data->rsize;
args->wsize = data->wsize;
args->timeo = data->timeo;
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index a2242af6a17..2f84adaad42 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -110,7 +110,7 @@ void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
struct nfs_unlinkdata *data = calldata;
struct nfs_server *server = NFS_SERVER(data->dir);
- if (nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
+ if (nfs4_setup_sequence(server, &data->args.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 91679e2631e..874972d9427 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struct page *page)
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
-static struct nfs_page *nfs_find_and_lock_request(struct page *page)
+static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
{
struct inode *inode = page->mapping->host;
struct nfs_page *req;
@@ -241,7 +241,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
* request as dirty (in which case we don't care).
*/
spin_unlock(&inode->i_lock);
- ret = nfs_wait_on_request(req);
+ if (!nonblock)
+ ret = nfs_wait_on_request(req);
+ else
+ ret = -EAGAIN;
nfs_release_request(req);
if (ret != 0)
return ERR_PTR(ret);
@@ -256,12 +259,12 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
* May return an error if the user signalled nfs_wait_on_request().
*/
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
- struct page *page)
+ struct page *page, bool nonblock)
{
struct nfs_page *req;
int ret = 0;
- req = nfs_find_and_lock_request(page);
+ req = nfs_find_and_lock_request(page, nonblock);
if (!req)
goto out;
ret = PTR_ERR(req);
@@ -283,12 +286,20 @@ out:
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
struct inode *inode = page->mapping->host;
+ int ret;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
nfs_pageio_cond_complete(pgio, page->index);
- return nfs_page_async_flush(pgio, page);
+ ret = nfs_page_async_flush(pgio, page,
+ wbc->sync_mode == WB_SYNC_NONE ||
+ wbc->nonblocking != 0);
+ if (ret == -EAGAIN) {
+ redirty_page_for_writepage(wbc, page);
+ ret = 0;
+ }
+ return ret;
}
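The writeback change above makes best-effort flushes (WB_SYNC_NONE or nonblocking) skip a busy request with -EAGAIN and redirty the page for a later pass, rather than stalling the flusher. A user-space sketch of that policy using a trylock, assuming pthreads; the page type and names are illustrative:

	#include <pthread.h>
	#include <errno.h>

	struct demo_page { pthread_mutex_t lock; int dirty; };

	static int flush_page(struct demo_page *pg, int nonblock)
	{
		if (nonblock) {
			if (pthread_mutex_trylock(&pg->lock) != 0)
				return -EAGAIN;	/* caller re-dirties, moves on */
		} else {
			pthread_mutex_lock(&pg->lock);	/* sync mode: wait */
		}
		pg->dirty = 0;				/* ... issue the write ... */
		pthread_mutex_unlock(&pg->lock);
		return 0;
	}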
/*
@@ -689,7 +700,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
req = nfs_page_find_request(page);
if (req == NULL)
return 0;
- do_flush = req->wb_page != page || req->wb_context != ctx;
+ do_flush = req->wb_page != page || req->wb_context != ctx ||
+ req->wb_lock_context->lockowner != current->files ||
+ req->wb_lock_context->pid != current->tgid;
nfs_release_request(req);
if (!do_flush)
return 0;
@@ -813,6 +826,7 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
data->args.pages = data->pagevec;
data->args.count = count;
data->args.context = get_nfs_open_context(req->wb_context);
+ data->args.lock_context = req->wb_lock_context;
data->args.stable = NFS_UNSTABLE;
if (how & FLUSH_STABLE) {
data->args.stable = NFS_DATA_SYNC;
@@ -1036,9 +1050,9 @@ out:
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
- struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;
- if (nfs4_setup_sequence(clp, &data->args.seq_args,
+ if (nfs4_setup_sequence(NFS_SERVER(data->inode),
+ &data->args.seq_args,
&data->res.seq_res, 1, task))
return;
rpc_call_start(task);
@@ -1379,7 +1393,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
.rpc_release = nfs_commit_release,
};
-static int nfs_commit_inode(struct inode *inode, int how)
+int nfs_commit_inode(struct inode *inode, int how)
{
LIST_HEAD(head);
int may_wait = how & FLUSH_SYNC;
@@ -1443,11 +1457,6 @@ out_mark_dirty:
return ret;
}
#else
-static int nfs_commit_inode(struct inode *inode, int how)
-{
- return 0;
-}
-
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
return 0;
@@ -1546,7 +1555,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
nfs_fscache_release_page(page, GFP_KERNEL);
- req = nfs_find_and_lock_request(page);
+ req = nfs_find_and_lock_request(page, false);
ret = PTR_ERR(req);
if (IS_ERR(req))
goto out;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 69b266db7f5..3dfef062396 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4215,8 +4215,8 @@ nfs4_state_shutdown(void)
nfs4_lock_state();
nfs4_release_reclaim();
__nfs4_state_shutdown();
- nfsd4_destroy_callback_queue();
nfs4_unlock_state();
+ nfsd4_destroy_callback_queue();
}
/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index f8931acb05f..1a468bbd330 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1756,6 +1756,10 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
struct nfs4_acl *acl = NULL;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
+ struct path path = {
+ .mnt = exp->ex_path.mnt,
+ .dentry = dentry,
+ };
BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
@@ -1776,7 +1780,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
FATTR4_WORD0_MAXNAME)) ||
(bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
- err = vfs_statfs(dentry, &statfs);
+ err = vfs_statfs(&path, &statfs);
if (err)
goto out_nfserr;
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 1709138affb..96360a83cb9 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -443,8 +443,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
if (size_change)
put_write_access(inode);
if (!err)
- if (EX_ISSYNC(fhp->fh_export))
- write_inode_now(inode, 1);
+ commit_metadata(fhp);
out:
return err;
@@ -935,7 +934,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
nfsdstats.io_read += host_err;
*count = host_err;
err = 0;
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
} else
err = nfserrno(host_err);
out:
@@ -1046,7 +1045,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
goto out_nfserr;
*cnt = host_err;
nfsdstats.io_write += host_err;
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
/* clear setuid/setgid flag after write */
if (inode->i_mode & (S_ISUID | S_ISGID))
@@ -2034,8 +2033,14 @@ out:
__be32
nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
{
- __be32 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
- if (!err && vfs_statfs(fhp->fh_dentry,stat))
+ struct path path = {
+ .mnt = fhp->fh_export->ex_path.mnt,
+ .dentry = fhp->fh_dentry,
+ };
+ __be32 err;
+
+ err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
+ if (!err && vfs_statfs(&path, stat))
err = nfserr_io;
return err;
}
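Both nfsd call sites adapt to the new vfs_statfs() signature the same way: build a struct path from the export's vfsmount plus the dentry with a designated initializer. A runnable sketch of the shape; the demo_* types stand in for the kernel's:

	#include <stdio.h>

	struct demo_mnt { int id; };
	struct demo_dentry { const char *name; };
	struct demo_path {
		struct demo_mnt *mnt;
		struct demo_dentry *dentry;
	};

	static int demo_statfs(const struct demo_path *path)
	{
		printf("statfs on %s (mnt %d)\n", path->dentry->name,
		       path->mnt->id);
		return 0;
	}

	int main(void)
	{
		struct demo_mnt m = { 1 };
		struct demo_dentry d = { "export" };
		struct demo_path path = { .mnt = &m, .dentry = &d };

		return demo_statfs(&path);
	}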
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index effdbdbe6c1..3dbdc1d356b 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -26,6 +26,8 @@
#include "nilfs.h"
#include "bmap.h"
#include "sb.h"
+#include "btree.h"
+#include "direct.h"
#include "btnode.h"
#include "mdt.h"
#include "dat.h"
@@ -533,7 +535,7 @@ void nilfs_bmap_init_gc(struct nilfs_bmap *bmap)
void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
{
- memcpy(gcbmap, bmap, sizeof(union nilfs_bmap_union));
+ memcpy(gcbmap, bmap, sizeof(*bmap));
init_rwsem(&gcbmap->b_sem);
lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode;
@@ -541,7 +543,7 @@ void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
void nilfs_bmap_commit_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap)
{
- memcpy(bmap, gcbmap, sizeof(union nilfs_bmap_union));
+ memcpy(bmap, gcbmap, sizeof(*bmap));
init_rwsem(&bmap->b_sem);
lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
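With nilfs_bmap_union gone, the copies above switch to the sizeof(*ptr) idiom: the length tracks the pointed-to type, so a later change to struct nilfs_bmap cannot leave a stale, oversized memcpy behind. A minimal illustration with a stand-in type:

	#include <string.h>

	struct demo_bmap { unsigned long state; long last_key; };

	static void bmap_clone(struct demo_bmap *dst,
			       const struct demo_bmap *src)
	{
		memcpy(dst, src, sizeof(*dst));	/* not sizeof(union ...) */
	}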
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 9980d7dbab9..a20569b1992 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -32,11 +32,6 @@
#define NILFS_BMAP_INVALID_PTR 0
-#define nilfs_bmap_dkey_to_key(dkey) le64_to_cpu(dkey)
-#define nilfs_bmap_key_to_dkey(key) cpu_to_le64(key)
-#define nilfs_bmap_dptr_to_ptr(dptr) le64_to_cpu(dptr)
-#define nilfs_bmap_ptr_to_dptr(ptr) cpu_to_le64(ptr)
-
#define nilfs_bmap_keydiff_abs(diff) ((diff) < 0 ? -(diff) : (diff))
@@ -71,7 +66,7 @@ struct nilfs_bmap_operations {
int (*bop_delete)(struct nilfs_bmap *, __u64);
void (*bop_clear)(struct nilfs_bmap *);
- int (*bop_propagate)(const struct nilfs_bmap *, struct buffer_head *);
+ int (*bop_propagate)(struct nilfs_bmap *, struct buffer_head *);
void (*bop_lookup_dirty_buffers)(struct nilfs_bmap *,
struct list_head *);
@@ -110,6 +105,7 @@ static inline int nilfs_bmap_is_new_ptr(unsigned long ptr)
* @b_last_allocated_ptr: last allocated ptr for data block
* @b_ptr_type: pointer type
* @b_state: state
+ * @b_nchildren_per_block: maximum number of child nodes for non-root nodes
*/
struct nilfs_bmap {
union {
@@ -123,6 +119,7 @@ struct nilfs_bmap {
__u64 b_last_allocated_ptr;
int b_ptr_type;
int b_state;
+ __u16 b_nchildren_per_block;
};
/* pointer type */
@@ -224,6 +221,13 @@ static inline void nilfs_bmap_abort_end_ptr(struct nilfs_bmap *bmap,
nilfs_dat_abort_end(dat, &req->bpr_req);
}
+static inline void nilfs_bmap_set_target_v(struct nilfs_bmap *bmap, __u64 key,
+ __u64 ptr)
+{
+ bmap->b_last_allocated_key = key;
+ bmap->b_last_allocated_ptr = ptr;
+}
+
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
const struct buffer_head *);
diff --git a/fs/nilfs2/bmap_union.h b/fs/nilfs2/bmap_union.h
deleted file mode 100644
index d41509bff47..00000000000
--- a/fs/nilfs2/bmap_union.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * bmap_union.h - NILFS block mapping.
- *
- * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Written by Koji Sato <koji@osrg.net>.
- */
-
-#ifndef _NILFS_BMAP_UNION_H
-#define _NILFS_BMAP_UNION_H
-
-#include "bmap.h"
-#include "direct.h"
-#include "btree.h"
-
-/**
- * nilfs_bmap_union -
- * @bi_bmap: bmap structure
- * @bi_btree: direct map structure
- * @bi_direct: B-tree structure
- */
-union nilfs_bmap_union {
- struct nilfs_bmap bi_bmap;
- struct nilfs_direct bi_direct;
- struct nilfs_btree bi_btree;
-};
-
-#endif /* _NILFS_BMAP_UNION_H */
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 447ce47a330..f78ab1044d1 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -96,10 +96,12 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
}
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
- sector_t pblocknr, struct buffer_head **pbh)
+ sector_t pblocknr, int mode,
+ struct buffer_head **pbh, sector_t *submit_ptr)
{
struct buffer_head *bh;
struct inode *inode = NILFS_BTNC_I(btnc);
+ struct page *page;
int err;
bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
@@ -107,6 +109,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
return -ENOMEM;
err = -EEXIST; /* internal code */
+ page = bh->b_page;
if (buffer_uptodate(bh) || buffer_dirty(bh))
goto found;
@@ -125,7 +128,16 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
}
}
}
- lock_buffer(bh);
+
+ if (mode == READA) {
+ if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
+ err = -EBUSY; /* internal code */
+ brelse(bh);
+ goto out_locked;
+ }
+ } else { /* mode == READ */
+ lock_buffer(bh);
+ }
if (buffer_uptodate(bh)) {
unlock_buffer(bh);
err = -EEXIST; /* internal code */
@@ -136,15 +148,16 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
bh->b_blocknr = pblocknr; /* set block address for read */
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
- submit_bh(READ, bh);
+ submit_bh(mode, bh);
bh->b_blocknr = blocknr; /* set back to the given block address */
+ *submit_ptr = pblocknr;
err = 0;
found:
*pbh = bh;
out_locked:
- unlock_page(bh->b_page);
- page_cache_release(bh->b_page);
+ unlock_page(page);
+ page_cache_release(page);
return err;
}
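The READA branch added above only issues a speculative read when it is cheap: the physical block must be contiguous with the last submission (*submit_ptr + 1) and the buffer lock must be free; otherwise it backs off with -EBUSY instead of blocking. A sketch of that gate under stated assumptions — trylock() and issue_read() are illustrative stand-ins, not real APIs:

	#include <errno.h>
	#include <stdbool.h>

	typedef unsigned long long sector_t;

	extern bool trylock(void *bh);			/* assumed non-blocking lock */
	extern void issue_read(void *bh, sector_t blk);

	static int submit_readahead(void *bh, sector_t pblocknr,
				    sector_t *submit_ptr)
	{
		if (pblocknr != *submit_ptr + 1 || !trylock(bh))
			return -EBUSY;		/* not sequential or contended */
		issue_read(bh, pblocknr);
		*submit_ptr = pblocknr;		/* remembered for the next gate */
		return 0;
	}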
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 07da83f0771..79037494f1e 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -42,8 +42,8 @@ void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
__u64 blocknr);
-int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t,
- struct buffer_head **);
+int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int,
+ struct buffer_head **, sector_t *);
void nilfs_btnode_delete(struct buffer_head *);
int nilfs_btnode_prepare_change_key(struct address_space *,
struct nilfs_btnode_chkey_ctxt *);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index b27a342c5af..300c2bc00c3 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -66,30 +66,10 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
/*
* B-tree node operations
*/
-static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr,
- struct buffer_head **bhp)
-{
- struct address_space *btnc =
- &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
- int err;
-
- err = nilfs_btnode_submit_block(btnc, ptr, 0, bhp);
- if (err)
- return err == -EEXIST ? 0 : err;
-
- wait_on_buffer(*bhp);
- if (!buffer_uptodate(*bhp)) {
- brelse(*bhp);
- return -EIO;
- }
- return 0;
-}
-
-static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
+static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
__u64 ptr, struct buffer_head **bhp)
{
- struct address_space *btnc =
- &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
+ struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct buffer_head *bh;
bh = nilfs_btnode_create_block(btnc, ptr);
@@ -101,71 +81,55 @@ static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
return 0;
}
-static inline int
-nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
+static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
{
return node->bn_flags;
}
-static inline void
+static void
nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
{
node->bn_flags = flags;
}
-static inline int nilfs_btree_node_root(const struct nilfs_btree_node *node)
+static int nilfs_btree_node_root(const struct nilfs_btree_node *node)
{
return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
}
-static inline int
-nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
+static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
{
return node->bn_level;
}
-static inline void
+static void
nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
{
node->bn_level = level;
}
-static inline int
-nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
+static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
{
return le16_to_cpu(node->bn_nchildren);
}
-static inline void
+static void
nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren)
{
node->bn_nchildren = cpu_to_le16(nchildren);
}
-static inline int nilfs_btree_node_size(const struct nilfs_btree *btree)
+static int nilfs_btree_node_size(const struct nilfs_bmap *btree)
{
- return 1 << btree->bt_bmap.b_inode->i_blkbits;
+ return 1 << btree->b_inode->i_blkbits;
}
-static inline int
-nilfs_btree_node_nchildren_min(const struct nilfs_btree_node *node,
- const struct nilfs_btree *btree)
+static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree)
{
- return nilfs_btree_node_root(node) ?
- NILFS_BTREE_ROOT_NCHILDREN_MIN :
- NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
+ return btree->b_nchildren_per_block;
}
-static inline int
-nilfs_btree_node_nchildren_max(const struct nilfs_btree_node *node,
- const struct nilfs_btree *btree)
-{
- return nilfs_btree_node_root(node) ?
- NILFS_BTREE_ROOT_NCHILDREN_MAX :
- NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree));
-}
-
-static inline __le64 *
+static __le64 *
nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
{
return (__le64 *)((char *)(node + 1) +
@@ -173,45 +137,40 @@ nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
}
-static inline __le64 *
-nilfs_btree_node_dptrs(const struct nilfs_btree_node *node,
- const struct nilfs_btree *btree)
+static __le64 *
+nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax)
{
- return (__le64 *)(nilfs_btree_node_dkeys(node) +
- nilfs_btree_node_nchildren_max(node, btree));
+ return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax);
}
-static inline __u64
+static __u64
nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
{
- return nilfs_bmap_dkey_to_key(*(nilfs_btree_node_dkeys(node) + index));
+ return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index));
}
-static inline void
+static void
nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
{
- *(nilfs_btree_node_dkeys(node) + index) = nilfs_bmap_key_to_dkey(key);
+ *(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key);
}
-static inline __u64
-nilfs_btree_node_get_ptr(const struct nilfs_btree *btree,
- const struct nilfs_btree_node *node, int index)
+static __u64
+nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index,
+ int ncmax)
{
- return nilfs_bmap_dptr_to_ptr(*(nilfs_btree_node_dptrs(node, btree) +
- index));
+ return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index));
}
-static inline void
-nilfs_btree_node_set_ptr(struct nilfs_btree *btree,
- struct nilfs_btree_node *node, int index, __u64 ptr)
+static void
+nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr,
+ int ncmax)
{
- *(nilfs_btree_node_dptrs(node, btree) + index) =
- nilfs_bmap_ptr_to_dptr(ptr);
+ *(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr);
}
-static void nilfs_btree_node_init(struct nilfs_btree *btree,
- struct nilfs_btree_node *node,
- int flags, int level, int nchildren,
+static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags,
+ int level, int nchildren, int ncmax,
const __u64 *keys, const __u64 *ptrs)
{
__le64 *dkeys;
@@ -223,29 +182,28 @@ static void nilfs_btree_node_init(struct nilfs_btree *btree,
nilfs_btree_node_set_nchildren(node, nchildren);
dkeys = nilfs_btree_node_dkeys(node);
- dptrs = nilfs_btree_node_dptrs(node, btree);
+ dptrs = nilfs_btree_node_dptrs(node, ncmax);
for (i = 0; i < nchildren; i++) {
- dkeys[i] = nilfs_bmap_key_to_dkey(keys[i]);
- dptrs[i] = nilfs_bmap_ptr_to_dptr(ptrs[i]);
+ dkeys[i] = cpu_to_le64(keys[i]);
+ dptrs[i] = cpu_to_le64(ptrs[i]);
}
}
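The removed nilfs_bmap_key_to_dkey()/dkey_to_key() wrappers were one-to-one aliases for cpu_to_le64()/le64_to_cpu(), so the code now calls the standard converters directly. A runnable user-space equivalent of the round trip, assuming glibc's <endian.h>:

	#include <endian.h>
	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint64_t key = 0xdeadbeefULL;
		uint64_t dkey = htole64(key);	/* cpu_to_le64() analog */

		assert(le64toh(dkey) == key);	/* le64_to_cpu() analog */
		return 0;
	}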
/* Assume the buffer heads corresponding to left and right are locked. */
-static void nilfs_btree_node_move_left(struct nilfs_btree *btree,
- struct nilfs_btree_node *left,
+static void nilfs_btree_node_move_left(struct nilfs_btree_node *left,
struct nilfs_btree_node *right,
- int n)
+ int n, int lncmax, int rncmax)
{
__le64 *ldkeys, *rdkeys;
__le64 *ldptrs, *rdptrs;
int lnchildren, rnchildren;
ldkeys = nilfs_btree_node_dkeys(left);
- ldptrs = nilfs_btree_node_dptrs(left, btree);
+ ldptrs = nilfs_btree_node_dptrs(left, lncmax);
lnchildren = nilfs_btree_node_get_nchildren(left);
rdkeys = nilfs_btree_node_dkeys(right);
- rdptrs = nilfs_btree_node_dptrs(right, btree);
+ rdptrs = nilfs_btree_node_dptrs(right, rncmax);
rnchildren = nilfs_btree_node_get_nchildren(right);
memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
@@ -260,21 +218,20 @@ static void nilfs_btree_node_move_left(struct nilfs_btree *btree,
}
/* Assume that the buffer heads corresponding to left and right are locked. */
-static void nilfs_btree_node_move_right(struct nilfs_btree *btree,
- struct nilfs_btree_node *left,
+static void nilfs_btree_node_move_right(struct nilfs_btree_node *left,
struct nilfs_btree_node *right,
- int n)
+ int n, int lncmax, int rncmax)
{
__le64 *ldkeys, *rdkeys;
__le64 *ldptrs, *rdptrs;
int lnchildren, rnchildren;
ldkeys = nilfs_btree_node_dkeys(left);
- ldptrs = nilfs_btree_node_dptrs(left, btree);
+ ldptrs = nilfs_btree_node_dptrs(left, lncmax);
lnchildren = nilfs_btree_node_get_nchildren(left);
rdkeys = nilfs_btree_node_dkeys(right);
- rdptrs = nilfs_btree_node_dptrs(right, btree);
+ rdptrs = nilfs_btree_node_dptrs(right, rncmax);
rnchildren = nilfs_btree_node_get_nchildren(right);
memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
@@ -289,16 +246,15 @@ static void nilfs_btree_node_move_right(struct nilfs_btree *btree,
}
/* Assume that the buffer head corresponding to node is locked. */
-static void nilfs_btree_node_insert(struct nilfs_btree *btree,
- struct nilfs_btree_node *node,
- __u64 key, __u64 ptr, int index)
+static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index,
+ __u64 key, __u64 ptr, int ncmax)
{
__le64 *dkeys;
__le64 *dptrs;
int nchildren;
dkeys = nilfs_btree_node_dkeys(node);
- dptrs = nilfs_btree_node_dptrs(node, btree);
+ dptrs = nilfs_btree_node_dptrs(node, ncmax);
nchildren = nilfs_btree_node_get_nchildren(node);
if (index < nchildren) {
memmove(dkeys + index + 1, dkeys + index,
@@ -306,16 +262,15 @@ static void nilfs_btree_node_insert(struct nilfs_btree *btree,
memmove(dptrs + index + 1, dptrs + index,
(nchildren - index) * sizeof(*dptrs));
}
- dkeys[index] = nilfs_bmap_key_to_dkey(key);
- dptrs[index] = nilfs_bmap_ptr_to_dptr(ptr);
+ dkeys[index] = cpu_to_le64(key);
+ dptrs[index] = cpu_to_le64(ptr);
nchildren++;
nilfs_btree_node_set_nchildren(node, nchildren);
}
/* Assume that the buffer head corresponding to node is locked. */
-static void nilfs_btree_node_delete(struct nilfs_btree *btree,
- struct nilfs_btree_node *node,
- __u64 *keyp, __u64 *ptrp, int index)
+static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index,
+ __u64 *keyp, __u64 *ptrp, int ncmax)
{
__u64 key;
__u64 ptr;
@@ -324,9 +279,9 @@ static void nilfs_btree_node_delete(struct nilfs_btree *btree,
int nchildren;
dkeys = nilfs_btree_node_dkeys(node);
- dptrs = nilfs_btree_node_dptrs(node, btree);
- key = nilfs_bmap_dkey_to_key(dkeys[index]);
- ptr = nilfs_bmap_dptr_to_ptr(dptrs[index]);
+ dptrs = nilfs_btree_node_dptrs(node, ncmax);
+ key = le64_to_cpu(dkeys[index]);
+ ptr = le64_to_cpu(dptrs[index]);
nchildren = nilfs_btree_node_get_nchildren(node);
if (keyp != NULL)
*keyp = key;
@@ -382,40 +337,92 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
return s == 0;
}
-static inline struct nilfs_btree_node *
-nilfs_btree_get_root(const struct nilfs_btree *btree)
+/**
+ * nilfs_btree_node_broken - verify consistency of btree node
+ * @node: btree node block to be examined
+ * @size: node size (in bytes)
+ * @blocknr: block number
+ *
+ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ */
+static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
+ size_t size, sector_t blocknr)
{
- return (struct nilfs_btree_node *)btree->bt_bmap.b_u.u_data;
+ int level, flags, nchildren;
+ int ret = 0;
+
+ level = nilfs_btree_node_get_level(node);
+ flags = nilfs_btree_node_get_flags(node);
+ nchildren = nilfs_btree_node_get_nchildren(node);
+
+ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+ level >= NILFS_BTREE_LEVEL_MAX ||
+ (flags & NILFS_BTREE_NODE_ROOT) ||
+ nchildren < 0 ||
+ nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
+ printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
+ "level = %d, flags = 0x%x, nchildren = %d\n",
+ (unsigned long long)blocknr, level, flags, nchildren);
+ ret = 1;
+ }
+ return ret;
}
-static inline struct nilfs_btree_node *
+int nilfs_btree_broken_node_block(struct buffer_head *bh)
+{
+ int ret;
+
+ if (buffer_nilfs_checked(bh))
+ return 0;
+
+ ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
+ bh->b_size, bh->b_blocknr);
+ if (likely(!ret))
+ set_buffer_nilfs_checked(bh);
+ return ret;
+}
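The two helpers above split verification in half: nilfs_btree_node_broken() does the range checks, and nilfs_btree_broken_node_block() caches a successful result in the buffer head so each node block is validated only once. A minimal sketch of the intended read-side use, borrowing only names from this diff (error handling condensed):

    /* Sketch: validate a btree node block after I/O completes. */
    if (nilfs_btree_broken_node_block(bh)) {
            /* Corrupt metadata: clear the uptodate flag so a later
             * read retries from disk instead of reusing the bad copy. */
            clear_buffer_uptodate(bh);
            brelse(bh);
            return -EINVAL;
    }
    /* On success the buffer now carries the nilfs_checked flag, so
     * repeated lookups through this block skip the range checks. */

This mirrors the out_check path of __nilfs_btree_get_block() introduced later in this hunk.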
+
+static struct nilfs_btree_node *
+nilfs_btree_get_root(const struct nilfs_bmap *btree)
+{
+ return (struct nilfs_btree_node *)btree->b_u.u_data;
+}
+
+static struct nilfs_btree_node *
nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level)
{
return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
}
-static inline struct nilfs_btree_node *
+static struct nilfs_btree_node *
nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level)
{
return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
}
-static inline int nilfs_btree_height(const struct nilfs_btree *btree)
+static int nilfs_btree_height(const struct nilfs_bmap *btree)
{
return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
}
-static inline struct nilfs_btree_node *
-nilfs_btree_get_node(const struct nilfs_btree *btree,
+static struct nilfs_btree_node *
+nilfs_btree_get_node(const struct nilfs_bmap *btree,
const struct nilfs_btree_path *path,
- int level)
+ int level, int *ncmaxp)
{
- return (level == nilfs_btree_height(btree) - 1) ?
- nilfs_btree_get_root(btree) :
- nilfs_btree_get_nonroot_node(path, level);
+ struct nilfs_btree_node *node;
+
+ if (level == nilfs_btree_height(btree) - 1) {
+ node = nilfs_btree_get_root(btree);
+ *ncmaxp = NILFS_BTREE_ROOT_NCHILDREN_MAX;
+ } else {
+ node = nilfs_btree_get_nonroot_node(path, level);
+ *ncmaxp = nilfs_btree_nchildren_per_block(btree);
+ }
+ return node;
}
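With this change, node accessors take an explicit capacity instead of deriving it from the btree object: the root embedded in the bmap holds at most NILFS_BTREE_ROOT_NCHILDREN_MAX children, while every on-disk node shares the cached b_nchildren_per_block value. A short sketch of the caller pattern the new signatures assume:

    /* Sketch: pair a node with the capacity that matches its kind. */
    struct nilfs_btree_node *node;
    int ncmax;

    node = nilfs_btree_get_node(btree, path, level, &ncmax);
    /* ncmax is NILFS_BTREE_ROOT_NCHILDREN_MAX for the embedded root,
     * btree->b_nchildren_per_block for node blocks read from disk. */
    ptr = nilfs_btree_node_get_ptr(node, path[level].bp_index, ncmax);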
-static inline int
+static int
nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
{
if (unlikely(nilfs_btree_node_get_level(node) != level)) {
@@ -427,13 +434,83 @@ nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
return 0;
}
-static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
+struct nilfs_btree_readahead_info {
+ struct nilfs_btree_node *node; /* parent node */
+ int max_ra_blocks; /* max number of blocks to read ahead */
+ int index; /* current index on the parent node */
+ int ncmax; /* number of children in the parent node */
+ int max_ra_blocks; /* max number of blocks to read ahead */
+};
+
+static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
+ struct buffer_head **bhp,
+ const struct nilfs_btree_readahead_info *ra)
+{
+ struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+ struct buffer_head *bh, *ra_bh;
+ sector_t submit_ptr = 0;
+ int ret;
+
+ ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
+ if (ret) {
+ if (ret != -EEXIST)
+ return ret;
+ goto out_check;
+ }
+
+ if (ra) {
+ int i, n;
+ __u64 ptr2;
+
+ /* read ahead sibling nodes */
+ for (n = ra->max_ra_blocks, i = ra->index + 1;
+ n > 0 && i < ra->ncmax; n--, i++) {
+ ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);
+
+ ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA,
+ &ra_bh, &submit_ptr);
+ if (likely(!ret || ret == -EEXIST))
+ brelse(ra_bh);
+ else if (ret != -EBUSY)
+ break;
+ if (!buffer_locked(bh))
+ goto out_no_wait;
+ }
+ }
+
+ wait_on_buffer(bh);
+
+ out_no_wait:
+ if (!buffer_uptodate(bh)) {
+ brelse(bh);
+ return -EIO;
+ }
+
+ out_check:
+ if (nilfs_btree_broken_node_block(bh)) {
+ clear_buffer_uptodate(bh);
+ brelse(bh);
+ return -EINVAL;
+ }
+
+ *bhp = bh;
+ return 0;
+}
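__nilfs_btree_get_block() submits the target block first, then opportunistically queues up to max_ra_blocks right-hand siblings from the parent before waiting, so readahead I/O overlaps with the foreground read. A condensed sketch of that ordering, using only calls that appear in this function (READ/READA flags as in this kernel version, error handling omitted):

    /* Sketch: foreground read plus sibling readahead, then one wait. */
    nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
    for (n = ra->max_ra_blocks, i = ra->index + 1;
         n > 0 && i < ra->ncmax; n--, i++) {
            ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);
            nilfs_btnode_submit_block(btnc, ptr2, 0, READA, /* async */
                                      &ra_bh, &submit_ptr);
            brelse(ra_bh);  /* readahead buffers are released right away */
    }
    wait_on_buffer(bh);     /* only the target block is waited on */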
+
+static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
+ struct buffer_head **bhp)
+{
+ return __nilfs_btree_get_block(btree, ptr, bhp, NULL);
+}
+
+static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
- __u64 key, __u64 *ptrp, int minlevel)
+ __u64 key, __u64 *ptrp, int minlevel,
+ int readahead)
{
struct nilfs_btree_node *node;
+ struct nilfs_btree_readahead_info p, *ra;
__u64 ptr;
- int level, index, found, ret;
+ int level, index, found, ncmax, ret;
node = nilfs_btree_get_root(btree);
level = nilfs_btree_node_get_level(node);
@@ -441,14 +518,27 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
return -ENOENT;
found = nilfs_btree_node_lookup(node, key, &index);
- ptr = nilfs_btree_node_get_ptr(btree, node, index);
+ ptr = nilfs_btree_node_get_ptr(node, index,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
path[level].bp_bh = NULL;
path[level].bp_index = index;
- for (level--; level >= minlevel; level--) {
- ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
+ ncmax = nilfs_btree_nchildren_per_block(btree);
+
+ while (--level >= minlevel) {
+ ra = NULL;
+ if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) {
+ p.node = nilfs_btree_get_node(btree, path, level + 1,
+ &p.ncmax);
+ p.index = index;
+ p.max_ra_blocks = 7;
+ ra = &p;
+ }
+ ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh,
+ ra);
if (ret < 0)
return ret;
+
node = nilfs_btree_get_nonroot_node(path, level);
if (nilfs_btree_bad_node(node, level))
return -EINVAL;
@@ -456,9 +546,9 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
found = nilfs_btree_node_lookup(node, key, &index);
else
index = 0;
- if (index < nilfs_btree_node_nchildren_max(node, btree))
- ptr = nilfs_btree_node_get_ptr(btree, node, index);
- else {
+ if (index < ncmax) {
+ ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
+ } else {
WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
/* insert */
ptr = NILFS_BMAP_INVALID_PTR;
@@ -474,22 +564,24 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
return 0;
}
-static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
+static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
__u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node;
__u64 ptr;
- int index, level, ret;
+ int index, level, ncmax, ret;
node = nilfs_btree_get_root(btree);
index = nilfs_btree_node_get_nchildren(node) - 1;
if (index < 0)
return -ENOENT;
level = nilfs_btree_node_get_level(node);
- ptr = nilfs_btree_node_get_ptr(btree, node, index);
+ ptr = nilfs_btree_node_get_ptr(node, index,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
path[level].bp_bh = NULL;
path[level].bp_index = index;
+ ncmax = nilfs_btree_nchildren_per_block(btree);
for (level--; level > 0; level--) {
ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
@@ -499,7 +591,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
if (nilfs_btree_bad_node(node, level))
return -EINVAL;
index = nilfs_btree_node_get_nchildren(node) - 1;
- ptr = nilfs_btree_node_get_ptr(btree, node, index);
+ ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
path[level].bp_index = index;
}
@@ -511,51 +603,45 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
return 0;
}
-static int nilfs_btree_lookup(const struct nilfs_bmap *bmap,
+static int nilfs_btree_lookup(const struct nilfs_bmap *btree,
__u64 key, int level, __u64 *ptrp)
{
- struct nilfs_btree *btree;
struct nilfs_btree_path *path;
- __u64 ptr;
int ret;
- btree = (struct nilfs_btree *)bmap;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
-
- if (ptrp != NULL)
- *ptrp = ptr;
+ ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0);
nilfs_btree_free_path(path);
return ret;
}
-static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
+static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
__u64 key, __u64 *ptrp, unsigned maxblocks)
{
- struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
struct nilfs_btree_path *path;
struct nilfs_btree_node *node;
struct inode *dat = NULL;
__u64 ptr, ptr2;
sector_t blocknr;
int level = NILFS_BTREE_LEVEL_NODE_MIN;
- int ret, cnt, index, maxlevel;
+ int ret, cnt, index, maxlevel, ncmax;
+ struct nilfs_btree_readahead_info p;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level);
+ ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1);
if (ret < 0)
goto out;
- if (NILFS_BMAP_USE_VBN(bmap)) {
- dat = nilfs_bmap_get_dat(bmap);
+ if (NILFS_BMAP_USE_VBN(btree)) {
+ dat = nilfs_bmap_get_dat(btree);
ret = nilfs_dat_translate(dat, ptr, &blocknr);
if (ret < 0)
goto out;
@@ -566,14 +652,14 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
goto end;
maxlevel = nilfs_btree_height(btree) - 1;
- node = nilfs_btree_get_node(btree, path, level);
+ node = nilfs_btree_get_node(btree, path, level, &ncmax);
index = path[level].bp_index + 1;
for (;;) {
while (index < nilfs_btree_node_get_nchildren(node)) {
if (nilfs_btree_node_get_key(node, index) !=
key + cnt)
goto end;
- ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
+ ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax);
if (dat) {
ret = nilfs_dat_translate(dat, ptr2, &blocknr);
if (ret < 0)
@@ -589,20 +675,24 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
break;
/* look-up right sibling node */
- node = nilfs_btree_get_node(btree, path, level + 1);
- index = path[level + 1].bp_index + 1;
- if (index >= nilfs_btree_node_get_nchildren(node) ||
- nilfs_btree_node_get_key(node, index) != key + cnt)
+ p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax);
+ p.index = path[level + 1].bp_index + 1;
+ p.max_ra_blocks = 7;
+ if (p.index >= nilfs_btree_node_get_nchildren(p.node) ||
+ nilfs_btree_node_get_key(p.node, p.index) != key + cnt)
break;
- ptr2 = nilfs_btree_node_get_ptr(btree, node, index);
- path[level + 1].bp_index = index;
+ ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax);
+ path[level + 1].bp_index = p.index;
brelse(path[level].bp_bh);
path[level].bp_bh = NULL;
- ret = nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh);
+
+ ret = __nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh,
+ &p);
if (ret < 0)
goto out;
node = nilfs_btree_get_nonroot_node(path, level);
+ ncmax = nilfs_btree_nchildren_per_block(btree);
index = 0;
path[level].bp_index = index;
}
@@ -614,7 +704,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap,
return ret;
}
-static void nilfs_btree_promote_key(struct nilfs_btree *btree,
+static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 key)
{
@@ -636,16 +726,18 @@ static void nilfs_btree_promote_key(struct nilfs_btree *btree,
}
}
-static void nilfs_btree_do_insert(struct nilfs_btree *btree,
+static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node;
+ int ncblk;
if (level < nilfs_btree_height(btree) - 1) {
node = nilfs_btree_get_nonroot_node(path, level);
- nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
- path[level].bp_index);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
+ nilfs_btree_node_insert(node, path[level].bp_index,
+ *keyp, *ptrp, ncblk);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
@@ -655,22 +747,24 @@ static void nilfs_btree_do_insert(struct nilfs_btree *btree,
0));
} else {
node = nilfs_btree_get_root(btree);
- nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
- path[level].bp_index);
+ nilfs_btree_node_insert(node, path[level].bp_index,
+ *keyp, *ptrp,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
}
}
-static void nilfs_btree_carry_left(struct nilfs_btree *btree,
+static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node, *left;
- int nchildren, lnchildren, n, move;
+ int nchildren, lnchildren, n, move, ncblk;
node = nilfs_btree_get_nonroot_node(path, level);
left = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
lnchildren = nilfs_btree_node_get_nchildren(left);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
move = 0;
n = (nchildren + lnchildren + 1) / 2 - lnchildren;
@@ -680,7 +774,7 @@ static void nilfs_btree_carry_left(struct nilfs_btree *btree,
move = 1;
}
- nilfs_btree_node_move_left(btree, left, node, n);
+ nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
@@ -705,17 +799,18 @@ static void nilfs_btree_carry_left(struct nilfs_btree *btree,
nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
-static void nilfs_btree_carry_right(struct nilfs_btree *btree,
+static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node, *right;
- int nchildren, rnchildren, n, move;
+ int nchildren, rnchildren, n, move, ncblk;
node = nilfs_btree_get_nonroot_node(path, level);
right = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
rnchildren = nilfs_btree_node_get_nchildren(right);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
move = 0;
n = (nchildren + rnchildren + 1) / 2 - rnchildren;
@@ -725,7 +820,7 @@ static void nilfs_btree_carry_right(struct nilfs_btree *btree,
move = 1;
}
- nilfs_btree_node_move_right(btree, node, right, n);
+ nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
@@ -751,18 +846,19 @@ static void nilfs_btree_carry_right(struct nilfs_btree *btree,
nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
-static void nilfs_btree_split(struct nilfs_btree *btree,
+static void nilfs_btree_split(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node, *right;
__u64 newkey;
__u64 newptr;
- int nchildren, n, move;
+ int nchildren, n, move, ncblk;
node = nilfs_btree_get_nonroot_node(path, level);
right = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
move = 0;
n = (nchildren + 1) / 2;
@@ -771,7 +867,7 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
move = 1;
}
- nilfs_btree_node_move_right(btree, node, right, n);
+ nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
@@ -783,8 +879,8 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
if (move) {
path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
- nilfs_btree_node_insert(btree, right, *keyp, *ptrp,
- path[level].bp_index);
+ nilfs_btree_node_insert(right, path[level].bp_index,
+ *keyp, *ptrp, ncblk);
*keyp = nilfs_btree_node_get_key(right, 0);
*ptrp = path[level].bp_newreq.bpr_ptr;
@@ -805,19 +901,21 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
path[level + 1].bp_index++;
}
-static void nilfs_btree_grow(struct nilfs_btree *btree,
+static void nilfs_btree_grow(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *root, *child;
- int n;
+ int n, ncblk;
root = nilfs_btree_get_root(btree);
child = nilfs_btree_get_sib_node(path, level);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
n = nilfs_btree_node_get_nchildren(root);
- nilfs_btree_node_move_right(btree, root, child, n);
+ nilfs_btree_node_move_right(root, child, n,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);
nilfs_btree_node_set_level(root, level + 1);
if (!buffer_dirty(path[level].bp_sib_bh))
@@ -832,11 +930,11 @@ static void nilfs_btree_grow(struct nilfs_btree *btree,
*ptrp = path[level].bp_newreq.bpr_ptr;
}
-static __u64 nilfs_btree_find_near(const struct nilfs_btree *btree,
+static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree,
const struct nilfs_btree_path *path)
{
struct nilfs_btree_node *node;
- int level;
+ int level, ncmax;
if (path == NULL)
return NILFS_BMAP_INVALID_PTR;
@@ -844,29 +942,30 @@ static __u64 nilfs_btree_find_near(const struct nilfs_btree *btree,
/* left sibling */
level = NILFS_BTREE_LEVEL_NODE_MIN;
if (path[level].bp_index > 0) {
- node = nilfs_btree_get_node(btree, path, level);
- return nilfs_btree_node_get_ptr(btree, node,
- path[level].bp_index - 1);
+ node = nilfs_btree_get_node(btree, path, level, &ncmax);
+ return nilfs_btree_node_get_ptr(node,
+ path[level].bp_index - 1,
+ ncmax);
}
/* parent */
level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
if (level <= nilfs_btree_height(btree) - 1) {
- node = nilfs_btree_get_node(btree, path, level);
- return nilfs_btree_node_get_ptr(btree, node,
- path[level].bp_index);
+ node = nilfs_btree_get_node(btree, path, level, &ncmax);
+ return nilfs_btree_node_get_ptr(node, path[level].bp_index,
+ ncmax);
}
return NILFS_BMAP_INVALID_PTR;
}
-static __u64 nilfs_btree_find_target_v(const struct nilfs_btree *btree,
+static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree,
const struct nilfs_btree_path *path,
__u64 key)
{
__u64 ptr;
- ptr = nilfs_bmap_find_target_seq(&btree->bt_bmap, key);
+ ptr = nilfs_bmap_find_target_seq(btree, key);
if (ptr != NILFS_BMAP_INVALID_PTR)
/* sequential access */
return ptr;
@@ -877,17 +976,10 @@ static __u64 nilfs_btree_find_target_v(const struct nilfs_btree *btree,
return ptr;
}
/* block group */
- return nilfs_bmap_find_target_in_group(&btree->bt_bmap);
-}
-
-static void nilfs_btree_set_target_v(struct nilfs_btree *btree, __u64 key,
- __u64 ptr)
-{
- btree->bt_bmap.b_last_allocated_key = key;
- btree->bt_bmap.b_last_allocated_ptr = ptr;
+ return nilfs_bmap_find_target_in_group(btree);
}
-static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
+static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int *levelp, __u64 key, __u64 ptr,
struct nilfs_bmap_stats *stats)
@@ -895,79 +987,78 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
struct buffer_head *bh;
struct nilfs_btree_node *node, *parent, *sib;
__u64 sibptr;
- int pindex, level, ret;
+ int pindex, level, ncmax, ncblk, ret;
struct inode *dat = NULL;
stats->bs_nblocks = 0;
level = NILFS_BTREE_LEVEL_DATA;
/* allocate a new ptr for data block */
- if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
+ if (NILFS_BMAP_USE_VBN(btree)) {
path[level].bp_newreq.bpr_ptr =
nilfs_btree_find_target_v(btree, path, key);
- dat = nilfs_bmap_get_dat(&btree->bt_bmap);
+ dat = nilfs_bmap_get_dat(btree);
}
- ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
- &path[level].bp_newreq, dat);
+ ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
if (ret < 0)
goto err_out_data;
+ ncblk = nilfs_btree_nchildren_per_block(btree);
+
for (level = NILFS_BTREE_LEVEL_NODE_MIN;
level < nilfs_btree_height(btree) - 1;
level++) {
node = nilfs_btree_get_nonroot_node(path, level);
- if (nilfs_btree_node_get_nchildren(node) <
- nilfs_btree_node_nchildren_max(node, btree)) {
+ if (nilfs_btree_node_get_nchildren(node) < ncblk) {
path[level].bp_op = nilfs_btree_do_insert;
stats->bs_nblocks++;
goto out;
}
- parent = nilfs_btree_get_node(btree, path, level + 1);
+ parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
pindex = path[level + 1].bp_index;
/* left sibling */
if (pindex > 0) {
- sibptr = nilfs_btree_node_get_ptr(btree, parent,
- pindex - 1);
+ sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
+ ncmax);
ret = nilfs_btree_get_block(btree, sibptr, &bh);
if (ret < 0)
goto err_out_child_node;
sib = (struct nilfs_btree_node *)bh->b_data;
- if (nilfs_btree_node_get_nchildren(sib) <
- nilfs_btree_node_nchildren_max(sib, btree)) {
+ if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_carry_left;
stats->bs_nblocks++;
goto out;
- } else
+ } else {
brelse(bh);
+ }
}
/* right sibling */
- if (pindex <
- nilfs_btree_node_get_nchildren(parent) - 1) {
- sibptr = nilfs_btree_node_get_ptr(btree, parent,
- pindex + 1);
+ if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) {
+ sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
+ ncmax);
ret = nilfs_btree_get_block(btree, sibptr, &bh);
if (ret < 0)
goto err_out_child_node;
sib = (struct nilfs_btree_node *)bh->b_data;
- if (nilfs_btree_node_get_nchildren(sib) <
- nilfs_btree_node_nchildren_max(sib, btree)) {
+ if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_carry_right;
stats->bs_nblocks++;
goto out;
- } else
+ } else {
brelse(bh);
+ }
}
/* split */
path[level].bp_newreq.bpr_ptr =
path[level - 1].bp_newreq.bpr_ptr + 1;
- ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
+ ret = nilfs_bmap_prepare_alloc_ptr(btree,
&path[level].bp_newreq, dat);
if (ret < 0)
goto err_out_child_node;
@@ -979,9 +1070,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
stats->bs_nblocks++;
- nilfs_btree_node_init(btree,
- (struct nilfs_btree_node *)bh->b_data,
- 0, level, 0, NULL, NULL);
+ sib = (struct nilfs_btree_node *)bh->b_data;
+ nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL);
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_split;
}
@@ -989,7 +1079,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
/* root */
node = nilfs_btree_get_root(btree);
if (nilfs_btree_node_get_nchildren(node) <
- nilfs_btree_node_nchildren_max(node, btree)) {
+ NILFS_BTREE_ROOT_NCHILDREN_MAX) {
path[level].bp_op = nilfs_btree_do_insert;
stats->bs_nblocks++;
goto out;
@@ -997,8 +1087,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
/* grow */
path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
- ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap,
- &path[level].bp_newreq, dat);
+ ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
if (ret < 0)
goto err_out_child_node;
ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
@@ -1006,8 +1095,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
if (ret < 0)
goto err_out_curr_node;
- nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data,
- 0, level, 0, NULL, NULL);
+ nilfs_btree_node_init((struct nilfs_btree_node *)bh->b_data,
+ 0, level, 0, ncblk, NULL, NULL);
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_grow;
@@ -1024,25 +1113,22 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
/* error */
err_out_curr_node:
- nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
- dat);
+ nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
err_out_child_node:
for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
nilfs_btnode_delete(path[level].bp_sib_bh);
- nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap,
- &path[level].bp_newreq, dat);
+ nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
}
- nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq,
- dat);
+ nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
err_out_data:
*levelp = level;
stats->bs_nblocks = 0;
return ret;
}
-static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
+static void nilfs_btree_commit_insert(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int maxlevel, __u64 key, __u64 ptr)
{
@@ -1051,35 +1137,33 @@ static void nilfs_btree_commit_insert(struct nilfs_btree *btree,
set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
- if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) {
- nilfs_btree_set_target_v(btree, key, ptr);
- dat = nilfs_bmap_get_dat(&btree->bt_bmap);
+ if (NILFS_BMAP_USE_VBN(btree)) {
+ nilfs_bmap_set_target_v(btree, key, ptr);
+ dat = nilfs_bmap_get_dat(btree);
}
for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
- nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap,
+ nilfs_bmap_commit_alloc_ptr(btree,
&path[level - 1].bp_newreq, dat);
path[level].bp_op(btree, path, level, &key, &ptr);
}
- if (!nilfs_bmap_dirty(&btree->bt_bmap))
- nilfs_bmap_set_dirty(&btree->bt_bmap);
+ if (!nilfs_bmap_dirty(btree))
+ nilfs_bmap_set_dirty(btree);
}
-static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
+static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr)
{
- struct nilfs_btree *btree;
struct nilfs_btree_path *path;
struct nilfs_bmap_stats stats;
int level, ret;
- btree = (struct nilfs_btree *)bmap;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
ret = nilfs_btree_do_lookup(btree, path, key, NULL,
- NILFS_BTREE_LEVEL_NODE_MIN);
+ NILFS_BTREE_LEVEL_NODE_MIN, 0);
if (ret != -ENOENT) {
if (ret == 0)
ret = -EEXIST;
@@ -1090,23 +1174,25 @@ static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
if (ret < 0)
goto out;
nilfs_btree_commit_insert(btree, path, level, key, ptr);
- nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
+ nilfs_bmap_add_blocks(btree, stats.bs_nblocks);
out:
nilfs_btree_free_path(path);
return ret;
}
-static void nilfs_btree_do_delete(struct nilfs_btree *btree,
+static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node;
+ int ncblk;
if (level < nilfs_btree_height(btree) - 1) {
node = nilfs_btree_get_nonroot_node(path, level);
- nilfs_btree_node_delete(btree, node, keyp, ptrp,
- path[level].bp_index);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
+ nilfs_btree_node_delete(node, path[level].bp_index,
+ keyp, ptrp, ncblk);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
if (path[level].bp_index == 0)
@@ -1114,17 +1200,18 @@ static void nilfs_btree_do_delete(struct nilfs_btree *btree,
nilfs_btree_node_get_key(node, 0));
} else {
node = nilfs_btree_get_root(btree);
- nilfs_btree_node_delete(btree, node, keyp, ptrp,
- path[level].bp_index);
+ nilfs_btree_node_delete(node, path[level].bp_index,
+ keyp, ptrp,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
}
}
-static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
+static void nilfs_btree_borrow_left(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node, *left;
- int nchildren, lnchildren, n;
+ int nchildren, lnchildren, n, ncblk;
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
@@ -1132,10 +1219,11 @@ static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
left = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
lnchildren = nilfs_btree_node_get_nchildren(left);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
n = (nchildren + lnchildren) / 2 - nchildren;
- nilfs_btree_node_move_right(btree, left, node, n);
+ nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
@@ -1150,12 +1238,12 @@ static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
path[level].bp_index += n;
}
-static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
+static void nilfs_btree_borrow_right(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node, *right;
- int nchildren, rnchildren, n;
+ int nchildren, rnchildren, n, ncblk;
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
@@ -1163,10 +1251,11 @@ static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
right = nilfs_btree_get_sib_node(path, level);
nchildren = nilfs_btree_node_get_nchildren(node);
rnchildren = nilfs_btree_node_get_nchildren(right);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
n = (nchildren + rnchildren) / 2 - nchildren;
- nilfs_btree_node_move_left(btree, node, right, n);
+ nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
@@ -1182,21 +1271,22 @@ static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
path[level].bp_sib_bh = NULL;
}
-static void nilfs_btree_concat_left(struct nilfs_btree *btree,
+static void nilfs_btree_concat_left(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node, *left;
- int n;
+ int n, ncblk;
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
node = nilfs_btree_get_nonroot_node(path, level);
left = nilfs_btree_get_sib_node(path, level);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
n = nilfs_btree_node_get_nchildren(node);
- nilfs_btree_node_move_left(btree, left, node, n);
+ nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_sib_bh))
nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
@@ -1207,21 +1297,22 @@ static void nilfs_btree_concat_left(struct nilfs_btree *btree,
path[level].bp_index += nilfs_btree_node_get_nchildren(left);
}
-static void nilfs_btree_concat_right(struct nilfs_btree *btree,
+static void nilfs_btree_concat_right(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *node, *right;
- int n;
+ int n, ncblk;
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
node = nilfs_btree_get_nonroot_node(path, level);
right = nilfs_btree_get_sib_node(path, level);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
n = nilfs_btree_node_get_nchildren(right);
- nilfs_btree_node_move_left(btree, node, right, n);
+ nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
if (!buffer_dirty(path[level].bp_bh))
nilfs_btnode_mark_dirty(path[level].bp_bh);
@@ -1231,29 +1322,32 @@ static void nilfs_btree_concat_right(struct nilfs_btree *btree,
path[level + 1].bp_index++;
}
-static void nilfs_btree_shrink(struct nilfs_btree *btree,
+static void nilfs_btree_shrink(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, __u64 *keyp, __u64 *ptrp)
{
struct nilfs_btree_node *root, *child;
- int n;
+ int n, ncblk;
nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
root = nilfs_btree_get_root(btree);
child = nilfs_btree_get_nonroot_node(path, level);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
- nilfs_btree_node_delete(btree, root, NULL, NULL, 0);
+ nilfs_btree_node_delete(root, 0, NULL, NULL,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
nilfs_btree_node_set_level(root, level);
n = nilfs_btree_node_get_nchildren(child);
- nilfs_btree_node_move_left(btree, root, child, n);
+ nilfs_btree_node_move_left(root, child, n,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);
nilfs_btnode_delete(path[level].bp_bh);
path[level].bp_bh = NULL;
}
-static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
+static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int *levelp,
struct nilfs_bmap_stats *stats,
@@ -1262,42 +1356,43 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
struct buffer_head *bh;
struct nilfs_btree_node *node, *parent, *sib;
__u64 sibptr;
- int pindex, level, ret;
+ int pindex, level, ncmin, ncmax, ncblk, ret;
ret = 0;
stats->bs_nblocks = 0;
+ ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
+ ncblk = nilfs_btree_nchildren_per_block(btree);
+
for (level = NILFS_BTREE_LEVEL_NODE_MIN;
level < nilfs_btree_height(btree) - 1;
level++) {
node = nilfs_btree_get_nonroot_node(path, level);
path[level].bp_oldreq.bpr_ptr =
- nilfs_btree_node_get_ptr(btree, node,
- path[level].bp_index);
- ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
+ nilfs_btree_node_get_ptr(node, path[level].bp_index,
+ ncblk);
+ ret = nilfs_bmap_prepare_end_ptr(btree,
&path[level].bp_oldreq, dat);
if (ret < 0)
goto err_out_child_node;
- if (nilfs_btree_node_get_nchildren(node) >
- nilfs_btree_node_nchildren_min(node, btree)) {
+ if (nilfs_btree_node_get_nchildren(node) > ncmin) {
path[level].bp_op = nilfs_btree_do_delete;
stats->bs_nblocks++;
goto out;
}
- parent = nilfs_btree_get_node(btree, path, level + 1);
+ parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
pindex = path[level + 1].bp_index;
if (pindex > 0) {
/* left sibling */
- sibptr = nilfs_btree_node_get_ptr(btree, parent,
- pindex - 1);
+ sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
+ ncmax);
ret = nilfs_btree_get_block(btree, sibptr, &bh);
if (ret < 0)
goto err_out_curr_node;
sib = (struct nilfs_btree_node *)bh->b_data;
- if (nilfs_btree_node_get_nchildren(sib) >
- nilfs_btree_node_nchildren_min(sib, btree)) {
+ if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_borrow_left;
stats->bs_nblocks++;
@@ -1311,14 +1406,13 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
} else if (pindex <
nilfs_btree_node_get_nchildren(parent) - 1) {
/* right sibling */
- sibptr = nilfs_btree_node_get_ptr(btree, parent,
- pindex + 1);
+ sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
+ ncmax);
ret = nilfs_btree_get_block(btree, sibptr, &bh);
if (ret < 0)
goto err_out_curr_node;
sib = (struct nilfs_btree_node *)bh->b_data;
- if (nilfs_btree_node_get_nchildren(sib) >
- nilfs_btree_node_nchildren_min(sib, btree)) {
+ if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
path[level].bp_sib_bh = bh;
path[level].bp_op = nilfs_btree_borrow_right;
stats->bs_nblocks++;
@@ -1349,10 +1443,10 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
node = nilfs_btree_get_root(btree);
path[level].bp_oldreq.bpr_ptr =
- nilfs_btree_node_get_ptr(btree, node, path[level].bp_index);
+ nilfs_btree_node_get_ptr(node, path[level].bp_index,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
- ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap,
- &path[level].bp_oldreq, dat);
+ ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
if (ret < 0)
goto err_out_child_node;
@@ -1367,75 +1461,68 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
/* error */
err_out_curr_node:
- nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq, dat);
+ nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
err_out_child_node:
for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) {
brelse(path[level].bp_sib_bh);
- nilfs_bmap_abort_end_ptr(&btree->bt_bmap,
- &path[level].bp_oldreq, dat);
+ nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat);
}
*levelp = level;
stats->bs_nblocks = 0;
return ret;
}
-static void nilfs_btree_commit_delete(struct nilfs_btree *btree,
+static void nilfs_btree_commit_delete(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int maxlevel, struct inode *dat)
{
int level;
for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
- nilfs_bmap_commit_end_ptr(&btree->bt_bmap,
- &path[level].bp_oldreq, dat);
+ nilfs_bmap_commit_end_ptr(btree, &path[level].bp_oldreq, dat);
path[level].bp_op(btree, path, level, NULL, NULL);
}
- if (!nilfs_bmap_dirty(&btree->bt_bmap))
- nilfs_bmap_set_dirty(&btree->bt_bmap);
+ if (!nilfs_bmap_dirty(btree))
+ nilfs_bmap_set_dirty(btree);
}
-static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key)
+static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key)
{
- struct nilfs_btree *btree;
struct nilfs_btree_path *path;
struct nilfs_bmap_stats stats;
struct inode *dat;
int level, ret;
- btree = (struct nilfs_btree *)bmap;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
ret = nilfs_btree_do_lookup(btree, path, key, NULL,
- NILFS_BTREE_LEVEL_NODE_MIN);
+ NILFS_BTREE_LEVEL_NODE_MIN, 0);
if (ret < 0)
goto out;
- dat = NILFS_BMAP_USE_VBN(&btree->bt_bmap) ?
- nilfs_bmap_get_dat(&btree->bt_bmap) : NULL;
+ dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat);
if (ret < 0)
goto out;
nilfs_btree_commit_delete(btree, path, level, dat);
- nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks);
+ nilfs_bmap_sub_blocks(btree, stats.bs_nblocks);
out:
nilfs_btree_free_path(path);
return ret;
}
-static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
+static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp)
{
- struct nilfs_btree *btree;
struct nilfs_btree_path *path;
int ret;
- btree = (struct nilfs_btree *)bmap;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
@@ -1447,16 +1534,14 @@ static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
return ret;
}
-static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key)
+static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
{
struct buffer_head *bh;
- struct nilfs_btree *btree;
struct nilfs_btree_node *root, *node;
__u64 maxkey, nextmaxkey;
__u64 ptr;
int nchildren, ret;
- btree = (struct nilfs_btree *)bmap;
root = nilfs_btree_get_root(btree);
switch (nilfs_btree_height(btree)) {
case 2:
@@ -1467,7 +1552,8 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key)
nchildren = nilfs_btree_node_get_nchildren(root);
if (nchildren > 1)
return 0;
- ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
+ ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
ret = nilfs_btree_get_block(btree, ptr, &bh);
if (ret < 0)
return ret;
@@ -1487,32 +1573,33 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key)
return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
}
-static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
+static int nilfs_btree_gather_data(struct nilfs_bmap *btree,
__u64 *keys, __u64 *ptrs, int nitems)
{
struct buffer_head *bh;
- struct nilfs_btree *btree;
struct nilfs_btree_node *node, *root;
__le64 *dkeys;
__le64 *dptrs;
__u64 ptr;
- int nchildren, i, ret;
+ int nchildren, ncmax, i, ret;
- btree = (struct nilfs_btree *)bmap;
root = nilfs_btree_get_root(btree);
switch (nilfs_btree_height(btree)) {
case 2:
bh = NULL;
node = root;
+ ncmax = NILFS_BTREE_ROOT_NCHILDREN_MAX;
break;
case 3:
nchildren = nilfs_btree_node_get_nchildren(root);
WARN_ON(nchildren > 1);
- ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
+ ptr = nilfs_btree_node_get_ptr(root, nchildren - 1,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
ret = nilfs_btree_get_block(btree, ptr, &bh);
if (ret < 0)
return ret;
node = (struct nilfs_btree_node *)bh->b_data;
+ ncmax = nilfs_btree_nchildren_per_block(btree);
break;
default:
node = NULL;
@@ -1523,10 +1610,10 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
if (nchildren < nitems)
nitems = nchildren;
dkeys = nilfs_btree_node_dkeys(node);
- dptrs = nilfs_btree_node_dptrs(node, btree);
+ dptrs = nilfs_btree_node_dptrs(node, ncmax);
for (i = 0; i < nitems; i++) {
- keys[i] = nilfs_bmap_dkey_to_key(dkeys[i]);
- ptrs[i] = nilfs_bmap_dptr_to_ptr(dptrs[i]);
+ keys[i] = le64_to_cpu(dkeys[i]);
+ ptrs[i] = le64_to_cpu(dptrs[i]);
}
if (bh != NULL)
@@ -1536,14 +1623,13 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
}
static int
-nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
+nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
union nilfs_bmap_ptr_req *dreq,
union nilfs_bmap_ptr_req *nreq,
struct buffer_head **bhp,
struct nilfs_bmap_stats *stats)
{
struct buffer_head *bh;
- struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
struct inode *dat = NULL;
int ret;
@@ -1551,12 +1637,12 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
/* for data */
/* cannot find near ptr */
- if (NILFS_BMAP_USE_VBN(bmap)) {
+ if (NILFS_BMAP_USE_VBN(btree)) {
dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
- dat = nilfs_bmap_get_dat(bmap);
+ dat = nilfs_bmap_get_dat(btree);
}
- ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq, dat);
+ ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
if (ret < 0)
return ret;
@@ -1564,7 +1650,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
stats->bs_nblocks++;
if (nreq != NULL) {
nreq->bpr_ptr = dreq->bpr_ptr + 1;
- ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq, dat);
+ ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);
if (ret < 0)
goto err_out_dreq;
@@ -1581,16 +1667,16 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
/* error */
err_out_nreq:
- nilfs_bmap_abort_alloc_ptr(bmap, nreq, dat);
+ nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);
err_out_dreq:
- nilfs_bmap_abort_alloc_ptr(bmap, dreq, dat);
+ nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);
stats->bs_nblocks = 0;
return ret;
}
static void
-nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
+nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
__u64 key, __u64 ptr,
const __u64 *keys, const __u64 *ptrs,
int n,
@@ -1598,57 +1684,59 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
union nilfs_bmap_ptr_req *nreq,
struct buffer_head *bh)
{
- struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
struct nilfs_btree_node *node;
struct inode *dat;
__u64 tmpptr;
+ int ncblk;
/* free resources */
- if (bmap->b_ops->bop_clear != NULL)
- bmap->b_ops->bop_clear(bmap);
+ if (btree->b_ops->bop_clear != NULL)
+ btree->b_ops->bop_clear(btree);
/* ptr must be a pointer to a buffer head. */
set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
/* convert and insert */
- dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
- nilfs_btree_init(bmap);
+ dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
+ nilfs_btree_init(btree);
if (nreq != NULL) {
- nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
- nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat);
+ nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
+ nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
/* create child node at level 1 */
node = (struct nilfs_btree_node *)bh->b_data;
- nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs);
- nilfs_btree_node_insert(btree, node,
- key, dreq->bpr_ptr, n);
+ ncblk = nilfs_btree_nchildren_per_block(btree);
+ nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
+ nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
if (!buffer_dirty(bh))
nilfs_btnode_mark_dirty(bh);
- if (!nilfs_bmap_dirty(bmap))
- nilfs_bmap_set_dirty(bmap);
+ if (!nilfs_bmap_dirty(btree))
+ nilfs_bmap_set_dirty(btree);
brelse(bh);
/* create root node at level 2 */
node = nilfs_btree_get_root(btree);
tmpptr = nreq->bpr_ptr;
- nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
- 2, 1, &keys[0], &tmpptr);
+ nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX,
+ &keys[0], &tmpptr);
} else {
- nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat);
+ nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
/* create root node at level 1 */
node = nilfs_btree_get_root(btree);
- nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT,
- 1, n, keys, ptrs);
- nilfs_btree_node_insert(btree, node,
- key, dreq->bpr_ptr, n);
- if (!nilfs_bmap_dirty(bmap))
- nilfs_bmap_set_dirty(bmap);
+ nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX,
+ keys, ptrs);
+ nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr,
+ NILFS_BTREE_ROOT_NCHILDREN_MAX);
+ if (!nilfs_bmap_dirty(btree))
+ nilfs_bmap_set_dirty(btree);
}
- if (NILFS_BMAP_USE_VBN(bmap))
- nilfs_btree_set_target_v(btree, key, dreq->bpr_ptr);
+ if (NILFS_BMAP_USE_VBN(btree))
+ nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr);
}
/**
@@ -1660,7 +1748,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
* @ptrs: array of disk block pointers to be inserted along with @keys
* @n: number of entries in @keys and @ptrs
*/
-int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap,
+int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
__u64 key, __u64 ptr,
const __u64 *keys, const __u64 *ptrs, int n)
{
@@ -1673,7 +1761,7 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap,
di = &dreq;
ni = NULL;
} else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
- 1 << bmap->b_inode->i_blkbits)) {
+ 1 << btree->b_inode->i_blkbits)) {
di = &dreq;
ni = &nreq;
} else {
@@ -1682,17 +1770,17 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap,
BUG();
}
- ret = nilfs_btree_prepare_convert_and_insert(bmap, key, di, ni, &bh,
+ ret = nilfs_btree_prepare_convert_and_insert(btree, key, di, ni, &bh,
&stats);
if (ret < 0)
return ret;
- nilfs_btree_commit_convert_and_insert(bmap, key, ptr, keys, ptrs, n,
+ nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n,
di, ni, bh);
- nilfs_bmap_add_blocks(bmap, stats.bs_nblocks);
+ nilfs_bmap_add_blocks(btree, stats.bs_nblocks);
return 0;
}
-static int nilfs_btree_propagate_p(struct nilfs_btree *btree,
+static int nilfs_btree_propagate_p(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level,
struct buffer_head *bh)
@@ -1704,17 +1792,17 @@ static int nilfs_btree_propagate_p(struct nilfs_btree *btree,
return 0;
}
-static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
+static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, struct inode *dat)
{
struct nilfs_btree_node *parent;
- int ret;
+ int ncmax, ret;
- parent = nilfs_btree_get_node(btree, path, level + 1);
+ parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
path[level].bp_oldreq.bpr_ptr =
- nilfs_btree_node_get_ptr(btree, parent,
- path[level + 1].bp_index);
+ nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
+ ncmax);
path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1;
ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req,
&path[level].bp_newreq.bpr_req);
@@ -1726,7 +1814,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
path[level].bp_ctxt.bh = path[level].bp_bh;
ret = nilfs_btnode_prepare_change_key(
- &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
+ &NILFS_BMAP_I(btree)->i_btnode_cache,
&path[level].bp_ctxt);
if (ret < 0) {
nilfs_dat_abort_update(dat,
@@ -1739,30 +1827,31 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree,
return 0;
}
-static void nilfs_btree_commit_update_v(struct nilfs_btree *btree,
+static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, struct inode *dat)
{
struct nilfs_btree_node *parent;
+ int ncmax;
nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req,
&path[level].bp_newreq.bpr_req,
- btree->bt_bmap.b_ptr_type == NILFS_BMAP_PTR_VS);
+ btree->b_ptr_type == NILFS_BMAP_PTR_VS);
if (buffer_nilfs_node(path[level].bp_bh)) {
nilfs_btnode_commit_change_key(
- &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
+ &NILFS_BMAP_I(btree)->i_btnode_cache,
&path[level].bp_ctxt);
path[level].bp_bh = path[level].bp_ctxt.bh;
}
set_buffer_nilfs_volatile(path[level].bp_bh);
- parent = nilfs_btree_get_node(btree, path, level + 1);
- nilfs_btree_node_set_ptr(btree, parent, path[level + 1].bp_index,
- path[level].bp_newreq.bpr_ptr);
+ parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
+ nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index,
+ path[level].bp_newreq.bpr_ptr, ncmax);
}
-static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
+static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, struct inode *dat)
{
@@ -1770,11 +1859,11 @@ static void nilfs_btree_abort_update_v(struct nilfs_btree *btree,
&path[level].bp_newreq.bpr_req);
if (buffer_nilfs_node(path[level].bp_bh))
nilfs_btnode_abort_change_key(
- &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
+ &NILFS_BMAP_I(btree)->i_btnode_cache,
&path[level].bp_ctxt);
}
-static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
+static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int minlevel, int *maxlevelp,
struct inode *dat)
@@ -1809,7 +1898,7 @@ static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
return ret;
}
-static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree,
+static void nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int minlevel, int maxlevel,
struct buffer_head *bh,
@@ -1824,14 +1913,15 @@ static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree,
nilfs_btree_commit_update_v(btree, path, level, dat);
}
-static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
+static int nilfs_btree_propagate_v(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level, struct buffer_head *bh)
{
int maxlevel = 0, ret;
struct nilfs_btree_node *parent;
- struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
+ struct inode *dat = nilfs_bmap_get_dat(btree);
__u64 ptr;
+ int ncmax;
get_bh(bh);
path[level].bp_bh = bh;
@@ -1841,9 +1931,10 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
goto out;
if (buffer_nilfs_volatile(path[level].bp_bh)) {
- parent = nilfs_btree_get_node(btree, path, level + 1);
- ptr = nilfs_btree_node_get_ptr(btree, parent,
- path[level + 1].bp_index);
+ parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
+ ptr = nilfs_btree_node_get_ptr(parent,
+ path[level + 1].bp_index,
+ ncmax);
ret = nilfs_dat_mark_dirty(dat, ptr);
if (ret < 0)
goto out;
@@ -1857,10 +1948,9 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree,
return ret;
}
-static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
+static int nilfs_btree_propagate(struct nilfs_bmap *btree,
struct buffer_head *bh)
{
- struct nilfs_btree *btree;
struct nilfs_btree_path *path;
struct nilfs_btree_node *node;
__u64 key;
@@ -1868,7 +1958,6 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
WARN_ON(!buffer_dirty(bh));
- btree = (struct nilfs_btree *)bmap;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
@@ -1878,11 +1967,11 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
key = nilfs_btree_node_get_key(node, 0);
level = nilfs_btree_node_get_level(node);
} else {
- key = nilfs_bmap_data_get_key(bmap, bh);
+ key = nilfs_bmap_data_get_key(btree, bh);
level = NILFS_BTREE_LEVEL_DATA;
}
- ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
+ ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
if (ret < 0) {
if (unlikely(ret == -ENOENT))
printk(KERN_CRIT "%s: key = %llu, level == %d\n",
@@ -1890,7 +1979,7 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
goto out;
}
- ret = NILFS_BMAP_USE_VBN(bmap) ?
+ ret = NILFS_BMAP_USE_VBN(btree) ?
nilfs_btree_propagate_v(btree, path, level, bh) :
nilfs_btree_propagate_p(btree, path, level, bh);
@@ -1900,13 +1989,13 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
return ret;
}
-static int nilfs_btree_propagate_gc(const struct nilfs_bmap *bmap,
+static int nilfs_btree_propagate_gc(struct nilfs_bmap *btree,
struct buffer_head *bh)
{
- return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), bh->b_blocknr);
+ return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree), bh->b_blocknr);
}
-static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
+static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
struct list_head *lists,
struct buffer_head *bh)
{
@@ -1920,6 +2009,18 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
node = (struct nilfs_btree_node *)bh->b_data;
key = nilfs_btree_node_get_key(node, 0);
level = nilfs_btree_node_get_level(node);
+ if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
+ level >= NILFS_BTREE_LEVEL_MAX) {
+ dump_stack();
+ printk(KERN_WARNING
+ "%s: invalid btree level: %d (key=%llu, ino=%lu, "
+ "blocknr=%llu)\n",
+ __func__, level, (unsigned long long)key,
+ NILFS_BMAP_I(btree)->vfs_inode.i_ino,
+ (unsigned long long)bh->b_blocknr);
+ return;
+ }
+
list_for_each(head, &lists[level]) {
cbh = list_entry(head, struct buffer_head, b_assoc_buffers);
cnode = (struct nilfs_btree_node *)cbh->b_data;
@@ -1930,11 +2031,10 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree,
list_add_tail(&bh->b_assoc_buffers, head);
}
-static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap,
+static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
struct list_head *listp)
{
- struct nilfs_btree *btree = (struct nilfs_btree *)bmap;
- struct address_space *btcache = &NILFS_BMAP_I(bmap)->i_btnode_cache;
+ struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
struct list_head lists[NILFS_BTREE_LEVEL_MAX];
struct pagevec pvec;
struct buffer_head *bh, *head;
@@ -1968,7 +2068,7 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap,
list_splice_tail(&lists[level], listp);
}
-static int nilfs_btree_assign_p(struct nilfs_btree *btree,
+static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level,
struct buffer_head **bh,
@@ -1978,38 +2078,38 @@ static int nilfs_btree_assign_p(struct nilfs_btree *btree,
struct nilfs_btree_node *parent;
__u64 key;
__u64 ptr;
- int ret;
+ int ncmax, ret;
- parent = nilfs_btree_get_node(btree, path, level + 1);
- ptr = nilfs_btree_node_get_ptr(btree, parent,
- path[level + 1].bp_index);
+ parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
+ ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
+ ncmax);
if (buffer_nilfs_node(*bh)) {
path[level].bp_ctxt.oldkey = ptr;
path[level].bp_ctxt.newkey = blocknr;
path[level].bp_ctxt.bh = *bh;
ret = nilfs_btnode_prepare_change_key(
- &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
+ &NILFS_BMAP_I(btree)->i_btnode_cache,
&path[level].bp_ctxt);
if (ret < 0)
return ret;
nilfs_btnode_commit_change_key(
- &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache,
+ &NILFS_BMAP_I(btree)->i_btnode_cache,
&path[level].bp_ctxt);
*bh = path[level].bp_ctxt.bh;
}
- nilfs_btree_node_set_ptr(btree, parent,
- path[level + 1].bp_index, blocknr);
+ nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, blocknr,
+ ncmax);
key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
/* on-disk format */
- binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key);
+ binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
binfo->bi_dat.bi_level = level;
return 0;
}
-static int nilfs_btree_assign_v(struct nilfs_btree *btree,
+static int nilfs_btree_assign_v(struct nilfs_bmap *btree,
struct nilfs_btree_path *path,
int level,
struct buffer_head **bh,
@@ -2017,15 +2117,15 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree,
union nilfs_binfo *binfo)
{
struct nilfs_btree_node *parent;
- struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap);
+ struct inode *dat = nilfs_bmap_get_dat(btree);
__u64 key;
__u64 ptr;
union nilfs_bmap_ptr_req req;
- int ret;
+ int ncmax, ret;
- parent = nilfs_btree_get_node(btree, path, level + 1);
- ptr = nilfs_btree_node_get_ptr(btree, parent,
- path[level + 1].bp_index);
+ parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
+ ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index,
+ ncmax);
req.bpr_ptr = ptr;
ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
if (ret < 0)
@@ -2034,24 +2134,22 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree,
key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index);
/* on-disk format */
- binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
- binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
+ binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
+ binfo->bi_v.bi_blkoff = cpu_to_le64(key);
return 0;
}
-static int nilfs_btree_assign(struct nilfs_bmap *bmap,
+static int nilfs_btree_assign(struct nilfs_bmap *btree,
struct buffer_head **bh,
sector_t blocknr,
union nilfs_binfo *binfo)
{
- struct nilfs_btree *btree;
struct nilfs_btree_path *path;
struct nilfs_btree_node *node;
__u64 key;
int level, ret;
- btree = (struct nilfs_btree *)bmap;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
@@ -2061,17 +2159,17 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap,
key = nilfs_btree_node_get_key(node, 0);
level = nilfs_btree_node_get_level(node);
} else {
- key = nilfs_bmap_data_get_key(bmap, *bh);
+ key = nilfs_bmap_data_get_key(btree, *bh);
level = NILFS_BTREE_LEVEL_DATA;
}
- ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
+ ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
if (ret < 0) {
WARN_ON(ret == -ENOENT);
goto out;
}
- ret = NILFS_BMAP_USE_VBN(bmap) ?
+ ret = NILFS_BMAP_USE_VBN(btree) ?
nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) :
nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo);
@@ -2081,7 +2179,7 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap,
return ret;
}
-static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap,
+static int nilfs_btree_assign_gc(struct nilfs_bmap *btree,
struct buffer_head **bh,
sector_t blocknr,
union nilfs_binfo *binfo)
@@ -2090,7 +2188,7 @@ static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap,
__u64 key;
int ret;
- ret = nilfs_dat_move(nilfs_bmap_get_dat(bmap), (*bh)->b_blocknr,
+ ret = nilfs_dat_move(nilfs_bmap_get_dat(btree), (*bh)->b_blocknr,
blocknr);
if (ret < 0)
return ret;
@@ -2099,29 +2197,27 @@ static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap,
node = (struct nilfs_btree_node *)(*bh)->b_data;
key = nilfs_btree_node_get_key(node, 0);
} else
- key = nilfs_bmap_data_get_key(bmap, *bh);
+ key = nilfs_bmap_data_get_key(btree, *bh);
/* on-disk format */
binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr);
- binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
+ binfo->bi_v.bi_blkoff = cpu_to_le64(key);
return 0;
}
-static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
+static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level)
{
struct buffer_head *bh;
- struct nilfs_btree *btree;
struct nilfs_btree_path *path;
__u64 ptr;
int ret;
- btree = (struct nilfs_btree *)bmap;
path = nilfs_btree_alloc_path();
if (path == NULL)
return -ENOMEM;
- ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1);
+ ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1, 0);
if (ret < 0) {
WARN_ON(ret == -ENOENT);
goto out;
@@ -2135,8 +2231,8 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
if (!buffer_dirty(bh))
nilfs_btnode_mark_dirty(bh);
brelse(bh);
- if (!nilfs_bmap_dirty(&btree->bt_bmap))
- nilfs_bmap_set_dirty(&btree->bt_bmap);
+ if (!nilfs_bmap_dirty(btree))
+ nilfs_bmap_set_dirty(btree);
out:
nilfs_btree_free_path(path);
@@ -2186,10 +2282,14 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
int nilfs_btree_init(struct nilfs_bmap *bmap)
{
bmap->b_ops = &nilfs_btree_ops;
+ bmap->b_nchildren_per_block =
+ NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
return 0;
}
void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
{
bmap->b_ops = &nilfs_btree_ops_gc;
+ bmap->b_nchildren_per_block =
+ NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
}
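
The btree.c hunks above do two things at once: they retire the old one-member wrapper type so every helper takes struct nilfs_bmap * directly, and they thread an explicit ncmax (children per node block) through the node accessors, with nilfs_btree_init() and nilfs_btree_init_gc() caching that capacity in b_nchildren_per_block so it is computed once per bmap rather than on every access. A minimal user-space sketch of the caching pattern follows; the 16-byte header and 8-byte key/pointer sizes are assumptions for illustration, not the real on-disk layout.

#include <assert.h>
#include <stdio.h>

#define NODE_HEADER_SIZE 16	/* assumed header size, for illustration */
#define KEY_PTR_SIZE	(2 * sizeof(unsigned long long))

struct bmap {
	unsigned int node_size;		/* block size of btree nodes */
	int b_nchildren_per_block;	/* cached capacity, as in the patch */
};

/* children that fit in one node block */
static int nchildren_max(unsigned int node_size)
{
	return (int)((node_size - NODE_HEADER_SIZE) / KEY_PTR_SIZE);
}

static void bmap_init(struct bmap *b, unsigned int node_size)
{
	b->node_size = node_size;
	/* compute once; hot paths just read the field */
	b->b_nchildren_per_block = nchildren_max(node_size);
}

int main(void)
{
	struct bmap b;

	bmap_init(&b, 4096);
	assert(b.b_nchildren_per_block == nchildren_max(4096));
	printf("children per 4KiB block: %d\n", b.b_nchildren_per_block);
	return 0;
}
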
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index af638d59e3b..22c02e35b6e 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -31,14 +31,6 @@
#include "bmap.h"
/**
- * struct nilfs_btree - B-tree structure
- * @bt_bmap: bmap base structure
- */
-struct nilfs_btree {
- struct nilfs_bmap bt_bmap;
-};
-
-/**
* struct nilfs_btree_path - A path on which B-tree operations are executed
* @bp_bh: buffer head of node block
* @bp_sib_bh: buffer head of sibling node block
@@ -54,7 +46,7 @@ struct nilfs_btree_path {
union nilfs_bmap_ptr_req bp_oldreq;
union nilfs_bmap_ptr_req bp_newreq;
struct nilfs_btnode_chkey_ctxt bp_ctxt;
- void (*bp_op)(struct nilfs_btree *, struct nilfs_btree_path *,
+ void (*bp_op)(struct nilfs_bmap *, struct nilfs_btree_path *,
int, __u64 *, __u64 *);
};
@@ -75,11 +67,11 @@ struct nilfs_btree_path {
extern struct kmem_cache *nilfs_btree_path_cache;
-int nilfs_btree_path_cache_init(void);
-void nilfs_btree_path_cache_destroy(void);
int nilfs_btree_init(struct nilfs_bmap *);
int nilfs_btree_convert_and_insert(struct nilfs_bmap *, __u64, __u64,
const __u64 *, const __u64 *, int);
void nilfs_btree_init_gc(struct nilfs_bmap *);
+int nilfs_btree_broken_node_block(struct buffer_head *bh);
+
#endif /* _NILFS_BTREE_H */
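
btree.h mirrors that: struct nilfs_btree disappears, bp_op now takes struct nilfs_bmap *, and nilfs_btree_broken_node_block() is declared for the verification that gcinode.c starts doing below. A toy sketch (stand-in names) of why the single-member wrapper bought nothing but casts:

#include <stdio.h>

struct bmap { int b_state; };

/* old shape: wrapper reached through a cast at every call site */
struct btree_old { struct bmap bt_bmap; };

static int old_state(const struct bmap *bmap)
{
	const struct btree_old *btree = (const struct btree_old *)bmap;

	return btree->bt_bmap.b_state;
}

/* new shape: the bmap itself is the btree handle */
static int new_state(const struct bmap *btree)
{
	return btree->b_state;
}

int main(void)
{
	struct bmap b = { .b_state = 1 };

	/* both reads see the same storage; the wrapper added only casts */
	printf("%d %d\n", old_state(&b), new_state(&b));
	return 0;
}
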
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 85c89dfc71f..cb003c8ee1f 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -80,23 +80,10 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
return last_byte;
}
-static int nilfs_prepare_chunk_uninterruptible(struct page *page,
- struct address_space *mapping,
- unsigned from, unsigned to)
+static int nilfs_prepare_chunk(struct page *page, unsigned from, unsigned to)
{
loff_t pos = page_offset(page) + from;
- return block_write_begin(NULL, mapping, pos, to - from,
- AOP_FLAG_UNINTERRUPTIBLE, &page,
- NULL, nilfs_get_block);
-}
-
-static int nilfs_prepare_chunk(struct page *page,
- struct address_space *mapping,
- unsigned from, unsigned to)
-{
- loff_t pos = page_offset(page) + from;
- return block_write_begin(NULL, mapping, pos, to - from, 0, &page,
- NULL, nilfs_get_block);
+ return __block_write_begin(page, pos, to - from, nilfs_get_block);
}
static void nilfs_commit_chunk(struct page *page,
@@ -141,7 +128,7 @@ static void nilfs_check_page(struct page *page)
}
for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {
p = (struct nilfs_dir_entry *)(kaddr + offs);
- rec_len = le16_to_cpu(p->rec_len);
+ rec_len = nilfs_rec_len_from_disk(p->rec_len);
if (rec_len < NILFS_DIR_REC_LEN(1))
goto Eshort;
@@ -199,13 +186,10 @@ fail:
static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
- struct page *page = read_cache_page(mapping, n,
- (filler_t *)mapping->a_ops->readpage, NULL);
+ struct page *page = read_mapping_page(mapping, n, NULL);
+
if (!IS_ERR(page)) {
- wait_on_page_locked(page);
kmap(page);
- if (!PageUptodate(page))
- goto fail;
if (!PageChecked(page))
nilfs_check_page(page);
if (PageError(page))
@@ -238,7 +222,8 @@ nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
*/
static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
{
- return (struct nilfs_dir_entry *)((char *)p + le16_to_cpu(p->rec_len));
+ return (struct nilfs_dir_entry *)((char *)p +
+ nilfs_rec_len_from_disk(p->rec_len));
}
static unsigned char
@@ -329,7 +314,7 @@ static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
goto success;
}
}
- filp->f_pos += le16_to_cpu(de->rec_len);
+ filp->f_pos += nilfs_rec_len_from_disk(de->rec_len);
}
nilfs_put_page(page);
}
@@ -444,12 +429,12 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
struct page *page, struct inode *inode)
{
unsigned from = (char *) de - (char *) page_address(page);
- unsigned to = from + le16_to_cpu(de->rec_len);
+ unsigned to = from + nilfs_rec_len_from_disk(de->rec_len);
struct address_space *mapping = page->mapping;
int err;
lock_page(page);
- err = nilfs_prepare_chunk_uninterruptible(page, mapping, from, to);
+ err = nilfs_prepare_chunk(page, from, to);
BUG_ON(err);
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
@@ -500,7 +485,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
/* We hit i_size */
name_len = 0;
rec_len = chunk_size;
- de->rec_len = cpu_to_le16(chunk_size);
+ de->rec_len = nilfs_rec_len_to_disk(chunk_size);
de->inode = 0;
goto got_it;
}
@@ -514,7 +499,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
if (nilfs_match(namelen, name, de))
goto out_unlock;
name_len = NILFS_DIR_REC_LEN(de->name_len);
- rec_len = le16_to_cpu(de->rec_len);
+ rec_len = nilfs_rec_len_from_disk(de->rec_len);
if (!de->inode && rec_len >= reclen)
goto got_it;
if (rec_len >= name_len + reclen)
@@ -530,15 +515,15 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
got_it:
from = (char *)de - (char *)page_address(page);
to = from + rec_len;
- err = nilfs_prepare_chunk(page, page->mapping, from, to);
+ err = nilfs_prepare_chunk(page, from, to);
if (err)
goto out_unlock;
if (de->inode) {
struct nilfs_dir_entry *de1;
de1 = (struct nilfs_dir_entry *)((char *)de + name_len);
- de1->rec_len = cpu_to_le16(rec_len - name_len);
- de->rec_len = cpu_to_le16(name_len);
+ de1->rec_len = nilfs_rec_len_to_disk(rec_len - name_len);
+ de->rec_len = nilfs_rec_len_to_disk(name_len);
de = de1;
}
de->name_len = namelen;
@@ -569,7 +554,8 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
struct inode *inode = mapping->host;
char *kaddr = page_address(page);
unsigned from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
- unsigned to = ((char *)dir - kaddr) + le16_to_cpu(dir->rec_len);
+ unsigned to = ((char *)dir - kaddr) +
+ nilfs_rec_len_from_disk(dir->rec_len);
struct nilfs_dir_entry *pde = NULL;
struct nilfs_dir_entry *de = (struct nilfs_dir_entry *)(kaddr + from);
int err;
@@ -587,10 +573,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
if (pde)
from = (char *)pde - (char *)page_address(page);
lock_page(page);
- err = nilfs_prepare_chunk(page, mapping, from, to);
+ err = nilfs_prepare_chunk(page, from, to);
BUG_ON(err);
if (pde)
- pde->rec_len = cpu_to_le16(to - from);
+ pde->rec_len = nilfs_rec_len_to_disk(to - from);
dir->inode = 0;
nilfs_commit_chunk(page, mapping, from, to);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -615,7 +601,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
if (!page)
return -ENOMEM;
- err = nilfs_prepare_chunk(page, mapping, 0, chunk_size);
+ err = nilfs_prepare_chunk(page, 0, chunk_size);
if (unlikely(err)) {
unlock_page(page);
goto fail;
@@ -624,14 +610,14 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
memset(kaddr, 0, chunk_size);
de = (struct nilfs_dir_entry *)kaddr;
de->name_len = 1;
- de->rec_len = cpu_to_le16(NILFS_DIR_REC_LEN(1));
+ de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
memcpy(de->name, ".\0\0", 4);
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
de->name_len = 2;
- de->rec_len = cpu_to_le16(chunk_size - NILFS_DIR_REC_LEN(1));
+ de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
de->inode = cpu_to_le64(parent->i_ino);
memcpy(de->name, "..\0", 4);
nilfs_set_de_type(de, inode);
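
Three independent cleanups run through the dir.c hunks: the two prepare-chunk variants collapse into one built on __block_write_begin() (the uninterruptible flavor is gone), page reads go through read_mapping_page() instead of the open-coded read_cache_page()/wait/uptodate sequence, and every rec_len conversion is funneled through nilfs_rec_len_from_disk()/nilfs_rec_len_to_disk(). The definitions of the rec_len helpers are outside this hunk; the usual point of such wrappers (ext2 carries an equivalent pair) is to give a single choke point where an escape encoding for oversized record lengths can later be handled, though that is an inference here. A user-space sketch of the idea, assuming a little-endian host for brevity:

#include <stdint.h>
#include <stdio.h>

/*
 * Identity on a little-endian host; stands in for le16_to_cpu()/
 * cpu_to_le16(). The point is the wrapper, not the byte swap.
 */
static uint16_t le16_swap(uint16_t v) { return v; }

/* one choke point per direction for every rec_len conversion */
static unsigned nilfs_rec_len_from_disk(uint16_t dlen)
{
	/* an escape encoding for oversized lengths would hook in here */
	return le16_swap(dlen);
}

static uint16_t nilfs_rec_len_to_disk(unsigned len)
{
	return le16_swap((uint16_t)len);
}

int main(void)
{
	unsigned len = 264;

	printf("round trip: %u\n",
	       nilfs_rec_len_from_disk(nilfs_rec_len_to_disk(len)));
	return 0;
}
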
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 236753df5cd..324d80c5751 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -27,47 +27,43 @@
#include "alloc.h"
#include "dat.h"
-static inline __le64 *nilfs_direct_dptrs(const struct nilfs_direct *direct)
+static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
return (__le64 *)
- ((struct nilfs_direct_node *)direct->d_bmap.b_u.u_data + 1);
+ ((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}
static inline __u64
-nilfs_direct_get_ptr(const struct nilfs_direct *direct, __u64 key)
+nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
- return nilfs_bmap_dptr_to_ptr(*(nilfs_direct_dptrs(direct) + key));
+ return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}
-static inline void nilfs_direct_set_ptr(struct nilfs_direct *direct,
+static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
__u64 key, __u64 ptr)
{
- *(nilfs_direct_dptrs(direct) + key) = nilfs_bmap_ptr_to_dptr(ptr);
+ *(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}
-static int nilfs_direct_lookup(const struct nilfs_bmap *bmap,
+static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
__u64 key, int level, __u64 *ptrp)
{
- struct nilfs_direct *direct;
__u64 ptr;
- direct = (struct nilfs_direct *)bmap; /* XXX: use macro for level 1 */
if (key > NILFS_DIRECT_KEY_MAX || level != 1)
return -ENOENT;
ptr = nilfs_direct_get_ptr(direct, key);
if (ptr == NILFS_BMAP_INVALID_PTR)
return -ENOENT;
- if (ptrp != NULL)
- *ptrp = ptr;
+ *ptrp = ptr;
return 0;
}
-static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap,
+static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
__u64 key, __u64 *ptrp,
unsigned maxblocks)
{
- struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
struct inode *dat = NULL;
__u64 ptr, ptr2;
sector_t blocknr;
@@ -79,8 +75,8 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap,
if (ptr == NILFS_BMAP_INVALID_PTR)
return -ENOENT;
- if (NILFS_BMAP_USE_VBN(bmap)) {
- dat = nilfs_bmap_get_dat(bmap);
+ if (NILFS_BMAP_USE_VBN(direct)) {
+ dat = nilfs_bmap_get_dat(direct);
ret = nilfs_dat_translate(dat, ptr, &blocknr);
if (ret < 0)
return ret;
@@ -106,29 +102,21 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap,
}
static __u64
-nilfs_direct_find_target_v(const struct nilfs_direct *direct, __u64 key)
+nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
__u64 ptr;
- ptr = nilfs_bmap_find_target_seq(&direct->d_bmap, key);
+ ptr = nilfs_bmap_find_target_seq(direct, key);
if (ptr != NILFS_BMAP_INVALID_PTR)
/* sequential access */
return ptr;
else
/* block group */
- return nilfs_bmap_find_target_in_group(&direct->d_bmap);
-}
-
-static void nilfs_direct_set_target_v(struct nilfs_direct *direct,
- __u64 key, __u64 ptr)
-{
- direct->d_bmap.b_last_allocated_key = key;
- direct->d_bmap.b_last_allocated_ptr = ptr;
+ return nilfs_bmap_find_target_in_group(direct);
}
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
- struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
union nilfs_bmap_ptr_req req;
struct inode *dat = NULL;
struct buffer_head *bh;
@@ -136,11 +124,11 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
if (key > NILFS_DIRECT_KEY_MAX)
return -ENOENT;
- if (nilfs_direct_get_ptr(direct, key) != NILFS_BMAP_INVALID_PTR)
+ if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
return -EEXIST;
if (NILFS_BMAP_USE_VBN(bmap)) {
- req.bpr_ptr = nilfs_direct_find_target_v(direct, key);
+ req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
dat = nilfs_bmap_get_dat(bmap);
}
ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
@@ -150,13 +138,13 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
set_buffer_nilfs_volatile(bh);
nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
- nilfs_direct_set_ptr(direct, key, req.bpr_ptr);
+ nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);
if (!nilfs_bmap_dirty(bmap))
nilfs_bmap_set_dirty(bmap);
if (NILFS_BMAP_USE_VBN(bmap))
- nilfs_direct_set_target_v(direct, key, req.bpr_ptr);
+ nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);
nilfs_bmap_add_blocks(bmap, 1);
}
@@ -165,33 +153,30 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
- struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
union nilfs_bmap_ptr_req req;
struct inode *dat;
int ret;
if (key > NILFS_DIRECT_KEY_MAX ||
- nilfs_direct_get_ptr(direct, key) == NILFS_BMAP_INVALID_PTR)
+ nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
return -ENOENT;
dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
- req.bpr_ptr = nilfs_direct_get_ptr(direct, key);
+ req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);
ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
if (!ret) {
nilfs_bmap_commit_end_ptr(bmap, &req, dat);
- nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR);
+ nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
nilfs_bmap_sub_blocks(bmap, 1);
}
return ret;
}
-static int nilfs_direct_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
+static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
- struct nilfs_direct *direct;
__u64 key, lastkey;
- direct = (struct nilfs_direct *)bmap;
lastkey = NILFS_DIRECT_KEY_MAX + 1;
for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
if (nilfs_direct_get_ptr(direct, key) !=
@@ -211,15 +196,13 @@ static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
return key > NILFS_DIRECT_KEY_MAX;
}
-static int nilfs_direct_gather_data(struct nilfs_bmap *bmap,
+static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
__u64 *keys, __u64 *ptrs, int nitems)
{
- struct nilfs_direct *direct;
__u64 key;
__u64 ptr;
int n;
- direct = (struct nilfs_direct *)bmap;
if (nitems > NILFS_DIRECT_NBLOCKS)
nitems = NILFS_DIRECT_NBLOCKS;
n = 0;
@@ -237,7 +220,6 @@ static int nilfs_direct_gather_data(struct nilfs_bmap *bmap,
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
__u64 key, __u64 *keys, __u64 *ptrs, int n)
{
- struct nilfs_direct *direct;
__le64 *dptrs;
int ret, i, j;
@@ -253,12 +235,11 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
bmap->b_ops->bop_clear(bmap);
/* convert */
- direct = (struct nilfs_direct *)bmap;
- dptrs = nilfs_direct_dptrs(direct);
+ dptrs = nilfs_direct_dptrs(bmap);
for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
if ((j < n) && (i == keys[j])) {
dptrs[i] = (i != key) ?
- nilfs_bmap_ptr_to_dptr(ptrs[j]) :
+ cpu_to_le64(ptrs[j]) :
NILFS_BMAP_INVALID_PTR;
j++;
} else
@@ -269,10 +250,9 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
return 0;
}
-static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
+static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
struct buffer_head *bh)
{
- struct nilfs_direct *direct = (struct nilfs_direct *)bmap;
struct nilfs_palloc_req oldreq, newreq;
struct inode *dat;
__u64 key;
@@ -284,7 +264,7 @@ static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
dat = nilfs_bmap_get_dat(bmap);
key = nilfs_bmap_data_get_key(bmap, bh);
- ptr = nilfs_direct_get_ptr(direct, key);
+ ptr = nilfs_direct_get_ptr(bmap, key);
if (!buffer_nilfs_volatile(bh)) {
oldreq.pr_entry_nr = ptr;
newreq.pr_entry_nr = ptr;
@@ -294,20 +274,20 @@ static int nilfs_direct_propagate(const struct nilfs_bmap *bmap,
nilfs_dat_commit_update(dat, &oldreq, &newreq,
bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
set_buffer_nilfs_volatile(bh);
- nilfs_direct_set_ptr(direct, key, newreq.pr_entry_nr);
+ nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
} else
ret = nilfs_dat_mark_dirty(dat, ptr);
return ret;
}
-static int nilfs_direct_assign_v(struct nilfs_direct *direct,
+static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
__u64 key, __u64 ptr,
struct buffer_head **bh,
sector_t blocknr,
union nilfs_binfo *binfo)
{
- struct inode *dat = nilfs_bmap_get_dat(&direct->d_bmap);
+ struct inode *dat = nilfs_bmap_get_dat(direct);
union nilfs_bmap_ptr_req req;
int ret;
@@ -315,13 +295,13 @@ static int nilfs_direct_assign_v(struct nilfs_direct *direct,
ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
if (!ret) {
nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
- binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr);
- binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key);
+ binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
+ binfo->bi_v.bi_blkoff = cpu_to_le64(key);
}
return ret;
}
-static int nilfs_direct_assign_p(struct nilfs_direct *direct,
+static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
__u64 key, __u64 ptr,
struct buffer_head **bh,
sector_t blocknr,
@@ -329,7 +309,7 @@ static int nilfs_direct_assign_p(struct nilfs_direct *direct,
{
nilfs_direct_set_ptr(direct, key, blocknr);
- binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key);
+ binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
binfo->bi_dat.bi_level = 0;
return 0;
@@ -340,18 +320,16 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap,
sector_t blocknr,
union nilfs_binfo *binfo)
{
- struct nilfs_direct *direct;
__u64 key;
__u64 ptr;
- direct = (struct nilfs_direct *)bmap;
key = nilfs_bmap_data_get_key(bmap, *bh);
if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
printk(KERN_CRIT "%s: invalid key: %llu\n", __func__,
(unsigned long long)key);
return -EINVAL;
}
- ptr = nilfs_direct_get_ptr(direct, key);
+ ptr = nilfs_direct_get_ptr(bmap, key);
if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__,
(unsigned long long)ptr);
@@ -359,8 +337,8 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap,
}
return NILFS_BMAP_USE_VBN(bmap) ?
- nilfs_direct_assign_v(direct, key, ptr, bh, blocknr, binfo) :
- nilfs_direct_assign_p(direct, key, ptr, bh, blocknr, binfo);
+ nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
+ nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}
static const struct nilfs_bmap_operations nilfs_direct_ops = {
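
direct.c gets the same de-wrapping as btree.c: struct nilfs_direct is gone, the pointer slots are addressed straight off the nilfs_bmap, nilfs_direct_lookup() now assumes a non-NULL ptrp, and the private set_target_v helper is dropped in favor of the shared nilfs_bmap_set_target_v(). A compact sketch of the dptr-array layout (sizes are illustrative; the real constants come from the nilfs2 headers):

#include <stdint.h>
#include <stdio.h>

#define BMAP_SIZE 64	/* assumed inline area size, for illustration */

struct direct_node { uint8_t dn_flags; uint8_t pad[7]; };

struct bmap {
	union { uint8_t u_data[BMAP_SIZE]; } b_u;
};

/* pointer slots live immediately after the 8-byte node header */
static uint64_t *direct_dptrs(struct bmap *direct)
{
	return (uint64_t *)((struct direct_node *)direct->b_u.u_data + 1);
}

int main(void)
{
	struct bmap b = { 0 };

	direct_dptrs(&b)[3] = 12345;	/* set the pointer for key 3 */
	printf("key 3 -> %llu\n",
	       (unsigned long long)direct_dptrs(&b)[3]);
	return 0;
}
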
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h
index a5ffd66e25d..dc643de20a2 100644
--- a/fs/nilfs2/direct.h
+++ b/fs/nilfs2/direct.h
@@ -28,8 +28,6 @@
#include "bmap.h"
-struct nilfs_direct;
-
/**
* struct nilfs_direct_node - direct node
* @dn_flags: flags
@@ -40,15 +38,6 @@ struct nilfs_direct_node {
__u8 pad[7];
};
-/**
- * struct nilfs_direct - direct mapping
- * @d_bmap: bmap structure
- */
-struct nilfs_direct {
- struct nilfs_bmap d_bmap;
-};
-
-
#define NILFS_DIRECT_NBLOCKS (NILFS_BMAP_SIZE / sizeof(__le64) - 1)
#define NILFS_DIRECT_KEY_MIN 0
#define NILFS_DIRECT_KEY_MAX (NILFS_DIRECT_NBLOCKS - 1)
diff --git a/fs/nilfs2/gcdat.c b/fs/nilfs2/gcdat.c
index dd5f7e0a95f..84a45d1d546 100644
--- a/fs/nilfs2/gcdat.c
+++ b/fs/nilfs2/gcdat.c
@@ -78,7 +78,7 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *nilfs)
struct inode *gcdat = nilfs->ns_gc_dat;
struct nilfs_inode_info *gii = NILFS_I(gcdat);
- gcdat->i_state = I_CLEAR;
+ gcdat->i_state = I_FREEING | I_CLEAR;
gii->i_flags = 0;
nilfs_palloc_clear_cache(gcdat);
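
The one-line gcdat.c change appears to track the newer VFS convention in which a dying inode keeps I_FREEING set when I_CLEAR is added, so liveness tests look at both bits together. A tiny sketch of that check, with illustrative flag values rather than the kernel's:

#include <stdio.h>

#define I_FREEING (1 << 0)	/* illustrative values */
#define I_CLEAR   (1 << 1)

struct inode { unsigned long i_state; };

static int inode_is_dead(const struct inode *inode)
{
	return (inode->i_state & (I_FREEING | I_CLEAR)) != 0;
}

int main(void)
{
	struct inode gcdat = { .i_state = I_FREEING | I_CLEAR };

	printf("dead: %d\n", inode_is_dead(&gcdat));
	return 0;
}
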
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 145f03cd7d3..bed3a783129 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -48,6 +48,8 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include "nilfs.h"
+#include "btree.h"
+#include "btnode.h"
#include "page.h"
#include "mdt.h"
#include "dat.h"
@@ -149,8 +151,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
__u64 vbn, struct buffer_head **out_bh)
{
- int ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
- vbn ? : pbn, pbn, out_bh);
+ int ret;
+
+ ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
+ vbn ? : pbn, pbn, READ, out_bh, &pbn);
if (ret == -EEXIST) /* internal code (cache hit) */
ret = 0;
return ret;
@@ -164,10 +168,15 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
if (buffer_dirty(bh))
return -EEXIST;
- if (buffer_nilfs_node(bh))
+ if (buffer_nilfs_node(bh)) {
+ if (nilfs_btree_broken_node_block(bh)) {
+ clear_buffer_uptodate(bh);
+ return -EIO;
+ }
nilfs_btnode_mark_dirty(bh);
- else
+ } else {
nilfs_mdt_mark_buffer_dirty(bh);
+ }
return 0;
}
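
gcinode.c picks up two safeguards: the btnode read uses the extended nilfs_btnode_submit_block() signature (an explicit READ mode and a physical block number out-argument), and a node buffer fetched for GC is vetted with nilfs_btree_broken_node_block() before it may be redirtied, returning -EIO instead of writing a corrupt node back out. A sketch of that verify-before-dirty pattern; the validation predicate here is a stand-in, not the real check:

#include <errno.h>
#include <stdio.h>

struct node { int level; int dirty; };

#define LEVEL_MIN 1
#define LEVEL_MAX 14	/* assumed bound, for illustration */

/* stand-in for nilfs_btree_broken_node_block() */
static int node_broken(const struct node *n)
{
	return n->level < LEVEL_MIN || n->level >= LEVEL_MAX;
}

static int mark_dirty_checked(struct node *n)
{
	if (node_broken(n))
		return -EIO;	/* refuse to redirty garbage */
	n->dirty = 1;
	return 0;
}

int main(void)
{
	struct node good = { .level = 2 }, bad = { .level = 99 };

	printf("good: %d, bad: %d\n",
	       mark_dirty_checked(&good), mark_dirty_checked(&bad));
	return 0;
}
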
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 39e038ac8fc..eccb2f2e231 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -27,6 +27,7 @@
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
+#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
@@ -197,11 +198,15 @@ static int nilfs_write_begin(struct file *file, struct address_space *mapping,
if (unlikely(err))
return err;
- *pagep = NULL;
- err = block_write_begin(file, mapping, pos, len, flags, pagep,
- fsdata, nilfs_get_block);
- if (unlikely(err))
+ err = block_write_begin(mapping, pos, len, flags, pagep,
+ nilfs_get_block);
+ if (unlikely(err)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+
nilfs_transaction_abort(inode->i_sb);
+ }
return err;
}
@@ -237,6 +242,19 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
/* Needs synchronization with the cleaner */
size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, nilfs_get_block, NULL);
+
+ /*
+ * In case of error, an extending write may have instantiated a few
+ * blocks outside i_size. Trim these off again.
+ */
+ if (unlikely((rw & WRITE) && size < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
+
return size;
}
@@ -337,7 +355,6 @@ void nilfs_free_inode(struct inode *inode)
struct super_block *sb = inode->i_sb;
struct nilfs_sb_info *sbi = NILFS_SB(sb);
- clear_inode(inode);
/* XXX: check error code? Is there anything I can do? */
(void) nilfs_ifile_delete_inode(sbi->s_ifile, inode->i_ino);
atomic_dec(&sbi->s_inodes_count);
@@ -597,16 +614,34 @@ void nilfs_truncate(struct inode *inode)
But truncate has no return value. */
}
-void nilfs_delete_inode(struct inode *inode)
+static void nilfs_clear_inode(struct inode *inode)
+{
+ struct nilfs_inode_info *ii = NILFS_I(inode);
+
+ /*
+ * Free the resources allocated in nilfs_read_inode() here.
+ */
+ BUG_ON(!list_empty(&ii->i_dirty));
+ brelse(ii->i_bh);
+ ii->i_bh = NULL;
+
+ if (test_bit(NILFS_I_BMAP, &ii->i_state))
+ nilfs_bmap_clear(ii->i_bmap);
+
+ nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+}
+
+void nilfs_evict_inode(struct inode *inode)
{
struct nilfs_transaction_info ti;
struct super_block *sb = inode->i_sb;
struct nilfs_inode_info *ii = NILFS_I(inode);
- if (unlikely(is_bad_inode(inode))) {
+ if (inode->i_nlink || unlikely(is_bad_inode(inode))) {
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
+ end_writeback(inode);
+ nilfs_clear_inode(inode);
return;
}
nilfs_transaction_begin(sb, &ti, 0); /* never fails */
@@ -616,6 +651,8 @@ void nilfs_delete_inode(struct inode *inode)
nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);
+ end_writeback(inode);
+ nilfs_clear_inode(inode);
nilfs_free_inode(inode);
/* nilfs_free_inode() marks inode buffer dirty */
if (IS_SYNC(inode))
@@ -639,14 +676,27 @@ int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
err = nilfs_transaction_begin(sb, &ti, 0);
if (unlikely(err))
return err;
- err = inode_setattr(inode, iattr);
- if (!err && (iattr->ia_valid & ATTR_MODE))
+
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(inode)) {
+ err = vmtruncate(inode, iattr->ia_size);
+ if (unlikely(err))
+ goto out_err;
+ }
+
+ setattr_copy(inode, iattr);
+ mark_inode_dirty(inode);
+
+ if (iattr->ia_valid & ATTR_MODE) {
err = nilfs_acl_chmod(inode);
- if (likely(!err))
- err = nilfs_transaction_commit(sb);
- else
- nilfs_transaction_abort(sb);
+ if (unlikely(err))
+ goto out_err;
+ }
+
+ return nilfs_transaction_commit(sb);
+out_err:
+ nilfs_transaction_abort(sb);
return err;
}
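
The inode.c changes adapt nilfs2 to the reworked VFS inode teardown: nilfs_delete_inode() becomes nilfs_evict_inode(), which must call end_writeback() itself, handle still-linked inodes, and release per-inode state through the new nilfs_clear_inode() helper; nilfs_setattr() likewise stops relying on inode_setattr() and performs vmtruncate() plus setattr_copy() inside the transaction, aborting it on any failure. A user-space sketch of that begin/commit-or-abort shape (all names are simplified stand-ins):

#include <errno.h>
#include <stdio.h>

static int txn_begin(void)  { puts("begin");  return 0; }
static int txn_commit(void) { puts("commit"); return 0; }
static void txn_abort(void) { puts("abort"); }

static int do_setattr(int want_resize, int resize_ok)
{
	int err = txn_begin();

	if (err)
		return err;

	if (want_resize && !resize_ok) {
		err = -EIO;	/* e.g. the vmtruncate() step failed */
		goto out_err;
	}
	/* setattr_copy()/mark_inode_dirty() equivalents would go here */

	return txn_commit();
out_err:
	txn_abort();
	return err;
}

int main(void)
{
	printf("-> %d\n", do_setattr(1, 1));
	printf("-> %d\n", do_setattr(1, 0));
	return 0;
}
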
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 024be8c35bb..d01aff4957d 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -28,6 +28,7 @@
#include <linux/swap.h>
#include <linux/slab.h>
#include "nilfs.h"
+#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 47d6d792812..d3d54046e5f 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -32,7 +32,6 @@
#include "the_nilfs.h"
#include "sb.h"
#include "bmap.h"
-#include "bmap_union.h"
/*
* nilfs inode data in memory
@@ -41,7 +40,7 @@ struct nilfs_inode_info {
__u32 i_flags;
unsigned long i_state; /* Dynamic state flags */
struct nilfs_bmap *i_bmap;
- union nilfs_bmap_union i_bmap_union;
+ struct nilfs_bmap i_bmap_data;
__u64 i_xattr; /* sector_t ??? */
__u32 i_dir_start_lookup;
__u64 i_cno; /* check point number for GC inode */
@@ -71,9 +70,7 @@ static inline struct nilfs_inode_info *NILFS_I(const struct inode *inode)
static inline struct nilfs_inode_info *
NILFS_BMAP_I(const struct nilfs_bmap *bmap)
{
- return container_of((union nilfs_bmap_union *)bmap,
- struct nilfs_inode_info,
- i_bmap_union);
+ return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
}
static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
@@ -107,6 +104,14 @@ enum {
};
/*
+ * commit flags for nilfs_commit_super and nilfs_sync_super
+ */
+enum {
+ NILFS_SB_COMMIT = 0, /* Commit a super block alternately */
+ NILFS_SB_COMMIT_ALL /* Commit both super blocks */
+};
+
+/*
* Macros to check inode numbers
*/
#define NILFS_MDT_INO_BITS \
@@ -245,7 +250,7 @@ extern void nilfs_write_inode_common(struct inode *, struct nilfs_inode *, int);
extern struct inode *nilfs_iget(struct super_block *, unsigned long);
extern void nilfs_update_inode(struct inode *, struct buffer_head *);
extern void nilfs_truncate(struct inode *);
-extern void nilfs_delete_inode(struct inode *);
+extern void nilfs_evict_inode(struct inode *);
extern int nilfs_setattr(struct dentry *, struct iattr *);
extern int nilfs_load_inode_block(struct nilfs_sb_info *, struct inode *,
struct buffer_head **);
@@ -270,7 +275,14 @@ extern struct nilfs_super_block *
nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
extern int nilfs_store_magic_and_option(struct super_block *,
struct nilfs_super_block *, char *);
+extern int nilfs_check_feature_compatibility(struct super_block *,
+ struct nilfs_super_block *);
+extern void nilfs_set_log_cursor(struct nilfs_super_block *,
+ struct the_nilfs *);
+extern struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *,
+ int flip);
extern int nilfs_commit_super(struct nilfs_sb_info *, int);
+extern int nilfs_cleanup_super(struct nilfs_sb_info *);
extern int nilfs_attach_checkpoint(struct nilfs_sb_info *, __u64);
extern void nilfs_detach_checkpoint(struct nilfs_sb_info *);
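
With union nilfs_bmap_union gone from nilfs.h, the inode embeds a plain struct nilfs_bmap, and NILFS_BMAP_I() reduces to a direct container_of() with no cast through the union. A self-contained sketch of that accessor pattern:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bmap { int b_state; };

struct inode_info {
	unsigned long i_state;
	struct bmap i_bmap_data;	/* embedded, as in the patch */
};

static struct inode_info *BMAP_I(struct bmap *bmap)
{
	return container_of(bmap, struct inode_info, i_bmap_data);
}

int main(void)
{
	struct inode_info ii = { .i_state = 7 };

	/* recover the owning inode from a pointer to its bmap member */
	printf("i_state = %lu\n", BMAP_I(&ii.i_bmap_data)->i_state);
	return 0;
}
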
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 8de3e1e4813..aab11db2cb0 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -37,7 +37,8 @@
#define NILFS_BUFFER_INHERENT_BITS \
((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
- (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated))
+ (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated) | \
+ (1UL << BH_NILFS_Checked))
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
@@ -129,6 +130,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
lock_buffer(bh);
clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_checked(bh);
clear_buffer_dirty(bh);
if (nilfs_page_buffers_clean(page))
__nilfs_clear_page_dirty(page);
@@ -480,6 +482,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
lock_buffer(bh);
clear_buffer_dirty(bh);
clear_buffer_nilfs_volatile(bh);
+ clear_buffer_nilfs_checked(bh);
clear_buffer_uptodate(bh);
clear_buffer_mapped(bh);
unlock_buffer(bh);
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 8abca4d1c1f..f53d8da41ed 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -34,11 +34,13 @@ enum {
BH_NILFS_Allocated = BH_PrivateStart,
BH_NILFS_Node,
BH_NILFS_Volatile,
+ BH_NILFS_Checked,
};
BUFFER_FNS(NILFS_Allocated, nilfs_allocated) /* nilfs private buffers */
BUFFER_FNS(NILFS_Node, nilfs_node) /* nilfs node buffers */
BUFFER_FNS(NILFS_Volatile, nilfs_volatile)
+BUFFER_FNS(NILFS_Checked, nilfs_checked) /* buffer is verified */
void nilfs_mark_buffer_dirty(struct buffer_head *bh);
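
The new BH_NILFS_Checked bit marks a btree node buffer whose contents have already been verified; page.c adds it to NILFS_BUFFER_INHERENT_BITS and clears it wherever a buffer is forgotten or wiped, so the flag travels and dies with the buffer. BUFFER_FNS() generates the set/clear/test helpers; a rough user-space approximation (not the kernel macro, and the bit number is illustrative):

#include <stdio.h>

struct buffer_head { unsigned long b_state; };

/* generates set_buffer_X/clear_buffer_X/buffer_X, like the kernel macro */
#define BUFFER_FNS(bit, name)						\
static void set_buffer_##name(struct buffer_head *bh)			\
{ bh->b_state |= (1UL << (bit)); }					\
static void clear_buffer_##name(struct buffer_head *bh)		\
{ bh->b_state &= ~(1UL << (bit)); }					\
static int buffer_##name(const struct buffer_head *bh)			\
{ return (bh->b_state >> (bit)) & 1; }

BUFFER_FNS(3, nilfs_checked)	/* bit number is illustrative */

int main(void)
{
	struct buffer_head bh = { 0 };

	set_buffer_nilfs_checked(&bh);
	printf("checked: %d\n", buffer_nilfs_checked(&bh));
	clear_buffer_nilfs_checked(&bh);
	printf("checked: %d\n", buffer_nilfs_checked(&bh));
	return 0;
}
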
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index bae2a516b4e..d0c35ef39f6 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -91,27 +91,9 @@ static int nilfs_warn_segment_error(int err)
return -EINVAL;
}
-static void store_segsum_info(struct nilfs_segsum_info *ssi,
- struct nilfs_segment_summary *sum,
- unsigned int blocksize)
-{
- ssi->flags = le16_to_cpu(sum->ss_flags);
- ssi->seg_seq = le64_to_cpu(sum->ss_seq);
- ssi->ctime = le64_to_cpu(sum->ss_create);
- ssi->next = le64_to_cpu(sum->ss_next);
- ssi->nblocks = le32_to_cpu(sum->ss_nblocks);
- ssi->nfinfo = le32_to_cpu(sum->ss_nfinfo);
- ssi->sumbytes = le32_to_cpu(sum->ss_sumbytes);
-
- ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize);
- ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi);
-
- /* need to verify ->ss_bytes field if read ->ss_cno */
-}
-
/**
- * calc_crc_cont - check CRC of blocks continuously
- * @sbi: nilfs_sb_info
+ * nilfs_compute_checksum - compute checksum of blocks continuously
+ * @nilfs: nilfs object
* @bhs: buffer head of start block
* @sum: place to store result
* @offset: offset bytes in the first block
@@ -119,23 +101,25 @@ static void store_segsum_info(struct nilfs_segsum_info *ssi,
* @start: DBN of start block
* @nblock: number of blocks to be checked
*/
-static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs,
- u32 *sum, unsigned long offset, u64 check_bytes,
- sector_t start, unsigned long nblock)
+static int nilfs_compute_checksum(struct the_nilfs *nilfs,
+ struct buffer_head *bhs, u32 *sum,
+ unsigned long offset, u64 check_bytes,
+ sector_t start, unsigned long nblock)
{
- unsigned long blocksize = sbi->s_super->s_blocksize;
+ unsigned int blocksize = nilfs->ns_blocksize;
unsigned long size;
u32 crc;
BUG_ON(offset >= blocksize);
check_bytes -= offset;
size = min_t(u64, check_bytes, blocksize - offset);
- crc = crc32_le(sbi->s_nilfs->ns_crc_seed,
+ crc = crc32_le(nilfs->ns_crc_seed,
(unsigned char *)bhs->b_data + offset, size);
if (--nblock > 0) {
do {
- struct buffer_head *bh
- = sb_bread(sbi->s_super, ++start);
+ struct buffer_head *bh;
+
+ bh = __bread(nilfs->ns_bdev, ++start, blocksize);
if (!bh)
return -EIO;
check_bytes -= size;
@@ -150,12 +134,12 @@ static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs,
/**
* nilfs_read_super_root_block - read super root block
- * @sb: super_block
+ * @nilfs: nilfs object
* @sr_block: disk block number of the super root block
* @pbh: address of a buffer_head pointer to return super root buffer
* @check: CRC check flag
*/
-int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
+int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
struct buffer_head **pbh, int check)
{
struct buffer_head *bh_sr;
@@ -164,7 +148,7 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
int ret;
*pbh = NULL;
- bh_sr = sb_bread(sb, sr_block);
+ bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
if (unlikely(!bh_sr)) {
ret = NILFS_SEG_FAIL_IO;
goto failed;
@@ -174,12 +158,13 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
if (check) {
unsigned bytes = le16_to_cpu(sr->sr_bytes);
- if (bytes == 0 || bytes > sb->s_blocksize) {
+ if (bytes == 0 || bytes > nilfs->ns_blocksize) {
ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
goto failed_bh;
}
- if (calc_crc_cont(NILFS_SB(sb), bh_sr, &crc,
- sizeof(sr->sr_sum), bytes, sr_block, 1)) {
+ if (nilfs_compute_checksum(
+ nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
+ sr_block, 1)) {
ret = NILFS_SEG_FAIL_IO;
goto failed_bh;
}
@@ -199,64 +184,76 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block,
}
/**
- * load_segment_summary - read segment summary of the specified partial segment
- * @sbi: nilfs_sb_info
- * @pseg_start: start disk block number of partial segment
- * @seg_seq: sequence number requested
- * @ssi: pointer to nilfs_segsum_info struct to store information
+ * nilfs_read_log_header - read summary header of the specified log
+ * @nilfs: nilfs object
+ * @start_blocknr: start block number of the log
+ * @sum: pointer to return segment summary structure
*/
-static int
-load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start,
- u64 seg_seq, struct nilfs_segsum_info *ssi)
+static struct buffer_head *
+nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
+ struct nilfs_segment_summary **sum)
{
struct buffer_head *bh_sum;
- struct nilfs_segment_summary *sum;
+
+ bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
+ if (bh_sum)
+ *sum = (struct nilfs_segment_summary *)bh_sum->b_data;
+ return bh_sum;
+}
+
+/**
+ * nilfs_validate_log - verify consistency of log
+ * @nilfs: nilfs object
+ * @seg_seq: sequence number of segment
+ * @bh_sum: buffer head of summary block
+ * @sum: segment summary struct
+ */
+static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
+ struct buffer_head *bh_sum,
+ struct nilfs_segment_summary *sum)
+{
unsigned long nblock;
u32 crc;
- int ret = NILFS_SEG_FAIL_IO;
+ int ret;
- bh_sum = sb_bread(sbi->s_super, pseg_start);
- if (!bh_sum)
+ ret = NILFS_SEG_FAIL_MAGIC;
+ if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
goto out;
- sum = (struct nilfs_segment_summary *)bh_sum->b_data;
-
- /* Check consistency of segment summary */
- if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) {
- ret = NILFS_SEG_FAIL_MAGIC;
- goto failed;
- }
- store_segsum_info(ssi, sum, sbi->s_super->s_blocksize);
- if (seg_seq != ssi->seg_seq) {
- ret = NILFS_SEG_FAIL_SEQ;
- goto failed;
- }
+ ret = NILFS_SEG_FAIL_SEQ;
+ if (le64_to_cpu(sum->ss_seq) != seg_seq)
+ goto out;
- nblock = ssi->nblocks;
- if (unlikely(nblock == 0 ||
- nblock > sbi->s_nilfs->ns_blocks_per_segment)) {
+ nblock = le32_to_cpu(sum->ss_nblocks);
+ ret = NILFS_SEG_FAIL_CONSISTENCY;
+ if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
/* This limits the number of blocks read in the CRC check */
- ret = NILFS_SEG_FAIL_CONSISTENCY;
- goto failed;
- }
- if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum),
- ((u64)nblock << sbi->s_super->s_blocksize_bits),
- pseg_start, nblock)) {
- ret = NILFS_SEG_FAIL_IO;
- goto failed;
- }
- if (crc == le32_to_cpu(sum->ss_datasum))
- ret = 0;
- else
- ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
- failed:
- brelse(bh_sum);
- out:
+ goto out;
+
+ ret = NILFS_SEG_FAIL_IO;
+ if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
+ ((u64)nblock << nilfs->ns_blocksize_bits),
+ bh_sum->b_blocknr, nblock))
+ goto out;
+
+ ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
+ if (crc != le32_to_cpu(sum->ss_datasum))
+ goto out;
+ ret = 0;
+out:
return ret;
}
-static void *segsum_get(struct super_block *sb, struct buffer_head **pbh,
- unsigned int *offset, unsigned int bytes)
+/**
+ * nilfs_read_summary_info - read an item on summary blocks of a log
+ * @nilfs: nilfs object
+ * @pbh: the current buffer head on summary blocks [in, out]
+ * @offset: the current byte offset on summary blocks [in, out]
+ * @bytes: byte size of the item to be read
+ */
+static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
+ struct buffer_head **pbh,
+ unsigned int *offset, unsigned int bytes)
{
void *ptr;
sector_t blocknr;
@@ -265,7 +262,8 @@ static void *segsum_get(struct super_block *sb, struct buffer_head **pbh,
if (bytes > (*pbh)->b_size - *offset) {
blocknr = (*pbh)->b_blocknr;
brelse(*pbh);
- *pbh = sb_bread(sb, blocknr + 1);
+ *pbh = __bread(nilfs->ns_bdev, blocknr + 1,
+ nilfs->ns_blocksize);
if (unlikely(!*pbh))
return NULL;
*offset = 0;
@@ -275,9 +273,18 @@ static void *segsum_get(struct super_block *sb, struct buffer_head **pbh,
return ptr;
}
-static void segsum_skip(struct super_block *sb, struct buffer_head **pbh,
- unsigned int *offset, unsigned int bytes,
- unsigned long count)
+/**
+ * nilfs_skip_summary_info - skip items on summary blocks of a log
+ * @nilfs: nilfs object
+ * @pbh: the current buffer head on summary blocks [in, out]
+ * @offset: the current byte offset on summary blocks [in, out]
+ * @bytes: byte size of the item to be skipped
+ * @count: number of items to be skipped
+ */
+static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
+ struct buffer_head **pbh,
+ unsigned int *offset, unsigned int bytes,
+ unsigned long count)
{
unsigned int rest_item_in_current_block
= ((*pbh)->b_size - *offset) / bytes;
@@ -294,36 +301,46 @@ static void segsum_skip(struct super_block *sb, struct buffer_head **pbh,
*offset = bytes * (count - (bcnt - 1) * nitem_per_block);
brelse(*pbh);
- *pbh = sb_bread(sb, blocknr + bcnt);
+ *pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
+ nilfs->ns_blocksize);
}
}
-static int
-collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr,
- struct nilfs_segsum_info *ssi,
- struct list_head *head)
+/**
+ * nilfs_scan_dsync_log - get block information of a log written for data sync
+ * @nilfs: nilfs object
+ * @start_blocknr: start block number of the log
+ * @sum: log summary information
+ * @head: list head to add nilfs_recovery_block struct
+ */
+static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
+ struct nilfs_segment_summary *sum,
+ struct list_head *head)
{
struct buffer_head *bh;
unsigned int offset;
- unsigned long nfinfo = ssi->nfinfo;
- sector_t blocknr = sum_blocknr + ssi->nsumblk;
+ u32 nfinfo, sumbytes;
+ sector_t blocknr;
ino_t ino;
int err = -EIO;
+ nfinfo = le32_to_cpu(sum->ss_nfinfo);
if (!nfinfo)
return 0;
- bh = sb_bread(sbi->s_super, sum_blocknr);
+ sumbytes = le32_to_cpu(sum->ss_sumbytes);
+ blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
+ bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
if (unlikely(!bh))
goto out;
- offset = le16_to_cpu(
- ((struct nilfs_segment_summary *)bh->b_data)->ss_bytes);
+ offset = le16_to_cpu(sum->ss_bytes);
for (;;) {
unsigned long nblocks, ndatablk, nnodeblk;
struct nilfs_finfo *finfo;
- finfo = segsum_get(sbi->s_super, &bh, &offset, sizeof(*finfo));
+ finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
+ sizeof(*finfo));
if (unlikely(!finfo))
goto out;
@@ -336,8 +353,8 @@ collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr,
struct nilfs_recovery_block *rb;
struct nilfs_binfo_v *binfo;
- binfo = segsum_get(sbi->s_super, &bh, &offset,
- sizeof(*binfo));
+ binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
+ sizeof(*binfo));
if (unlikely(!binfo))
goto out;
@@ -355,9 +372,9 @@ collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr,
}
if (--nfinfo == 0)
break;
- blocknr += nnodeblk; /* always 0 for the data sync segments */
- segsum_skip(sbi->s_super, &bh, &offset, sizeof(__le64),
- nnodeblk);
+ blocknr += nnodeblk; /* always 0 for data sync logs */
+ nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
+ nnodeblk);
if (unlikely(!bh))
goto out;
}
@@ -467,14 +484,14 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
return err;
}
-static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi,
+static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
struct nilfs_recovery_block *rb,
struct page *page)
{
struct buffer_head *bh_org;
void *kaddr;
- bh_org = sb_bread(sbi->s_super, rb->blocknr);
+ bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
if (unlikely(!bh_org))
return -EIO;
@@ -485,13 +502,14 @@ static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi,
return 0;
}
-static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
- struct list_head *head,
- unsigned long *nr_salvaged_blocks)
+static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
+ struct nilfs_sb_info *sbi,
+ struct list_head *head,
+ unsigned long *nr_salvaged_blocks)
{
struct inode *inode;
struct nilfs_recovery_block *rb, *n;
- unsigned blocksize = sbi->s_super->s_blocksize;
+ unsigned blocksize = nilfs->ns_blocksize;
struct page *page;
loff_t pos;
int err = 0, err2 = 0;
@@ -505,13 +523,16 @@ static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
}
pos = rb->blkoff << inode->i_blkbits;
- page = NULL;
- err = block_write_begin(NULL, inode->i_mapping, pos, blocksize,
- 0, &page, NULL, nilfs_get_block);
- if (unlikely(err))
+ err = block_write_begin(inode->i_mapping, pos, blocksize,
+ 0, &page, nilfs_get_block);
+ if (unlikely(err)) {
+ loff_t isize = inode->i_size;
+ if (pos + blocksize > isize)
+ vmtruncate(inode, isize);
goto failed_inode;
+ }
- err = nilfs_recovery_copy_block(sbi, rb, page);
+ err = nilfs_recovery_copy_block(nilfs, rb, page);
if (unlikely(err))
goto failed_page;
@@ -551,18 +572,20 @@ static int recover_dsync_blocks(struct nilfs_sb_info *sbi,
/**
* nilfs_do_roll_forward - salvage logical segments newer than the latest
* checkpoint
+ * @nilfs: nilfs object
* @sbi: nilfs_sb_info
- * @nilfs: the_nilfs
* @ri: pointer to a nilfs_recovery_info
*/
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
struct nilfs_sb_info *sbi,
struct nilfs_recovery_info *ri)
{
- struct nilfs_segsum_info ssi;
+ struct buffer_head *bh_sum = NULL;
+ struct nilfs_segment_summary *sum;
sector_t pseg_start;
sector_t seg_start, seg_end; /* Starting/ending DBN of full segment */
unsigned long nsalvaged_blocks = 0;
+ unsigned int flags;
u64 seg_seq;
__u64 segnum, nextnum = 0;
int empty_seg = 0;
@@ -581,8 +604,14 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
+ brelse(bh_sum);
+ bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
+ if (!bh_sum) {
+ err = -EIO;
+ goto failed;
+ }
- ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
+ ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
if (ret) {
if (ret == NILFS_SEG_FAIL_IO) {
err = -EIO;
@@ -590,33 +619,38 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
}
goto strayed;
}
- if (unlikely(NILFS_SEG_HAS_SR(&ssi)))
+
+ flags = le16_to_cpu(sum->ss_flags);
+ if (flags & NILFS_SS_SR)
goto confused;
/* Found a valid partial segment; do recovery actions */
- nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
+ nextnum = nilfs_get_segnum_of_block(nilfs,
+ le64_to_cpu(sum->ss_next));
empty_seg = 0;
- nilfs->ns_ctime = ssi.ctime;
- if (!(ssi.flags & NILFS_SS_GC))
- nilfs->ns_nongc_ctime = ssi.ctime;
+ nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
+ if (!(flags & NILFS_SS_GC))
+ nilfs->ns_nongc_ctime = nilfs->ns_ctime;
switch (state) {
case RF_INIT_ST:
- if (!NILFS_SEG_LOGBGN(&ssi) || !NILFS_SEG_DSYNC(&ssi))
+ if (!(flags & NILFS_SS_LOGBGN) ||
+ !(flags & NILFS_SS_SYNDT))
goto try_next_pseg;
state = RF_DSYNC_ST;
/* Fall through */
case RF_DSYNC_ST:
- if (!NILFS_SEG_DSYNC(&ssi))
+ if (!(flags & NILFS_SS_SYNDT))
goto confused;
- err = collect_blocks_from_segsum(
- sbi, pseg_start, &ssi, &dsync_blocks);
+ err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
+ &dsync_blocks);
if (unlikely(err))
goto failed;
- if (NILFS_SEG_LOGEND(&ssi)) {
- err = recover_dsync_blocks(
- sbi, &dsync_blocks, &nsalvaged_blocks);
+ if (flags & NILFS_SS_LOGEND) {
+ err = nilfs_recover_dsync_blocks(
+ nilfs, sbi, &dsync_blocks,
+ &nsalvaged_blocks);
if (unlikely(err))
goto failed;
state = RF_INIT_ST;
@@ -627,7 +661,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
try_next_pseg:
if (pseg_start == ri->ri_lsegs_end)
break;
- pseg_start += ssi.nblocks;
+ pseg_start += le32_to_cpu(sum->ss_nblocks);
if (pseg_start < seg_end)
continue;
goto feed_segment;
@@ -652,8 +686,9 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
}
out:
+ brelse(bh_sum);
dispose_recovery_list(&dsync_blocks);
- nilfs_detach_writer(sbi->s_nilfs, sbi);
+ nilfs_detach_writer(nilfs, sbi);
return err;
confused:
@@ -667,7 +702,6 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
}
static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
- struct nilfs_sb_info *sbi,
struct nilfs_recovery_info *ri)
{
struct buffer_head *bh;
@@ -677,7 +711,7 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
return;
- bh = sb_getblk(sbi->s_super, ri->ri_lsegs_start);
+ bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
BUG_ON(!bh);
memset(bh->b_data, 0, bh->b_size);
set_buffer_dirty(bh);
@@ -690,9 +724,8 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
}
/**
- * nilfs_recover_logical_segments - salvage logical segments written after
- * the latest super root
- * @nilfs: the_nilfs
+ * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
+ * @nilfs: nilfs object
* @sbi: nilfs_sb_info
* @ri: pointer to a nilfs_recovery_info struct to store search results.
*
@@ -709,9 +742,9 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
*
* %-ENOMEM - Insufficient memory available.
*/
-int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
- struct nilfs_sb_info *sbi,
- struct nilfs_recovery_info *ri)
+int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
+ struct nilfs_sb_info *sbi,
+ struct nilfs_recovery_info *ri)
{
int err;
@@ -751,7 +784,7 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
goto failed;
}
- nilfs_finish_roll_forward(nilfs, sbi, ri);
+ nilfs_finish_roll_forward(nilfs, ri);
}
failed:
@@ -762,7 +795,6 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
/**
* nilfs_search_super_root - search the latest valid super root
* @nilfs: the_nilfs
- * @sbi: nilfs_sb_info
* @ri: pointer to a nilfs_recovery_info struct to store search results.
*
* nilfs_search_super_root() looks for the latest super-root from a partial
@@ -775,14 +807,19 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
* %-EINVAL - No valid segment found
*
* %-EIO - I/O error
+ *
+ * %-ENOMEM - Insufficient memory available.
*/
-int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
+int nilfs_search_super_root(struct the_nilfs *nilfs,
struct nilfs_recovery_info *ri)
{
- struct nilfs_segsum_info ssi;
+ struct buffer_head *bh_sum = NULL;
+ struct nilfs_segment_summary *sum;
sector_t pseg_start, pseg_end, sr_pseg_start = 0;
sector_t seg_start, seg_end; /* range of full segment (block number) */
sector_t b, end;
+ unsigned long nblocks;
+ unsigned int flags;
u64 seg_seq;
__u64 segnum, nextnum = 0;
__u64 cno;
@@ -801,17 +838,24 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
/* Read ahead segment */
b = seg_start;
while (b <= seg_end)
- sb_breadahead(sbi->s_super, b++);
+ __breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);
for (;;) {
- /* Load segment summary */
- ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi);
+ brelse(bh_sum);
+ ret = NILFS_SEG_FAIL_IO;
+ bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
+ if (!bh_sum)
+ goto failed;
+
+ ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
if (ret) {
if (ret == NILFS_SEG_FAIL_IO)
goto failed;
goto strayed;
}
- pseg_end = pseg_start + ssi.nblocks - 1;
+
+ nblocks = le32_to_cpu(sum->ss_nblocks);
+ pseg_end = pseg_start + nblocks - 1;
if (unlikely(pseg_end > seg_end)) {
ret = NILFS_SEG_FAIL_CONSISTENCY;
goto strayed;
@@ -821,11 +865,13 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
ri->ri_pseg_start = pseg_start;
ri->ri_seq = seg_seq;
ri->ri_segnum = segnum;
- nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next);
+ nextnum = nilfs_get_segnum_of_block(nilfs,
+ le64_to_cpu(sum->ss_next));
ri->ri_nextnum = nextnum;
empty_seg = 0;
- if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
+ flags = le16_to_cpu(sum->ss_flags);
+ if (!(flags & NILFS_SS_SR) && !scan_newer) {
/* This will never happen because a superblock
(last_segment) always points to a pseg
having a super root. */
@@ -836,14 +882,15 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
if (pseg_start == seg_start) {
nilfs_get_segment_range(nilfs, nextnum, &b, &end);
while (b <= end)
- sb_breadahead(sbi->s_super, b++);
+ __breadahead(nilfs->ns_bdev, b++,
+ nilfs->ns_blocksize);
}
- if (!NILFS_SEG_HAS_SR(&ssi)) {
- if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
+ if (!(flags & NILFS_SS_SR)) {
+ if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
ri->ri_lsegs_start = pseg_start;
ri->ri_lsegs_start_seq = seg_seq;
}
- if (NILFS_SEG_LOGEND(&ssi))
+ if (flags & NILFS_SS_LOGEND)
ri->ri_lsegs_end = pseg_start;
goto try_next_pseg;
}
@@ -854,12 +901,12 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
ri->ri_lsegs_start = ri->ri_lsegs_end = 0;
nilfs_dispose_segment_list(&segments);
- nilfs->ns_pseg_offset = (sr_pseg_start = pseg_start)
- + ssi.nblocks - seg_start;
+ sr_pseg_start = pseg_start;
+ nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
nilfs->ns_seg_seq = seg_seq;
nilfs->ns_segnum = segnum;
nilfs->ns_cno = cno; /* nilfs->ns_cno = ri->ri_cno + 1 */
- nilfs->ns_ctime = ssi.ctime;
+ nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
nilfs->ns_nextnum = nextnum;
if (scan_newer)
@@ -870,15 +917,9 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
scan_newer = 1;
}
- /* reset region for roll-forward */
- pseg_start += ssi.nblocks;
- if (pseg_start < seg_end)
- continue;
- goto feed_segment;
-
try_next_pseg:
/* Standing on a course, or met an inconsistent state */
- pseg_start += ssi.nblocks;
+ pseg_start += nblocks;
if (pseg_start < seg_end)
continue;
goto feed_segment;
@@ -909,6 +950,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
super_root_found:
/* Updating pointers relating to the latest checkpoint */
+ brelse(bh_sum);
list_splice_tail(&segments, &ri->ri_used_segments);
nilfs->ns_last_pseg = sr_pseg_start;
nilfs->ns_last_seq = nilfs->ns_seg_seq;
@@ -916,6 +958,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
return 0;
failed:
+ brelse(bh_sum);
nilfs_dispose_segment_list(&segments);
return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}
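
One theme unifies the recovery.c rewrite: every read moves from the sb_bread()/sb_breadahead() family to __bread()/__breadahead() on nilfs->ns_bdev with nilfs->ns_blocksize, so log search and roll-forward no longer depend on a fully constructed super_block. On top of that, load_segment_summary() splits into nilfs_read_log_header() plus nilfs_validate_log(), the latter written in the preset-the-error-then-test style visible above. A sketch of that validation shape with placeholder checks and codes:

#include <stdio.h>

enum { SEG_OK = 0, FAIL_MAGIC = 1, FAIL_SEQ = 2, FAIL_CHECKSUM = 3 };

struct summary { unsigned magic, seq, csum; };

static int validate_log(const struct summary *sum, unsigned want_seq,
			unsigned want_csum)
{
	int ret;

	ret = FAIL_MAGIC;	/* preset the code, then test */
	if (sum->magic != 0x1eaf)	/* placeholder magic value */
		goto out;

	ret = FAIL_SEQ;
	if (sum->seq != want_seq)
		goto out;

	ret = FAIL_CHECKSUM;
	if (sum->csum != want_csum)
		goto out;

	ret = SEG_OK;
out:
	return ret;
}

int main(void)
{
	struct summary s = { 0x1eaf, 5, 42 };

	printf("%d %d\n", validate_log(&s, 5, 42), validate_log(&s, 6, 42));
	return 0;
}
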
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 2e6a2723b8f..4588fb9e93d 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -508,7 +508,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* Last BIO is always sent through the following
* submission.
*/
- rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+ rw |= REQ_SYNC | REQ_UNPLUG;
res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
}
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index fdf1c3b6d67..b04f08cc239 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -54,17 +54,6 @@ struct nilfs_segsum_info {
sector_t next;
};
-/* macro for the flags */
-#define NILFS_SEG_HAS_SR(sum) ((sum)->flags & NILFS_SS_SR)
-#define NILFS_SEG_LOGBGN(sum) ((sum)->flags & NILFS_SS_LOGBGN)
-#define NILFS_SEG_LOGEND(sum) ((sum)->flags & NILFS_SS_LOGEND)
-#define NILFS_SEG_DSYNC(sum) ((sum)->flags & NILFS_SS_SYNDT)
-#define NILFS_SEG_SIMPLEX(sum) \
- (((sum)->flags & (NILFS_SS_LOGBGN | NILFS_SS_LOGEND)) == \
- (NILFS_SS_LOGBGN | NILFS_SS_LOGEND))
-
-#define NILFS_SEG_EMPTY(sum) ((sum)->nblocks == (sum)->nsumblk)
-
/**
* struct nilfs_segment_buffer - Segment buffer
* @sb_super: back pointer to a superblock struct
@@ -127,8 +116,6 @@ struct nilfs_segment_buffer {
extern struct kmem_cache *nilfs_segbuf_cachep;
-int __init nilfs_init_segbuf_cache(void);
-void nilfs_destroy_segbuf_cache(void);
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *);
void nilfs_segbuf_free(struct nilfs_segment_buffer *);
void nilfs_segbuf_map(struct nilfs_segment_buffer *, __u64, unsigned long,
@@ -143,6 +130,19 @@ int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *,
struct buffer_head **);
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *);
+static inline int nilfs_segbuf_simplex(struct nilfs_segment_buffer *segbuf)
+{
+ unsigned int flags = segbuf->sb_sum.flags;
+
+ return (flags & (NILFS_SS_LOGBGN | NILFS_SS_LOGEND)) ==
+ (NILFS_SS_LOGBGN | NILFS_SS_LOGEND);
+}
+
+static inline int nilfs_segbuf_empty(struct nilfs_segment_buffer *segbuf)
+{
+ return segbuf->sb_sum.nblocks == segbuf->sb_sum.nsumblk;
+}
+
static inline void
nilfs_segbuf_add_segsum_buffer(struct nilfs_segment_buffer *segbuf,
struct buffer_head *bh)
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index c9201649cc4..9fd051a33c4 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1914,12 +1914,12 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
}
}
- if (!NILFS_SEG_SIMPLEX(&segbuf->sb_sum)) {
- if (NILFS_SEG_LOGBGN(&segbuf->sb_sum)) {
+ if (!nilfs_segbuf_simplex(segbuf)) {
+ if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
sci->sc_lseg_stime = jiffies;
}
- if (NILFS_SEG_LOGEND(&segbuf->sb_sum))
+ if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
}
}
@@ -1951,7 +1951,6 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
if (update_sr) {
nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
- set_nilfs_sb_dirty(nilfs);
clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
@@ -2082,7 +2081,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
/* Avoid empty segment */
if (sci->sc_stage.scnt == NILFS_ST_DONE &&
- NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
+ nilfs_segbuf_empty(sci->sc_curseg)) {
nilfs_segctor_abort_construction(sci, nilfs, 1);
goto out;
}
@@ -2408,6 +2407,7 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
struct nilfs_sb_info *sbi = sci->sc_sbi;
struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct nilfs_super_block **sbp;
int err = 0;
nilfs_segctor_accept(sci);
@@ -2423,8 +2423,13 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
nilfs_discontinued(nilfs)) {
down_write(&nilfs->ns_sem);
- err = nilfs_commit_super(
- sbi, nilfs_altsb_need_update(nilfs));
+ err = -EIO;
+ sbp = nilfs_prepare_super(sbi,
+ nilfs_sb_will_flip(nilfs));
+ if (likely(sbp)) {
+ nilfs_set_log_cursor(sbp[0], nilfs);
+ err = nilfs_commit_super(sbi, NILFS_SB_COMMIT);
+ }
up_write(&nilfs->ns_sem);
}
}
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index dca142361cc..17c487bd815 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -221,8 +221,6 @@ enum {
extern struct kmem_cache *nilfs_transaction_cachep;
/* segment.c */
-extern int nilfs_init_transaction_cache(void);
-extern void nilfs_destroy_transaction_cache(void);
extern void nilfs_relax_pressure_in_lock(struct super_block *);
extern int nilfs_construct_segment(struct super_block *);
@@ -236,13 +234,13 @@ extern int nilfs_attach_segment_constructor(struct nilfs_sb_info *);
extern void nilfs_detach_segment_constructor(struct nilfs_sb_info *);
/* recovery.c */
-extern int nilfs_read_super_root_block(struct super_block *, sector_t,
+extern int nilfs_read_super_root_block(struct the_nilfs *, sector_t,
struct buffer_head **, int);
-extern int nilfs_search_super_root(struct the_nilfs *, struct nilfs_sb_info *,
+extern int nilfs_search_super_root(struct the_nilfs *,
struct nilfs_recovery_info *);
-extern int nilfs_recover_logical_segments(struct the_nilfs *,
- struct nilfs_sb_info *,
- struct nilfs_recovery_info *);
+extern int nilfs_salvage_orphan_logs(struct the_nilfs *,
+ struct nilfs_sb_info *,
+ struct nilfs_recovery_info *);
extern void nilfs_dispose_segment_list(struct list_head *);
#endif /* _NILFS_SEGMENT_H */
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 03b34b73899..1fa86b9df73 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -55,6 +55,8 @@
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
+#include "btree.h"
+#include "btnode.h"
#include "page.h"
#include "cpfile.h"
#include "ifile.h"
@@ -74,6 +76,25 @@ struct kmem_cache *nilfs_btree_path_cache;
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
+static void nilfs_set_error(struct nilfs_sb_info *sbi)
+{
+ struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct nilfs_super_block **sbp;
+
+ down_write(&nilfs->ns_sem);
+ if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
+ nilfs->ns_mount_state |= NILFS_ERROR_FS;
+ sbp = nilfs_prepare_super(sbi, 0);
+ if (likely(sbp)) {
+ sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
+ if (sbp[1])
+ sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
+ nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
+ }
+ }
+ up_write(&nilfs->ns_sem);
+}
+
/**
* nilfs_error() - report failure condition on a filesystem
*
@@ -99,16 +120,7 @@ void nilfs_error(struct super_block *sb, const char *function,
va_end(args);
if (!(sb->s_flags & MS_RDONLY)) {
- struct the_nilfs *nilfs = sbi->s_nilfs;
-
- down_write(&nilfs->ns_sem);
- if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
- nilfs->ns_mount_state |= NILFS_ERROR_FS;
- nilfs->ns_sbp[0]->s_state |=
- cpu_to_le16(NILFS_ERROR_FS);
- nilfs_commit_super(sbi, 1);
- }
- up_write(&nilfs->ns_sem);
+ nilfs_set_error(sbi);
if (nilfs_test_opt(sbi, ERRORS_RO)) {
printk(KERN_CRIT "Remounting filesystem read-only\n");
@@ -159,24 +171,7 @@ void nilfs_destroy_inode(struct inode *inode)
kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
}
-static void nilfs_clear_inode(struct inode *inode)
-{
- struct nilfs_inode_info *ii = NILFS_I(inode);
-
- /*
- * Free resources allocated in nilfs_read_inode(), here.
- */
- BUG_ON(!list_empty(&ii->i_dirty));
- brelse(ii->i_bh);
- ii->i_bh = NULL;
-
- if (test_bit(NILFS_I_BMAP, &ii->i_state))
- nilfs_bmap_clear(ii->i_bmap);
-
- nilfs_btnode_cache_clear(&ii->i_btnode_cache);
-}
-
-static int nilfs_sync_super(struct nilfs_sb_info *sbi, int dupsb)
+static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
{
struct the_nilfs *nilfs = sbi->s_nilfs;
int err;
@@ -202,12 +197,20 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int dupsb)
printk(KERN_ERR
"NILFS: unable to write superblock (err=%d)\n", err);
if (err == -EIO && nilfs->ns_sbh[1]) {
+ /*
+ * sbp[0] points to a newer log than sbp[1], so copy
+ * sbp[0] to sbp[1] so that sbp[1] takes over for sbp[0].
+ */
+ memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0],
+ nilfs->ns_sbsize);
nilfs_fall_back_super_block(nilfs);
goto retry;
}
} else {
struct nilfs_super_block *sbp = nilfs->ns_sbp[0];
+ nilfs->ns_sbwcount++;
+
/*
* The latest segment becomes trailable from the position
* written in superblock.
@@ -216,66 +219,122 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int dupsb)
/* update GC protection for recent segments */
if (nilfs->ns_sbh[1]) {
- sbp = NULL;
- if (dupsb) {
+ if (flag == NILFS_SB_COMMIT_ALL) {
set_buffer_dirty(nilfs->ns_sbh[1]);
- if (!sync_dirty_buffer(nilfs->ns_sbh[1]))
- sbp = nilfs->ns_sbp[1];
+ if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0)
+ goto out;
}
+ if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) <
+ le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno))
+ sbp = nilfs->ns_sbp[1];
}
- if (sbp) {
- spin_lock(&nilfs->ns_last_segment_lock);
- nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
- spin_unlock(&nilfs->ns_last_segment_lock);
- }
- }
+ spin_lock(&nilfs->ns_last_segment_lock);
+ nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
+ spin_unlock(&nilfs->ns_last_segment_lock);
+ }
+ out:
return err;
}
-int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb)
+void nilfs_set_log_cursor(struct nilfs_super_block *sbp,
+ struct the_nilfs *nilfs)
+{
+ sector_t nfreeblocks;
+
+ /* nilfs->ns_sem must be locked by the caller. */
+ nilfs_count_free_blocks(nilfs, &nfreeblocks);
+ sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks);
+
+ spin_lock(&nilfs->ns_last_segment_lock);
+ sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
+ sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
+ sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
+ spin_unlock(&nilfs->ns_last_segment_lock);
+}
+
+struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *sbi,
+ int flip)
{
struct the_nilfs *nilfs = sbi->s_nilfs;
struct nilfs_super_block **sbp = nilfs->ns_sbp;
- sector_t nfreeblocks;
- time_t t;
- int err;
- /* nilfs->sem must be locked by the caller. */
+ /* nilfs->ns_sem must be locked by the caller. */
if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
- if (sbp[1] && sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC))
- nilfs_swap_super_block(nilfs);
- else {
+ if (sbp[1] &&
+ sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
+ memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
+ } else {
printk(KERN_CRIT "NILFS: superblock broke on dev %s\n",
sbi->s_super->s_id);
- return -EIO;
+ return NULL;
}
+ } else if (sbp[1] &&
+ sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
+ memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
}
- err = nilfs_count_free_blocks(nilfs, &nfreeblocks);
- if (unlikely(err)) {
- printk(KERN_ERR "NILFS: failed to count free blocks\n");
- return err;
- }
- spin_lock(&nilfs->ns_last_segment_lock);
- sbp[0]->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
- sbp[0]->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
- sbp[0]->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
- spin_unlock(&nilfs->ns_last_segment_lock);
+ if (flip && sbp[1])
+ nilfs_swap_super_block(nilfs);
+
+ return sbp;
+}
+
+int nilfs_commit_super(struct nilfs_sb_info *sbi, int flag)
+{
+ struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct nilfs_super_block **sbp = nilfs->ns_sbp;
+ time_t t;
+
+ /* nilfs->ns_sem must be locked by the caller. */
t = get_seconds();
- nilfs->ns_sbwtime[0] = t;
- sbp[0]->s_free_blocks_count = cpu_to_le64(nfreeblocks);
+ nilfs->ns_sbwtime = t;
sbp[0]->s_wtime = cpu_to_le64(t);
sbp[0]->s_sum = 0;
sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
(unsigned char *)sbp[0],
nilfs->ns_sbsize));
- if (dupsb && sbp[1]) {
- memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
- nilfs->ns_sbwtime[1] = t;
+ if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) {
+ sbp[1]->s_wtime = sbp[0]->s_wtime;
+ sbp[1]->s_sum = 0;
+ sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
+ (unsigned char *)sbp[1],
+ nilfs->ns_sbsize));
}
clear_nilfs_sb_dirty(nilfs);
- return nilfs_sync_super(sbi, dupsb);
+ return nilfs_sync_super(sbi, flag);
+}
+
+/**
+ * nilfs_cleanup_super() - write filesystem state for cleanup
+ * @sbi: nilfs_sb_info to be unmounted or degraded to read-only
+ *
+ * This function restores state flags in the on-disk super block.
+ * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the
+ * filesystem was not clean previously.
+ */
+int nilfs_cleanup_super(struct nilfs_sb_info *sbi)
+{
+ struct nilfs_super_block **sbp;
+ int flag = NILFS_SB_COMMIT;
+ int ret = -EIO;
+
+ sbp = nilfs_prepare_super(sbi, 0);
+ if (sbp) {
+ sbp[0]->s_state = cpu_to_le16(sbi->s_nilfs->ns_mount_state);
+ nilfs_set_log_cursor(sbp[0], sbi->s_nilfs);
+ if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) {
+ /*
+ * make the "clean" flag also to the opposite
+ * super block if both super blocks point to
+ * the same checkpoint.
+ */
+ sbp[1]->s_state = sbp[0]->s_state;
+ flag = NILFS_SB_COMMIT_ALL;
+ }
+ ret = nilfs_commit_super(sbi, flag);
+ }
+ return ret;
}
static void nilfs_put_super(struct super_block *sb)
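
With these hunks the superblock write path is split into three steps: nilfs_prepare_super() validates the superblock pair (and flips it when requested), nilfs_set_log_cursor() stamps the last log position into it, and nilfs_commit_super() checksums and writes. A hedged sketch of the calling convention, mirroring the callers added elsewhere in this patch (ns_sem held, declarations and error handling abbreviated):

/* sketch of a caller; compare the nilfs_sync_fs() hunk below */
/* struct nilfs_super_block **sbp; int err; declarations omitted */
down_write(&nilfs->ns_sem);
sbp = nilfs_prepare_super(sbi, nilfs_sb_will_flip(nilfs));
if (likely(sbp)) {
	nilfs_set_log_cursor(sbp[0], nilfs);  /* record last pseg/seq/cno */
	err = nilfs_commit_super(sbi, NILFS_SB_COMMIT);
} else {
	err = -EIO;  /* both superblocks are broken */
}
up_write(&nilfs->ns_sem);
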
@@ -289,8 +348,7 @@ static void nilfs_put_super(struct super_block *sb)
if (!(sb->s_flags & MS_RDONLY)) {
down_write(&nilfs->ns_sem);
- nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
- nilfs_commit_super(sbi, 1);
+ nilfs_cleanup_super(sbi);
up_write(&nilfs->ns_sem);
}
down_write(&nilfs->ns_super_sem);
@@ -311,6 +369,7 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
{
struct nilfs_sb_info *sbi = NILFS_SB(sb);
struct the_nilfs *nilfs = sbi->s_nilfs;
+ struct nilfs_super_block **sbp;
int err = 0;
/* This function is called when super block should be written back */
@@ -318,8 +377,13 @@ static int nilfs_sync_fs(struct super_block *sb, int wait)
err = nilfs_construct_segment(sb);
down_write(&nilfs->ns_sem);
- if (nilfs_sb_dirty(nilfs))
- nilfs_commit_super(sbi, 1);
+ if (nilfs_sb_dirty(nilfs)) {
+ sbp = nilfs_prepare_super(sbi, nilfs_sb_will_flip(nilfs));
+ if (likely(sbp)) {
+ nilfs_set_log_cursor(sbp[0], nilfs);
+ nilfs_commit_super(sbi, NILFS_SB_COMMIT);
+ }
+ }
up_write(&nilfs->ns_sem);
return err;
@@ -442,20 +506,20 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
struct nilfs_sb_info *sbi = NILFS_SB(sb);
if (!nilfs_test_opt(sbi, BARRIER))
- seq_printf(seq, ",nobarrier");
+ seq_puts(seq, ",nobarrier");
if (nilfs_test_opt(sbi, SNAPSHOT))
seq_printf(seq, ",cp=%llu",
(unsigned long long int)sbi->s_snapshot_cno);
if (nilfs_test_opt(sbi, ERRORS_PANIC))
- seq_printf(seq, ",errors=panic");
+ seq_puts(seq, ",errors=panic");
if (nilfs_test_opt(sbi, ERRORS_CONT))
- seq_printf(seq, ",errors=continue");
+ seq_puts(seq, ",errors=continue");
if (nilfs_test_opt(sbi, STRICT_ORDER))
- seq_printf(seq, ",order=strict");
+ seq_puts(seq, ",order=strict");
if (nilfs_test_opt(sbi, NORECOVERY))
- seq_printf(seq, ",norecovery");
+ seq_puts(seq, ",norecovery");
if (nilfs_test_opt(sbi, DISCARD))
- seq_printf(seq, ",discard");
+ seq_puts(seq, ",discard");
return 0;
}
@@ -467,7 +531,7 @@ static const struct super_operations nilfs_sops = {
/* .write_inode = nilfs_write_inode, */
/* .put_inode = nilfs_put_inode, */
/* .drop_inode = nilfs_drop_inode, */
- .delete_inode = nilfs_delete_inode,
+ .evict_inode = nilfs_evict_inode,
.put_super = nilfs_put_super,
/* .write_super = nilfs_write_super, */
.sync_fs = nilfs_sync_fs,
@@ -475,7 +539,6 @@ static const struct super_operations nilfs_sops = {
/* .unlockfs */
.statfs = nilfs_statfs,
.remount_fs = nilfs_remount,
- .clear_inode = nilfs_clear_inode,
/* .umount_begin */
.show_options = nilfs_show_options
};
@@ -524,23 +587,25 @@ static const struct export_operations nilfs_export_ops = {
enum {
Opt_err_cont, Opt_err_panic, Opt_err_ro,
- Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
- Opt_discard, Opt_err,
+ Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
+ Opt_discard, Opt_nodiscard, Opt_err,
};
static match_table_t tokens = {
{Opt_err_cont, "errors=continue"},
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
+ {Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
{Opt_snapshot, "cp=%u"},
{Opt_order, "order=%s"},
{Opt_norecovery, "norecovery"},
{Opt_discard, "discard"},
+ {Opt_nodiscard, "nodiscard"},
{Opt_err, NULL}
};
-static int parse_options(char *options, struct super_block *sb)
+static int parse_options(char *options, struct super_block *sb, int is_remount)
{
struct nilfs_sb_info *sbi = NILFS_SB(sb);
char *p;
@@ -557,6 +622,9 @@ static int parse_options(char *options, struct super_block *sb)
token = match_token(p, tokens, args);
switch (token) {
+ case Opt_barrier:
+ nilfs_set_opt(sbi, BARRIER);
+ break;
case Opt_nobarrier:
nilfs_clear_opt(sbi, BARRIER);
break;
@@ -582,8 +650,26 @@ static int parse_options(char *options, struct super_block *sb)
case Opt_snapshot:
if (match_int(&args[0], &option) || option <= 0)
return 0;
- if (!(sb->s_flags & MS_RDONLY))
+ if (is_remount) {
+ if (!nilfs_test_opt(sbi, SNAPSHOT)) {
+ printk(KERN_ERR
+ "NILFS: cannot change regular "
+ "mount to snapshot.\n");
+ return 0;
+ } else if (option != sbi->s_snapshot_cno) {
+ printk(KERN_ERR
+ "NILFS: cannot remount to a "
+ "different snapshot.\n");
+ return 0;
+ }
+ break;
+ }
+ if (!(sb->s_flags & MS_RDONLY)) {
+ printk(KERN_ERR "NILFS: cannot mount snapshot "
+ "read/write. A read-only option is "
+ "required.\n");
return 0;
+ }
sbi->s_snapshot_cno = option;
nilfs_set_opt(sbi, SNAPSHOT);
break;
@@ -593,6 +679,9 @@ static int parse_options(char *options, struct super_block *sb)
case Opt_discard:
nilfs_set_opt(sbi, DISCARD);
break;
+ case Opt_nodiscard:
+ nilfs_clear_opt(sbi, DISCARD);
+ break;
default:
printk(KERN_ERR
"NILFS: Unrecognized mount option \"%s\"\n", p);
@@ -613,11 +702,18 @@ nilfs_set_default_options(struct nilfs_sb_info *sbi,
static int nilfs_setup_super(struct nilfs_sb_info *sbi)
{
struct the_nilfs *nilfs = sbi->s_nilfs;
- struct nilfs_super_block *sbp = nilfs->ns_sbp[0];
- int max_mnt_count = le16_to_cpu(sbp->s_max_mnt_count);
- int mnt_count = le16_to_cpu(sbp->s_mnt_count);
+ struct nilfs_super_block **sbp;
+ int max_mnt_count;
+ int mnt_count;
+
+ /* nilfs->ns_sem must be locked by the caller. */
+ sbp = nilfs_prepare_super(sbi, 0);
+ if (!sbp)
+ return -EIO;
+
+ max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count);
+ mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);
- /* nilfs->sem must be locked by the caller. */
if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
printk(KERN_WARNING
"NILFS warning: mounting fs with errors\n");
@@ -628,12 +724,15 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi)
#endif
}
if (!max_mnt_count)
- sbp->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);
-
- sbp->s_mnt_count = cpu_to_le16(mnt_count + 1);
- sbp->s_state = cpu_to_le16(le16_to_cpu(sbp->s_state) & ~NILFS_VALID_FS);
- sbp->s_mtime = cpu_to_le64(get_seconds());
- return nilfs_commit_super(sbi, 1);
+ sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);
+
+ sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1);
+ sbp[0]->s_state =
+ cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
+ sbp[0]->s_mtime = cpu_to_le64(get_seconds());
+ /* synchronize sbp[1] with sbp[0] */
+ memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
+ return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
}
struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb,
@@ -670,7 +769,31 @@ int nilfs_store_magic_and_option(struct super_block *sb,
sbi->s_interval = le32_to_cpu(sbp->s_c_interval);
sbi->s_watermark = le32_to_cpu(sbp->s_c_block_max);
- return !parse_options(data, sb) ? -EINVAL : 0 ;
+ return !parse_options(data, sb, 0) ? -EINVAL : 0;
+}
+
+int nilfs_check_feature_compatibility(struct super_block *sb,
+ struct nilfs_super_block *sbp)
+{
+ __u64 features;
+
+ features = le64_to_cpu(sbp->s_feature_incompat) &
+ ~NILFS_FEATURE_INCOMPAT_SUPP;
+ if (features) {
+ printk(KERN_ERR "NILFS: couldn't mount because of unsupported "
+ "optional features (%llx)\n",
+ (unsigned long long)features);
+ return -EINVAL;
+ }
+ features = le64_to_cpu(sbp->s_feature_compat_ro) &
+ ~NILFS_FEATURE_COMPAT_RO_SUPP;
+ if (!(sb->s_flags & MS_RDONLY) && features) {
+ printk(KERN_ERR "NILFS: couldn't mount RDWR because of "
+ "unsupported optional features (%llx)\n",
+ (unsigned long long)features);
+ return -EINVAL;
+ }
+ return 0;
}
/**
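
nilfs_check_feature_compatibility() applies the usual ext2-style policy: unknown incompat feature bits refuse any mount, while unknown read-only-compat bits refuse only read/write mounts. The masking logic in isolation, with demo bit values rather than NILFS's real feature masks:

#include <stdint.h>
#include <stdio.h>

#define SUPPORTED_INCOMPAT 0x0001ULL  /* demo values, not NILFS constants */

int main(void)
{
	uint64_t on_disk_incompat = 0x0005ULL;  /* bit 0x4 is unknown to us */
	uint64_t unknown = on_disk_incompat & ~SUPPORTED_INCOMPAT;

	if (unknown)
		printf("refuse mount: unsupported incompat features (%llx)\n",
		       (unsigned long long)unknown);
	return 0;
}
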
@@ -819,7 +942,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
static int nilfs_remount(struct super_block *sb, int *flags, char *data)
{
struct nilfs_sb_info *sbi = NILFS_SB(sb);
- struct nilfs_super_block *sbp;
struct the_nilfs *nilfs = sbi->s_nilfs;
unsigned long old_sb_flags;
struct nilfs_mount_options old_opts;
@@ -833,32 +955,17 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
old_opts.snapshot_cno = sbi->s_snapshot_cno;
was_snapshot = nilfs_test_opt(sbi, SNAPSHOT);
- if (!parse_options(data, sb)) {
+ if (!parse_options(data, sb, 1)) {
err = -EINVAL;
goto restore_opts;
}
sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
err = -EINVAL;
- if (was_snapshot) {
- if (!(*flags & MS_RDONLY)) {
- printk(KERN_ERR "NILFS (device %s): cannot remount "
- "snapshot read/write.\n",
- sb->s_id);
- goto restore_opts;
- } else if (sbi->s_snapshot_cno != old_opts.snapshot_cno) {
- printk(KERN_ERR "NILFS (device %s): cannot "
- "remount to a different snapshot.\n",
- sb->s_id);
- goto restore_opts;
- }
- } else {
- if (nilfs_test_opt(sbi, SNAPSHOT)) {
- printk(KERN_ERR "NILFS (device %s): cannot change "
- "a regular mount to a snapshot.\n",
- sb->s_id);
- goto restore_opts;
- }
+ if (was_snapshot && !(*flags & MS_RDONLY)) {
+ printk(KERN_ERR "NILFS (device %s): cannot remount snapshot "
+ "read/write.\n", sb->s_id);
+ goto restore_opts;
}
if (!nilfs_valid_fs(nilfs)) {
@@ -880,19 +987,29 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
* the RDONLY flag and then mark the partition as valid again.
*/
down_write(&nilfs->ns_sem);
- sbp = nilfs->ns_sbp[0];
- if (!(sbp->s_state & le16_to_cpu(NILFS_VALID_FS)) &&
- (nilfs->ns_mount_state & NILFS_VALID_FS))
- sbp->s_state = cpu_to_le16(nilfs->ns_mount_state);
- sbp->s_mtime = cpu_to_le64(get_seconds());
- nilfs_commit_super(sbi, 1);
+ nilfs_cleanup_super(sbi);
up_write(&nilfs->ns_sem);
} else {
+ __u64 features;
+
/*
* Mounting a RDONLY partition read-write, so reread and
* store the current valid flag. (It may have been changed
* by fsck since we originally mounted the partition.)
*/
+ down_read(&nilfs->ns_sem);
+ features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
+ ~NILFS_FEATURE_COMPAT_RO_SUPP;
+ up_read(&nilfs->ns_sem);
+ if (features) {
+ printk(KERN_WARNING "NILFS (device %s): couldn't "
+ "remount RDWR because of unsupported optional "
+ "features (%llx)\n",
+ sb->s_id, (unsigned long long)features);
+ err = -EROFS;
+ goto restore_opts;
+ }
+
sb->s_flags &= ~MS_RDONLY;
err = nilfs_attach_segment_constructor(sbi);
@@ -1119,7 +1236,7 @@ static void nilfs_inode_init_once(void *obj)
init_rwsem(&ii->xattr_sem);
#endif
nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
- ii->i_bmap = (struct nilfs_bmap *)&ii->i_bmap_union;
+ ii->i_bmap = &ii->i_bmap_data;
inode_init_once(&ii->vfs_inode);
}
@@ -1130,13 +1247,13 @@ static void nilfs_segbuf_init_once(void *obj)
static void nilfs_destroy_cachep(void)
{
- if (nilfs_inode_cachep)
+ if (nilfs_inode_cachep)
kmem_cache_destroy(nilfs_inode_cachep);
- if (nilfs_transaction_cachep)
+ if (nilfs_transaction_cachep)
kmem_cache_destroy(nilfs_transaction_cachep);
- if (nilfs_segbuf_cachep)
+ if (nilfs_segbuf_cachep)
kmem_cache_destroy(nilfs_segbuf_cachep);
- if (nilfs_btree_path_cache)
+ if (nilfs_btree_path_cache)
kmem_cache_destroy(nilfs_btree_path_cache);
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 8c1097327ab..37de1f062d8 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -38,6 +38,8 @@
static LIST_HEAD(nilfs_objects);
static DEFINE_SPINLOCK(nilfs_lock);
+static int nilfs_valid_sb(struct nilfs_super_block *sbp);
+
void nilfs_set_last_segment(struct the_nilfs *nilfs,
sector_t start_blocknr, u64 seq, __u64 cno)
{
@@ -45,6 +47,16 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs,
nilfs->ns_last_pseg = start_blocknr;
nilfs->ns_last_seq = seq;
nilfs->ns_last_cno = cno;
+
+ if (!nilfs_sb_dirty(nilfs)) {
+ if (nilfs->ns_prev_seq == nilfs->ns_last_seq)
+ goto stay_cursor;
+
+ set_nilfs_sb_dirty(nilfs);
+ }
+ nilfs->ns_prev_seq = nilfs->ns_last_seq;
+
+ stay_cursor:
spin_unlock(&nilfs->ns_last_segment_lock);
}
@@ -159,8 +171,7 @@ void put_nilfs(struct the_nilfs *nilfs)
kfree(nilfs);
}
-static int nilfs_load_super_root(struct the_nilfs *nilfs,
- struct nilfs_sb_info *sbi, sector_t sr_block)
+static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block)
{
struct buffer_head *bh_sr;
struct nilfs_super_root *raw_sr;
@@ -169,7 +180,7 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs,
unsigned inode_size;
int err;
- err = nilfs_read_super_root_block(sbi->s_super, sr_block, &bh_sr, 1);
+ err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1);
if (unlikely(err))
return err;
@@ -248,6 +259,37 @@ static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
}
/**
+ * nilfs_store_log_cursor - load log cursor from a super block
+ * @nilfs: nilfs object
+ * @sbp: buffer storing super block to be read
+ *
+ * nilfs_store_log_cursor() reads the last position of the log
+ * containing a super root from a given super block, and initializes
+ * relevant information on the nilfs object preparatory for log
+ * scanning and recovery.
+ */
+static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
+ struct nilfs_super_block *sbp)
+{
+ int ret = 0;
+
+ nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
+ nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
+ nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);
+
+ nilfs->ns_prev_seq = nilfs->ns_last_seq;
+ nilfs->ns_seg_seq = nilfs->ns_last_seq;
+ nilfs->ns_segnum =
+ nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
+ nilfs->ns_cno = nilfs->ns_last_cno + 1;
+ if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
+ printk(KERN_ERR "NILFS invalid last segment number.\n");
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/**
* load_nilfs - load and recover the nilfs
* @nilfs: the_nilfs structure to be released
* @sbi: nilfs_sb_info used to recover past segment
@@ -285,13 +327,55 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
nilfs_init_recovery_info(&ri);
- err = nilfs_search_super_root(nilfs, sbi, &ri);
+ err = nilfs_search_super_root(nilfs, &ri);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: error searching super root.\n");
- goto failed;
+ struct nilfs_super_block **sbp = nilfs->ns_sbp;
+ int blocksize;
+
+ if (err != -EINVAL)
+ goto scan_error;
+
+ if (!nilfs_valid_sb(sbp[1])) {
+ printk(KERN_WARNING
+ "NILFS warning: unable to fall back to spare"
+ "super block\n");
+ goto scan_error;
+ }
+ printk(KERN_INFO
+ "NILFS: try rollback from an earlier position\n");
+
+ /*
+ * restore super block with its spare and reconfigure
+ * relevant states of the nilfs object.
+ */
+ memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
+ nilfs->ns_crc_seed = le32_to_cpu(sbp[0]->s_crc_seed);
+ nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
+
+ /* verify consistency between two super blocks */
+ blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
+ if (blocksize != nilfs->ns_blocksize) {
+ printk(KERN_WARNING
+ "NILFS warning: blocksize differs between "
+ "two super blocks (%d != %d)\n",
+ blocksize, nilfs->ns_blocksize);
+ goto scan_error;
+ }
+
+ err = nilfs_store_log_cursor(nilfs, sbp[0]);
+ if (err)
+ goto scan_error;
+
+ /* drop clean flag to allow roll-forward and recovery */
+ nilfs->ns_mount_state &= ~NILFS_VALID_FS;
+ valid_fs = 0;
+
+ err = nilfs_search_super_root(nilfs, &ri);
+ if (err)
+ goto scan_error;
}
- err = nilfs_load_super_root(nilfs, sbi, ri.ri_super_root);
+ err = nilfs_load_super_root(nilfs, ri.ri_super_root);
if (unlikely(err)) {
printk(KERN_ERR "NILFS: error loading super root.\n");
goto failed;
@@ -301,11 +385,23 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
goto skip_recovery;
if (s_flags & MS_RDONLY) {
+ __u64 features;
+
if (nilfs_test_opt(sbi, NORECOVERY)) {
printk(KERN_INFO "NILFS: norecovery option specified. "
"skipping roll-forward recovery\n");
goto skip_recovery;
}
+ features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
+ ~NILFS_FEATURE_COMPAT_RO_SUPP;
+ if (features) {
+ printk(KERN_ERR "NILFS: couldn't proceed with "
+ "recovery because of unsupported optional "
+ "features (%llx)\n",
+ (unsigned long long)features);
+ err = -EROFS;
+ goto failed_unload;
+ }
if (really_read_only) {
printk(KERN_ERR "NILFS: write access "
"unavailable, cannot proceed.\n");
@@ -320,14 +416,13 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
goto failed_unload;
}
- err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
+ err = nilfs_salvage_orphan_logs(nilfs, sbi, &ri);
if (err)
goto failed_unload;
down_write(&nilfs->ns_sem);
- nilfs->ns_mount_state |= NILFS_VALID_FS;
- nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
- err = nilfs_commit_super(sbi, 1);
+ nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */
+ err = nilfs_cleanup_super(sbi);
up_write(&nilfs->ns_sem);
if (err) {
@@ -343,6 +438,10 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
sbi->s_super->s_flags = s_flags;
return 0;
+ scan_error:
+ printk(KERN_ERR "NILFS: error searching super root.\n");
+ goto failed;
+
failed_unload:
nilfs_mdt_destroy(nilfs->ns_cpfile);
nilfs_mdt_destroy(nilfs->ns_sufile);
@@ -515,8 +614,8 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
nilfs_swap_super_block(nilfs);
}
- nilfs->ns_sbwtime[0] = le64_to_cpu(sbp[0]->s_wtime);
- nilfs->ns_sbwtime[1] = valid[!swp] ? le64_to_cpu(sbp[1]->s_wtime) : 0;
+ nilfs->ns_sbwcount = 0;
+ nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
*sbpp = sbp[0];
return 0;
@@ -557,6 +656,10 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
if (err)
goto out;
+ err = nilfs_check_feature_compatibility(sb, sbp);
+ if (err)
+ goto out;
+
blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
if (sb->s_blocksize != blocksize &&
!sb_set_blocksize(sb, blocksize)) {
@@ -568,7 +671,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
goto out;
}
- blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
+ blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
if (!blocksize) {
printk(KERN_ERR "NILFS: unable to set blocksize\n");
err = -EINVAL;
@@ -582,7 +685,18 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
if (err)
goto failed_sbh;
+ err = nilfs_check_feature_compatibility(sb, sbp);
+ if (err)
+ goto failed_sbh;
+
blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
+ if (blocksize < NILFS_MIN_BLOCK_SIZE ||
+ blocksize > NILFS_MAX_BLOCK_SIZE) {
+ printk(KERN_ERR "NILFS: couldn't mount because of unsupported "
+ "filesystem blocksize %d\n", blocksize);
+ err = -EINVAL;
+ goto failed_sbh;
+ }
if (sb->s_blocksize != blocksize) {
int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
@@ -604,6 +718,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
when reloading fails. */
}
nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
+ nilfs->ns_blocksize = blocksize;
err = nilfs_store_disk_layout(nilfs, sbp);
if (err)
@@ -616,23 +731,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
nilfs->ns_bdi = bdi ? : &default_backing_dev_info;
- /* Finding last segment */
- nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
- nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
- nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);
-
- nilfs->ns_seg_seq = nilfs->ns_last_seq;
- nilfs->ns_segnum =
- nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
- nilfs->ns_cno = nilfs->ns_last_cno + 1;
- if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
- printk(KERN_ERR "NILFS invalid last segment number.\n");
- err = -EINVAL;
+ err = nilfs_store_log_cursor(nilfs, sbp);
+ if (err)
goto failed_sbh;
- }
- /* Dummy values */
- nilfs->ns_free_segments_count =
- nilfs->ns_nsegments - (nilfs->ns_segnum + 1);
/* Initialize gcinode cache */
err = nilfs_init_gccache(nilfs);
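
nilfs_store_log_cursor() seeds the scanning state from a superblock: the next checkpoint number is last_cno + 1, and the resume segment is derived from the block number of the last partial segment. A toy version of the derivation, approximating nilfs_get_segnum_of_block() as plain division (the real code accounts for the filesystem's segment geometry):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t blocks_per_segment = 2048;  /* demo geometry */
	uint64_t nsegments = 512;
	uint64_t last_pseg = 10000;          /* from sbp->s_last_pseg */
	uint64_t last_cno = 41;              /* from sbp->s_last_cno */

	uint64_t segnum = last_pseg / blocks_per_segment;  /* approximation */
	uint64_t cno = last_cno + 1;

	if (segnum >= nsegments) {
		fprintf(stderr, "invalid last segment number\n");
		return 1;
	}
	printf("resume scan at segment %llu, next checkpoint %llu\n",
	       (unsigned long long)segnum, (unsigned long long)cno);
	return 0;
}
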
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 1ab97453369..f785a7b0ab9 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -57,7 +57,8 @@ enum {
* @ns_current: back pointer to current mount
* @ns_sbh: buffer heads of on-disk super blocks
* @ns_sbp: pointers to super block data
- * @ns_sbwtime: previous write time of super blocks
+ * @ns_sbwtime: previous write time of super block
+ * @ns_sbwcount: write count of super block
* @ns_sbsize: size of valid data in super block
* @ns_supers: list of nilfs super block structs
* @ns_seg_seq: segment sequence counter
@@ -73,7 +74,7 @@ enum {
* @ns_last_seq: sequence value of the latest segment
* @ns_last_cno: checkpoint number of the latest segment
* @ns_prot_seq: least sequence number of segments which must not be reclaimed
- * @ns_free_segments_count: counter of free segments
+ * @ns_prev_seq: base sequence number used to decide if advance log cursor
* @ns_segctor_sem: segment constructor semaphore
* @ns_dat: DAT file inode
* @ns_cpfile: checkpoint file inode
@@ -82,6 +83,7 @@ enum {
* @ns_gc_inodes: dummy inodes to keep live blocks
* @ns_gc_inodes_h: hash list to keep dummy inode holding live blocks
* @ns_blocksize_bits: bit length of block size
+ * @ns_blocksize: block size
* @ns_nsegments: number of segments in filesystem
* @ns_blocks_per_segment: number of blocks per segment
* @ns_r_segments_percentage: reserved segments percentage
@@ -119,7 +121,8 @@ struct the_nilfs {
*/
struct buffer_head *ns_sbh[2];
struct nilfs_super_block *ns_sbp[2];
- time_t ns_sbwtime[2];
+ time_t ns_sbwtime;
+ unsigned ns_sbwcount;
unsigned ns_sbsize;
unsigned ns_mount_state;
@@ -149,7 +152,7 @@ struct the_nilfs {
u64 ns_last_seq;
__u64 ns_last_cno;
u64 ns_prot_seq;
- unsigned long ns_free_segments_count;
+ u64 ns_prev_seq;
struct rw_semaphore ns_segctor_sem;
@@ -168,6 +171,7 @@ struct the_nilfs {
/* Disk layout information (static) */
unsigned int ns_blocksize_bits;
+ unsigned int ns_blocksize;
unsigned long ns_nsegments;
unsigned long ns_blocks_per_segment;
unsigned long ns_r_segments_percentage;
@@ -203,20 +207,17 @@ THE_NILFS_FNS(SB_DIRTY, sb_dirty)
/* Minimum interval of periodical update of superblocks (in seconds) */
#define NILFS_SB_FREQ 10
-#define NILFS_ALTSB_FREQ 60 /* spare superblock */
static inline int nilfs_sb_need_update(struct the_nilfs *nilfs)
{
u64 t = get_seconds();
- return t < nilfs->ns_sbwtime[0] ||
- t > nilfs->ns_sbwtime[0] + NILFS_SB_FREQ;
+ return t < nilfs->ns_sbwtime || t > nilfs->ns_sbwtime + NILFS_SB_FREQ;
}
-static inline int nilfs_altsb_need_update(struct the_nilfs *nilfs)
+static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs)
{
- u64 t = get_seconds();
- struct nilfs_super_block **sbp = nilfs->ns_sbp;
- return sbp[1] && t > nilfs->ns_sbwtime[1] + NILFS_ALTSB_FREQ;
+ int flip_bits = nilfs->ns_sbwcount & 0x0FL;
+ return (flip_bits != 0x08 && flip_bits != 0x0F);
}
void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
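
nilfs_sb_will_flip() replaces the old time-based spare-superblock refresh with a counter-driven schedule: the low four bits of ns_sbwcount decide whether nilfs_prepare_super() swaps the superblock pair before the next write, and only counts whose low nibble is 0x08 or 0x0F stay put. A quick user-space check of the schedule:

#include <stdio.h>

/* mirrors nilfs_sb_will_flip() from the hunk above */
static int will_flip(unsigned int sbwcount)
{
	int flip_bits = sbwcount & 0x0F;

	return flip_bits != 0x08 && flip_bits != 0x0F;
}

int main(void)
{
	unsigned int c;

	for (c = 0; c < 16; c++)
		printf("write %2u -> %s\n", c, will_flip(c) ? "flip" : "stay");
	return 0;
}
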
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index dffbb0911d0..22c629eedd8 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -3,3 +3,4 @@ config FSNOTIFY
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
+source "fs/notify/fanotify/Kconfig"
diff --git a/fs/notify/Makefile b/fs/notify/Makefile
index 0922cc826c4..ae5f33a6d86 100644
--- a/fs/notify/Makefile
+++ b/fs/notify/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o
+obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o \
+ mark.o vfsmount_mark.o
obj-y += dnotify/
obj-y += inotify/
+obj-y += fanotify/
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 7e54e52964d..3344bdd5506 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -29,17 +29,17 @@
int dir_notify_enable __read_mostly = 1;
static struct kmem_cache *dnotify_struct_cache __read_mostly;
-static struct kmem_cache *dnotify_mark_entry_cache __read_mostly;
+static struct kmem_cache *dnotify_mark_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;
static DEFINE_MUTEX(dnotify_mark_mutex);
/*
- * dnotify will attach one of these to each inode (i_fsnotify_mark_entries) which
+ * dnotify will attach one of these to each inode (i_fsnotify_marks) which
* is being watched by dnotify. If multiple userspace applications are watching
* the same directory with dnotify their information is chained in dn
*/
-struct dnotify_mark_entry {
- struct fsnotify_mark_entry fsn_entry;
+struct dnotify_mark {
+ struct fsnotify_mark fsn_mark;
struct dnotify_struct *dn;
};
@@ -51,27 +51,27 @@ struct dnotify_mark_entry {
* it calls the fsnotify function so it can update the set of all events relevant
* to this inode.
*/
-static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
+static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
{
__u32 new_mask, old_mask;
struct dnotify_struct *dn;
- struct dnotify_mark_entry *dnentry = container_of(entry,
- struct dnotify_mark_entry,
- fsn_entry);
+ struct dnotify_mark *dn_mark = container_of(fsn_mark,
+ struct dnotify_mark,
+ fsn_mark);
- assert_spin_locked(&entry->lock);
+ assert_spin_locked(&fsn_mark->lock);
- old_mask = entry->mask;
+ old_mask = fsn_mark->mask;
new_mask = 0;
- for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next)
+ for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
- entry->mask = new_mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, new_mask);
if (old_mask == new_mask)
return;
- if (entry->inode)
- fsnotify_recalc_inode_mask(entry->inode);
+ if (fsn_mark->i.inode)
+ fsnotify_recalc_inode_mask(fsn_mark->i.inode);
}
/*
@@ -83,29 +83,25 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
* events.
*/
static int dnotify_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
struct fsnotify_event *event)
{
- struct fsnotify_mark_entry *entry = NULL;
- struct dnotify_mark_entry *dnentry;
+ struct dnotify_mark *dn_mark;
struct inode *to_tell;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
struct fown_struct *fown;
__u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD;
- to_tell = event->to_tell;
+ BUG_ON(vfsmount_mark);
- spin_lock(&to_tell->i_lock);
- entry = fsnotify_find_mark_entry(group, to_tell);
- spin_unlock(&to_tell->i_lock);
+ to_tell = event->to_tell;
- /* unlikely since we alreay passed dnotify_should_send_event() */
- if (unlikely(!entry))
- return 0;
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+ dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark);
- spin_lock(&entry->lock);
- prev = &dnentry->dn;
+ spin_lock(&inode_mark->lock);
+ prev = &dn_mark->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_mask & test_mask) == 0) {
prev = &dn->dn_next;
@@ -118,12 +114,11 @@ static int dnotify_handle_event(struct fsnotify_group *group,
else {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(inode_mark);
}
}
- spin_unlock(&entry->lock);
- fsnotify_put_mark(entry);
+ spin_unlock(&inode_mark->lock);
return 0;
}
@@ -133,44 +128,27 @@ static int dnotify_handle_event(struct fsnotify_group *group,
* userspace notification for that pair.
*/
static bool dnotify_should_send_event(struct fsnotify_group *group,
- struct inode *inode, __u32 mask)
+ struct inode *inode,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ __u32 mask, void *data, int data_type)
{
- struct fsnotify_mark_entry *entry;
- bool send;
-
- /* !dir_notify_enable should never get here, don't waste time checking
- if (!dir_notify_enable)
- return 0; */
-
/* not a dir, dnotify doesn't care */
if (!S_ISDIR(inode->i_mode))
return false;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
-
- /* no mark means no dnotify watch */
- if (!entry)
- return false;
-
- mask = (mask & ~FS_EVENT_ON_CHILD);
- send = (mask & entry->mask);
-
- fsnotify_put_mark(entry); /* matches fsnotify_find_mark_entry */
-
- return send;
+ return true;
}
-static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
+static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
{
- struct dnotify_mark_entry *dnentry = container_of(entry,
- struct dnotify_mark_entry,
- fsn_entry);
+ struct dnotify_mark *dn_mark = container_of(fsn_mark,
+ struct dnotify_mark,
+ fsn_mark);
- BUG_ON(dnentry->dn);
+ BUG_ON(dn_mark->dn);
- kmem_cache_free(dnotify_mark_entry_cache, dnentry);
+ kmem_cache_free(dnotify_mark_cache, dn_mark);
}
static struct fsnotify_ops dnotify_fsnotify_ops = {
@@ -183,15 +161,15 @@ static struct fsnotify_ops dnotify_fsnotify_ops = {
/*
* Called every time a file is closed. Looks first for a dnotify mark on the
- * inode. If one is found run all of the ->dn entries attached to that
+ * inode. If one is found run all of the ->dn structures attached to that
* mark for one relevant to this process closing the file and remove that
* dnotify_struct. If that was the last dnotify_struct also remove the
- * fsnotify_mark_entry.
+ * fsnotify_mark.
*/
void dnotify_flush(struct file *filp, fl_owner_t id)
{
- struct fsnotify_mark_entry *entry;
- struct dnotify_mark_entry *dnentry;
+ struct fsnotify_mark *fsn_mark;
+ struct dnotify_mark *dn_mark;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
struct inode *inode;
@@ -200,38 +178,34 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
if (!S_ISDIR(inode->i_mode))
return;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(dnotify_group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
+ if (!fsn_mark)
return;
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
mutex_lock(&dnotify_mark_mutex);
- spin_lock(&entry->lock);
- prev = &dnentry->dn;
+ spin_lock(&fsn_mark->lock);
+ prev = &dn_mark->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(fsn_mark);
break;
}
prev = &dn->dn_next;
}
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
/* nothing else could have found us thanks to the dnotify_mark_mutex */
- if (dnentry->dn == NULL)
- fsnotify_destroy_mark_by_entry(entry);
-
- fsnotify_recalc_group_mask(dnotify_group);
+ if (dn_mark->dn == NULL)
+ fsnotify_destroy_mark(fsn_mark);
mutex_unlock(&dnotify_mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
}
/* this conversion is done only at watch creation */
@@ -259,16 +233,16 @@ static __u32 convert_arg(unsigned long arg)
/*
* If multiple processes watch the same inode with dnotify there is only one
- * dnotify mark in inode->i_fsnotify_mark_entries but we chain a dnotify_struct
+ * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct
* onto that mark. This function either attaches the new dnotify_struct onto
 * that list, or it |= the mask onto an existing dnotify_struct.
*/
-static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry,
+static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
fl_owner_t id, int fd, struct file *filp, __u32 mask)
{
struct dnotify_struct *odn;
- odn = dnentry->dn;
+ odn = dn_mark->dn;
while (odn != NULL) {
/* adding more events to existing dnotify_struct? */
if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
@@ -283,8 +257,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
dn->dn_fd = fd;
dn->dn_filp = filp;
dn->dn_owner = id;
- dn->dn_next = dnentry->dn;
- dnentry->dn = dn;
+ dn->dn_next = dn_mark->dn;
+ dn_mark->dn = dn;
return 0;
}
@@ -296,8 +270,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
*/
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
- struct dnotify_mark_entry *new_dnentry, *dnentry;
- struct fsnotify_mark_entry *new_entry, *entry;
+ struct dnotify_mark *new_dn_mark, *dn_mark;
+ struct fsnotify_mark *new_fsn_mark, *fsn_mark;
struct dnotify_struct *dn;
struct inode *inode;
fl_owner_t id = current->files;
@@ -306,7 +280,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
__u32 mask;
/* we use these to tell if we need to kfree */
- new_entry = NULL;
+ new_fsn_mark = NULL;
dn = NULL;
if (!dir_notify_enable) {
@@ -336,8 +310,8 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
}
/* new fsnotify mark, we expect most fcntl calls to add a new mark */
- new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL);
- if (!new_dnentry) {
+ new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
+ if (!new_dn_mark) {
error = -ENOMEM;
goto out_err;
}
@@ -345,29 +319,27 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
/* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
mask = convert_arg(arg);
- /* set up the new_entry and new_dnentry */
- new_entry = &new_dnentry->fsn_entry;
- fsnotify_init_mark(new_entry, dnotify_free_mark);
- new_entry->mask = mask;
- new_dnentry->dn = NULL;
+ /* set up the new_fsn_mark and new_dn_mark */
+ new_fsn_mark = &new_dn_mark->fsn_mark;
+ fsnotify_init_mark(new_fsn_mark, dnotify_free_mark);
+ new_fsn_mark->mask = mask;
+ new_dn_mark->dn = NULL;
/* this is needed to prevent the fcntl/close race described below */
mutex_lock(&dnotify_mark_mutex);
- /* add the new_entry or find an old one. */
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(dnotify_group, inode);
- spin_unlock(&inode->i_lock);
- if (entry) {
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
- spin_lock(&entry->lock);
+ /* add the new_fsn_mark or find an old one. */
+ fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
+ if (fsn_mark) {
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
+ spin_lock(&fsn_mark->lock);
} else {
- fsnotify_add_mark(new_entry, dnotify_group, inode);
- spin_lock(&new_entry->lock);
- entry = new_entry;
- dnentry = new_dnentry;
- /* we used new_entry, so don't free it */
- new_entry = NULL;
+ fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, NULL, 0);
+ spin_lock(&new_fsn_mark->lock);
+ fsn_mark = new_fsn_mark;
+ dn_mark = new_dn_mark;
+ /* we used new_fsn_mark, so don't free it */
+ new_fsn_mark = NULL;
}
rcu_read_lock();
@@ -376,17 +348,17 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
/* if (f != filp) means that we lost a race and another task/thread
* actually closed the fd we are still playing with before we grabbed
- * the dnotify_mark_mutex and entry->lock. Since closing the fd is the
- * only time we clean up the mark entries we need to get our mark off
+ * the dnotify_mark_mutex and fsn_mark->lock. Since closing the fd is the
+ * only time we clean up the marks we need to get our mark off
* the list. */
if (f != filp) {
/* if we added ourselves, shoot ourselves, it's possible that
- * the flush actually did shoot this entry. That's fine too
+ * the flush actually did shoot this fsn_mark. That's fine too
* since multiple calls to destroy_mark is perfectly safe, if
- * we found a dnentry already attached to the inode, just sod
+ * we found a dn_mark already attached to the inode, just sod
* off silently as the flush at close time dealt with it.
*/
- if (dnentry == new_dnentry)
+ if (dn_mark == new_dn_mark)
destroy = 1;
goto out;
}
@@ -394,13 +366,13 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
if (error) {
/* if we added, we must shoot */
- if (dnentry == new_dnentry)
+ if (dn_mark == new_dn_mark)
destroy = 1;
goto out;
}
- error = attach_dn(dn, dnentry, id, fd, filp, mask);
- /* !error means that we attached the dn to the dnentry, so don't free it */
+ error = attach_dn(dn, dn_mark, id, fd, filp, mask);
+ /* !error means that we attached the dn to the dn_mark, so don't free it */
if (!error)
dn = NULL;
/* -EEXIST means that we didn't add this new dn and used an old one.
@@ -408,20 +380,18 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
else if (error == -EEXIST)
error = 0;
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(fsn_mark);
out:
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
if (destroy)
- fsnotify_destroy_mark_by_entry(entry);
-
- fsnotify_recalc_group_mask(dnotify_group);
+ fsnotify_destroy_mark(fsn_mark);
mutex_unlock(&dnotify_mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
out_err:
- if (new_entry)
- fsnotify_put_mark(new_entry);
+ if (new_fsn_mark)
+ fsnotify_put_mark(new_fsn_mark);
if (dn)
kmem_cache_free(dnotify_struct_cache, dn);
return error;
@@ -430,10 +400,9 @@ out_err:
static int __init dnotify_init(void)
{
dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
- dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC);
+ dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC);
- dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM,
- 0, &dnotify_fsnotify_ops);
+ dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
if (IS_ERR(dnotify_group))
panic("unable to allocate fsnotify group for dnotify\n");
return 0;
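
dnotify_recalc_inode_mask() now publishes the result through fsnotify_set_mark_mask_locked() instead of assigning entry->mask directly, but the recomputation itself is unchanged: walk the chained dnotify_structs and OR their masks, dropping the multishot bit. The walk in isolation, using toy types and a stand-in flag value:

#include <stddef.h>
#include <stdio.h>

#define DN_MULTISHOT 0x80000000u  /* stand-in for FS_DN_MULTISHOT */

struct dn {                       /* stand-in for struct dnotify_struct */
	unsigned int mask;
	struct dn *next;
};

static unsigned int recalc_mask(const struct dn *head)
{
	const struct dn *p;
	unsigned int mask = 0;

	for (p = head; p != NULL; p = p->next)
		mask |= p->mask & ~DN_MULTISHOT;
	return mask;
}

int main(void)
{
	struct dn b = { 0x2 | DN_MULTISHOT, NULL };
	struct dn a = { 0x1, &b };

	printf("combined mask: %#x\n", recalc_mask(&a));
	return 0;
}
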
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
new file mode 100644
index 00000000000..3ac36b7bf6b
--- /dev/null
+++ b/fs/notify/fanotify/Kconfig
@@ -0,0 +1,26 @@
+config FANOTIFY
+ bool "Filesystem wide access notification"
+ select FSNOTIFY
+ select ANON_INODES
+ default n
+ ---help---
+	  Say Y here to enable fanotify support. fanotify is a file access
+	  notification system which differs from inotify in that it sends
+	  an open file descriptor to the userspace listener along with
+ the event.
+
+ If unsure, say Y.
+
+config FANOTIFY_ACCESS_PERMISSIONS
+ bool "fanotify permissions checking"
+ depends on FANOTIFY
+ depends on SECURITY
+ default n
+ ---help---
+	  Say Y here if you want fanotify listeners to be able to make permission
+	  decisions concerning filesystem events. This is used by some fanotify
+	  listeners which need to scan files before allowing the system to access
+	  them, such as some anti-malware vendors' scanners and some hierarchical
+	  storage management systems.
+
+ If unsure, say N.
diff --git a/fs/notify/fanotify/Makefile b/fs/notify/fanotify/Makefile
new file mode 100644
index 00000000000..0999213e7e6
--- /dev/null
+++ b/fs/notify/fanotify/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_FANOTIFY) += fanotify.o fanotify_user.o
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
new file mode 100644
index 00000000000..756566fe844
--- /dev/null
+++ b/fs/notify/fanotify/fanotify.c
@@ -0,0 +1,212 @@
+#include <linux/fanotify.h>
+#include <linux/fdtable.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h> /* UINT_MAX */
+#include <linux/mount.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
+{
+ pr_debug("%s: old=%p new=%p\n", __func__, old, new);
+
+ if (old->to_tell == new->to_tell &&
+ old->data_type == new->data_type &&
+ old->tgid == new->tgid) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
+ case (FSNOTIFY_EVENT_NONE):
+ return true;
+ default:
+ BUG();
+ };
+ }
+ return false;
+}
+
+/* and the list better be locked by something too! */
+static struct fsnotify_event *fanotify_merge(struct list_head *list,
+ struct fsnotify_event *event)
+{
+ struct fsnotify_event_holder *test_holder;
+ struct fsnotify_event *test_event = NULL;
+ struct fsnotify_event *new_event;
+
+ pr_debug("%s: list=%p event=%p\n", __func__, list, event);
+
+
+ list_for_each_entry_reverse(test_holder, list, event_list) {
+ if (should_merge(test_holder->event, event)) {
+ test_event = test_holder->event;
+ break;
+ }
+ }
+
+ if (!test_event)
+ return NULL;
+
+ fsnotify_get_event(test_event);
+
+ /* if they are exactly the same we are done */
+ if (test_event->mask == event->mask)
+ return test_event;
+
+ /*
+ * if the refcnt == 2 this is the only queue
+ * for this event and so we can update the mask
+ * in place.
+ */
+ if (atomic_read(&test_event->refcnt) == 2) {
+ test_event->mask |= event->mask;
+ return test_event;
+ }
+
+ new_event = fsnotify_clone_event(test_event);
+
+ /* done with test_event */
+ fsnotify_put_event(test_event);
+
+ /* couldn't allocate memory, merge was not possible */
+ if (unlikely(!new_event))
+ return ERR_PTR(-ENOMEM);
+
+ /* build new event and replace it on the list */
+ new_event->mask = (test_event->mask | event->mask);
+ fsnotify_replace_event(test_holder, new_event);
+
+ /* we hold a reference on new_event from clone_event */
+ return new_event;
+}
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+static int fanotify_get_response_from_access(struct fsnotify_group *group,
+ struct fsnotify_event *event)
+{
+ int ret;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ wait_event(group->fanotify_data.access_waitq, event->response);
+
+ /* userspace responded, convert to something usable */
+ spin_lock(&event->lock);
+ switch (event->response) {
+ case FAN_ALLOW:
+ ret = 0;
+ break;
+ case FAN_DENY:
+ default:
+ ret = -EPERM;
+ }
+ event->response = 0;
+ spin_unlock(&event->lock);
+
+ pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
+ group, event, ret);
+
+ return ret;
+}
+#endif
+
+static int fanotify_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *fanotify_mark,
+ struct fsnotify_event *event)
+{
+ int ret = 0;
+ struct fsnotify_event *notify_event = NULL;
+
+ BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
+ BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
+ BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
+ BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
+ BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
+ BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
+ BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
+ BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
+ BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ notify_event = fsnotify_add_notify_event(group, event, NULL, fanotify_merge);
+ if (IS_ERR(notify_event))
+ return PTR_ERR(notify_event);
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ if (event->mask & FAN_ALL_PERM_EVENTS) {
+ /* if we merged we need to wait on the new event */
+ if (notify_event)
+ event = notify_event;
+ ret = fanotify_get_response_from_access(group, event);
+ }
+#endif
+
+ if (notify_event)
+ fsnotify_put_event(notify_event);
+
+ return ret;
+}
+
+static bool fanotify_should_send_event(struct fsnotify_group *group,
+ struct inode *to_tell,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmnt_mark,
+ __u32 event_mask, void *data, int data_type)
+{
+ __u32 marks_mask, marks_ignored_mask;
+
+ pr_debug("%s: group=%p to_tell=%p inode_mark=%p vfsmnt_mark=%p "
+ "mask=%x data=%p data_type=%d\n", __func__, group, to_tell,
+ inode_mark, vfsmnt_mark, event_mask, data, data_type);
+
+ pr_debug("%s: group=%p vfsmount_mark=%p inode_mark=%p mask=%x\n",
+ __func__, group, vfsmnt_mark, inode_mark, event_mask);
+
+ /* sorry, fanotify only gives a damn about files and dirs */
+ if (!S_ISREG(to_tell->i_mode) &&
+ !S_ISDIR(to_tell->i_mode))
+ return false;
+
+ /* if we don't have enough info to send an event to userspace say no */
+ if (data_type != FSNOTIFY_EVENT_PATH)
+ return false;
+
+ if (inode_mark && vfsmnt_mark) {
+ marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
+ marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
+ } else if (inode_mark) {
+ /*
+ * if the event is for a child and this inode doesn't care about
+ * events on the child, don't send it!
+ */
+ if ((event_mask & FS_EVENT_ON_CHILD) &&
+ !(inode_mark->mask & FS_EVENT_ON_CHILD))
+ return false;
+ marks_mask = inode_mark->mask;
+ marks_ignored_mask = inode_mark->ignored_mask;
+ } else if (vfsmnt_mark) {
+ marks_mask = vfsmnt_mark->mask;
+ marks_ignored_mask = vfsmnt_mark->ignored_mask;
+ } else {
+ BUG();
+ }
+
+ if (event_mask & marks_mask & ~marks_ignored_mask)
+ return true;
+
+ return false;
+}
+
+const struct fsnotify_ops fanotify_fsnotify_ops = {
+ .handle_event = fanotify_handle_event,
+ .should_send_event = fanotify_should_send_event,
+ .free_group_priv = NULL,
+ .free_event_priv = NULL,
+ .freeing_mark = NULL,
+};
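
fanotify_merge() coalesces back-to-back events for the same file and thread group by OR-ing their masks; if the queued event is referenced only by this queue (refcount 2) it is updated in place, otherwise it is cloned and replaced. A minimal sketch of the in-place fast path in isolation, with a toy event type standing in for struct fsnotify_event:

#include <stdio.h>

struct toy_event {          /* stand-in for struct fsnotify_event */
	unsigned int mask;
	int refcnt;
};

/* merge new_mask into q when safe, mirroring the in-place fast path */
static int try_merge(struct toy_event *q, unsigned int new_mask)
{
	if (q->mask == new_mask)
		return 1;        /* identical: nothing to do */
	if (q->refcnt == 2) {    /* only this queue holds the event */
		q->mask |= new_mask;
		return 1;
	}
	return 0;                /* caller must clone and replace instead */
}

int main(void)
{
	struct toy_event ev = { 0x1 /* FAN_ACCESS-like */, 2 };

	if (!try_merge(&ev, 0x2 /* FAN_MODIFY-like */))
		puts("would clone and replace");
	printf("merged mask: %#x\n", ev.mask);
	return 0;
}
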
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
new file mode 100644
index 00000000000..032b837fcd1
--- /dev/null
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -0,0 +1,760 @@
+#include <linux/fanotify.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/init.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <asm/ioctls.h>
+
+extern const struct fsnotify_ops fanotify_fsnotify_ops;
+
+static struct kmem_cache *fanotify_mark_cache __read_mostly;
+static struct kmem_cache *fanotify_response_event_cache __read_mostly;
+
+struct fanotify_response_event {
+ struct list_head list;
+ __s32 fd;
+ struct fsnotify_event *event;
+};
+
+/*
+ * Get an fsnotify notification event if one exists and is small
+ * enough to fit in "count". Return an error pointer if the count
+ * is not large enough.
+ *
+ * Called with the group->notification_mutex held.
+ */
+static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
+ size_t count)
+{
+ BUG_ON(!mutex_is_locked(&group->notification_mutex));
+
+ pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
+
+ if (fsnotify_notify_queue_is_empty(group))
+ return NULL;
+
+ if (FAN_EVENT_METADATA_LEN > count)
+ return ERR_PTR(-EINVAL);
+
+	/* we held the notification_mutex the whole time, so the queue
+	 * cannot have changed since the emptiness check above */
+ return fsnotify_remove_notify_event(group);
+}
+
+static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+ int client_fd;
+ struct dentry *dentry;
+ struct vfsmount *mnt;
+ struct file *new_file;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ client_fd = get_unused_fd();
+ if (client_fd < 0)
+ return client_fd;
+
+ if (event->data_type != FSNOTIFY_EVENT_PATH) {
+ WARN_ON(1);
+ put_unused_fd(client_fd);
+ return -EINVAL;
+ }
+
+ /*
+	 * we need a new file handle for the userspace program so it can
+	 * read even if the file was originally opened O_WRONLY.
+ */
+ dentry = dget(event->path.dentry);
+ mnt = mntget(event->path.mnt);
+	/* it's possible this event was an overflow event; in that case dentry
+	 * and mnt are NULL. that's fine, just don't call dentry_open() */
+ if (dentry && mnt)
+ new_file = dentry_open(dentry, mnt,
+ group->fanotify_data.f_flags | FMODE_NONOTIFY,
+ current_cred());
+ else
+ new_file = ERR_PTR(-EOVERFLOW);
+ if (IS_ERR(new_file)) {
+ /*
+		 * we still send an event even if we can't open the file. this
+		 * can happen when, say, tasks are gone and we try to open their
+		 * /proc files, or when we try to open a WRONLY file like in
+		 * sysfs. we just send the errno to userspace since there isn't
+		 * much else we can do.
+ */
+ put_unused_fd(client_fd);
+ client_fd = PTR_ERR(new_file);
+ } else {
+ fd_install(client_fd, new_file);
+ }
+
+ return client_fd;
+}
+
+static ssize_t fill_event_metadata(struct fsnotify_group *group,
+ struct fanotify_event_metadata *metadata,
+ struct fsnotify_event *event)
+{
+ pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
+ group, metadata, event);
+
+ metadata->event_len = FAN_EVENT_METADATA_LEN;
+ metadata->vers = FANOTIFY_METADATA_VERSION;
+ metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
+ metadata->pid = pid_vnr(event->tgid);
+ metadata->fd = create_fd(group, event);
+
+ return metadata->fd;
+}
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
+ __s32 fd)
+{
+ struct fanotify_response_event *re, *return_re = NULL;
+
+ mutex_lock(&group->fanotify_data.access_mutex);
+ list_for_each_entry(re, &group->fanotify_data.access_list, list) {
+ if (re->fd != fd)
+ continue;
+
+ list_del_init(&re->list);
+ return_re = re;
+ break;
+ }
+ mutex_unlock(&group->fanotify_data.access_mutex);
+
+ pr_debug("%s: found return_re=%p\n", __func__, return_re);
+
+ return return_re;
+}
+
+static int process_access_response(struct fsnotify_group *group,
+ struct fanotify_response *response_struct)
+{
+ struct fanotify_response_event *re;
+ __s32 fd = response_struct->fd;
+ __u32 response = response_struct->response;
+
+ pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
+ fd, response);
+ /*
+	 * make sure the response is valid; if it is invalid we do nothing and
+	 * either userspace can send a valid response or we will clean it up
+	 * after the timeout
+ */
+ switch (response) {
+ case FAN_ALLOW:
+ case FAN_DENY:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (fd < 0)
+ return -EINVAL;
+
+ re = dequeue_re(group, fd);
+ if (!re)
+ return -ENOENT;
+
+ re->event->response = response;
+
+ wake_up(&group->fanotify_data.access_waitq);
+
+ kmem_cache_free(fanotify_response_event_cache, re);
+
+ return 0;
+}
+
+static int prepare_for_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ struct fanotify_response_event *re;
+
+ if (!(event->mask & FAN_ALL_PERM_EVENTS))
+ return 0;
+
+ re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
+ if (!re)
+ return -ENOMEM;
+
+ re->event = event;
+ re->fd = fd;
+
+ mutex_lock(&group->fanotify_data.access_mutex);
+ list_add_tail(&re->list, &group->fanotify_data.access_list);
+ mutex_unlock(&group->fanotify_data.access_mutex);
+
+ return 0;
+}
+
+static void remove_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ struct fanotify_response_event *re;
+
+ if (!(event->mask & FAN_ALL_PERM_EVENTS))
+ return;
+
+ re = dequeue_re(group, fd);
+ if (!re)
+ return;
+
+ BUG_ON(re->event != event);
+
+ kmem_cache_free(fanotify_response_event_cache, re);
+
+ return;
+}
+#else
+static int prepare_for_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ return 0;
+}
+
+static void remove_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ return;
+}
+#endif
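
Userspace answers a permission event by writing a struct fanotify_response back into the fanotify file descriptor, which lands in process_access_response() above. A sketch under the assumption that fanotify_fd came from fanotify_init() and event_fd is the fd field of a just-read FAN_OPEN_PERM or FAN_ACCESS_PERM event:

#include <stdint.h>
#include <unistd.h>
#include <linux/fanotify.h>

/* allow or deny one pending permission event on fanotify_fd */
static int reply(int fanotify_fd, int32_t event_fd, int allow)
{
	struct fanotify_response resp = {
		.fd = event_fd,
		.response = allow ? FAN_ALLOW : FAN_DENY,
	};

	return write(fanotify_fd, &resp, sizeof(resp)) == (ssize_t)sizeof(resp)
		? 0 : -1;
}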
+
+static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ char __user *buf)
+{
+ struct fanotify_event_metadata fanotify_event_metadata;
+ int fd, ret;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ fd = fill_event_metadata(group, &fanotify_event_metadata, event);
+ if (fd < 0)
+ return fd;
+
+ ret = prepare_for_access_response(group, event, fd);
+ if (ret)
+ goto out_close_fd;
+
+ ret = -EFAULT;
+ if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
+ goto out_kill_access_response;
+
+ return FAN_EVENT_METADATA_LEN;
+
+out_kill_access_response:
+ remove_access_response(group, event, fd);
+out_close_fd:
+ sys_close(fd);
+ return ret;
+}
+
+/* fanotify userspace file descriptor functions */
+static unsigned int fanotify_poll(struct file *file, poll_table *wait)
+{
+ struct fsnotify_group *group = file->private_data;
+ int ret = 0;
+
+ poll_wait(file, &group->notification_waitq, wait);
+ mutex_lock(&group->notification_mutex);
+ if (!fsnotify_notify_queue_is_empty(group))
+ ret = POLLIN | POLLRDNORM;
+ mutex_unlock(&group->notification_mutex);
+
+ return ret;
+}
+
+static ssize_t fanotify_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct fsnotify_group *group;
+ struct fsnotify_event *kevent;
+ char __user *start;
+ int ret;
+ DEFINE_WAIT(wait);
+
+ start = buf;
+ group = file->private_data;
+
+ pr_debug("%s: group=%p\n", __func__, group);
+
+ while (1) {
+ prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&group->notification_mutex);
+ kevent = get_one_event(group, count);
+ mutex_unlock(&group->notification_mutex);
+
+ if (kevent) {
+ ret = PTR_ERR(kevent);
+ if (IS_ERR(kevent))
+ break;
+ ret = copy_event_to_user(group, kevent, buf);
+ fsnotify_put_event(kevent);
+ if (ret < 0)
+ break;
+ buf += ret;
+ count -= ret;
+ continue;
+ }
+
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ ret = -EINTR;
+ if (signal_pending(current))
+ break;
+
+ if (start != buf)
+ break;
+
+ schedule();
+ }
+
+ finish_wait(&group->notification_waitq, &wait);
+ if (start != buf && ret != -EFAULT)
+ ret = buf - start;
+ return ret;
+}
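
On the read side, every record copied out in this version of the interface is exactly FAN_EVENT_METADATA_LEN bytes, so a reader can walk a buffer in fixed strides. A userspace sketch under that assumption; drain_events() is a hypothetical name and error handling is minimal:

#include <stdio.h>
#include <unistd.h>
#include <linux/fanotify.h>

/* read one batch of events from fanotify_fd and print them */
static void drain_events(int fanotify_fd)
{
	char buf[4096], *p;
	ssize_t len = read(fanotify_fd, buf, sizeof(buf));

	for (p = buf; len > 0 && p + FAN_EVENT_METADATA_LEN <= buf + len;
	     p += FAN_EVENT_METADATA_LEN) {
		struct fanotify_event_metadata *m = (void *)p;

		printf("mask=%llx pid=%d fd=%d\n",
		       (unsigned long long)m->mask, (int)m->pid, (int)m->fd);
		if (m->fd >= 0)
			close(m->fd);	/* each event carries an open fd */
	}
}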
+
+static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
+{
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ struct fanotify_response response = { .fd = -1, .response = -1 };
+ struct fsnotify_group *group;
+ int ret;
+
+ group = file->private_data;
+
+ if (count > sizeof(response))
+ count = sizeof(response);
+
+ pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
+
+ if (copy_from_user(&response, buf, count))
+ return -EFAULT;
+
+ ret = process_access_response(group, &response);
+ if (ret < 0)
+ count = ret;
+
+ return count;
+#else
+ return -EINVAL;
+#endif
+}
+
+static int fanotify_release(struct inode *ignored, struct file *file)
+{
+ struct fsnotify_group *group = file->private_data;
+
+ pr_debug("%s: file=%p group=%p\n", __func__, file, group);
+
+ /* matches the fanotify_init->fsnotify_alloc_group */
+ fsnotify_put_group(group);
+
+ return 0;
+}
+
+static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct fsnotify_group *group;
+ struct fsnotify_event_holder *holder;
+ void __user *p;
+ int ret = -ENOTTY;
+ size_t send_len = 0;
+
+ group = file->private_data;
+
+ p = (void __user *) arg;
+
+ switch (cmd) {
+ case FIONREAD:
+ mutex_lock(&group->notification_mutex);
+ list_for_each_entry(holder, &group->notification_list, event_list)
+ send_len += FAN_EVENT_METADATA_LEN;
+ mutex_unlock(&group->notification_mutex);
+ ret = put_user(send_len, (int __user *) p);
+ break;
+ }
+
+ return ret;
+}
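
Because FIONREAD reports the queued event metadata in bytes, a caller can size its read buffer before reading. A small userspace sketch, again assuming fanotify_fd came from fanotify_init():

#include <sys/ioctl.h>

/* bytes of queued event metadata, or -1 on error; always a multiple of
 * FAN_EVENT_METADATA_LEN in this version of the interface */
static int pending_bytes(int fanotify_fd)
{
	int n = 0;

	return ioctl(fanotify_fd, FIONREAD, &n) < 0 ? -1 : n;
}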
+
+static const struct file_operations fanotify_fops = {
+ .poll = fanotify_poll,
+ .read = fanotify_read,
+ .write = fanotify_write,
+ .fasync = NULL,
+ .release = fanotify_release,
+ .unlocked_ioctl = fanotify_ioctl,
+ .compat_ioctl = fanotify_ioctl,
+};
+
+static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
+{
+ kmem_cache_free(fanotify_mark_cache, fsn_mark);
+}
+
+static int fanotify_find_path(int dfd, const char __user *filename,
+ struct path *path, unsigned int flags)
+{
+ int ret;
+
+ pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
+ dfd, filename, flags);
+
+ if (filename == NULL) {
+ struct file *file;
+ int fput_needed;
+
+ ret = -EBADF;
+ file = fget_light(dfd, &fput_needed);
+ if (!file)
+ goto out;
+
+ ret = -ENOTDIR;
+ if ((flags & FAN_MARK_ONLYDIR) &&
+ !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
+ fput_light(file, fput_needed);
+ goto out;
+ }
+
+ *path = file->f_path;
+ path_get(path);
+ fput_light(file, fput_needed);
+ } else {
+ unsigned int lookup_flags = 0;
+
+ if (!(flags & FAN_MARK_DONT_FOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+ if (flags & FAN_MARK_ONLYDIR)
+ lookup_flags |= LOOKUP_DIRECTORY;
+
+ ret = user_path_at(dfd, filename, lookup_flags, path);
+ if (ret)
+ goto out;
+ }
+
+ /* you can only watch an inode if you have read permissions on it */
+ ret = inode_permission(path->dentry->d_inode, MAY_READ);
+ if (ret)
+ path_put(path);
+out:
+ return ret;
+}
+
+static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
+ __u32 mask,
+ unsigned int flags)
+{
+ __u32 oldmask;
+
+ spin_lock(&fsn_mark->lock);
+ if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ oldmask = fsn_mark->mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
+ } else {
+ oldmask = fsn_mark->ignored_mask;
+ fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
+ }
+ spin_unlock(&fsn_mark->lock);
+
+ if (!(oldmask & ~mask))
+ fsnotify_destroy_mark(fsn_mark);
+
+ return mask & oldmask;
+}
+
+static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark = NULL;
+ __u32 removed;
+
+ fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
+ if (!fsn_mark)
+ return -ENOENT;
+
+ removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (removed & mnt->mnt_fsnotify_mask)
+ fsnotify_recalc_vfsmount_mask(mnt);
+
+ return 0;
+}
+
+static int fanotify_remove_inode_mark(struct fsnotify_group *group,
+ struct inode *inode, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark = NULL;
+ __u32 removed;
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
+ return -ENOENT;
+
+ removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+ /* matches the fsnotify_find_inode_mark() */
+ fsnotify_put_mark(fsn_mark);
+ if (removed & inode->i_fsnotify_mask)
+ fsnotify_recalc_inode_mask(inode);
+
+ return 0;
+}
+
+static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
+ __u32 mask,
+ unsigned int flags)
+{
+ __u32 oldmask;
+
+ spin_lock(&fsn_mark->lock);
+ if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ oldmask = fsn_mark->mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
+ } else {
+ oldmask = fsn_mark->ignored_mask;
+ fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
+ if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
+ fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
+ }
+ spin_unlock(&fsn_mark->lock);
+
+ return mask & ~oldmask;
+}
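
The return values of this helper and its remove-side twin are plain set differences on bitmasks: the add path reports the genuinely new bits, the remove path the bits that were actually set and cleared. A quick illustration with made-up mask values:

#include <stdio.h>

int main(void)
{
	unsigned int oldmask = 0x22, mask = 0x06;	/* hypothetical values */

	printf("added=%#x\n", mask & ~oldmask);		/* 0x4: bits not previously set */
	printf("removed=%#x\n", mask & oldmask);	/* 0x2: bits that were set */
	return 0;
}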
+
+static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark;
+ __u32 added;
+
+ fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
+ if (!fsn_mark) {
+ int ret;
+
+ fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
+ if (!fsn_mark)
+ return -ENOMEM;
+
+ fsnotify_init_mark(fsn_mark, fanotify_free_mark);
+ ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
+ if (ret) {
+ fanotify_free_mark(fsn_mark);
+ return ret;
+ }
+ }
+ added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (added & ~mnt->mnt_fsnotify_mask)
+ fsnotify_recalc_vfsmount_mask(mnt);
+
+ return 0;
+}
+
+static int fanotify_add_inode_mark(struct fsnotify_group *group,
+ struct inode *inode, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark;
+ __u32 added;
+
+ pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark) {
+ int ret;
+
+ fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
+ if (!fsn_mark)
+ return -ENOMEM;
+
+ fsnotify_init_mark(fsn_mark, fanotify_free_mark);
+ ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
+ if (ret) {
+ fanotify_free_mark(fsn_mark);
+ return ret;
+ }
+ }
+ added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (added & ~inode->i_fsnotify_mask)
+ fsnotify_recalc_inode_mask(inode);
+ return 0;
+}
+
+/* fanotify syscalls */
+SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
+{
+ struct fsnotify_group *group;
+ int f_flags, fd;
+
+ pr_debug("%s: flags=%d event_f_flags=%d\n",
+ __func__, flags, event_f_flags);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (flags & ~FAN_ALL_INIT_FLAGS)
+ return -EINVAL;
+
+ f_flags = O_RDWR | FMODE_NONOTIFY;
+ if (flags & FAN_CLOEXEC)
+ f_flags |= O_CLOEXEC;
+ if (flags & FAN_NONBLOCK)
+ f_flags |= O_NONBLOCK;
+
+ /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
+ group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ group->fanotify_data.f_flags = event_f_flags;
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ mutex_init(&group->fanotify_data.access_mutex);
+ init_waitqueue_head(&group->fanotify_data.access_waitq);
+ INIT_LIST_HEAD(&group->fanotify_data.access_list);
+#endif
+
+ fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
+ if (fd < 0)
+ goto out_put_group;
+
+ return fd;
+
+out_put_group:
+ fsnotify_put_group(group);
+ return fd;
+}
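
Since libc wrappers for these syscalls may not exist yet, userspace can reach them through syscall(2). A sketch assuming __NR_fanotify_init is defined by the installed kernel headers for the target architecture:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/fanotify.h>

int main(void)
{
	/* requires CAP_SYS_ADMIN; event fds will be opened O_RDONLY */
	int fd = syscall(__NR_fanotify_init, FAN_CLOEXEC, O_RDONLY);

	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}
	close(fd);
	return 0;
}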
+
+SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
+ __u64 mask, int dfd,
+ const char __user * pathname)
+{
+ struct inode *inode = NULL;
+ struct vfsmount *mnt = NULL;
+ struct fsnotify_group *group;
+ struct file *filp;
+ struct path path;
+ int ret, fput_needed;
+
+ pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
+ __func__, fanotify_fd, flags, dfd, pathname, mask);
+
+ /* we only use the lower 32 bits as of right now. */
+ if (mask & ((__u64)0xffffffff << 32))
+ return -EINVAL;
+
+ if (flags & ~FAN_ALL_MARK_FLAGS)
+ return -EINVAL;
+ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
+ case FAN_MARK_ADD:
+ case FAN_MARK_REMOVE:
+ case FAN_MARK_FLUSH:
+ break;
+ default:
+ return -EINVAL;
+ }
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
+#else
+ if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
+#endif
+ return -EINVAL;
+
+ filp = fget_light(fanotify_fd, &fput_needed);
+ if (unlikely(!filp))
+ return -EBADF;
+
+ /* verify that this is indeed an fanotify instance */
+ ret = -EINVAL;
+ if (unlikely(filp->f_op != &fanotify_fops))
+ goto fput_and_out;
+
+ ret = fanotify_find_path(dfd, pathname, &path, flags);
+ if (ret)
+ goto fput_and_out;
+
+ /* inode held in place by reference to path; group by fget on fd */
+ if (!(flags & FAN_MARK_MOUNT))
+ inode = path.dentry->d_inode;
+ else
+ mnt = path.mnt;
+ group = filp->private_data;
+
+	/* add, remove, or flush an inode or vfsmount mark */
+ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
+ case FAN_MARK_ADD:
+ if (flags & FAN_MARK_MOUNT)
+ ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
+ else
+ ret = fanotify_add_inode_mark(group, inode, mask, flags);
+ break;
+ case FAN_MARK_REMOVE:
+ if (flags & FAN_MARK_MOUNT)
+ ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
+ else
+ ret = fanotify_remove_inode_mark(group, inode, mask, flags);
+ break;
+ case FAN_MARK_FLUSH:
+ if (flags & FAN_MARK_MOUNT)
+ fsnotify_clear_vfsmount_marks_by_group(group);
+ else
+ fsnotify_clear_inode_marks_by_group(group);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ path_put(&path);
+fput_and_out:
+ fput_light(filp, fput_needed);
+ return ret;
+}
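
A matching sketch for the mark syscall, under the same header assumptions; it marks the whole mount containing /tmp for modify events. Note the mask argument is a __u64, so how it travels through the variadic syscall() differs on 32-bit architectures; this sketch assumes a 64-bit build:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/fanotify.h>

/* add a modify watch covering the whole mount that contains /tmp */
static long watch_tmp_mount(int fanotify_fd)
{
	return syscall(__NR_fanotify_mark, fanotify_fd,
		       FAN_MARK_ADD | FAN_MARK_MOUNT,
		       (unsigned long long)FAN_MODIFY, AT_FDCWD, "/tmp");
}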
+
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
+ long dfd, long pathname)
+{
+ return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
+ mask, (int) dfd,
+ (const char __user *) pathname);
+}
+SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
+#endif
+
+/*
+ * fanotify_user_setup - Our initialization function. Note that we cannot return
+ * error because we have compiled-in VFS hooks. So an (unlikely) failure here
+ * must result in panic().
+ */
+static int __init fanotify_user_setup(void)
+{
+ fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
+ fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
+ SLAB_PANIC);
+
+ return 0;
+}
+device_initcall(fanotify_user_setup);
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index fcc2f064af8..3970392b272 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -21,6 +21,7 @@
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/srcu.h>
#include <linux/fsnotify_backend.h>
@@ -35,6 +36,11 @@ void __fsnotify_inode_delete(struct inode *inode)
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
+void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{
+ fsnotify_clear_marks_by_mount(mnt);
+}
+
/*
* Given an inode, first check if we care what happens to our children. Inotify
* and dnotify both tell their parents about events. If we care about any event
@@ -78,13 +84,16 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
}
/* Notify this dentry's parent about a child's events. */
-void __fsnotify_parent(struct dentry *dentry, __u32 mask)
+void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
struct dentry *parent;
struct inode *p_inode;
bool send = false;
bool should_update_children = false;
+ if (!dentry)
+ dentry = path->dentry;
+
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return;
@@ -115,8 +124,12 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
- fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
- dentry->d_name.name, 0);
+ if (path)
+ fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
+ dentry->d_name.name, 0);
+ else
+ fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
+ dentry->d_name.name, 0);
dput(parent);
}
@@ -127,63 +140,181 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);
+static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ __u32 mask, void *data,
+ int data_is, u32 cookie,
+ const unsigned char *file_name,
+ struct fsnotify_event **event)
+{
+ struct fsnotify_group *group = inode_mark->group;
+ __u32 inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
+ __u32 vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
+
+ pr_debug("%s: group=%p to_tell=%p mnt=%p mark=%p mask=%x data=%p"
+ " data_is=%d cookie=%d event=%p\n", __func__, group, to_tell,
+ mnt, inode_mark, mask, data, data_is, cookie, *event);
+
+ /* clear ignored on inode modification */
+ if (mask & FS_MODIFY) {
+ if (inode_mark &&
+ !(inode_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ inode_mark->ignored_mask = 0;
+ if (vfsmount_mark &&
+ !(vfsmount_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ vfsmount_mark->ignored_mask = 0;
+ }
+
+ /* does the inode mark tell us to do something? */
+ if (inode_mark) {
+ inode_test_mask &= inode_mark->mask;
+ inode_test_mask &= ~inode_mark->ignored_mask;
+ }
+
+ /* does the vfsmount_mark tell us to do something? */
+ if (vfsmount_mark) {
+ vfsmount_test_mask &= vfsmount_mark->mask;
+ vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
+ if (inode_mark)
+ vfsmount_test_mask &= ~inode_mark->ignored_mask;
+ }
+
+ if (!inode_test_mask && !vfsmount_test_mask)
+ return 0;
+
+ if (group->ops->should_send_event(group, to_tell, inode_mark,
+ vfsmount_mark, mask, data,
+ data_is) == false)
+ return 0;
+
+ if (!*event) {
+ *event = fsnotify_create_event(to_tell, mask, data,
+ data_is, file_name,
+ cookie, GFP_KERNEL);
+ if (!*event)
+ return -ENOMEM;
+ }
+ return group->ops->handle_event(group, inode_mark, vfsmount_mark, *event);
+}
+
/*
* This is the main call to fsnotify. The VFS calls into hook specific functions
* in linux/fsnotify.h. Those functions then in turn call here. Here will call
* out to all of the registered fsnotify_group. Those groups can then use the
* notification event in whatever means they feel necessary.
*/
-void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const char *file_name, u32 cookie)
+int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const unsigned char *file_name, u32 cookie)
{
- struct fsnotify_group *group;
+ struct hlist_node *inode_node, *vfsmount_node;
+ struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
+ struct fsnotify_group *inode_group, *vfsmount_group;
struct fsnotify_event *event = NULL;
- int idx;
+ struct vfsmount *mnt;
+ int idx, ret = 0;
+ bool used_inode = false, used_vfsmount = false;
/* global tests shouldn't care about events on child only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
- if (list_empty(&fsnotify_groups))
- return;
+ if (data_is == FSNOTIFY_EVENT_PATH)
+ mnt = ((struct path *)data)->mnt;
+ else
+ mnt = NULL;
- if (!(test_mask & fsnotify_mask))
- return;
-
- if (!(test_mask & to_tell->i_fsnotify_mask))
- return;
/*
- * SRCU!! the groups list is very very much read only and the path is
- * very hot. The VAST majority of events are not going to need to do
- * anything other than walk the list so it's crazy to pre-allocate.
+ * if this is a modify event we may need to clear the ignored masks
+ * otherwise return if neither the inode nor the vfsmount care about
+ * this type of event.
*/
- idx = srcu_read_lock(&fsnotify_grp_srcu);
- list_for_each_entry_rcu(group, &fsnotify_groups, group_list) {
- if (test_mask & group->mask) {
- if (!group->ops->should_send_event(group, to_tell, mask))
- continue;
- if (!event) {
- event = fsnotify_create_event(to_tell, mask, data,
- data_is, file_name, cookie,
- GFP_KERNEL);
- /* shit, we OOM'd and now we can't tell, maybe
- * someday someone else will want to do something
- * here */
- if (!event)
- break;
- }
- group->ops->handle_event(group, event);
+ if (!(mask & FS_MODIFY) &&
+ !(test_mask & to_tell->i_fsnotify_mask) &&
+ !(mnt && test_mask & mnt->mnt_fsnotify_mask))
+ return 0;
+
+ idx = srcu_read_lock(&fsnotify_mark_srcu);
+
+ if ((mask & FS_MODIFY) ||
+ (test_mask & to_tell->i_fsnotify_mask))
+ inode_node = srcu_dereference(to_tell->i_fsnotify_marks.first,
+ &fsnotify_mark_srcu);
+ else
+ inode_node = NULL;
+
+ if (mnt) {
+ if ((mask & FS_MODIFY) ||
+ (test_mask & mnt->mnt_fsnotify_mask))
+ vfsmount_node = srcu_dereference(mnt->mnt_fsnotify_marks.first,
+ &fsnotify_mark_srcu);
+ else
+ vfsmount_node = NULL;
+ } else {
+ mnt = NULL;
+ vfsmount_node = NULL;
+ }
+
+ while (inode_node || vfsmount_node) {
+ if (inode_node) {
+ inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
+ struct fsnotify_mark, i.i_list);
+ inode_group = inode_mark->group;
+ } else
+ inode_group = (void *)-1;
+
+ if (vfsmount_node) {
+ vfsmount_mark = hlist_entry(srcu_dereference(vfsmount_node, &fsnotify_mark_srcu),
+ struct fsnotify_mark, m.m_list);
+ vfsmount_group = vfsmount_mark->group;
+ } else
+ vfsmount_group = (void *)-1;
+
+ if (inode_group < vfsmount_group) {
+ /* handle inode */
+ send_to_group(to_tell, NULL, inode_mark, NULL, mask, data,
+ data_is, cookie, file_name, &event);
+ used_inode = true;
+ } else if (vfsmount_group < inode_group) {
+ send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data,
+ data_is, cookie, file_name, &event);
+ used_vfsmount = true;
+ } else {
+ send_to_group(to_tell, mnt, inode_mark, vfsmount_mark,
+ mask, data, data_is, cookie, file_name,
+ &event);
+ used_vfsmount = true;
+ used_inode = true;
}
+
+ if (used_inode)
+ inode_node = srcu_dereference(inode_node->next,
+ &fsnotify_mark_srcu);
+ if (used_vfsmount)
+ vfsmount_node = srcu_dereference(vfsmount_node->next,
+ &fsnotify_mark_srcu);
}
- srcu_read_unlock(&fsnotify_grp_srcu, idx);
+
+ srcu_read_unlock(&fsnotify_mark_srcu, idx);
/*
* fsnotify_create_event() took a reference so the event can't be cleaned
* up while we are still trying to add it to lists, drop that one.
*/
if (event)
fsnotify_put_event(event);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(fsnotify);
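
The while loop above is a merge-walk over two mark lists kept ordered by group pointer, with (void *)-1 standing in for "past the end", so a group holding both an inode mark and a vfsmount mark is handed both marks in a single send_to_group() call. A standalone sketch of the same pattern over integer keys, with INT_MAX playing the sentinel role and ascending arrays used purely for illustration:

#include <limits.h>
#include <stdio.h>

/* merge-walk two ascending key arrays, visiting each key once and
 * noting when it appears in both lists */
static void merge_walk(const int *a, int na, const int *b, int nb)
{
	int i = 0, j = 0;

	while (i < na || j < nb) {
		int ka = i < na ? a[i] : INT_MAX;	/* sentinel, like (void *)-1 */
		int kb = j < nb ? b[j] : INT_MAX;

		if (ka < kb)
			printf("inode mark only: group %d\n", a[i++]);
		else if (kb < ka)
			printf("vfsmount mark only: group %d\n", b[j++]);
		else {
			printf("both marks: group %d\n", ka);
			i++;
			j++;
		}
	}
}

int main(void)
{
	int inode_groups[] = { 1, 3, 4 };
	int mount_groups[] = { 2, 3 };

	merge_walk(inode_groups, 3, mount_groups, 2);
	return 0;
}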
static __init int fsnotify_init(void)
{
- return init_srcu_struct(&fsnotify_grp_srcu);
+ int ret;
+
+ BUG_ON(hweight32(ALL_FSNOTIFY_EVENTS) != 23);
+
+ ret = init_srcu_struct(&fsnotify_mark_srcu);
+ if (ret)
+ panic("initializing fsnotify_mark_srcu");
+
+ return 0;
}
-subsys_initcall(fsnotify_init);
+core_initcall(fsnotify_init);
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 4dc240824b2..85e7d2b431d 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -6,21 +6,34 @@
#include <linux/srcu.h>
#include <linux/types.h>
-/* protects reads of fsnotify_groups */
-extern struct srcu_struct fsnotify_grp_srcu;
-/* all groups which receive fsnotify events */
-extern struct list_head fsnotify_groups;
-/* all bitwise OR of all event types (FS_*) for all fsnotify_groups */
-extern __u32 fsnotify_mask;
-
/* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group);
+/* protects reads of inode and vfsmount marks list */
+extern struct srcu_struct fsnotify_mark_srcu;
+
+extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
+ __u32 mask);
+/* add a mark to an inode */
+extern int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ int allow_dups);
+/* add a mark to a vfsmount */
+extern int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct vfsmount *mnt,
+ int allow_dups);
+
/* final kfree of a group */
extern void fsnotify_final_destroy_group(struct fsnotify_group *group);
+/* vfsmount specific destruction of a mark */
+extern void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark);
+/* inode specific destruction of a mark */
+extern void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark);
/* run the list of all marks associated with inode and flag them to be freed */
extern void fsnotify_clear_marks_by_inode(struct inode *inode);
+/* run the list of all marks associated with vfsmount and flag them to be freed */
+extern void fsnotify_clear_marks_by_mount(struct vfsmount *mnt);
/*
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 0e1677144bc..d309f38449c 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -28,64 +28,6 @@
#include <asm/atomic.h>
-/* protects writes to fsnotify_groups and fsnotify_mask */
-static DEFINE_MUTEX(fsnotify_grp_mutex);
-/* protects reads while running the fsnotify_groups list */
-struct srcu_struct fsnotify_grp_srcu;
-/* all groups registered to receive filesystem notifications */
-LIST_HEAD(fsnotify_groups);
-/* bitwise OR of all events (FS_*) interesting to some group on this system */
-__u32 fsnotify_mask;
-
-/*
- * When a new group registers or changes it's set of interesting events
- * this function updates the fsnotify_mask to contain all interesting events
- */
-void fsnotify_recalc_global_mask(void)
-{
- struct fsnotify_group *group;
- __u32 mask = 0;
- int idx;
-
- idx = srcu_read_lock(&fsnotify_grp_srcu);
- list_for_each_entry_rcu(group, &fsnotify_groups, group_list)
- mask |= group->mask;
- srcu_read_unlock(&fsnotify_grp_srcu, idx);
- fsnotify_mask = mask;
-}
-
-/*
- * Update the group->mask by running all of the marks associated with this
- * group and finding the bitwise | of all of the mark->mask. If we change
- * the group->mask we need to update the global mask of events interesting
- * to the system.
- */
-void fsnotify_recalc_group_mask(struct fsnotify_group *group)
-{
- __u32 mask = 0;
- __u32 old_mask = group->mask;
- struct fsnotify_mark_entry *entry;
-
- spin_lock(&group->mark_lock);
- list_for_each_entry(entry, &group->mark_entries, g_list)
- mask |= entry->mask;
- spin_unlock(&group->mark_lock);
-
- group->mask = mask;
-
- if (old_mask != mask)
- fsnotify_recalc_global_mask();
-}
-
-/*
- * Take a reference to a group so things found under the fsnotify_grp_mutex
- * can't get freed under us
- */
-static void fsnotify_get_group(struct fsnotify_group *group)
-{
- atomic_inc(&group->refcnt);
-}
-
/*
* Final freeing of a group
*/
@@ -110,145 +52,53 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group)
*/
static void fsnotify_destroy_group(struct fsnotify_group *group)
{
- /* clear all inode mark entries for this group */
+ /* clear all inode marks for this group */
fsnotify_clear_marks_by_group(group);
+ synchronize_srcu(&fsnotify_mark_srcu);
+
/* past the point of no return, matches the initial value of 1 */
if (atomic_dec_and_test(&group->num_marks))
fsnotify_final_destroy_group(group);
}
/*
- * Remove this group from the global list of groups that will get events
- * this can be done even if there are still references and things still using
- * this group. This just stops the group from getting new events.
- */
-static void __fsnotify_evict_group(struct fsnotify_group *group)
-{
- BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
-
- if (group->on_group_list)
- list_del_rcu(&group->group_list);
- group->on_group_list = 0;
-}
-
-/*
- * Called when a group is no longer interested in getting events. This can be
- * used if a group is misbehaving or if for some reason a group should no longer
- * get any filesystem events.
- */
-void fsnotify_evict_group(struct fsnotify_group *group)
-{
- mutex_lock(&fsnotify_grp_mutex);
- __fsnotify_evict_group(group);
- mutex_unlock(&fsnotify_grp_mutex);
-}
-
-/*
* Drop a reference to a group. Free it if it's through.
*/
void fsnotify_put_group(struct fsnotify_group *group)
{
- if (!atomic_dec_and_mutex_lock(&group->refcnt, &fsnotify_grp_mutex))
- return;
-
- /*
- * OK, now we know that there's no other users *and* we hold mutex,
- * so no new references will appear
- */
- __fsnotify_evict_group(group);
-
- /*
- * now it's off the list, so the only thing we might care about is
- * srcu access....
- */
- mutex_unlock(&fsnotify_grp_mutex);
- synchronize_srcu(&fsnotify_grp_srcu);
-
- /* and now it is really dead. _Nothing_ could be seeing it */
- fsnotify_recalc_global_mask();
- fsnotify_destroy_group(group);
-}
-
-/*
- * Simply run the fsnotify_groups list and find a group which matches
- * the given parameters. If a group is found we take a reference to that
- * group.
- */
-static struct fsnotify_group *fsnotify_find_group(unsigned int group_num, __u32 mask,
- const struct fsnotify_ops *ops)
-{
- struct fsnotify_group *group_iter;
- struct fsnotify_group *group = NULL;
-
- BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
-
- list_for_each_entry_rcu(group_iter, &fsnotify_groups, group_list) {
- if (group_iter->group_num == group_num) {
- if ((group_iter->mask == mask) &&
- (group_iter->ops == ops)) {
- fsnotify_get_group(group_iter);
- group = group_iter;
- } else
- group = ERR_PTR(-EEXIST);
- }
- }
- return group;
+ if (atomic_dec_and_test(&group->refcnt))
+ fsnotify_destroy_group(group);
}
/*
- * Either finds an existing group which matches the group_num, mask, and ops or
- * creates a new group and adds it to the global group list. In either case we
- * take a reference for the group returned.
+ * Create a new fsnotify_group and hold a reference for the group returned.
*/
-struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
- const struct fsnotify_ops *ops)
+struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
{
- struct fsnotify_group *group, *tgroup;
+ struct fsnotify_group *group;
- /* very low use, simpler locking if we just always alloc */
- group = kmalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
+ group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
+	/* set to 0 when there are no external references to this group */
atomic_set(&group->refcnt, 1);
-
- group->on_group_list = 0;
- group->group_num = group_num;
- group->mask = mask;
+ /*
+ * hits 0 when there are no external references AND no marks for
+ * this group
+ */
+ atomic_set(&group->num_marks, 1);
mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
- group->q_len = 0;
group->max_events = UINT_MAX;
spin_lock_init(&group->mark_lock);
- atomic_set(&group->num_marks, 0);
- INIT_LIST_HEAD(&group->mark_entries);
+ INIT_LIST_HEAD(&group->marks_list);
group->ops = ops;
- mutex_lock(&fsnotify_grp_mutex);
- tgroup = fsnotify_find_group(group_num, mask, ops);
- if (tgroup) {
- /* group already exists */
- mutex_unlock(&fsnotify_grp_mutex);
- /* destroy the new one we made */
- fsnotify_put_group(group);
- return tgroup;
- }
-
- /* group not found, add a new one */
- list_add_rcu(&group->group_list, &fsnotify_groups);
- group->on_group_list = 1;
- /* being on the fsnotify_groups list holds one num_marks */
- atomic_inc(&group->num_marks);
-
- mutex_unlock(&fsnotify_grp_mutex);
-
- if (mask)
- fsnotify_recalc_global_mask();
-
return group;
}
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 0399bcbe09c..33297c00506 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -16,72 +16,6 @@
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/*
- * fsnotify inode mark locking/lifetime/and refcnting
- *
- * REFCNT:
- * The mark->refcnt tells how many "things" in the kernel currently are
- * referencing this object. The object typically will live inside the kernel
- * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
- * which can find this object holding the appropriete locks, can take a reference
- * and the object itself is guarenteed to survive until the reference is dropped.
- *
- * LOCKING:
- * There are 3 spinlocks involved with fsnotify inode marks and they MUST
- * be taken in order as follows:
- *
- * entry->lock
- * group->mark_lock
- * inode->i_lock
- *
- * entry->lock protects 2 things, entry->group and entry->inode. You must hold
- * that lock to dereference either of these things (they could be NULL even with
- * the lock)
- *
- * group->mark_lock protects the mark_entries list anchored inside a given group
- * and each entry is hooked via the g_list. It also sorta protects the
- * free_g_list, which when used is anchored by a private list on the stack of the
- * task which held the group->mark_lock.
- *
- * inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a
- * given inode and each entry is hooked via the i_list. (and sorta the
- * free_i_list)
- *
- *
- * LIFETIME:
- * Inode marks survive between when they are added to an inode and when their
- * refcnt==0.
- *
- * The inode mark can be cleared for a number of different reasons including:
- * - The inode is unlinked for the last time. (fsnotify_inode_remove)
- * - The inode is being evicted from cache. (fsnotify_inode_delete)
- * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
- * - Something explicitly requests that it be removed. (fsnotify_destroy_mark_by_entry)
- * - The fsnotify_group associated with the mark is going away and all such marks
- * need to be cleaned up. (fsnotify_clear_marks_by_group)
- *
- * Worst case we are given an inode and need to clean up all the marks on that
- * inode. We take i_lock and walk the i_fsnotify_mark_entries safely. For each
- * mark on the list we take a reference (so the mark can't disappear under us).
- * We remove that mark form the inode's list of marks and we add this mark to a
- * private list anchored on the stack using i_free_list; At this point we no
- * longer fear anything finding the mark using the inode's list of marks.
- *
- * We can safely and locklessly run the private list on the stack of everything
- * we just unattached from the original inode. For each mark on the private list
- * we grab the mark-> and can thus dereference mark->group and mark->inode. If
- * we see the group and inode are not NULL we take those locks. Now holding all
- * 3 locks we can completely remove the mark from other tasks finding it in the
- * future. Remember, 10 things might already be referencing this mark, but they
- * better be holding a ref. We drop our reference we took before we unhooked it
- * from the inode. When the ref hits 0 we can free the mark.
- *
- * Very similarly for freeing by group, except we use free_g_list.
- *
- * This has the very interesting property of being able to run concurrently with
- * any (or all) other directions.
- */
-
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -95,30 +29,19 @@
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
-void fsnotify_get_mark(struct fsnotify_mark_entry *entry)
-{
- atomic_inc(&entry->refcnt);
-}
-
-void fsnotify_put_mark(struct fsnotify_mark_entry *entry)
-{
- if (atomic_dec_and_test(&entry->refcnt))
- entry->free_mark(entry);
-}
-
/*
* Recalculate the mask of events relevant to a given inode locked.
*/
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *mark;
struct hlist_node *pos;
__u32 new_mask = 0;
assert_spin_locked(&inode->i_lock);
- hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list)
- new_mask |= entry->mask;
+ hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+ new_mask |= mark->mask;
inode->i_fsnotify_mask = new_mask;
}
@@ -135,107 +58,26 @@ void fsnotify_recalc_inode_mask(struct inode *inode)
__fsnotify_update_child_dentry_flags(inode);
}
-/*
- * Any time a mark is getting freed we end up here.
- * The caller had better be holding a reference to this mark so we don't actually
- * do the final put under the entry->lock
- */
-void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
+void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
- struct fsnotify_group *group;
- struct inode *inode;
+ struct inode *inode = mark->i.inode;
- spin_lock(&entry->lock);
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&mark->group->mark_lock);
- group = entry->group;
- inode = entry->inode;
-
- BUG_ON(group && !inode);
- BUG_ON(!group && inode);
-
- /* if !group something else already marked this to die */
- if (!group) {
- spin_unlock(&entry->lock);
- return;
- }
-
- /* 1 from caller and 1 for being on i_list/g_list */
- BUG_ON(atomic_read(&entry->refcnt) < 2);
-
- spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
- hlist_del_init(&entry->i_list);
- entry->inode = NULL;
-
- list_del_init(&entry->g_list);
- entry->group = NULL;
-
- fsnotify_put_mark(entry); /* for i_list and g_list */
+ hlist_del_init_rcu(&mark->i.i_list);
+ mark->i.inode = NULL;
/*
- * this mark is now off the inode->i_fsnotify_mark_entries list and we
+ * this mark is now off the inode->i_fsnotify_marks list and we
* hold the inode->i_lock, so this is the perfect time to update the
* inode->i_fsnotify_mask
*/
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&group->mark_lock);
- spin_unlock(&entry->lock);
-
- /*
- * Some groups like to know that marks are being freed. This is a
- * callback to the group function to let it know that this entry
- * is being freed.
- */
- if (group->ops->freeing_mark)
- group->ops->freeing_mark(entry, group);
-
- /*
- * __fsnotify_update_child_dentry_flags(inode);
- *
- * I really want to call that, but we can't, we have no idea if the inode
- * still exists the second we drop the entry->lock.
- *
- * The next time an event arrive to this inode from one of it's children
- * __fsnotify_parent will see that the inode doesn't care about it's
- * children and will update all of these flags then. So really this
- * is just a lazy update (and could be a perf win...)
- */
-
-
- iput(inode);
-
- /*
- * it's possible that this group tried to destroy itself, but this
- * this mark was simultaneously being freed by inode. If that's the
- * case, we finish freeing the group here.
- */
- if (unlikely(atomic_dec_and_test(&group->num_marks)))
- fsnotify_final_destroy_group(group);
-}
-
-/*
- * Given a group, destroy all of the marks associated with that group.
- */
-void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
-{
- struct fsnotify_mark_entry *lentry, *entry;
- LIST_HEAD(free_list);
-
- spin_lock(&group->mark_lock);
- list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) {
- list_add(&entry->free_g_list, &free_list);
- list_del_init(&entry->g_list);
- fsnotify_get_mark(entry);
- }
- spin_unlock(&group->mark_lock);
-
- list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
- }
}
/*
@@ -243,112 +85,145 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
*/
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
- struct fsnotify_mark_entry *entry, *lentry;
+ struct fsnotify_mark *mark, *lmark;
struct hlist_node *pos, *n;
LIST_HEAD(free_list);
spin_lock(&inode->i_lock);
- hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) {
- list_add(&entry->free_i_list, &free_list);
- hlist_del_init(&entry->i_list);
- fsnotify_get_mark(entry);
+ hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+ list_add(&mark->i.free_i_list, &free_list);
+ hlist_del_init_rcu(&mark->i.i_list);
+ fsnotify_get_mark(mark);
}
spin_unlock(&inode->i_lock);
- list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) {
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
+ list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
}
}
/*
+ * Given a group clear all of the inode marks associated with that group.
+ */
+void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
+}
+
+/*
* given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/
-struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
- struct inode *inode)
+struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
+ struct inode *inode)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *mark;
struct hlist_node *pos;
assert_spin_locked(&inode->i_lock);
- hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) {
- if (entry->group == group) {
- fsnotify_get_mark(entry);
- return entry;
+ hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+ if (mark->group == group) {
+ fsnotify_get_mark(mark);
+ return mark;
}
}
return NULL;
}
/*
- * Nothing fancy, just initialize lists and locks and counters.
+ * given a group and inode, find the mark associated with that combination.
+ * if found take a reference to that mark and return it, else return NULL
*/
-void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
- void (*free_mark)(struct fsnotify_mark_entry *entry))
+struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
+ struct inode *inode)
+{
+ struct fsnotify_mark *mark;
+
+ spin_lock(&inode->i_lock);
+ mark = fsnotify_find_inode_mark_locked(group, inode);
+ spin_unlock(&inode->i_lock);
+ return mark;
+}
+
+/*
+ * If we are setting a mark mask on an inode mark we should pin the inode
+ * in memory.
+ */
+void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
+ __u32 mask)
{
- spin_lock_init(&entry->lock);
- atomic_set(&entry->refcnt, 1);
- INIT_HLIST_NODE(&entry->i_list);
- entry->group = NULL;
- entry->mask = 0;
- entry->inode = NULL;
- entry->free_mark = free_mark;
+ struct inode *inode;
+
+ assert_spin_locked(&mark->lock);
+
+ if (mask &&
+ mark->i.inode &&
+ !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
+ mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
+ inode = igrab(mark->i.inode);
+ /*
+ * we shouldn't be able to get here if the inode wasn't
+		 * already safely held in memory, but BUG() in case that
+		 * assumption is ever wrong.
+ */
+ BUG_ON(!inode);
+ }
}
/*
- * Attach an initialized mark entry to a given group and inode.
+ * Attach an initialized mark to a given inode.
* These marks may be used for the fsnotify backend to determine which
- * event types should be delivered to which group and for which inodes.
+ * event types should be delivered to which group and for which inodes. These
+ * marks are ordered according to the group's location in memory.
*/
-int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
- struct fsnotify_group *group, struct inode *inode)
+int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ int allow_dups)
{
- struct fsnotify_mark_entry *lentry;
+ struct fsnotify_mark *lmark;
+ struct hlist_node *node, *last = NULL;
int ret = 0;
- inode = igrab(inode);
- if (unlikely(!inode))
- return -EINVAL;
+ mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
+
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&group->mark_lock);
- /*
- * LOCKING ORDER!!!!
- * entry->lock
- * group->mark_lock
- * inode->i_lock
- */
- spin_lock(&entry->lock);
- spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
- lentry = fsnotify_find_mark_entry(group, inode);
- if (!lentry) {
- entry->group = group;
- entry->inode = inode;
+ mark->i.inode = inode;
- hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
- list_add(&entry->g_list, &group->mark_entries);
+ /* is mark the first mark? */
+ if (hlist_empty(&inode->i_fsnotify_marks)) {
+ hlist_add_head_rcu(&mark->i.i_list, &inode->i_fsnotify_marks);
+ goto out;
+ }
- fsnotify_get_mark(entry); /* for i_list and g_list */
+ /* should mark be in the middle of the current list? */
+ hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
+ last = node;
+
+ if ((lmark->group == group) && !allow_dups) {
+ ret = -EEXIST;
+ goto out;
+ }
- atomic_inc(&group->num_marks);
+ if (mark->group < lmark->group)
+ continue;
- fsnotify_recalc_inode_mask_locked(inode);
+ hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
+ goto out;
}
+ BUG_ON(last == NULL);
+ /* mark should be the last entry. last is the current last entry */
+ hlist_add_after_rcu(last, &mark->i.i_list);
+out:
+ fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&group->mark_lock);
- spin_unlock(&entry->lock);
-
- if (lentry) {
- ret = -EEXIST;
- iput(inode);
- fsnotify_put_mark(lentry);
- } else {
- __fsnotify_update_child_dentry_flags(inode);
- }
return ret;
}
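
The insertion above keeps each inode's mark list ordered by the group's address: the walk skips entries whose group pointer is larger and adds the new mark in front of the first entry whose group compares less-or-equal, catching duplicate groups along the way. A sketch of the same insert rule over a singly linked list of integer keys:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

/* insert before the first entry whose key is <= the new key, mirroring
 * the "continue while mark->group < lmark->group" walk above; the list
 * ends up sorted in descending key order */
static struct node *sorted_insert(struct node *head, int key)
{
	struct node *n = malloc(sizeof(*n));
	struct node **pp = &head;

	if (!n)
		return head;	/* allocation failure: drop the insert */
	n->key = key;
	while (*pp && key < (*pp)->key)
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;
	return head;
}

int main(void)
{
	struct node *head = NULL, *n;
	int keys[] = { 5, 3, 4 };
	int i;

	for (i = 0; i < 3; i++)
		head = sorted_insert(head, keys[i]);
	for (n = head; n; n = n->next)
		printf("%d ", n->key);	/* prints: 5 4 3 */
	printf("\n");
	return 0;
}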
@@ -369,11 +244,11 @@ void fsnotify_unmount_inodes(struct list_head *list)
struct inode *need_iput_tmp;
/*
- * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
+ * We cannot __iget() an inode in state I_FREEING,
* I_WILL_FREE, or I_NEW which is fine because by that point
* the inode cannot have any associated watches.
*/
- if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
continue;
/*
@@ -397,7 +272,7 @@ void fsnotify_unmount_inodes(struct list_head *list)
/* In case the dropping of a reference would nuke next_i. */
if ((&next_i->i_sb_list != list) &&
atomic_read(&next_i->i_count) &&
- !(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) {
+ !(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
__iget(next_i);
need_iput = next_i;
}
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index b3a159b21cf..b981fc0c837 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -1,18 +1,3 @@
-config INOTIFY
- bool "Inotify file change notification support"
- default n
- ---help---
- Say Y here to enable legacy in kernel inotify support. Inotify is a
- file change notification system. It is a replacement for dnotify.
- This option only provides the legacy inotify in kernel API. There
- are no in tree kernel users of this interface since it is deprecated.
- You only need this if you are loading an out of tree kernel module
- that uses inotify.
-
- For more information, see <file:Documentation/filesystems/inotify.txt>
-
- If unsure, say N.
-
config INOTIFY_USER
bool "Inotify support for userspace"
select ANON_INODES
diff --git a/fs/notify/inotify/Makefile b/fs/notify/inotify/Makefile
index 94382817136..a380dabe09d 100644
--- a/fs/notify/inotify/Makefile
+++ b/fs/notify/inotify/Makefile
@@ -1,2 +1 @@
-obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_INOTIFY_USER) += inotify_fsnotify.o inotify_user.o
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
deleted file mode 100644
index 27b75ebc746..00000000000
--- a/fs/notify/inotify/inotify.c
+++ /dev/null
@@ -1,873 +0,0 @@
-/*
- * fs/inotify.c - inode-based file event notifications
- *
- * Authors:
- * John McCutchan <ttb@tentacle.dhs.org>
- * Robert Love <rml@novell.com>
- *
- * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
- *
- * Copyright (C) 2005 John McCutchan
- * Copyright 2006 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/writeback.h>
-#include <linux/inotify.h>
-#include <linux/fsnotify_backend.h>
-
-static atomic_t inotify_cookie;
-
-/*
- * Lock ordering:
- *
- * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
- * iprune_mutex (synchronize shrink_icache_memory())
- * inode_lock (protects the super_block->s_inodes list)
- * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
- * inotify_handle->mutex (protects inotify_handle and watches->h_list)
- *
- * The inode->inotify_mutex and inotify_handle->mutex and held during execution
- * of a caller's event handler. Thus, the caller must not hold any locks
- * taken in their event handler while calling any of the published inotify
- * interfaces.
- */
-
-/*
- * Lifetimes of the three main data structures--inotify_handle, inode, and
- * inotify_watch--are managed by reference count.
- *
- * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
- * Additional references can bump the count via get_inotify_handle() and drop
- * the count via put_inotify_handle().
- *
- * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
- * to remove_watch_no_event(). Additional references can bump the count via
- * get_inotify_watch() and drop the count via put_inotify_watch(). The caller
- * is reponsible for the final put after receiving IN_IGNORED, or when using
- * IN_ONESHOT after receiving the first event. Inotify does the final put if
- * inotify_destroy() is called.
- *
- * inode: Pinned so long as the inode is associated with a watch, from
- * inotify_add_watch() to the final put_inotify_watch().
- */
-
-/*
- * struct inotify_handle - represents an inotify instance
- *
- * This structure is protected by the mutex 'mutex'.
- */
-struct inotify_handle {
- struct idr idr; /* idr mapping wd -> watch */
- struct mutex mutex; /* protects this bad boy */
- struct list_head watches; /* list of watches */
- atomic_t count; /* reference count */
- u32 last_wd; /* the last wd allocated */
- const struct inotify_operations *in_ops; /* inotify caller operations */
-};
-
-static inline void get_inotify_handle(struct inotify_handle *ih)
-{
- atomic_inc(&ih->count);
-}
-
-static inline void put_inotify_handle(struct inotify_handle *ih)
-{
- if (atomic_dec_and_test(&ih->count)) {
- idr_destroy(&ih->idr);
- kfree(ih);
- }
-}
-
-/**
- * get_inotify_watch - grab a reference to an inotify_watch
- * @watch: watch to grab
- */
-void get_inotify_watch(struct inotify_watch *watch)
-{
- atomic_inc(&watch->count);
-}
-EXPORT_SYMBOL_GPL(get_inotify_watch);
-
-int pin_inotify_watch(struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- if (atomic_inc_not_zero(&sb->s_active)) {
- atomic_inc(&watch->count);
- return 1;
- }
- return 0;
-}
-
-/**
- * put_inotify_watch - decrements the ref count on a given watch. cleans up
- * watch references if the count reaches zero. inotify_watch is freed by
- * inotify callers via the destroy_watch() op.
- * @watch: watch to release
- */
-void put_inotify_watch(struct inotify_watch *watch)
-{
- if (atomic_dec_and_test(&watch->count)) {
- struct inotify_handle *ih = watch->ih;
-
- iput(watch->inode);
- ih->in_ops->destroy_watch(watch);
- put_inotify_handle(ih);
- }
-}
-EXPORT_SYMBOL_GPL(put_inotify_watch);
-
-void unpin_inotify_watch(struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- put_inotify_watch(watch);
- deactivate_super(sb);
-}
-
-/*
- * inotify_handle_get_wd - returns the next WD for use by the given handle
- *
- * Callers must hold ih->mutex. This function can sleep.
- */
-static int inotify_handle_get_wd(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- int ret;
-
- do {
- if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS)))
- return -ENOSPC;
- ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
- } while (ret == -EAGAIN);
-
- if (likely(!ret))
- ih->last_wd = watch->wd;
-
- return ret;
-}
-
-/*
- * inotify_inode_watched - returns nonzero if there are watches on this inode
- * and zero otherwise. We call this lockless, we do not care if we race.
- */
-static inline int inotify_inode_watched(struct inode *inode)
-{
- return !list_empty(&inode->inotify_watches);
-}
-
-/*
- * Get child dentry flag into synch with parent inode.
- * Flag should always be clear for negative dentrys.
- */
-static void set_dentry_child_flags(struct inode *inode, int watched)
-{
- struct dentry *alias;
-
- spin_lock(&dcache_lock);
- list_for_each_entry(alias, &inode->i_dentry, d_alias) {
- struct dentry *child;
-
- list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
- if (!child->d_inode)
- continue;
-
- spin_lock(&child->d_lock);
- if (watched)
- child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- else
- child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
- spin_unlock(&child->d_lock);
- }
- }
- spin_unlock(&dcache_lock);
-}
-
-/*
- * inode_find_handle - find the watch associated with the given inode and
- * handle
- *
- * Callers must hold inode->inotify_mutex.
- */
-static struct inotify_watch *inode_find_handle(struct inode *inode,
- struct inotify_handle *ih)
-{
- struct inotify_watch *watch;
-
- list_for_each_entry(watch, &inode->inotify_watches, i_list) {
- if (watch->ih == ih)
- return watch;
- }
-
- return NULL;
-}
-
-/*
- * remove_watch_no_event - remove watch without the IN_IGNORED event.
- *
- * Callers must hold both inode->inotify_mutex and ih->mutex.
- */
-static void remove_watch_no_event(struct inotify_watch *watch,
- struct inotify_handle *ih)
-{
- list_del(&watch->i_list);
- list_del(&watch->h_list);
-
- if (!inotify_inode_watched(watch->inode))
- set_dentry_child_flags(watch->inode, 0);
-
- idr_remove(&ih->idr, watch->wd);
-}
-
-/**
- * inotify_remove_watch_locked - Remove a watch from both the handle and the
- * inode. Sends the IN_IGNORED event signifying that the inode is no longer
- * watched. May be invoked from a caller's event handler.
- * @ih: inotify handle associated with watch
- * @watch: watch to remove
- *
- * Callers must hold both inode->inotify_mutex and ih->mutex.
- */
-void inotify_remove_watch_locked(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- remove_watch_no_event(watch, ih);
- ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
-
-/* Kernel API for producing events */
-
-/*
- * inotify_d_instantiate - instantiate dcache entry for inode
- */
-void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
-{
- struct dentry *parent;
-
- if (!inode)
- return;
-
- spin_lock(&entry->d_lock);
- parent = entry->d_parent;
- if (parent->d_inode && inotify_inode_watched(parent->d_inode))
- entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- spin_unlock(&entry->d_lock);
-}
-
-/*
- * inotify_d_move - dcache entry has been moved
- */
-void inotify_d_move(struct dentry *entry)
-{
- struct dentry *parent;
-
- parent = entry->d_parent;
- if (inotify_inode_watched(parent->d_inode))
- entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- else
- entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
-}
-
-/**
- * inotify_inode_queue_event - queue an event to all watches on this inode
- * @inode: inode event is originating from
- * @mask: event mask describing this event
- * @cookie: cookie for synchronization, or zero
- * @name: filename, if any
- * @n_inode: inode associated with name
- */
-void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
- const char *name, struct inode *n_inode)
-{
- struct inotify_watch *watch, *next;
-
- if (!inotify_inode_watched(inode))
- return;
-
- mutex_lock(&inode->inotify_mutex);
- list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
- u32 watch_mask = watch->mask;
- if (watch_mask & mask) {
- struct inotify_handle *ih = watch->ih;
- mutex_lock(&ih->mutex);
- if (watch_mask & IN_ONESHOT)
- remove_watch_no_event(watch, ih);
- ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
- name, n_inode);
- mutex_unlock(&ih->mutex);
- }
- }
- mutex_unlock(&inode->inotify_mutex);
-}
-EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
-
-/**
- * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
- * @dentry: the dentry in question, we queue against this dentry's parent
- * @mask: event mask describing this event
- * @cookie: cookie for synchronization, or zero
- * @name: filename, if any
- */
-void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
- u32 cookie, const char *name)
-{
- struct dentry *parent;
- struct inode *inode;
-
- if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
- return;
-
- spin_lock(&dentry->d_lock);
- parent = dentry->d_parent;
- inode = parent->d_inode;
-
- if (inotify_inode_watched(inode)) {
- dget(parent);
- spin_unlock(&dentry->d_lock);
- inotify_inode_queue_event(inode, mask, cookie, name,
- dentry->d_inode);
- dput(parent);
- } else
- spin_unlock(&dentry->d_lock);
-}
-EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
-
-/**
- * inotify_get_cookie - return a unique cookie for use in synchronizing events.
- */
-u32 inotify_get_cookie(void)
-{
- return atomic_inc_return(&inotify_cookie);
-}
-EXPORT_SYMBOL_GPL(inotify_get_cookie);
-
-/**
- * inotify_unmount_inodes - an sb is unmounting; handle any watched inodes.
- * @list: list of inodes being unmounted (sb->s_inodes)
- *
- * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
- * We temporarily drop inode_lock, however, and CAN block.
- */
-void inotify_unmount_inodes(struct list_head *list)
-{
- struct inode *inode, *next_i, *need_iput = NULL;
-
- list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
- struct inotify_watch *watch, *next_w;
- struct inode *need_iput_tmp;
- struct list_head *watches;
-
- /*
- * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
- * I_WILL_FREE, or I_NEW which is fine because by that point
- * the inode cannot have any associated watches.
- */
- if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
- continue;
-
- /*
- * If i_count is zero, the inode cannot have any watches and
- * doing an __iget/iput with MS_ACTIVE clear would actually
- * evict all inodes with zero i_count from icache which is
- * unnecessarily violent and may in fact be illegal to do.
- */
- if (!atomic_read(&inode->i_count))
- continue;
-
- need_iput_tmp = need_iput;
- need_iput = NULL;
- /* In case inotify_remove_watch_locked() drops a reference. */
- if (inode != need_iput_tmp)
- __iget(inode);
- else
- need_iput_tmp = NULL;
- /* In case the dropping of a reference would nuke next_i. */
- if ((&next_i->i_sb_list != list) &&
- atomic_read(&next_i->i_count) &&
- !(next_i->i_state & (I_CLEAR | I_FREEING |
- I_WILL_FREE))) {
- __iget(next_i);
- need_iput = next_i;
- }
-
- /*
- * We can safely drop inode_lock here because we hold
- * references on both inode and next_i. Also no new inodes
- * will be added since the umount has begun. Finally,
- * iprune_mutex keeps shrink_icache_memory() away.
- */
- spin_unlock(&inode_lock);
-
- if (need_iput_tmp)
- iput(need_iput_tmp);
-
- /* for each watch, send IN_UNMOUNT and then remove it */
- mutex_lock(&inode->inotify_mutex);
- watches = &inode->inotify_watches;
- list_for_each_entry_safe(watch, next_w, watches, i_list) {
- struct inotify_handle *ih = watch->ih;
- get_inotify_watch(watch);
- mutex_lock(&ih->mutex);
- ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
- NULL, NULL);
- inotify_remove_watch_locked(ih, watch);
- mutex_unlock(&ih->mutex);
- put_inotify_watch(watch);
- }
- mutex_unlock(&inode->inotify_mutex);
- iput(inode);
-
- spin_lock(&inode_lock);
- }
-}
-EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
-
-/**
- * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
- * @inode: inode that is about to be removed
- */
-void inotify_inode_is_dead(struct inode *inode)
-{
- struct inotify_watch *watch, *next;
-
- mutex_lock(&inode->inotify_mutex);
- list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
- struct inotify_handle *ih = watch->ih;
- mutex_lock(&ih->mutex);
- inotify_remove_watch_locked(ih, watch);
- mutex_unlock(&ih->mutex);
- }
- mutex_unlock(&inode->inotify_mutex);
-}
-EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
-
-/* Kernel Consumer API */
-
-/**
- * inotify_init - allocate and initialize an inotify instance
- * @ops: caller's inotify operations
- */
-struct inotify_handle *inotify_init(const struct inotify_operations *ops)
-{
- struct inotify_handle *ih;
-
- ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
- if (unlikely(!ih))
- return ERR_PTR(-ENOMEM);
-
- idr_init(&ih->idr);
- INIT_LIST_HEAD(&ih->watches);
- mutex_init(&ih->mutex);
- ih->last_wd = 0;
- ih->in_ops = ops;
- atomic_set(&ih->count, 0);
- get_inotify_handle(ih);
-
- return ih;
-}
-EXPORT_SYMBOL_GPL(inotify_init);
-
-/**
- * inotify_init_watch - initialize an inotify watch
- * @watch: watch to initialize
- */
-void inotify_init_watch(struct inotify_watch *watch)
-{
- INIT_LIST_HEAD(&watch->h_list);
- INIT_LIST_HEAD(&watch->i_list);
- atomic_set(&watch->count, 0);
- get_inotify_watch(watch); /* initial get */
-}
-EXPORT_SYMBOL_GPL(inotify_init_watch);
-
-/*
- * Watch removals suck violently. To kick the watch out we need (in this
- * order) inode->inotify_mutex and ih->mutex. That's fine if we have
- * a hold on inode; however, for all other cases we need to make damn sure
- * we don't race with umount. We can *NOT* just grab a reference to a
- * watch - inotify_unmount_inodes() will happily sail past it and we'll end
- * up with a reference to an inode potentially outliving its superblock.
- * Ideally we just want to grab an active reference to the superblock if we
- * can; that will make sure we won't go into inotify_unmount_inodes() until
- * we are done. Cleanup is just deactivate_super(). However, that leaves a messy
- * case - what if we *are* racing with umount() and active references to
- * superblock can't be acquired anymore? We can bump ->s_count, grab
- * ->s_umount, which will wait until the superblock is shut down and the
- * watch in question is pining for fjords.
- *
- * And yes, this is far beyond mere "not very pretty"; so's the entire
- * concept of inotify to start with.
- */
-
-/**
- * pin_to_kill - pin the watch down for removal
- * @ih: inotify handle
- * @watch: watch to kill
- *
- * Called with ih->mutex held, drops it. Possible return values:
- * 0 - nothing to do, it has died
- * 1 - remove it, drop the reference and deactivate_super()
- */
-static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
-
- if (atomic_inc_not_zero(&sb->s_active)) {
- get_inotify_watch(watch);
- mutex_unlock(&ih->mutex);
- return 1; /* the best outcome */
- }
- spin_lock(&sb_lock);
- sb->s_count++;
- spin_unlock(&sb_lock);
- mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */
- down_read(&sb->s_umount);
- /* fs is already shut down; the watch is dead */
- drop_super(sb);
- return 0;
-}
-
-static void unpin_and_kill(struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- put_inotify_watch(watch);
- deactivate_super(sb);
-}
-
-/**
- * inotify_destroy - clean up and destroy an inotify instance
- * @ih: inotify handle
- */
-void inotify_destroy(struct inotify_handle *ih)
-{
- /*
- * Destroy all of the watches for this handle. Unfortunately, not very
- * pretty. We cannot do a simple iteration over the list, because we
- * do not know the inode until we iterate to the watch. But we need to
- * hold inode->inotify_mutex before ih->mutex. The following works.
- *
- * AV: it had to become even uglier to start working ;-/
- */
- while (1) {
- struct inotify_watch *watch;
- struct list_head *watches;
- struct super_block *sb;
- struct inode *inode;
-
- mutex_lock(&ih->mutex);
- watches = &ih->watches;
- if (list_empty(watches)) {
- mutex_unlock(&ih->mutex);
- break;
- }
- watch = list_first_entry(watches, struct inotify_watch, h_list);
- sb = watch->inode->i_sb;
- if (!pin_to_kill(ih, watch))
- continue;
-
- inode = watch->inode;
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* make sure we didn't race with another list removal */
- if (likely(idr_find(&ih->idr, watch->wd))) {
- remove_watch_no_event(watch, ih);
- put_inotify_watch(watch);
- }
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- unpin_and_kill(watch);
- }
-
- /* free this handle: the put matching the get in inotify_init() */
- put_inotify_handle(ih);
-}
-EXPORT_SYMBOL_GPL(inotify_destroy);
-
-/**
- * inotify_find_watch - find an existing watch for an (ih,inode) pair
- * @ih: inotify handle
- * @inode: inode to watch
- * @watchp: pointer to existing inotify_watch
- *
- * Caller must pin given inode (via nameidata).
- */
-s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
- struct inotify_watch **watchp)
-{
- struct inotify_watch *old;
- int ret = -ENOENT;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- old = inode_find_handle(inode, ih);
- if (unlikely(old)) {
- get_inotify_watch(old); /* caller must put watch */
- *watchp = old;
- ret = old->wd;
- }
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_find_watch);
-
-/**
- * inotify_find_update_watch - find and update the mask of an existing watch
- * @ih: inotify handle
- * @inode: inode's watch to update
- * @mask: mask of events to watch
- *
- * Caller must pin given inode (via nameidata).
- */
-s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
- u32 mask)
-{
- struct inotify_watch *old;
- int mask_add = 0;
- int ret;
-
- if (mask & IN_MASK_ADD)
- mask_add = 1;
-
- /* don't allow invalid bits: we don't want flags set */
- mask &= IN_ALL_EVENTS | IN_ONESHOT;
- if (unlikely(!mask))
- return -EINVAL;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /*
- * Handle the case of re-adding a watch on an (inode,ih) pair that we
- * are already watching. We just update the mask and return its wd.
- */
- old = inode_find_handle(inode, ih);
- if (unlikely(!old)) {
- ret = -ENOENT;
- goto out;
- }
-
- if (mask_add)
- old->mask |= mask;
- else
- old->mask = mask;
- ret = old->wd;
-out:
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_find_update_watch);
-
-/**
- * inotify_add_watch - add a watch to an inotify instance
- * @ih: inotify handle
- * @watch: caller allocated watch structure
- * @inode: inode to watch
- * @mask: mask of events to watch
- *
- * Caller must pin given inode (via nameidata).
- * Caller must ensure it only calls inotify_add_watch() once per watch.
- * Calls inotify_handle_get_wd() so may sleep.
- */
-s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
- struct inode *inode, u32 mask)
-{
- int ret = 0;
- int newly_watched;
-
- /* don't allow invalid bits: we don't want flags set */
- mask &= IN_ALL_EVENTS | IN_ONESHOT;
- if (unlikely(!mask))
- return -EINVAL;
- watch->mask = mask;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* Initialize a new watch */
- ret = inotify_handle_get_wd(ih, watch);
- if (unlikely(ret))
- goto out;
- ret = watch->wd;
-
- /* save a reference to handle and bump the count to make it official */
- get_inotify_handle(ih);
- watch->ih = ih;
-
- /*
- * Save a reference to the inode and bump the ref count to make it
- * official. We hold a reference to nameidata, which makes this safe.
- */
- watch->inode = igrab(inode);
-
- /* Add the watch to the handle's and the inode's list */
- newly_watched = !inotify_inode_watched(inode);
- list_add(&watch->h_list, &ih->watches);
- list_add(&watch->i_list, &inode->inotify_watches);
- /*
- * Set child flags _after_ adding the watch, so there is no race
- * window where newly instantiated children could miss their parent's
- * watched flag.
- */
- if (newly_watched)
- set_dentry_child_flags(inode, 1);
-
-out:
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_add_watch);
-
-/**
- * inotify_clone_watch - put the watch next to existing one
- * @old: already installed watch
- * @new: new watch
- *
- * Caller must hold the inotify_mutex of inode we are dealing with;
- * it is expected to remove the old watch before unlocking the inode.
- */
-s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
-{
- struct inotify_handle *ih = old->ih;
- int ret = 0;
-
- new->mask = old->mask;
- new->ih = ih;
-
- mutex_lock(&ih->mutex);
-
- /* Initialize a new watch */
- ret = inotify_handle_get_wd(ih, new);
- if (unlikely(ret))
- goto out;
- ret = new->wd;
-
- get_inotify_handle(ih);
-
- new->inode = igrab(old->inode);
-
- list_add(&new->h_list, &ih->watches);
- list_add(&new->i_list, &old->inode->inotify_watches);
-out:
- mutex_unlock(&ih->mutex);
- return ret;
-}
-
-void inotify_evict_watch(struct inotify_watch *watch)
-{
- get_inotify_watch(watch);
- mutex_lock(&watch->ih->mutex);
- inotify_remove_watch_locked(watch->ih, watch);
- mutex_unlock(&watch->ih->mutex);
-}
-
-/**
- * inotify_rm_wd - remove a watch from an inotify instance
- * @ih: inotify handle
- * @wd: watch descriptor to remove
- *
- * Can sleep.
- */
-int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
-{
- struct inotify_watch *watch;
- struct super_block *sb;
- struct inode *inode;
-
- mutex_lock(&ih->mutex);
- watch = idr_find(&ih->idr, wd);
- if (unlikely(!watch)) {
- mutex_unlock(&ih->mutex);
- return -EINVAL;
- }
- sb = watch->inode->i_sb;
- if (!pin_to_kill(ih, watch))
- return 0;
-
- inode = watch->inode;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* make sure that we did not race */
- if (likely(idr_find(&ih->idr, wd) == watch))
- inotify_remove_watch_locked(ih, watch);
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- unpin_and_kill(watch);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(inotify_rm_wd);
-
-/**
- * inotify_rm_watch - remove a watch from an inotify instance
- * @ih: inotify handle
- * @watch: watch to remove
- *
- * Can sleep.
- */
-int inotify_rm_watch(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- return inotify_rm_wd(ih, watch->wd);
-}
-EXPORT_SYMBOL_GPL(inotify_rm_watch);
-
-/*
- * inotify_setup - core initialization function
- */
-static int __init inotify_setup(void)
-{
- BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
- BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
- BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
- BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
- BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
- BUILD_BUG_ON(IN_OPEN != FS_OPEN);
- BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
- BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
- BUILD_BUG_ON(IN_CREATE != FS_CREATE);
- BUILD_BUG_ON(IN_DELETE != FS_DELETE);
- BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
- BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
- BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
-
- BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
- BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
- BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
- BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
-
- atomic_set(&inotify_cookie, 0);
-
- return 0;
-}
-
-module_init(inotify_setup);
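
For orientation, the kernel-internal consumer API deleted above was driven
through inotify_init(), inotify_init_watch(), inotify_add_watch() and
inotify_destroy(), with events delivered via the two inotify_operations
callbacks. The following is a minimal sketch of such a consumer, not code
from this patch; the my_* names are hypothetical, and the caller is assumed
to hold a pinned inode as the kernel-doc above requires.

/* sketch only: needs <linux/inotify.h>, <linux/slab.h>, <linux/err.h> */
struct my_watch {
	struct inotify_watch watch;	/* embedded; inotify manages its refs */
	/* consumer-private state would live here */
};

static void my_handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
			    u32 cookie, const char *name, struct inode *inode)
{
	/* runs with ih->mutex held; consume or queue the event here */
}

static void my_destroy_watch(struct inotify_watch *watch)
{
	/* final put: free the containing structure */
	kfree(container_of(watch, struct my_watch, watch));
}

static const struct inotify_operations my_ops = {
	.handle_event	= my_handle_event,
	.destroy_watch	= my_destroy_watch,
};

static int my_watch_inode(struct inode *inode)
{
	struct inotify_handle *ih;
	struct my_watch *w;
	s32 wd;

	ih = inotify_init(&my_ops);
	if (IS_ERR(ih))
		return PTR_ERR(ih);

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w) {
		inotify_destroy(ih);
		return -ENOMEM;
	}

	inotify_init_watch(&w->watch);		/* takes the initial reference */
	wd = inotify_add_watch(ih, &w->watch, inode, IN_MODIFY | IN_ATTRIB);
	if (wd < 0) {
		kfree(w);		/* never registered, so free it directly */
		inotify_destroy(ih);
		return wd;
	}

	/* ... events now arrive via my_handle_event() ... */

	inotify_destroy(ih);	/* removes every watch; destroy_watch frees w */
	return 0;
}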
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
index f234f3a4c8c..b6642e4de4b 100644
--- a/fs/notify/inotify/inotify.h
+++ b/fs/notify/inotify/inotify.h
@@ -9,13 +9,12 @@ struct inotify_event_private_data {
int wd;
};
-struct inotify_inode_mark_entry {
- /* fsnotify_mark_entry MUST be the first thing */
- struct fsnotify_mark_entry fsn_entry;
+struct inotify_inode_mark {
+ struct fsnotify_mark fsn_mark;
int wd;
};
-extern void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
+extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group);
extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv);
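
Since fsn_mark is now recovered with container_of() rather than a cast, it no
longer has to be the first member, which is why the old "MUST be the first
thing" comment could be dropped. A sketch of the recovery step the updated
callers below open-code (an illustration, not a helper added by this patch):

static inline struct inotify_inode_mark *
inotify_mark(struct fsnotify_mark *fsn_mark)
{
	return container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
}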
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index e27960cd76a..a91b69a6a29 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -22,6 +22,7 @@
* General Public License for more details.
*/
+#include <linux/dcache.h> /* d_unlinked */
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
@@ -32,26 +33,84 @@
#include "inotify.h"
-static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
+/*
+ * Check if 2 events contain the same information. We do not compare private data
+ * but at this moment that isn't a problem for any known fsnotify listeners.
+ */
+static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
+{
+ if ((old->mask == new->mask) &&
+ (old->to_tell == new->to_tell) &&
+ (old->data_type == new->data_type) &&
+ (old->name_len == new->name_len)) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_INODE):
+ /* remember, after old was put on the wait_q we aren't
+ * allowed to look at the inode any more; the only thing
+ * left to check is whether the file_name is the same */
+ if (!old->name_len ||
+ !strcmp(old->file_name, new->file_name))
+ return true;
+ break;
+ case (FSNOTIFY_EVENT_PATH):
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
+ break;
+ case (FSNOTIFY_EVENT_NONE):
+ if (old->mask & FS_Q_OVERFLOW)
+ return true;
+ else if (old->mask & FS_IN_IGNORED)
+ return false;
+ return true;
+ };
+ }
+ return false;
+}
+
+static struct fsnotify_event *inotify_merge(struct list_head *list,
+ struct fsnotify_event *event)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_event_holder *last_holder;
+ struct fsnotify_event *last_event;
+
+ /* and the list better be locked by something too */
+ spin_lock(&event->lock);
+
+ last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
+ last_event = last_holder->event;
+ if (event_compare(last_event, event))
+ fsnotify_get_event(last_event);
+ else
+ last_event = NULL;
+
+ spin_unlock(&event->lock);
+
+ return last_event;
+}
+
+static int inotify_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ struct fsnotify_event *event)
+{
+ struct inotify_inode_mark *i_mark;
struct inode *to_tell;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
- int wd, ret;
+ struct fsnotify_event *added_event;
+ int wd, ret = 0;
+
+ BUG_ON(vfsmount_mark);
+
+ pr_debug("%s: group=%p event=%p to_tell=%p mask=%x\n", __func__, group,
+ event, event->to_tell, event->mask);
to_tell = event->to_tell;
- spin_lock(&to_tell->i_lock);
- entry = fsnotify_find_mark_entry(group, to_tell);
- spin_unlock(&to_tell->i_lock);
- /* race with watch removal? We already passed should_send */
- if (unlikely(!entry))
- return 0;
- ientry = container_of(entry, struct inotify_inode_mark_entry,
- fsn_entry);
- wd = ientry->wd;
+ i_mark = container_of(inode_mark, struct inotify_inode_mark,
+ fsn_mark);
+ wd = i_mark->wd;
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
if (unlikely(!event_priv))
@@ -62,48 +121,40 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
fsn_event_priv->group = group;
event_priv->wd = wd;
- ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
- if (ret) {
+ added_event = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge);
+ if (added_event) {
inotify_free_event_priv(fsn_event_priv);
- /* EEXIST says we tail matched, EOVERFLOW isn't something
- * to report up the stack. */
- if ((ret == -EEXIST) ||
- (ret == -EOVERFLOW))
- ret = 0;
+ if (!IS_ERR(added_event))
+ fsnotify_put_event(added_event);
+ else
+ ret = PTR_ERR(added_event);
}
- /*
- * If we hold the entry until after the event is on the queue
- * IN_IGNORED won't be able to pass this event in the queue
- */
- fsnotify_put_mark(entry);
+ if (inode_mark->mask & IN_ONESHOT)
+ fsnotify_destroy_mark(inode_mark);
return ret;
}
-static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
+static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
- inotify_ignored_and_remove_idr(entry, group);
+ inotify_ignored_and_remove_idr(fsn_mark, group);
}
-static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
+static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ __u32 mask, void *data, int data_type)
{
- struct fsnotify_mark_entry *entry;
- bool send;
-
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
- return false;
+ if ((inode_mark->mask & FS_EXCL_UNLINK) &&
+ (data_type == FSNOTIFY_EVENT_PATH)) {
+ struct path *path = data;
- mask = (mask & ~FS_EVENT_ON_CHILD);
- send = (entry->mask & mask);
-
- /* find took a reference */
- fsnotify_put_mark(entry);
+ if (d_unlinked(path->dentry))
+ return false;
+ }
- return send;
+ return true;
}
/*
@@ -115,18 +166,18 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
*/
static int idr_callback(int id, void *p, void *data)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_mark *fsn_mark;
+ struct inotify_inode_mark *i_mark;
static bool warned = false;
if (warned)
return 0;
warned = true;
- entry = p;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ fsn_mark = p;
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
+ WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
"idr. Probably leaking memory\n", id, p, data);
/*
@@ -135,9 +186,9 @@ static int idr_callback(int id, void *p, void *data)
* out why we got here and the panic is no worse than the original
* BUG() that was here.
*/
- if (entry)
- printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
- entry->group, entry->inode, ientry->wd);
+ if (fsn_mark)
+ printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
+ fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
return 0;
}
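
The reworked queueing contract used above: fsnotify_add_notify_event()
(changed later in this patch) returns NULL when the event was queued, an
ERR_PTR() on failure, and a referenced event when the new one was merged with
the tail or replaced by the overflow event. A condensed, hypothetical
restatement of the handler's return-value handling under that assumption:

static int queue_event(struct fsnotify_group *group,
		       struct fsnotify_event *event,
		       struct fsnotify_event_private_data *priv)
{
	struct fsnotify_event *added;

	added = fsnotify_add_notify_event(group, event, priv, inotify_merge);
	if (IS_ERR(added))
		return PTR_ERR(added);		/* e.g. -ENOMEM for a holder */
	if (added)
		fsnotify_put_event(added);	/* merged or overflow: drop ref */
	return 0;				/* NULL: queued normally */
}

The real handler additionally frees its private data whenever the event was
not queued, as the hunk above shows.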
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index e46ca685b9b..bf7f6d776c3 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -46,17 +46,11 @@
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
-int inotify_max_user_watches __read_mostly;
+static int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
-/*
- * When inotify registers a new group it increments this and uses that
- * value as an offset to set the fsnotify group "name" and priority.
- */
-static atomic_t inotify_grp_num;
-
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
@@ -96,11 +90,14 @@ static inline __u32 inotify_arg_to_mask(u32 arg)
{
__u32 mask;
- /* everything should accept their own ignored and cares about children */
- mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);
+ /*
+ * every watch should accept its own ignored event, care about children,
+ * and receive events when the inode is unmounted
+ */
+ mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
/* mask off the flags used to open the fd */
- mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));
+ mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
return mask;
}
@@ -144,6 +141,8 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
event = fsnotify_peek_notify_event(group);
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
if (event->name_len)
event_size += roundup(event->name_len + 1, event_size);
@@ -173,6 +172,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
size_t event_size = sizeof(struct inotify_event);
size_t name_len = 0;
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
/* we get the inotify watch descriptor from the event private data */
spin_lock(&event->lock);
fsn_priv = fsnotify_remove_priv_from_event(group, event);
@@ -245,6 +246,8 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
+ pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
+
if (kevent) {
ret = PTR_ERR(kevent);
if (IS_ERR(kevent))
@@ -289,6 +292,8 @@ static int inotify_release(struct inode *ignored, struct file *file)
struct fsnotify_group *group = file->private_data;
struct user_struct *user = group->inotify_data.user;
+ pr_debug("%s: group=%p\n", __func__, group);
+
fsnotify_clear_marks_by_group(group);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
@@ -312,6 +317,8 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
group = file->private_data;
p = (void __user *) arg;
+ pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
+
switch (cmd) {
case FIONREAD:
mutex_lock(&group->notification_mutex);
@@ -357,59 +364,159 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error;
}
+static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
+ int *last_wd,
+ struct inotify_inode_mark *i_mark)
+{
+ int ret;
+
+ do {
+ if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
+ return -ENOMEM;
+
+ spin_lock(idr_lock);
+ ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
+ &i_mark->wd);
+ /* we added the mark to the idr, take a reference */
+ if (!ret) {
+ *last_wd = i_mark->wd;
+ fsnotify_get_mark(&i_mark->fsn_mark);
+ }
+ spin_unlock(idr_lock);
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
+ int wd)
+{
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ struct inotify_inode_mark *i_mark;
+
+ assert_spin_locked(idr_lock);
+
+ i_mark = idr_find(idr, wd);
+ if (i_mark) {
+ struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
+
+ fsnotify_get_mark(fsn_mark);
+ /* One ref for being in the idr, one ref we just took */
+ BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+ }
+
+ return i_mark;
+}
+
+static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
+ int wd)
+{
+ struct inotify_inode_mark *i_mark;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+
+ spin_lock(idr_lock);
+ i_mark = inotify_idr_find_locked(group, wd);
+ spin_unlock(idr_lock);
+
+ return i_mark;
+}
+
+static void do_inotify_remove_from_idr(struct fsnotify_group *group,
+ struct inotify_inode_mark *i_mark)
+{
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ int wd = i_mark->wd;
+
+ assert_spin_locked(idr_lock);
+
+ idr_remove(idr, wd);
+
+ /* removed from the idr, drop that ref */
+ fsnotify_put_mark(&i_mark->fsn_mark);
+}
+
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group,
- struct inotify_inode_mark_entry *ientry)
+ struct inotify_inode_mark *i_mark)
{
- struct idr *idr;
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *found_ientry;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ struct inotify_inode_mark *found_i_mark = NULL;
int wd;
- spin_lock(&group->inotify_data.idr_lock);
- idr = &group->inotify_data.idr;
- wd = ientry->wd;
+ spin_lock(idr_lock);
+ wd = i_mark->wd;
- if (wd == -1)
+ /*
+ * Does this i_mark think it is in the idr? We shouldn't get called
+ * if it wasn't...
+ */
+ if (wd == -1) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
+ }
- entry = idr_find(&group->inotify_data.idr, wd);
- if (unlikely(!entry))
+ /* Let's look in the idr to see if we find it */
+ found_i_mark = inotify_idr_find_locked(group, wd);
+ if (unlikely(!found_i_mark)) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
+ }
- found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
- if (unlikely(found_ientry != ientry)) {
- /* We found an entry in the idr with the right wd, but it's
- * not the entry we were told to remove. eparis seriously
- * fucked up somewhere. */
- WARN_ON(1);
- ientry->wd = -1;
+ /*
+ * We found a mark in the idr at the right wd, but it's
+ * not the mark we were told to remove. eparis seriously
+ * fucked up somewhere.
+ */
+ if (unlikely(found_i_mark != i_mark)) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
+ "mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
+ "found_i_mark->group=%p found_i_mark->inode=%p\n",
+ __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
+ i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
+ found_i_mark->fsn_mark.group,
+ found_i_mark->fsn_mark.i.inode);
goto out;
}
- /* One ref for being in the idr, one ref held by the caller */
- BUG_ON(atomic_read(&entry->refcnt) < 2);
-
- idr_remove(idr, wd);
- ientry->wd = -1;
+ /*
+ * One ref for being in the idr,
+ * one ref held by the caller trying to kill us,
+ * one ref grabbed by inotify_idr_find.
+ */
+ if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
+ printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
+ /* we can't really recover with bad ref counting... */
+ BUG();
+ }
- /* removed from the idr, drop that ref */
- fsnotify_put_mark(entry);
+ do_inotify_remove_from_idr(group, i_mark);
out:
- spin_unlock(&group->inotify_data.idr_lock);
+ /* match the ref taken by inotify_idr_find_locked() */
+ if (found_i_mark)
+ fsnotify_put_mark(&found_i_mark->fsn_mark);
+ i_mark->wd = -1;
+ spin_unlock(idr_lock);
}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr.
*/
-void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
+void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
- struct inotify_inode_mark_entry *ientry;
- struct fsnotify_event *ignored_event;
+ struct inotify_inode_mark *i_mark;
+ struct fsnotify_event *ignored_event, *notify_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
@@ -420,7 +527,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
if (!ignored_event)
return;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
@@ -429,37 +536,44 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
- event_priv->wd = ientry->wd;
-
- ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
- if (ret)
+ event_priv->wd = i_mark->wd;
+
+ notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
+ if (notify_event) {
+ if (IS_ERR(notify_event))
+ ret = PTR_ERR(notify_event);
+ else
+ fsnotify_put_event(notify_event);
inotify_free_event_priv(fsn_event_priv);
+ }
skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
- /* remove this entry from the idr */
- inotify_remove_from_idr(group, ientry);
+ /* remove this mark from the idr */
+ inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
-static void inotify_free_mark(struct fsnotify_mark_entry *entry)
+static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
- struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;
+ struct inotify_inode_mark *i_mark;
+
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- kmem_cache_free(inotify_inode_mark_cachep, ientry);
+ kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_mark *fsn_mark;
+ struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
@@ -467,52 +581,43 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
- if (unlikely(!mask))
+ if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
return -ENOENT;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- spin_lock(&entry->lock);
+ spin_lock(&fsn_mark->lock);
- old_mask = entry->mask;
- if (add) {
- entry->mask |= mask;
- new_mask = entry->mask;
- } else {
- entry->mask = mask;
- new_mask = entry->mask;
- }
+ old_mask = fsn_mark->mask;
+ if (add)
+ fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
+ else
+ fsnotify_set_mark_mask_locked(fsn_mark, mask);
+ new_mask = fsn_mark->mask;
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
- /* more bits in this entry than the inode's mask? */
+ /* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
- /* more bits in this entry than the group? */
- int do_group = (new_mask & ~group->mask);
- /* update the inode with this new entry */
+ /* update the inode with this new fsn_mark */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
- /* update the group mask with the new mask */
- if (dropped || do_group)
- fsnotify_recalc_group_mask(group);
}
/* return the wd */
- ret = ientry->wd;
+ ret = i_mark->wd;
- /* match the get from fsnotify_find_mark_entry() */
- fsnotify_put_mark(entry);
+ /* match the get from fsnotify_find_mark() */
+ fsnotify_put_mark(fsn_mark);
return ret;
}
@@ -521,73 +626,51 @@ static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
- struct inotify_inode_mark_entry *tmp_ientry;
+ struct inotify_inode_mark *tmp_i_mark;
__u32 mask;
int ret;
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
- if (unlikely(!mask))
+ if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
- tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
- if (unlikely(!tmp_ientry))
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ if (unlikely(!tmp_i_mark))
return -ENOMEM;
- fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
- tmp_ientry->fsn_entry.mask = mask;
- tmp_ientry->wd = -1;
+ fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
+ tmp_i_mark->fsn_mark.mask = mask;
+ tmp_i_mark->wd = -1;
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
-retry:
- ret = -ENOMEM;
- if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
- goto out_err;
- /* we are putting the mark on the idr, take a reference */
- fsnotify_get_mark(&tmp_ientry->fsn_entry);
-
- spin_lock(&group->inotify_data.idr_lock);
- ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
- group->inotify_data.last_wd+1,
- &tmp_ientry->wd);
- spin_unlock(&group->inotify_data.idr_lock);
- if (ret) {
- /* we didn't get on the idr, drop the idr reference */
- fsnotify_put_mark(&tmp_ientry->fsn_entry);
-
- /* idr was out of memory allocate and try again */
- if (ret == -EAGAIN)
- goto retry;
+ ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
+ tmp_i_mark);
+ if (ret)
goto out_err;
- }
/* we are on the idr, now get on the inode */
- ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+ ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
if (ret) {
/* we failed to get on the inode, get off the idr */
- inotify_remove_from_idr(group, tmp_ientry);
+ inotify_remove_from_idr(group, tmp_i_mark);
goto out_err;
}
- /* update the idr hint, who cares about races, it's just a hint */
- group->inotify_data.last_wd = tmp_ientry->wd;
-
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches);
- /* return the watch descriptor for this new entry */
- ret = tmp_ientry->wd;
-
- /* if this mark added a new event update the group mask */
- if (mask & ~group->mask)
- fsnotify_recalc_group_mask(group);
+ /* return the watch descriptor for this new mark */
+ ret = tmp_i_mark->wd;
out_err:
- /* match the ref from fsnotify_init_markentry() */
- fsnotify_put_mark(&tmp_ientry->fsn_entry);
+ /* match the ref from fsnotify_init_mark() */
+ fsnotify_put_mark(&tmp_i_mark->fsn_mark);
return ret;
}
@@ -616,11 +699,8 @@ retry:
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
- unsigned int grp_num;
- /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
- grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
- group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
+ group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
@@ -726,7 +806,7 @@ fput_and_out:
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct fsnotify_group *group;
- struct fsnotify_mark_entry *entry;
+ struct inotify_inode_mark *i_mark;
struct file *filp;
int ret = 0, fput_needed;
@@ -735,25 +815,23 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
return -EBADF;
/* verify that this is indeed an inotify instance */
- if (unlikely(filp->f_op != &inotify_fops)) {
- ret = -EINVAL;
+ ret = -EINVAL;
+ if (unlikely(filp->f_op != &inotify_fops))
goto out;
- }
group = filp->private_data;
- spin_lock(&group->inotify_data.idr_lock);
- entry = idr_find(&group->inotify_data.idr, wd);
- if (unlikely(!entry)) {
- spin_unlock(&group->inotify_data.idr_lock);
- ret = -EINVAL;
+ ret = -EINVAL;
+ i_mark = inotify_idr_find(group, wd);
+ if (unlikely(!i_mark))
goto out;
- }
- fsnotify_get_mark(entry);
- spin_unlock(&group->inotify_data.idr_lock);
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
+ ret = 0;
+
+ fsnotify_destroy_mark(&i_mark->fsn_mark);
+
+ /* match ref taken by inotify_idr_find */
+ fsnotify_put_mark(&i_mark->fsn_mark);
out:
fput_light(filp, fput_needed);
@@ -767,7 +845,28 @@ out:
*/
static int __init inotify_user_setup(void)
{
- inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
+ BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
+ BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
+ BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
+ BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
+ BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
+ BUILD_BUG_ON(IN_OPEN != FS_OPEN);
+ BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
+ BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
+ BUILD_BUG_ON(IN_CREATE != FS_CREATE);
+ BUILD_BUG_ON(IN_DELETE != FS_DELETE);
+ BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
+ BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
+ BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
+ BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
+ BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
+ BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
+ BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
+ BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
+
+ BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
+
+ inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
inotify_max_queued_events = 16384;
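
inotify_add_to_idr() above follows the standard two-step idr pattern of this
kernel generation: preallocate outside the spinlock with idr_pre_get(), then
retry idr_get_new_above() when a concurrent allocator has consumed the
preallocated node. A standalone sketch of just that loop (hypothetical
helper, not part of the patch):

/* returns the new id (>= last_wd + 1) or a negative errno */
static int allocate_id(struct idr *idr, spinlock_t *lock,
		       void *ptr, int last_wd)
{
	int ret, id;

	do {
		if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
			return -ENOMEM;
		spin_lock(lock);
		ret = idr_get_new_above(idr, ptr, last_wd + 1, &id);
		spin_unlock(lock);
	} while (ret == -EAGAIN);	/* preallocation stolen; try again */

	return ret ? ret : id;
}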
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
new file mode 100644
index 00000000000..325185e514b
--- /dev/null
+++ b/fs/notify/mark.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * fsnotify inode mark locking, lifetime, and refcounting
+ *
+ * REFCNT:
+ * The mark->refcnt tells how many "things" in the kernel currently are
+ * referencing this object. The object typically will live inside the kernel
+ * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
+ * which can find this object while holding the appropriate locks can take a
+ * reference, and the object itself is guaranteed to survive until the
+ * reference is dropped.
+ *
+ * LOCKING:
+ * There are 3 spinlocks involved with fsnotify inode marks and they MUST
+ * be taken in order as follows:
+ *
+ * mark->lock
+ * group->mark_lock
+ * inode->i_lock
+ *
+ * mark->lock protects 2 things, mark->group and mark->inode. You must hold
+ * that lock to dereference either of these things (they could be NULL even with
+ * the lock)
+ *
+ * group->mark_lock protects the marks_list anchored inside a given group
+ * and each mark is hooked via the g_list. It also sorta protects the
+ * free_g_list, which when used is anchored by a private list on the stack of the
+ * task which held the group->mark_lock.
+ *
+ * inode->i_lock protects the i_fsnotify_marks list anchored inside a
+ * given inode and each mark is hooked via the i_list. (and sorta the
+ * free_i_list)
+ *
+ *
+ * LIFETIME:
+ * Inode marks survive between when they are added to an inode and when their
+ * refcnt==0.
+ *
+ * The inode mark can be cleared for a number of different reasons including:
+ * - The inode is unlinked for the last time. (fsnotify_inode_remove)
+ * - The inode is being evicted from cache. (fsnotify_inode_delete)
+ * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
+ * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
+ * - The fsnotify_group associated with the mark is going away and all such marks
+ * need to be cleaned up. (fsnotify_clear_marks_by_group)
+ *
+ * Worst case we are given an inode and need to clean up all the marks on that
+ * inode. We take i_lock and walk the i_fsnotify_marks safely. For each
+ * mark on the list we take a reference (so the mark can't disappear under us).
+ * We remove that mark from the inode's list of marks and we add this mark to a
+ * private list anchored on the stack using i_free_list. At this point we no
+ * longer fear anything finding the mark using the inode's list of marks.
+ *
+ * We can safely and locklessly run the private list on the stack of everything
+ * we just detached from the original inode. For each mark on the private list
+ * we grab mark->lock and can thus dereference mark->group and mark->inode. If
+ * we see the group and inode are not NULL we take those locks. Now holding all
+ * 3 locks we can completely prevent other tasks from finding the mark in the
+ * future. Remember, 10 things might already be referencing this mark, but they
+ * had better be holding a ref. We drop the reference we took before we unhooked
+ * the mark from the inode. When the ref hits 0 we can free the mark.
+ *
+ * Very similarly for freeing by group, except we use free_g_list.
+ *
+ * This has the very interesting property of being able to run concurrently with
+ * any (or all) other directions.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/writeback.h> /* for inode_lock */
+
+#include <asm/atomic.h>
+
+#include <linux/fsnotify_backend.h>
+#include "fsnotify.h"
+
+struct srcu_struct fsnotify_mark_srcu;
+static DEFINE_SPINLOCK(destroy_lock);
+static LIST_HEAD(destroy_list);
+static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);
+
+void fsnotify_get_mark(struct fsnotify_mark *mark)
+{
+ atomic_inc(&mark->refcnt);
+}
+
+void fsnotify_put_mark(struct fsnotify_mark *mark)
+{
+ if (atomic_dec_and_test(&mark->refcnt))
+ mark->free_mark(mark);
+}
+
+/*
+ * Any time a mark is getting freed we end up here.
+ * The caller had better be holding a reference to this mark so we don't actually
+ * do the final put under the mark->lock
+ */
+void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+{
+ struct fsnotify_group *group;
+ struct inode *inode = NULL;
+
+ spin_lock(&mark->lock);
+
+ group = mark->group;
+
+ /* something else already called this function on this mark */
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
+ spin_unlock(&mark->lock);
+ return;
+ }
+
+ mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+
+ /* 1 from caller and 1 for being on i_list/g_list */
+ BUG_ON(atomic_read(&mark->refcnt) < 2);
+
+ spin_lock(&group->mark_lock);
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
+ inode = mark->i.inode;
+ fsnotify_destroy_inode_mark(mark);
+ } else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
+ fsnotify_destroy_vfsmount_mark(mark);
+ else
+ BUG();
+
+ list_del_init(&mark->g_list);
+
+ spin_unlock(&group->mark_lock);
+ spin_unlock(&mark->lock);
+
+ spin_lock(&destroy_lock);
+ list_add(&mark->destroy_list, &destroy_list);
+ spin_unlock(&destroy_lock);
+ wake_up(&destroy_waitq);
+
+ /*
+ * Some groups like to know that marks are being freed. This is a
+ * callback to the group function to let it know that this mark
+ * is being freed.
+ */
+ if (group->ops->freeing_mark)
+ group->ops->freeing_mark(mark, group);
+
+ /*
+ * __fsnotify_update_child_dentry_flags(inode);
+ *
+ * I really want to call that, but we can't, we have no idea if the inode
+ * still exists the second we drop the mark->lock.
+ *
+ * The next time an event arrives at this inode from one of its children,
+ * __fsnotify_parent will see that the inode doesn't care about its
+ * children and will update all of these flags then. So really this
+ * is just a lazy update (and could be a perf win...)
+ */
+
+ if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
+ iput(inode);
+
+ /*
+ * it's possible that this group tried to destroy itself, but this
+ * mark was simultaneously being freed by the inode. If that's the
+ * case, we finish freeing the group here.
+ */
+ if (unlikely(atomic_dec_and_test(&group->num_marks)))
+ fsnotify_final_destroy_group(group);
+}
+
+void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
+{
+ assert_spin_locked(&mark->lock);
+
+ mark->mask = mask;
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
+ fsnotify_set_inode_mark_mask_locked(mark, mask);
+}
+
+void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
+{
+ assert_spin_locked(&mark->lock);
+
+ mark->ignored_mask = mask;
+}
+
+/*
+ * Attach an initialized mark to a given group and fs object.
+ * These marks may be used by the fsnotify backend to determine which
+ * event types should be delivered to which group.
+ */
+int fsnotify_add_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, int allow_dups)
+{
+ int ret = 0;
+
+ BUG_ON(inode && mnt);
+ BUG_ON(!inode && !mnt);
+
+ /*
+ * LOCKING ORDER!!!!
+ * mark->lock
+ * group->mark_lock
+ * inode->i_lock
+ */
+ spin_lock(&mark->lock);
+ spin_lock(&group->mark_lock);
+
+ mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;
+
+ mark->group = group;
+ list_add(&mark->g_list, &group->marks_list);
+ atomic_inc(&group->num_marks);
+ fsnotify_get_mark(mark); /* for i_list and g_list */
+
+ if (inode) {
+ ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
+ if (ret)
+ goto err;
+ } else if (mnt) {
+ ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
+ if (ret)
+ goto err;
+ } else {
+ BUG();
+ }
+
+ spin_unlock(&group->mark_lock);
+
+ /* this will pin the object if appropriate */
+ fsnotify_set_mark_mask_locked(mark, mark->mask);
+
+ spin_unlock(&mark->lock);
+
+ if (inode)
+ __fsnotify_update_child_dentry_flags(inode);
+
+ return ret;
+err:
+ mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
+ list_del_init(&mark->g_list);
+ mark->group = NULL;
+ atomic_dec(&group->num_marks);
+
+ spin_unlock(&group->mark_lock);
+ spin_unlock(&mark->lock);
+
+ spin_lock(&destroy_lock);
+ list_add(&mark->destroy_list, &destroy_list);
+ spin_unlock(&destroy_lock);
+ wake_up(&destroy_waitq);
+
+ return ret;
+}
+
+/*
+ * clear any marks in a group in which mark->flags & flags is true
+ */
+void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
+ unsigned int flags)
+{
+ struct fsnotify_mark *lmark, *mark;
+ LIST_HEAD(free_list);
+
+ spin_lock(&group->mark_lock);
+ list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+ if (mark->flags & flags) {
+ list_add(&mark->free_g_list, &free_list);
+ list_del_init(&mark->g_list);
+ fsnotify_get_mark(mark);
+ }
+ }
+ spin_unlock(&group->mark_lock);
+
+ list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
+ }
+}
+
+/*
+ * Given a group, destroy all of the marks associated with that group.
+ */
+void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
+}
+
+void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
+{
+ assert_spin_locked(&old->lock);
+ new->i.inode = old->i.inode;
+ new->m.mnt = old->m.mnt;
+ new->group = old->group;
+ new->mask = old->mask;
+ new->free_mark = old->free_mark;
+}
+
+/*
+ * Nothing fancy, just initialize lists and locks and counters.
+ */
+void fsnotify_init_mark(struct fsnotify_mark *mark,
+ void (*free_mark)(struct fsnotify_mark *mark))
+{
+ memset(mark, 0, sizeof(*mark));
+ spin_lock_init(&mark->lock);
+ atomic_set(&mark->refcnt, 1);
+ mark->free_mark = free_mark;
+}
+
+static int fsnotify_mark_destroy(void *ignored)
+{
+ struct fsnotify_mark *mark, *next;
+ LIST_HEAD(private_destroy_list);
+
+ for (;;) {
+ spin_lock(&destroy_lock);
+ /* exchange the list head */
+ list_replace_init(&destroy_list, &private_destroy_list);
+ spin_unlock(&destroy_lock);
+
+ synchronize_srcu(&fsnotify_mark_srcu);
+
+ list_for_each_entry_safe(mark, next, &private_destroy_list, destroy_list) {
+ list_del_init(&mark->destroy_list);
+ fsnotify_put_mark(mark);
+ }
+
+ wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
+ }
+
+ return 0;
+}
+
+static int __init fsnotify_mark_init(void)
+{
+ struct task_struct *thread;
+
+ thread = kthread_run(fsnotify_mark_destroy, NULL,
+ "fsnotify_mark");
+ if (IS_ERR(thread))
+ panic("unable to start fsnotify mark destruction thread.");
+
+ return 0;
+}
+device_initcall(fsnotify_mark_init);
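
Putting the pieces of mark.c together, a backend typically drives a mark
through init, add, destroy and put. The sketch below is hypothetical (the
my_* names are not in this patch) and assumes the group already holds its own
reference and num_marks count, as fsnotify groups do.

static void my_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);	/* called once the last reference is dropped */
}

static int my_mark_inode(struct fsnotify_group *group, struct inode *inode)
{
	struct fsnotify_mark *mark;
	int ret;

	mark = kmalloc(sizeof(*mark), GFP_KERNEL);
	if (!mark)
		return -ENOMEM;

	fsnotify_init_mark(mark, my_free_mark);	/* zeroes it, refcnt = 1 */
	mark->mask = FS_MODIFY;	/* add_mark applies this via the mask setter */

	/* attach to an inode (mnt == NULL), disallowing duplicates */
	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
	if (ret) {
		fsnotify_put_mark(mark);	/* drop our init reference */
		return ret;
	}

	/* ... events flow until teardown ... */

	fsnotify_destroy_mark(mark);	/* unhook from inode and group */
	fsnotify_put_mark(mark);	/* the destroy thread drops the rest */
	return 0;
}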
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index b8bf53b4c10..f39260f8f86 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -56,7 +56,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
 * it is needed. Its refcnt is set to 1 at kernel init time and will never
 * get set to 0, so it will never get 'freed'
*/
-static struct fsnotify_event q_overflow_event;
+static struct fsnotify_event *q_overflow_event;
static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
/**
@@ -87,12 +87,15 @@ void fsnotify_put_event(struct fsnotify_event *event)
return;
if (atomic_dec_and_test(&event->refcnt)) {
+ pr_debug("%s: event=%p\n", __func__, event);
+
if (event->data_type == FSNOTIFY_EVENT_PATH)
path_put(&event->path);
BUG_ON(!list_empty(&event->private_data_list));
kfree(event->file_name);
+ put_pid(event->tgid);
kmem_cache_free(fsnotify_event_cachep, event);
}
}
@@ -104,7 +107,8 @@ struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
{
- kmem_cache_free(fsnotify_event_holder_cachep, holder);
+ if (holder)
+ kmem_cache_free(fsnotify_event_holder_cachep, holder);
}
/*
@@ -129,53 +133,20 @@ struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnot
}
/*
- * Check if 2 events contain the same information. We do not compare private data
- * but at this moment that isn't a problem for any know fsnotify listeners.
- */
-static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
-{
- if ((old->mask == new->mask) &&
- (old->to_tell == new->to_tell) &&
- (old->data_type == new->data_type) &&
- (old->name_len == new->name_len)) {
- switch (old->data_type) {
- case (FSNOTIFY_EVENT_INODE):
- /* remember, after old was put on the wait_q we aren't
- * allowed to look at the inode any more, only thing
- * left to check was if the file_name is the same */
- if (!old->name_len ||
- !strcmp(old->file_name, new->file_name))
- return true;
- break;
- case (FSNOTIFY_EVENT_PATH):
- if ((old->path.mnt == new->path.mnt) &&
- (old->path.dentry == new->path.dentry))
- return true;
- break;
- case (FSNOTIFY_EVENT_NONE):
- if (old->mask & FS_Q_OVERFLOW)
- return true;
- else if (old->mask & FS_IN_IGNORED)
- return false;
- return false;
- };
- }
- return false;
-}
-
-/*
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. If the event is successfully added to the
* group's notification queue, a reference is taken on event.
*/
-int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
- struct fsnotify_event_private_data *priv)
+struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
+ struct fsnotify_event_private_data *priv,
+ struct fsnotify_event *(*merge)(struct list_head *,
+ struct fsnotify_event *))
{
+ struct fsnotify_event *return_event = NULL;
struct fsnotify_event_holder *holder = NULL;
struct list_head *list = &group->notification_list;
- struct fsnotify_event_holder *last_holder;
- struct fsnotify_event *last_event;
- int ret = 0;
+
+ pr_debug("%s: group=%p event=%p priv=%p\n", __func__, group, event, priv);
/*
* There is one fsnotify_event_holder embedded inside each fsnotify_event.
@@ -189,18 +160,40 @@ int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_even
alloc_holder:
holder = fsnotify_alloc_event_holder();
if (!holder)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
- event = &q_overflow_event;
- ret = -EOVERFLOW;
+ event = q_overflow_event;
+
+ /*
+ * we need to return the overflow event
+ * which means we need a ref
+ */
+ fsnotify_get_event(event);
+ return_event = event;
+
/* sorry, no private data on the overflow event */
priv = NULL;
}
+ if (!list_empty(list) && merge) {
+ struct fsnotify_event *tmp;
+
+ tmp = merge(list, event);
+ if (tmp) {
+ mutex_unlock(&group->notification_mutex);
+
+ if (return_event)
+ fsnotify_put_event(return_event);
+ if (holder != &event->holder)
+ fsnotify_destroy_event_holder(holder);
+ return tmp;
+ }
+ }
+
spin_lock(&event->lock);
if (list_empty(&event->holder.event_list)) {
@@ -212,19 +205,13 @@ alloc_holder:
* event holder was used, go back and get a new one */
spin_unlock(&event->lock);
mutex_unlock(&group->notification_mutex);
- goto alloc_holder;
- }
- if (!list_empty(list)) {
- last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
- last_event = last_holder->event;
- if (event_compare(last_event, event)) {
- spin_unlock(&event->lock);
- mutex_unlock(&group->notification_mutex);
- if (holder != &event->holder)
- fsnotify_destroy_event_holder(holder);
- return -EEXIST;
+ if (return_event) {
+ fsnotify_put_event(return_event);
+ return_event = NULL;
}
+
+ goto alloc_holder;
}
group->q_len++;
@@ -238,7 +225,7 @@ alloc_holder:
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
- return ret;
+ return return_event;
}
/*
@@ -253,6 +240,8 @@ struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group
BUG_ON(!mutex_is_locked(&group->notification_mutex));
+ pr_debug("%s: group=%p\n", __func__, group);
+
holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
event = holder->event;
@@ -314,25 +303,82 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
static void initialize_event(struct fsnotify_event *event)
{
- event->holder.event = NULL;
INIT_LIST_HEAD(&event->holder.event_list);
atomic_set(&event->refcnt, 1);
spin_lock_init(&event->lock);
- event->path.dentry = NULL;
- event->path.mnt = NULL;
- event->inode = NULL;
- event->data_type = FSNOTIFY_EVENT_NONE;
-
INIT_LIST_HEAD(&event->private_data_list);
+}
+
+/*
+ * Caller damn well better be holding whatever mutex is protecting the
+ * old_holder->event_list and the new_event must be a clean event which
+ * cannot be found anywhere else in the kernel.
+ */
+int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
+ struct fsnotify_event *new_event)
+{
+ struct fsnotify_event *old_event = old_holder->event;
+ struct fsnotify_event_holder *new_holder = &new_event->holder;
- event->to_tell = NULL;
+ enum event_spinlock_class {
+ SPINLOCK_OLD,
+ SPINLOCK_NEW,
+ };
- event->file_name = NULL;
- event->name_len = 0;
+ pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, new_event);
- event->sync_cookie = 0;
+ /*
+ * if the new_event's embedded holder is in use someone
+ * screwed up and didn't give us a clean new event.
+ */
+ BUG_ON(!list_empty(&new_holder->event_list));
+
+ spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
+ spin_lock_nested(&new_event->lock, SPINLOCK_NEW);
+
+ new_holder->event = new_event;
+ list_replace_init(&old_holder->event_list, &new_holder->event_list);
+
+ spin_unlock(&new_event->lock);
+ spin_unlock(&old_event->lock);
+
+ /* old_holder == &old_event->holder means we are referenced through the embedded holder */
+ if (old_holder != &old_event->holder)
+ fsnotify_destroy_event_holder(old_holder);
+
+ fsnotify_get_event(new_event); /* on the list take reference */
+ fsnotify_put_event(old_event); /* off the list, drop reference */
+
+ return 0;
+}
+
+struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event)
+{
+ struct fsnotify_event *event;
+
+ event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
+ if (!event)
+ return NULL;
+
+ pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, event);
+
+ memcpy(event, old_event, sizeof(*event));
+ initialize_event(event);
+
+ if (event->name_len) {
+ event->file_name = kstrdup(old_event->file_name, GFP_KERNEL);
+ if (!event->file_name) {
+ kmem_cache_free(fsnotify_event_cachep, event);
+ return NULL;
+ }
+ }
+ event->tgid = get_pid(old_event->tgid);
+ if (event->data_type == FSNOTIFY_EVENT_PATH)
+ path_get(&event->path);
+
+ return event;
}
/*
@@ -348,15 +394,18 @@ static void initialize_event(struct fsnotify_event *event)
* @name the filename, if available
*/
struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
- int data_type, const char *name, u32 cookie,
- gfp_t gfp)
+ int data_type, const unsigned char *name,
+ u32 cookie, gfp_t gfp)
{
struct fsnotify_event *event;
- event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
+ event = kmem_cache_zalloc(fsnotify_event_cachep, gfp);
if (!event)
return NULL;
+ pr_debug("%s: event=%p to_tell=%p mask=%x data=%p data_type=%d\n",
+ __func__, event, to_tell, mask, data, data_type);
+
initialize_event(event);
if (name) {
@@ -368,30 +417,21 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
event->name_len = strlen(event->file_name);
}
+ event->tgid = get_pid(task_tgid(current));
event->sync_cookie = cookie;
event->to_tell = to_tell;
+ event->data_type = data_type;
switch (data_type) {
- case FSNOTIFY_EVENT_FILE: {
- struct file *file = data;
- struct path *path = &file->f_path;
- event->path.dentry = path->dentry;
- event->path.mnt = path->mnt;
- path_get(&event->path);
- event->data_type = FSNOTIFY_EVENT_PATH;
- break;
- }
case FSNOTIFY_EVENT_PATH: {
struct path *path = data;
event->path.dentry = path->dentry;
event->path.mnt = path->mnt;
path_get(&event->path);
- event->data_type = FSNOTIFY_EVENT_PATH;
break;
}
case FSNOTIFY_EVENT_INODE:
event->inode = data;
- event->data_type = FSNOTIFY_EVENT_INODE;
break;
case FSNOTIFY_EVENT_NONE:
event->inode = NULL;
@@ -412,8 +452,11 @@ __init int fsnotify_notification_init(void)
fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
- initialize_event(&q_overflow_event);
- q_overflow_event.mask = FS_Q_OVERFLOW;
+ q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL,
+ FSNOTIFY_EVENT_NONE, NULL, 0,
+ GFP_KERNEL);
+ if (!q_overflow_event)
+ panic("unable to allocate fsnotify q_overflow_event\n");
return 0;
}
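
With this change fsnotify_add_notify_event() reports failure through its pointer return value, so callers must use the kernel's ERR_PTR()/IS_ERR() encoding rather than testing an int. Here is a minimal userspace re-creation of that encoding (the two helpers mirror include/linux/err.h; add_event() and its failure knob are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* encode a small negative errno in the top of the pointer space,
 * which is assumed never to hold a valid object, as on Linux */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct event { int mask; };

/* like the reworked fsnotify_add_notify_event(): a pointer on success,
 * an encoded errno on failure, NULL when there is nothing to hand back */
static struct event *add_event(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);
        return calloc(1, sizeof(struct event));
}

int main(void)
{
        struct event *ev = add_event(1);

        if (IS_ERR(ev))
                printf("add_event failed: %ld\n", PTR_ERR(ev));
        ev = add_event(0);
        if (!IS_ERR(ev) && ev)
                printf("got event %p\n", (void *)ev);
        free(ev);
        return 0;
}
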
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
new file mode 100644
index 00000000000..56772b578fb
--- /dev/null
+++ b/fs/notify/vfsmount_mark.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/writeback.h> /* for inode_lock */
+
+#include <asm/atomic.h>
+
+#include <linux/fsnotify_backend.h>
+#include "fsnotify.h"
+
+void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark, *lmark;
+ struct hlist_node *pos, *n;
+ LIST_HEAD(free_list);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+ hlist_for_each_entry_safe(mark, pos, n, &mnt->mnt_fsnotify_marks, m.m_list) {
+ list_add(&mark->m.free_m_list, &free_list);
+ hlist_del_init_rcu(&mark->m.m_list);
+ fsnotify_get_mark(mark);
+ }
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
+ }
+}
+
+void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_VFSMOUNT);
+}
+
+/*
+ * Recalculate the mask of events relevant to a given vfsmount; the caller
+ * must hold mnt->mnt_root->d_lock.
+ */
+static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+ struct hlist_node *pos;
+ __u32 new_mask = 0;
+
+ assert_spin_locked(&mnt->mnt_root->d_lock);
+
+ hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list)
+ new_mask |= mark->mask;
+ mnt->mnt_fsnotify_mask = new_mask;
+}
+
+/*
+ * Recalculate mnt->mnt_fsnotify_mask, i.e. the union of all FS_* event types
+ * that any notifier is interested in hearing about for this mount point.
+ */
+void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
+{
+ spin_lock(&mnt->mnt_root->d_lock);
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+ spin_unlock(&mnt->mnt_root->d_lock);
+}
+
+void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
+{
+ struct vfsmount *mnt = mark->m.mnt;
+
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&mark->group->mark_lock);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+
+ hlist_del_init_rcu(&mark->m.m_list);
+ mark->m.mnt = NULL;
+
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+
+ spin_unlock(&mnt->mnt_root->d_lock);
+}
+
+static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
+ struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+ struct hlist_node *pos;
+
+ assert_spin_locked(&mnt->mnt_root->d_lock);
+
+ hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) {
+ if (mark->group == group) {
+ fsnotify_get_mark(mark);
+ return mark;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Given a group and vfsmount, find the mark associated with that combination.
+ * If found, take a reference to that mark and return it; else return NULL.
+ */
+struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+
+ spin_lock(&mnt->mnt_root->d_lock);
+ mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ return mark;
+}
+
+/*
+ * Attach an initialized mark to a given group and vfsmount.
+ * These marks are used by the fsnotify backend to determine which
+ * event types should be delivered to which groups.
+ */
+int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct vfsmount *mnt,
+ int allow_dups)
+{
+ struct fsnotify_mark *lmark;
+ struct hlist_node *node, *last = NULL;
+ int ret = 0;
+
+ mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
+
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&group->mark_lock);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+
+ mark->m.mnt = mnt;
+
+ /* is mark the first mark? */
+ if (hlist_empty(&mnt->mnt_fsnotify_marks)) {
+ hlist_add_head_rcu(&mark->m.m_list, &mnt->mnt_fsnotify_marks);
+ goto out;
+ }
+
+ /* should mark be in the middle of the current list? */
+ hlist_for_each_entry(lmark, node, &mnt->mnt_fsnotify_marks, m.m_list) {
+ last = node;
+
+ if ((lmark->group == group) && !allow_dups) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if (mark->group < lmark->group)
+ continue;
+
+ hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
+ goto out;
+ }
+
+ BUG_ON(last == NULL);
+ /* mark should be the last entry. last is the current last entry */
+ hlist_add_after_rcu(last, &mark->m.m_list);
+out:
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ return ret;
+}
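
fsnotify_add_vfsmount_mark() above keeps the per-mount mark list ordered by group pointer so that duplicate detection and insertion happen in a single pass. A compilable userspace sketch of the same single-pass sorted insert (singly linked for brevity; all names hypothetical):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct mark { struct mark *next; const void *group; };

/* keep the list sorted by descending group pointer; reject a second
 * mark for the same group unless allow_dups is set */
static int add_mark(struct mark **head, struct mark *m, int allow_dups)
{
        struct mark **pos;

        for (pos = head; *pos; pos = &(*pos)->next) {
                if ((*pos)->group == m->group && !allow_dups)
                        return -EEXIST;
                if ((uintptr_t)m->group >= (uintptr_t)(*pos)->group)
                        break;  /* insert before the first smaller key */
        }
        m->next = *pos;
        *pos = m;
        return 0;
}

int main(void)
{
        struct mark a = { 0, (void *)0x30 }, b = { 0, (void *)0x10 };
        struct mark c = { 0, (void *)0x20 }, dup = { 0, (void *)0x20 };
        struct mark *head = NULL, *m;

        add_mark(&head, &a, 0);
        add_mark(&head, &b, 0);
        add_mark(&head, &c, 0);
        printf("dup: %d\n", add_mark(&head, &dup, 0)); /* -EEXIST (-17) */
        for (m = head; m; m = m->next)
                printf("group %p\n", (void *)m->group);
        return 0;
}
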
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 4b57fb1eac2..93622b175fc 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2238,7 +2238,7 @@ void ntfs_clear_extent_inode(ntfs_inode *ni)
}
/**
- * ntfs_clear_big_inode - clean up the ntfs specific part of an inode
+ * ntfs_evict_big_inode - clean up the ntfs specific part of an inode
* @vi: vfs inode pending annihilation
*
* When the VFS is going to remove an inode from memory, ntfs_evict_big_inode()
@@ -2247,10 +2247,13 @@ void ntfs_clear_extent_inode(ntfs_inode *ni)
*
* If the MFT record is dirty, we commit it before doing anything else.
*/
-void ntfs_clear_big_inode(struct inode *vi)
+void ntfs_evict_big_inode(struct inode *vi)
{
ntfs_inode *ni = NTFS_I(vi);
+ truncate_inode_pages(&vi->i_data, 0);
+ end_writeback(vi);
+
#ifdef NTFS_RW
if (NInoDirty(ni)) {
bool was_bad = (is_bad_inode(vi));
@@ -2879,9 +2882,6 @@ void ntfs_truncate_vfs(struct inode *vi) {
*
* Called with ->i_mutex held. For the ATTR_SIZE (i.e. ->truncate) case, also
* called with ->i_alloc_sem held for writing.
- *
- * Basically this is a copy of generic notify_change() and inode_setattr()
- * functionality, except we intercept and abort changes in i_size.
*/
int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
{
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index 9a113544605..2dabf813456 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -279,7 +279,7 @@ extern struct inode *ntfs_index_iget(struct inode *base_vi, ntfschar *name,
extern struct inode *ntfs_alloc_big_inode(struct super_block *sb);
extern void ntfs_destroy_big_inode(struct inode *inode);
-extern void ntfs_clear_big_inode(struct inode *vi);
+extern void ntfs_evict_big_inode(struct inode *vi);
extern void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni);
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 0de1db6cddb..512806171bf 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2700,7 +2700,7 @@ static const struct super_operations ntfs_sops = {
.put_super = ntfs_put_super, /* Syscall: umount. */
.statfs = ntfs_statfs, /* Syscall: statfs */
.remount_fs = ntfs_remount, /* Syscall: mount -o remount. */
- .clear_inode = ntfs_clear_big_inode, /* VFS: Called when an inode is
+ .evict_inode = ntfs_evict_big_inode, /* VFS: Called when an inode is
removed from memory. */
//.umount_begin = NULL, /* Forced umount. */
.show_options = ntfs_show_options, /* Show mount options in
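
This rename is one instance of a conversion that recurs through the rest of this diff (dlmfs and ocfs2 below): the 2.6.36 VFS folds .clear_inode/.delete_inode into a single .evict_inode, which must now truncate the page cache and call end_writeback() itself. A sketch of the minimal shape only, not a complete implementation:

static void example_evict_inode(struct inode *inode)
{
        truncate_inode_pages(&inode->i_data, 0); /* drop cached pages first */
        end_writeback(inode);                    /* detach from writeback, mark the inode dead */
        /* filesystem-private teardown (locks, journals, quotas) goes here */
}
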
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index da702294d7e..a76e0aa5cd3 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -290,12 +290,30 @@ static int ocfs2_set_acl(handle_t *handle,
int ocfs2_check_acl(struct inode *inode, int mask)
{
- struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *di_bh = NULL;
+ struct posix_acl *acl;
+ int ret = -EAGAIN;
- if (IS_ERR(acl))
+ if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
+ return ret;
+
+ ret = ocfs2_read_inode_block(inode, &di_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh);
+
+ brelse(di_bh);
+
+ if (IS_ERR(acl)) {
+ mlog_errno(PTR_ERR(acl));
return PTR_ERR(acl);
+ }
if (acl) {
- int ret = posix_acl_permission(inode, acl, mask);
+ ret = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
return ret;
}
@@ -344,7 +362,7 @@ int ocfs2_init_acl(handle_t *handle,
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
- int ret = 0;
+ int ret = 0, ret2;
mode_t mode;
if (!S_ISLNK(inode->i_mode)) {
@@ -381,7 +399,12 @@ int ocfs2_init_acl(handle_t *handle,
mode = inode->i_mode;
ret = posix_acl_create_masq(clone, &mode);
if (ret >= 0) {
- ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ if (ret2) {
+ mlog_errno(ret2);
+ ret = ret2;
+ goto cleanup;
+ }
if (ret > 0) {
ret = ocfs2_set_acl(handle, inode,
di_bh, ACL_TYPE_ACCESS,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3623ca20cc1..0de69c9a08b 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -196,15 +196,14 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
dump_stack();
goto bail;
}
-
- past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
- mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
- (unsigned long long)past_eof);
-
- if (create && (iblock >= past_eof))
- set_buffer_new(bh_result);
}
+ past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
+ mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
+ (unsigned long long)past_eof);
+ if (create && (iblock >= past_eof))
+ set_buffer_new(bh_result);
+
bail:
if (err < 0)
err = -EIO;
@@ -459,36 +458,6 @@ int walk_page_buffers( handle_t *handle,
return ret;
}
-handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
- struct page *page,
- unsigned from,
- unsigned to)
-{
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- handle_t *handle;
- int ret = 0;
-
- handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
- if (IS_ERR(handle)) {
- ret = -ENOMEM;
- mlog_errno(ret);
- goto out;
- }
-
- if (ocfs2_should_order_data(inode)) {
- ret = ocfs2_jbd2_file_inode(handle, inode);
- if (ret < 0)
- mlog_errno(ret);
- }
-out:
- if (ret) {
- if (!IS_ERR(handle))
- ocfs2_commit_trans(osb, handle);
- handle = ERR_PTR(ret);
- }
- return handle;
-}
-
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
sector_t status;
@@ -609,7 +578,9 @@ bail:
static void ocfs2_dio_end_io(struct kiocb *iocb,
loff_t offset,
ssize_t bytes,
- void *private)
+ void *private,
+ int ret,
+ bool is_async)
{
struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
int level;
@@ -623,6 +594,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
if (!level)
up_read(&inode->i_alloc_sem);
ocfs2_rw_unlock(inode, level);
+
+ if (is_async)
+ aio_complete(iocb, ret, 0);
}
/*
@@ -669,11 +643,10 @@ static ssize_t ocfs2_direct_IO(int rw,
if (i_size_read(inode) <= offset)
return 0;
- ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
- inode->i_sb->s_bdev, iov, offset,
- nr_segs,
- ocfs2_direct_IO_get_blocks,
- ocfs2_dio_end_io);
+ ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
+ iov, offset, nr_segs,
+ ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io, NULL, 0);
mlog_exit(ret);
return ret;
@@ -1131,23 +1104,37 @@ out:
*/
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
struct ocfs2_write_ctxt *wc,
- u32 cpos, loff_t user_pos, int new,
+ u32 cpos, loff_t user_pos,
+ unsigned user_len, int new,
struct page *mmap_page)
{
int ret = 0, i;
- unsigned long start, target_index, index;
+ unsigned long start, target_index, end_index, index;
struct inode *inode = mapping->host;
+ loff_t last_byte;
target_index = user_pos >> PAGE_CACHE_SHIFT;
/*
* Figure out how many pages we'll be manipulating here. For
* non allocating write, we just change the one
- * page. Otherwise, we'll need a whole clusters worth.
+ * page. Otherwise, we'll need a whole cluster's worth. If we're
+ * writing past i_size, we only need enough pages to cover the
+ * last page of the write.
*/
if (new) {
wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
+ /*
+ * We need the index *past* the last page we could possibly
+ * touch. This is the page past the end of the write or
+ * i_size, whichever is greater.
+ */
+ last_byte = max(user_pos + user_len, i_size_read(inode));
+ BUG_ON(last_byte < 1);
+ end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
+ if ((start + wc->w_num_pages) > end_index)
+ wc->w_num_pages = end_index - start;
} else {
wc->w_num_pages = 1;
start = target_index;
@@ -1620,21 +1607,20 @@ out:
* write path can treat it as a non-allocating write, which has no
* special case code for sparse/nonsparse files.
*/
-static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
- unsigned len,
+static int ocfs2_expand_nonsparse_inode(struct inode *inode,
+ struct buffer_head *di_bh,
+ loff_t pos, unsigned len,
struct ocfs2_write_ctxt *wc)
{
int ret;
- struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
loff_t newsize = pos + len;
- if (ocfs2_sparse_alloc(osb))
- return 0;
+ BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
if (newsize <= i_size_read(inode))
return 0;
- ret = ocfs2_extend_no_holes(inode, newsize, pos);
+ ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
if (ret)
mlog_errno(ret);
@@ -1644,6 +1630,18 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
return ret;
}
+static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
+ loff_t pos)
+{
+ int ret = 0;
+
+ BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
+ if (pos > i_size_read(inode))
+ ret = ocfs2_zero_extend(inode, di_bh, pos);
+
+ return ret;
+}
+
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
@@ -1679,7 +1677,11 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
}
}
- ret = ocfs2_expand_nonsparse_inode(inode, pos, len, wc);
+ if (ocfs2_sparse_alloc(osb))
+ ret = ocfs2_zero_tail(inode, di_bh, pos);
+ else
+ ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
+ wc);
if (ret) {
mlog_errno(ret);
goto out;
@@ -1789,7 +1791,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
* that we can zero and flush if we error after adding the
* extent.
*/
- ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
+ ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
cluster_of_pages, mmap_page);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index aa75ca3f78d..1361997cf20 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1759,6 +1759,7 @@ static int o2net_accept_one(struct socket *sock)
struct sockaddr_in sin;
struct socket *new_sock = NULL;
struct o2nm_node *node = NULL;
+ struct o2nm_node *local_node = NULL;
struct o2net_sock_container *sc = NULL;
struct o2net_node *nn;
@@ -1796,11 +1797,15 @@ static int o2net_accept_one(struct socket *sock)
goto out;
}
- if (o2nm_this_node() > node->nd_num) {
- mlog(ML_NOTICE, "unexpected connect attempted from a lower "
- "numbered node '%s' at " "%pI4:%d with num %u\n",
- node->nd_name, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port), node->nd_num);
+ if (o2nm_this_node() >= node->nd_num) {
+ local_node = o2nm_get_node_by_num(o2nm_this_node());
+ mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' ("
+ "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n",
+ local_node->nd_name, local_node->nd_num,
+ &(local_node->nd_ipv4_address),
+ ntohs(local_node->nd_ipv4_port),
+ node->nd_name, node->nd_num, &sin.sin_addr.s_addr,
+ ntohs(sin.sin_port));
ret = -EINVAL;
goto out;
}
@@ -1857,6 +1862,8 @@ out:
sock_release(new_sock);
if (node)
o2nm_node_put(node);
+ if (local_node)
+ o2nm_node_put(local_node);
if (sc)
sc_put(sc);
return ret;
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 0cd24cf5439..5efdd37dfe4 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -419,7 +419,7 @@ static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence)
static int debug_buffer_release(struct inode *inode, struct file *file)
{
- struct debug_buffer *db = (struct debug_buffer *)file->private_data;
+ struct debug_buffer *db = file->private_data;
if (db)
kfree(db->buf);
@@ -715,7 +715,7 @@ static int debug_lockres_open(struct inode *inode, struct file *file)
goto bail;
}
- seq = (struct seq_file *) file->private_data;
+ seq = file->private_data;
seq->private = dl;
dlm_grab(dlm);
@@ -731,7 +731,7 @@ bail:
static int debug_lockres_release(struct inode *inode, struct file *file)
{
- struct seq_file *seq = (struct seq_file *)file->private_data;
+ struct seq_file *seq = file->private_data;
struct debug_lockres *dl = (struct debug_lockres *)seq->private;
if (dl->dl_res)
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6b5a492e174..153abb5abef 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1671,7 +1671,7 @@ struct dlm_ctxt * dlm_register_domain(const char *domain,
struct dlm_ctxt *dlm = NULL;
struct dlm_ctxt *new_ctxt = NULL;
- if (strlen(domain) > O2NM_MAX_NAME_LEN) {
+ if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
ret = -ENAMETOOLONG;
mlog(ML_ERROR, "domain name length too long\n");
goto leave;
@@ -1709,6 +1709,7 @@ retry:
}
if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
+ spin_unlock(&dlm_domain_lock);
mlog(ML_ERROR,
"Requested locking protocol version is not "
"compatible with already registered domain "
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 4a7506a4e31..ffb4c68dafa 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -511,8 +511,6 @@ static void dlm_lockres_release(struct kref *kref)
atomic_dec(&dlm->res_cur_count);
- dlm_put(dlm);
-
if (!hlist_unhashed(&res->hash_node) ||
!list_empty(&res->granted) ||
!list_empty(&res->converting) ||
@@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
res->migration_pending = 0;
res->inflight_locks = 0;
- /* put in dlm_lockres_release */
- dlm_grab(dlm);
res->dlm = dlm;
kref_init(&res->refs);
@@ -2808,14 +2804,8 @@ again:
mlog(0, "trying again...\n");
goto again;
}
- /* now that we are sure the MIGRATING state is there, drop
- * the unneded state which blocked threads trying to DIRTY */
- spin_lock(&res->spinlock);
- BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
- BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
- res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
- spin_unlock(&res->spinlock);
+ ret = 0;
/* did the target go down or die? */
spin_lock(&dlm->spinlock);
if (!test_bit(target, dlm->domain_map)) {
@@ -2826,9 +2816,21 @@ again:
spin_unlock(&dlm->spinlock);
/*
+ * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for
+ * another try; otherwise, we are sure the MIGRATING state is there,
+ * drop the unneeded state which blocked threads trying to DIRTY
+ */
+ spin_lock(&res->spinlock);
+ BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+ res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+ if (!ret)
+ BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+ spin_unlock(&res->spinlock);
+
+ /*
* at this point:
*
- * o the DLM_LOCK_RES_MIGRATING flag is set
+ * o the DLM_LOCK_RES_MIGRATING flag is set if target not down
* o there are no pending asts on this lockres
* o all processes trying to reserve an ast on this
* lockres must wait for the MIGRATING flag to clear
@@ -3044,8 +3046,6 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
/* check for pre-existing lock */
spin_lock(&dlm->spinlock);
res = __dlm_lookup_lockres(dlm, name, namelen, hash);
- spin_lock(&dlm->master_lock);
-
if (res) {
spin_lock(&res->spinlock);
if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -3063,14 +3063,15 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
spin_unlock(&res->spinlock);
}
+ spin_lock(&dlm->master_lock);
/* ignore status. only nonzero status would BUG. */
ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
name, namelen,
migrate->new_master,
migrate->master);
-unlock:
spin_unlock(&dlm->master_lock);
+unlock:
spin_unlock(&dlm->spinlock);
if (oldmle) {
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index f8b75ce4be7..aaaffbcbe91 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -463,7 +463,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
int bit;
- bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
+ bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
if (bit >= O2NM_MAX_NODES || bit < 0)
dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
else
@@ -1997,6 +1997,8 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
struct list_head *queue;
struct dlm_lock *lock, *next;
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
res->state |= DLM_LOCK_RES_RECOVERING;
if (!list_empty(&res->recovering)) {
mlog(0,
@@ -2326,19 +2328,15 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
/* zero the lvb if necessary */
dlm_revalidate_lvb(dlm, res, dead_node);
if (res->owner == dead_node) {
- if (res->state & DLM_LOCK_RES_DROPPING_REF)
- mlog(0, "%s:%.*s: owned by "
- "dead node %u, this node was "
- "dropping its ref when it died. "
- "continue, dropping the flag.\n",
- dlm->name, res->lockname.len,
- res->lockname.name, dead_node);
-
- /* the wake_up for this will happen when the
- * RECOVERING flag is dropped later */
- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
+ mlog(ML_NOTICE, "Ignore %.*s for "
+ "recovery as it is being freed\n",
+ res->lockname.len,
+ res->lockname.name);
+ } else
+ dlm_move_lockres_to_recovery_list(dlm,
+ res);
- dlm_move_lockres_to_recovery_list(dlm, res);
} else if (res->owner == dlm->node_num) {
dlm_free_dead_locks(dlm, res, dead_node);
__dlm_lockres_calc_usage(dlm, res);
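
The one-line recovery_map fix above shrinks the scan bound from O2NM_MAX_NODES+1 to O2NM_MAX_NODES. The following 'bit >= O2NM_MAX_NODES' test already filtered any bogus hit, but the wider bound had find_next_bit() inspecting a bit outside the map's defined range. A toy model of the two bounds (sizes invented):

#include <stdio.h>

#define MAX_NODES 8u /* stand-in for O2NM_MAX_NODES */

/* toy find_next_bit(): first set bit at index >= offset, or size if none */
static unsigned find_next_bit(const unsigned char *map, unsigned size,
                              unsigned offset)
{
        unsigned i;

        for (i = offset; i < size; i++)
                if (map[i / 8] & (1u << (i % 8)))
                        return i;
        return size;
}

int main(void)
{
        /* bit 8 is set, but it lies outside the 8-bit node map */
        unsigned char recovery_map[2] = { 0x00, 0x01 };
        unsigned bit;

        bit = find_next_bit(recovery_map, MAX_NODES + 1, 0);
        printf("old bound: %s\n",
               bit < MAX_NODES + 1 ? "read a bit past the map" : "none");
        bit = find_next_bit(recovery_map, MAX_NODES, 0);
        printf("new bound: %s\n",
               bit < MAX_NODES ? "found a dead node" : "none");
        return 0;
}
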
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index d4f73ca68fe..2211acf33d9 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -92,19 +92,27 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
* truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
- if (!__dlm_lockres_has_locks(res) &&
- (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
- /* try not to scan the bitmap unless the first two
- * conditions are already true */
- int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
- if (bit >= O2NM_MAX_NODES) {
- /* since the bit for dlm->node_num is not
- * set, inflight_locks better be zero */
- BUG_ON(res->inflight_locks != 0);
- return 1;
- }
- }
- return 0;
+ int bit;
+
+ if (__dlm_lockres_has_locks(res))
+ return 0;
+
+ if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
+ return 0;
+
+ if (res->state & DLM_LOCK_RES_RECOVERING)
+ return 0;
+
+ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+ if (bit < O2NM_MAX_NODES)
+ return 0;
+
+ /*
+ * since the bit for dlm->node_num is not set, inflight_locks better
+ * be zero
+ */
+ BUG_ON(res->inflight_locks != 0);
+ return 1;
}
@@ -152,45 +160,25 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
spin_unlock(&dlm->spinlock);
}
-static int dlm_purge_lockres(struct dlm_ctxt *dlm,
+static void dlm_purge_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
int master;
int ret = 0;
- spin_lock(&res->spinlock);
- if (!__dlm_lockres_unused(res)) {
- mlog(0, "%s:%.*s: tried to purge but not unused\n",
- dlm->name, res->lockname.len, res->lockname.name);
- __dlm_print_one_lock_resource(res);
- spin_unlock(&res->spinlock);
- BUG();
- }
-
- if (res->state & DLM_LOCK_RES_MIGRATING) {
- mlog(0, "%s:%.*s: Delay dropref as this lockres is "
- "being remastered\n", dlm->name, res->lockname.len,
- res->lockname.name);
- /* Re-add the lockres to the end of the purge list */
- if (!list_empty(&res->purge)) {
- list_del_init(&res->purge);
- list_add_tail(&res->purge, &dlm->purge_list);
- }
- spin_unlock(&res->spinlock);
- return 0;
- }
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
master = (res->owner == dlm->node_num);
- if (!master)
- res->state |= DLM_LOCK_RES_DROPPING_REF;
- spin_unlock(&res->spinlock);
mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
res->lockname.name, master);
if (!master) {
+ res->state |= DLM_LOCK_RES_DROPPING_REF;
/* drop spinlock... retake below */
+ spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
spin_lock(&res->spinlock);
@@ -208,31 +196,35 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm,
mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
dlm->name, res->lockname.len, res->lockname.name, ret);
spin_lock(&dlm->spinlock);
+ spin_lock(&res->spinlock);
}
- spin_lock(&res->spinlock);
if (!list_empty(&res->purge)) {
mlog(0, "removing lockres %.*s:%p from purgelist, "
"master = %d\n", res->lockname.len, res->lockname.name,
res, master);
list_del_init(&res->purge);
- spin_unlock(&res->spinlock);
dlm_lockres_put(res);
dlm->purge_count--;
- } else
- spin_unlock(&res->spinlock);
+ }
+
+ if (!__dlm_lockres_unused(res)) {
+ mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
__dlm_unhash_lockres(res);
/* lockres is not in the hash now. drop the flag and wake up
* any processes waiting in dlm_get_lock_resource. */
if (!master) {
- spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_DROPPING_REF;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
- }
- return 0;
+ } else
+ spin_unlock(&res->spinlock);
}
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
@@ -251,17 +243,7 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
lockres = list_entry(dlm->purge_list.next,
struct dlm_lock_resource, purge);
- /* Status of the lockres *might* change so double
- * check. If the lockres is unused, holding the dlm
- * spinlock will prevent people from getting and more
- * refs on it -- there's no need to keep the lockres
- * spinlock. */
spin_lock(&lockres->spinlock);
- unused = __dlm_lockres_unused(lockres);
- spin_unlock(&lockres->spinlock);
-
- if (!unused)
- continue;
purge_jiffies = lockres->last_used +
msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
@@ -273,15 +255,29 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
* in tail order, we can stop at the first
* unpurgeable resource -- anything added after
* it will have a greater last_used value */
+ spin_unlock(&lockres->spinlock);
break;
}
+ /* Status of the lockres *might* change so double
+ * check. If the lockres is unused, holding the dlm
+ * spinlock will prevent people from getting any more
+ * refs on it. */
+ unused = __dlm_lockres_unused(lockres);
+ if (!unused ||
+ (lockres->state & DLM_LOCK_RES_MIGRATING)) {
+ mlog(0, "lockres %s:%.*s: is in use or "
+ "being remastered, used %d, state %d\n",
+ dlm->name, lockres->lockname.len,
+ lockres->lockname.name, !unused, lockres->state);
+ list_move_tail(&lockres->purge, &dlm->purge_list);
+ spin_unlock(&lockres->spinlock);
+ continue;
+ }
+
dlm_lockres_get(lockres);
- /* This may drop and reacquire the dlm spinlock if it
- * has to do migration. */
- if (dlm_purge_lockres(dlm, lockres))
- BUG();
+ dlm_purge_lockres(dlm, lockres);
dlm_lockres_put(lockres);
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index b83d6107a1f..c2903b84bb7 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -182,8 +182,7 @@ static int dlmfs_file_release(struct inode *inode,
{
int level, status;
struct dlmfs_inode_private *ip = DLMFS_I(inode);
- struct dlmfs_filp_private *fp =
- (struct dlmfs_filp_private *) file->private_data;
+ struct dlmfs_filp_private *fp = file->private_data;
if (S_ISDIR(inode->i_mode))
BUG();
@@ -214,10 +213,12 @@ static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_valid &= ~ATTR_SIZE;
error = inode_change_ok(inode, attr);
- if (!error)
- error = inode_setattr(inode, attr);
+ if (error)
+ return error;
- return error;
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
}
static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
@@ -355,13 +356,12 @@ static void dlmfs_destroy_inode(struct inode *inode)
kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
}
-static void dlmfs_clear_inode(struct inode *inode)
+static void dlmfs_evict_inode(struct inode *inode)
{
int status;
struct dlmfs_inode_private *ip;
- if (!inode)
- return;
+ end_writeback(inode);
mlog(0, "inode %lu\n", inode->i_ino);
@@ -631,7 +631,7 @@ static const struct super_operations dlmfs_ops = {
.statfs = simple_statfs,
.alloc_inode = dlmfs_alloc_inode,
.destroy_inode = dlmfs_destroy_inode,
- .clear_inode = dlmfs_clear_inode,
+ .evict_inode = dlmfs_evict_inode,
.drop_inode = generic_delete_inode,
};
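
dlmfs_file_setattr() above shows the replacement for the removed inode_setattr(): validate with inode_change_ok(), copy the attributes with setattr_copy(), then mark the inode dirty. The same idiom reappears in ocfs2_setattr() below. A bare sketch of that sequence, shape only:

static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        int error = inode_change_ok(inode, attr); /* permission/sanity checks */

        if (error)
                return error;
        setattr_copy(inode, attr); /* apply uid/gid/mode/timestamps */
        mark_inode_dirty(inode);
        return 0;
}
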
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 39eb16ac5f9..5e02a893f46 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2966,7 +2966,7 @@ static const struct seq_operations ocfs2_dlm_seq_ops = {
static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
{
- struct seq_file *seq = (struct seq_file *) file->private_data;
+ struct seq_file *seq = file->private_data;
struct ocfs2_dlm_seq_priv *priv = seq->private;
struct ocfs2_lock_res *res = &priv->p_iter_res;
@@ -3000,7 +3000,7 @@ static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
goto out;
}
- seq = (struct seq_file *) file->private_data;
+ seq = file->private_data;
seq->private = priv;
ocfs2_add_lockres_tracking(&priv->p_iter_res,
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 6a13ea64c44..81296b4e364 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -724,28 +724,55 @@ leave:
return status;
}
+/*
+ * While a write will already be ordering the data, a truncate will not.
+ * Thus, we need to explicitly order the zeroed pages.
+ */
+static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ handle_t *handle = NULL;
+ int ret = 0;
+
+ if (!ocfs2_should_order_data(inode))
+ goto out;
+
+ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_jbd2_file_inode(handle, inode);
+ if (ret < 0)
+ mlog_errno(ret);
+
+out:
+ if (ret) {
+ if (!IS_ERR(handle))
+ ocfs2_commit_trans(osb, handle);
+ handle = ERR_PTR(ret);
+ }
+ return handle;
+}
+
/* Some parts of this taken from generic_cont_expand, which turned out
* to be too fragile to do exactly what we need without us having to
* worry about recursive locking in ->write_begin() and ->write_end(). */
-static int ocfs2_write_zero_page(struct inode *inode,
- u64 size)
+static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
+ u64 abs_to)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
- unsigned long index;
- unsigned int offset;
+ unsigned long index = abs_from >> PAGE_CACHE_SHIFT;
handle_t *handle = NULL;
- int ret;
+ int ret = 0;
+ unsigned zero_from, zero_to, block_start, block_end;
- offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
- /* ugh. in prepare/commit_write, if from==to==start of block, we
- ** skip the prepare. make sure we never send an offset for the start
- ** of a block
- */
- if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
- offset++;
- }
- index = size >> PAGE_CACHE_SHIFT;
+ BUG_ON(abs_from >= abs_to);
+ BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
+ BUG_ON(abs_from & ((1 << inode->i_blkbits) - 1));
page = grab_cache_page(mapping, index);
if (!page) {
@@ -754,31 +781,56 @@ static int ocfs2_write_zero_page(struct inode *inode,
goto out;
}
- ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
- if (ret < 0) {
- mlog_errno(ret);
- goto out_unlock;
- }
+ /* Get the offsets within the page that we want to zero */
+ zero_from = abs_from & (PAGE_CACHE_SIZE - 1);
+ zero_to = abs_to & (PAGE_CACHE_SIZE - 1);
+ if (!zero_to)
+ zero_to = PAGE_CACHE_SIZE;
- if (ocfs2_should_order_data(inode)) {
- handle = ocfs2_start_walk_page_trans(inode, page, offset,
- offset);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- handle = NULL;
+ mlog(0,
+ "abs_from = %llu, abs_to = %llu, index = %lu, zero_from = %u, zero_to = %u\n",
+ (unsigned long long)abs_from, (unsigned long long)abs_to,
+ index, zero_from, zero_to);
+
+ /* We know that zero_from is block aligned */
+ for (block_start = zero_from; block_start < zero_to;
+ block_start = block_end) {
+ block_end = block_start + (1 << inode->i_blkbits);
+
+ /*
+ * block_start is block-aligned. Bump it by one to
+ * force ocfs2_{prepare,commit}_write() to zero the
+ * whole block.
+ */
+ ret = ocfs2_prepare_write_nolock(inode, page,
+ block_start + 1,
+ block_start + 1);
+ if (ret < 0) {
+ mlog_errno(ret);
goto out_unlock;
}
- }
- /* must not update i_size! */
- ret = block_commit_write(page, offset, offset);
- if (ret < 0)
- mlog_errno(ret);
- else
- ret = 0;
+ if (!handle) {
+ handle = ocfs2_zero_start_ordered_transaction(inode);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ handle = NULL;
+ break;
+ }
+ }
+
+ /* must not update i_size! */
+ ret = block_commit_write(page, block_start + 1,
+ block_start + 1);
+ if (ret < 0)
+ mlog_errno(ret);
+ else
+ ret = 0;
+ }
if (handle)
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+
out_unlock:
unlock_page(page);
page_cache_release(page);
@@ -786,22 +838,114 @@ out:
return ret;
}
-static int ocfs2_zero_extend(struct inode *inode,
- u64 zero_to_size)
+/*
+ * Find the next range to zero. We do this in terms of bytes because
+ * that's what ocfs2_zero_extend() wants, and it is dealing with the
+ * pagecache. We may return multiple extents.
+ *
+ * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
+ * needs to be zeroed. range_start and range_end return the next zeroing
+ * range. A subsequent call should pass the previous range_end as its
+ * zero_start. If range_end is 0, there's nothing to do.
+ *
+ * Unwritten extents are skipped over. Refcounted extents are CoW'd.
+ */
+static int ocfs2_zero_extend_get_range(struct inode *inode,
+ struct buffer_head *di_bh,
+ u64 zero_start, u64 zero_end,
+ u64 *range_start, u64 *range_end)
{
- int ret = 0;
- u64 start_off;
- struct super_block *sb = inode->i_sb;
+ int rc = 0, needs_cow = 0;
+ u32 p_cpos, zero_clusters = 0;
+ u32 zero_cpos =
+ zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+ u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
+ unsigned int num_clusters = 0;
+ unsigned int ext_flags = 0;
- start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
- while (start_off < zero_to_size) {
- ret = ocfs2_write_zero_page(inode, start_off);
- if (ret < 0) {
- mlog_errno(ret);
+ while (zero_cpos < last_cpos) {
+ rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
+ &num_clusters, &ext_flags);
+ if (rc) {
+ mlog_errno(rc);
+ goto out;
+ }
+
+ if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+ zero_clusters = num_clusters;
+ if (ext_flags & OCFS2_EXT_REFCOUNTED)
+ needs_cow = 1;
+ break;
+ }
+
+ zero_cpos += num_clusters;
+ }
+ if (!zero_clusters) {
+ *range_end = 0;
+ goto out;
+ }
+
+ while ((zero_cpos + zero_clusters) < last_cpos) {
+ rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
+ &p_cpos, &num_clusters,
+ &ext_flags);
+ if (rc) {
+ mlog_errno(rc);
+ goto out;
+ }
+
+ if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
+ break;
+ if (ext_flags & OCFS2_EXT_REFCOUNTED)
+ needs_cow = 1;
+ zero_clusters += num_clusters;
+ }
+ if ((zero_cpos + zero_clusters) > last_cpos)
+ zero_clusters = last_cpos - zero_cpos;
+
+ if (needs_cow) {
+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, zero_clusters,
+ UINT_MAX);
+ if (rc) {
+ mlog_errno(rc);
goto out;
}
+ }
+
+ *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
+ *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
+ zero_cpos + zero_clusters);
- start_off += sb->s_blocksize;
+out:
+ return rc;
+}
+
+/*
+ * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
+ * has made sure that the entire range needs zeroing.
+ */
+static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
+ u64 range_end)
+{
+ int rc = 0;
+ u64 next_pos;
+ u64 zero_pos = range_start;
+
+ mlog(0, "range_start = %llu, range_end = %llu\n",
+ (unsigned long long)range_start,
+ (unsigned long long)range_end);
+ BUG_ON(range_start >= range_end);
+
+ while (zero_pos < range_end) {
+ next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
+ if (next_pos > range_end)
+ next_pos = range_end;
+ rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
+ if (rc < 0) {
+ mlog_errno(rc);
+ break;
+ }
+ zero_pos = next_pos;
/*
* Very large extends have the potential to lock up
@@ -810,16 +954,63 @@ static int ocfs2_zero_extend(struct inode *inode,
cond_resched();
}
-out:
+ return rc;
+}
+
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+ loff_t zero_to_size)
+{
+ int ret = 0;
+ u64 zero_start, range_start = 0, range_end = 0;
+ struct super_block *sb = inode->i_sb;
+
+ zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
+ mlog(0, "zero_start %llu for i_size %llu\n",
+ (unsigned long long)zero_start,
+ (unsigned long long)i_size_read(inode));
+ while (zero_start < zero_to_size) {
+ ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
+ zero_to_size,
+ &range_start,
+ &range_end);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+ if (!range_end)
+ break;
+ /* Trim the ends */
+ if (range_start < zero_start)
+ range_start = zero_start;
+ if (range_end > zero_to_size)
+ range_end = zero_to_size;
+
+ ret = ocfs2_zero_extend_range(inode, range_start,
+ range_end);
+ if (ret) {
+ mlog_errno(ret);
+ break;
+ }
+ zero_start = range_end;
+ }
+
return ret;
}
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+ u64 new_i_size, u64 zero_to)
{
int ret;
u32 clusters_to_add;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ /*
+ * Only quota files call this without a bh, and they can't be
+ * refcounted.
+ */
+ BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
+ BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
+
clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
if (clusters_to_add < oi->ip_clusters)
clusters_to_add = 0;
@@ -840,7 +1031,7 @@ int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size, u64 zero_to)
* still need to zero the area between the old i_size and the
* new i_size.
*/
- ret = ocfs2_zero_extend(inode, zero_to);
+ ret = ocfs2_zero_extend(inode, di_bh, zero_to);
if (ret < 0)
mlog_errno(ret);
@@ -862,27 +1053,15 @@ static int ocfs2_extend_file(struct inode *inode,
goto out;
if (i_size_read(inode) == new_i_size)
- goto out;
+ goto out;
BUG_ON(new_i_size < i_size_read(inode));
/*
- * Fall through for converting inline data, even if the fs
- * supports sparse files.
- *
- * The check for inline data here is legal - nobody can add
- * the feature since we have i_mutex. We must check it again
- * after acquiring ip_alloc_sem though, as paths like mmap
- * might have raced us to converting the inode to extents.
- */
- if (!(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
- && ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
- goto out_update_size;
-
- /*
* The alloc sem blocks people in read/write from reading our
* allocation until we're done changing it. We depend on
* i_mutex to block other extend/truncate calls while we're
- * here.
+ * here. We even have to hold it for sparse files because there
+ * might be some tail zeroing.
*/
down_write(&oi->ip_alloc_sem);
@@ -899,14 +1078,16 @@ static int ocfs2_extend_file(struct inode *inode,
ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
if (ret) {
up_write(&oi->ip_alloc_sem);
-
mlog_errno(ret);
goto out;
}
}
- if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
- ret = ocfs2_extend_no_holes(inode, new_i_size, new_i_size);
+ if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
+ ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
+ else
+ ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
+ new_i_size);
up_write(&oi->ip_alloc_sem);
@@ -1052,18 +1233,26 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
}
/*
- * This will intentionally not wind up calling simple_setsize(),
+ * This will intentionally not wind up calling truncate_setsize(),
* since all the work for a size change has been done above.
* Otherwise, we could get into problems with truncate as
* ip_alloc_sem is used there to protect against i_size
* changes.
+ *
+ * XXX: this means the conditional below can probably be removed.
*/
- status = inode_setattr(inode, attr);
- if (status < 0) {
- mlog_errno(status);
- goto bail_commit;
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ status = vmtruncate(inode, attr->ia_size);
+ if (status) {
+ mlog_errno(status);
+ goto bail_commit;
+ }
}
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+
status = ocfs2_mark_inode_dirty(handle, inode, bh);
if (status < 0)
mlog_errno(status);
@@ -2119,12 +2308,12 @@ relock:
* blocks outside i_size. Trim these off again.
* Don't need i_size_read because we hold i_mutex.
*
- * XXX(hch): this looks buggy because ocfs2 did not
+ * XXX(truncate): this looks buggy because ocfs2 did not
* actually implement ->truncate. Take a look at
* the new truncate sequence and update this accordingly
*/
if (*ppos + count > inode->i_size)
- simple_setsize(inode, inode->i_size);
+ truncate_setsize(inode, inode->i_size);
ret = written;
goto out_dio;
}
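
The rewritten ocfs2_zero_extend() above is a driver loop over ocfs2_zero_extend_get_range(): fetch the next range that needs zeroing, trim it to the [zero_start, zero_to_size) window, zero it, and continue from range_end until the generator returns an empty range. The contract is small enough to model in plain userspace C (the extent map below is invented):

#include <stdio.h>

struct range { unsigned long long start, end; };

/* pretend extent map: only [8192,16384) and [32768,40960) are allocated */
static struct range get_range(unsigned long long from, unsigned long long to)
{
        static const struct range allocated[] = {
                { 8192, 16384 }, { 32768, 40960 },
        };
        unsigned i;

        for (i = 0; i < 2; i++)
                if (allocated[i].end > from && allocated[i].start < to)
                        return allocated[i];
        return (struct range){ 0, 0 };  /* end == 0: nothing left to do */
}

int main(void)
{
        unsigned long long zero_start = 4096, zero_to = 36864;

        while (zero_start < zero_to) {
                struct range r = get_range(zero_start, zero_to);

                if (!r.end)
                        break;
                if (r.start < zero_start)   /* trim the ends, as above */
                        r.start = zero_start;
                if (r.end > zero_to)
                        r.end = zero_to;
                printf("zeroing [%llu, %llu)\n", r.start, r.end);
                zero_start = r.end;         /* resume from range_end */
        }
        return 0;
}
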
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index d66cf4f7c70..97bf761c9e7 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -54,8 +54,10 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb,
int ocfs2_simple_size_update(struct inode *inode,
struct buffer_head *di_bh,
u64 new_i_size);
-int ocfs2_extend_no_holes(struct inode *inode, u64 new_i_size,
- u64 zero_to);
+int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
+ u64 new_i_size, u64 zero_to);
+int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
+ loff_t zero_to);
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index abb0a95cc71..0492464916b 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -969,7 +969,7 @@ static void ocfs2_cleanup_delete_inode(struct inode *inode,
truncate_inode_pages(&inode->i_data, 0);
}
-void ocfs2_delete_inode(struct inode *inode)
+static void ocfs2_delete_inode(struct inode *inode)
{
int wipe, status;
sigset_t oldset;
@@ -1075,20 +1075,17 @@ bail_unlock_nfs_sync:
bail_unblock:
ocfs2_unblock_signals(&oldset);
bail:
- clear_inode(inode);
mlog_exit_void();
}
-void ocfs2_clear_inode(struct inode *inode)
+static void ocfs2_clear_inode(struct inode *inode)
{
int status;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
mlog_entry_void();
- if (!inode)
- goto bail;
-
+ end_writeback(inode);
mlog(0, "Clearing inode: %llu, nlink = %u\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_nlink);
@@ -1180,16 +1177,27 @@ void ocfs2_clear_inode(struct inode *inode)
jbd2_journal_release_jbd_inode(OCFS2_SB(inode->i_sb)->journal->j_journal,
&oi->ip_jinode);
-bail:
mlog_exit_void();
}
+void ocfs2_evict_inode(struct inode *inode)
+{
+ if (!inode->i_nlink ||
+ (OCFS2_I(inode)->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)) {
+ ocfs2_delete_inode(inode);
+ } else {
+ truncate_inode_pages(&inode->i_data, 0);
+ }
+ ocfs2_clear_inode(inode);
+}
+
/* Called under inode_lock, with no more references on the
* struct inode, so it's safe here to check the flags field
* and to manipulate i_nlink without any other locks. */
-void ocfs2_drop_inode(struct inode *inode)
+int ocfs2_drop_inode(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ int res;
mlog_entry_void();
@@ -1197,11 +1205,12 @@ void ocfs2_drop_inode(struct inode *inode)
(unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags);
if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)
- generic_delete_inode(inode);
+ res = 1;
else
- generic_drop_inode(inode);
+ res = generic_drop_inode(inode);
mlog_exit_void();
+ return res;
}
/*
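
ocfs2_drop_inode() now follows the int-returning .drop_inode contract: a non-zero return tells the VFS to evict the inode immediately rather than keep it cached. A sketch of the dispatch; inode_maybe_orphaned() is a hypothetical stand-in for the OCFS2_INODE_MAYBE_ORPHANED test above:

static int example_drop_inode(struct inode *inode)
{
        if (inode_maybe_orphaned(inode))        /* hypothetical predicate */
                return 1;                       /* evict now */
        return generic_drop_inode(inode);       /* default refcount-based rule */
}
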
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 9f5f5fcadc4..6de5a869db3 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -123,9 +123,8 @@ static inline struct ocfs2_caching_info *INODE_CACHE(struct inode *inode)
return &OCFS2_I(inode)->ip_metadata_cache;
}
-void ocfs2_clear_inode(struct inode *inode);
-void ocfs2_delete_inode(struct inode *inode);
-void ocfs2_drop_inode(struct inode *inode);
+void ocfs2_evict_inode(struct inode *inode);
+int ocfs2_drop_inode(struct inode *inode);
/* Flags for ocfs2_iget() */
#define OCFS2_FI_FLAG_SYSFILE 0x1
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 47878cf1641..9b57c0350ff 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -472,7 +472,7 @@ static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger
return container_of(triggers, struct ocfs2_triggers, ot_triggers);
}
-static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh,
void *data, size_t size)
{
@@ -491,7 +491,7 @@ static void ocfs2_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
* Quota blocks have their own trigger because the struct ocfs2_block_check
* offset depends on the blocksize.
*/
-static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_dq_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh,
void *data, size_t size)
{
@@ -511,7 +511,7 @@ static void ocfs2_dq_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
* Directory blocks also have their own trigger because the
* struct ocfs2_block_check offset depends on the blocksize.
*/
-static void ocfs2_db_commit_trigger(struct jbd2_buffer_trigger_type *triggers,
+static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh,
void *data, size_t size)
{
@@ -544,7 +544,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
static struct ocfs2_triggers di_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_commit_trigger,
+ .t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_dinode, i_check),
@@ -552,7 +552,7 @@ static struct ocfs2_triggers di_triggers = {
static struct ocfs2_triggers eb_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_commit_trigger,
+ .t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_extent_block, h_check),
@@ -560,7 +560,7 @@ static struct ocfs2_triggers eb_triggers = {
static struct ocfs2_triggers rb_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_commit_trigger,
+ .t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
@@ -568,7 +568,7 @@ static struct ocfs2_triggers rb_triggers = {
static struct ocfs2_triggers gd_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_commit_trigger,
+ .t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
@@ -576,14 +576,14 @@ static struct ocfs2_triggers gd_triggers = {
static struct ocfs2_triggers db_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_db_commit_trigger,
+ .t_frozen = ocfs2_db_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
};
static struct ocfs2_triggers xb_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_commit_trigger,
+ .t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
@@ -591,14 +591,14 @@ static struct ocfs2_triggers xb_triggers = {
static struct ocfs2_triggers dq_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_dq_commit_trigger,
+ .t_frozen = ocfs2_dq_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
};
static struct ocfs2_triggers dr_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_commit_trigger,
+ .t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
@@ -606,7 +606,7 @@ static struct ocfs2_triggers dr_triggers = {
static struct ocfs2_triggers dl_triggers = {
.ot_triggers = {
- .t_commit = ocfs2_commit_trigger,
+ .t_frozen = ocfs2_frozen_trigger,
.t_abort = ocfs2_abort_trigger,
},
.ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
@@ -760,13 +760,13 @@ void ocfs2_set_journal_params(struct ocfs2_super *osb)
if (osb->osb_commit_interval)
commit_interval = osb->osb_commit_interval;
- spin_lock(&journal->j_state_lock);
+ write_lock(&journal->j_state_lock);
journal->j_commit_interval = commit_interval;
if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
journal->j_flags |= JBD2_BARRIER;
else
journal->j_flags &= ~JBD2_BARRIER;
- spin_unlock(&journal->j_state_lock);
+ write_unlock(&journal->j_state_lock);
}
int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
@@ -1936,7 +1936,7 @@ void ocfs2_orphan_scan_work(struct work_struct *work)
mutex_lock(&os->os_lock);
ocfs2_queue_orphan_scan(osb);
if (atomic_read(&os->os_state) == ORPHAN_SCAN_ACTIVE)
- schedule_delayed_work(&os->os_orphan_scan_work,
+ queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
ocfs2_orphan_scan_timeout());
mutex_unlock(&os->os_lock);
}
@@ -1976,8 +1976,8 @@ void ocfs2_orphan_scan_start(struct ocfs2_super *osb)
atomic_set(&os->os_state, ORPHAN_SCAN_INACTIVE);
else {
atomic_set(&os->os_state, ORPHAN_SCAN_ACTIVE);
- schedule_delayed_work(&os->os_orphan_scan_work,
- ocfs2_orphan_scan_timeout());
+ queue_delayed_work(ocfs2_wq, &os->os_orphan_scan_work,
+ ocfs2_orphan_scan_timeout());
}
}
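
The two hunks above move the orphan scan off the kernel-wide workqueue and onto ocfs2's private ocfs2_wq, so the filesystem can flush its own queue at unmount without waiting behind unrelated system work. A minimal sketch of the self-rearming delayed-work pattern on a private queue; every name here (demo_wq, demo_work, demo_scan, DEMO_INTERVAL) is a hypothetical stand-in, not ocfs2 code:

/* Sketch only: periodic work that re-queues itself on a private queue. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define DEMO_INTERVAL (30 * HZ)

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_work;
static int demo_stopping;	/* the real code uses an atomic state */

static void demo_scan(struct work_struct *work)
{
	/* ... do the periodic scan here ... */

	if (!demo_stopping)	/* re-arm on the private queue */
		queue_delayed_work(demo_wq, &demo_work, DEMO_INTERVAL);
}

static int demo_start(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo_work, demo_scan);
	queue_delayed_work(demo_wq, &demo_work, DEMO_INTERVAL);
	return 0;
}

static void demo_stop(void)
{
	demo_stopping = 1;
	cancel_delayed_work_sync(&demo_work);
	destroy_workqueue(demo_wq);
}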
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 3d7419682dc..ec6adbf8f55 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -118,6 +118,7 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
{
unsigned int la_mb;
unsigned int gd_mb;
+ unsigned int la_max_mb;
unsigned int megs_per_slot;
struct super_block *sb = osb->sb;
@@ -182,6 +183,12 @@ unsigned int ocfs2_la_default_mb(struct ocfs2_super *osb)
if (megs_per_slot < la_mb)
la_mb = megs_per_slot;
+ /* We can't store more bits than fit in a block. */
+ la_max_mb = ocfs2_clusters_to_megabytes(osb->sb,
+ ocfs2_local_alloc_size(sb) * 8);
+ if (la_mb > la_max_mb)
+ la_mb = la_max_mb;
+
return la_mb;
}
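
The new clamp caps the local alloc window at what one block's bitmap can actually track: one bit per cluster, so at most bits-per-block clusters. A self-contained sketch of that arithmetic; the 4 KB cluster size and bitmap payload below are illustrative assumptions, not ocfs2's exact on-disk layout:

#include <stdio.h>

int main(void)
{
	unsigned int cluster_bytes = 4096;	/* assumed cluster size */
	unsigned int bitmap_bytes = 3996;	/* assumed bitmap payload per block */
	unsigned int max_clusters = bitmap_bytes * 8;	/* one bit per cluster */
	unsigned int la_max_mb =
		((unsigned long long)max_clusters * cluster_bytes) >> 20;
	unsigned int la_mb = 256;		/* requested window */

	if (la_mb > la_max_mb)			/* the clamp added above */
		la_mb = la_max_mb;

	printf("bitmap tracks %u clusters -> cap %u MB, using %u MB\n",
	       max_clusters, la_max_mb, la_mb);	/* 31968 clusters -> 124 MB */
	return 0;
}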
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 2bb35fe0051..4607923eb24 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -775,7 +775,7 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
* locking allocators ranks above a transaction start
*/
WARN_ON(journal_current_handle());
- status = ocfs2_extend_no_holes(gqinode,
+ status = ocfs2_extend_no_holes(gqinode, NULL,
gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
gqinode->i_size);
if (status < 0)
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 8bd70d4d184..dc78764ccc4 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -971,7 +971,7 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk(
u64 p_blkno;
/* We are protected by dqio_sem so no locking needed */
- status = ocfs2_extend_no_holes(lqinode,
+ status = ocfs2_extend_no_holes(lqinode, NULL,
lqinode->i_size + 2 * sb->s_blocksize,
lqinode->i_size);
if (status < 0) {
@@ -1114,7 +1114,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file(
return ocfs2_local_quota_add_chunk(sb, type, offset);
/* We are protected by dqio_sem so no locking needed */
- status = ocfs2_extend_no_holes(lqinode,
+ status = ocfs2_extend_no_holes(lqinode, NULL,
lqinode->i_size + sb->s_blocksize,
lqinode->i_size);
if (status < 0) {
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 4793f36f651..73a11ccfd4c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2436,16 +2436,26 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
le32_to_cpu(rec.r_clusters)) - cpos;
/*
- * If the refcount rec already exist, cool. We just need
- * to check whether there is a split. Otherwise we just need
- * to increase the refcount.
- * If we will insert one, increases recs_add.
- *
* We record all the records which will be inserted into the
* same refcount block, so that we can tell exactly whether
* we need a new refcount block or not.
+ *
+ * If we insert a new one, this is easy: it only happens while
+ * adding the refcounted flag to the extent, so there is no chance
+ * of splitting. We just need one record.
+ *
+ * If the refcount rec already exists, things are a little more
+ * complicated. We may have to:
+ * 1) split at the beginning if the start pos isn't aligned;
+ * we need 1 more record in this case.
+ * 2) split at the end if the end pos isn't aligned;
+ * we need 1 more record in this case.
+ * 3) split in the middle because of file system fragmentation;
+ * we need 2 more records in this case (we can't detect this
+ * beforehand, so always assume the worst case).
*/
if (rec.r_refcount) {
+ recs_add += 2;
/* Check whether we need a split at the beginning. */
if (cpos == start_cpos &&
cpos != le64_to_cpu(rec.r_cpos))
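
The expanded comment pins down the worst-case accounting: an existing record may split at each unaligned edge and, unpredictably, in the middle, so the code now always reserves for the middle split up front. A small illustrative helper restating that count (not ocfs2 code):

/* Worst-case extra refcount records needed for one range. */
static int recs_needed(int rec_exists, int start_aligned, int end_aligned)
{
	if (!rec_exists)
		return 1;	/* fresh record, no split possible */

	/*
	 * Existing record: 2 for a possible middle split (undetectable
	 * beforehand), plus 1 for each unaligned edge.
	 */
	return 2 + (start_aligned ? 0 : 1) + (end_aligned ? 0 : 1);
}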
@@ -2931,6 +2941,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
+ /*
+ * We only duplicate pages until we reach the page that contains
+ * i_size - 1. So trim 'end' to i_size.
+ */
+ if (end > i_size_read(context->inode))
+ end = i_size_read(context->inode);
while (offset < end) {
page_index = offset >> PAGE_CACHE_SHIFT;
@@ -4166,6 +4182,12 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
struct inode *inode = old_dentry->d_inode;
struct buffer_head *new_bh = NULL;
+ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+ ret = -EINVAL;
+ mlog_errno(ret);
+ goto out;
+ }
+
ret = filemap_fdatawrite(inode->i_mapping);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index 40650021fc2..d8b6e4259b8 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/list.h>
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index f4c2a9eb8c4..a8e6a95a353 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -741,7 +741,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
le16_to_cpu(bg->bg_free_bits_count));
le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
le16_to_cpu(bg->bg_bits));
- cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg->bg_blkno);
+ cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
le16_add_cpu(&cl->cl_next_free_rec, 1);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 0eaa929a4db..fa1be1b304d 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -145,8 +145,7 @@ static const struct super_operations ocfs2_sops = {
.alloc_inode = ocfs2_alloc_inode,
.destroy_inode = ocfs2_destroy_inode,
.drop_inode = ocfs2_drop_inode,
- .clear_inode = ocfs2_clear_inode,
- .delete_inode = ocfs2_delete_inode,
+ .evict_inode = ocfs2_evict_inode,
.sync_fs = ocfs2_sync_fs,
.put_super = ocfs2_put_super,
.remount_fs = ocfs2_remount,
@@ -2472,7 +2471,7 @@ static void ocfs2_delete_osb(struct ocfs2_super *osb)
kfree(osb->slot_recovery_generations);
/* FIXME
* This belongs in journal shutdown, but because we have to
- * allocate osb->journal at the start of ocfs2_initalize_osb(),
+ * allocate osb->journal at the start of ocfs2_initialize_osb(),
* we free it here.
*/
kfree(osb->journal);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index e97b34842cf..d03469f6180 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -709,7 +709,7 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
struct ocfs2_xattr_value_buf *vb,
struct ocfs2_xattr_set_ctxt *ctxt)
{
- int status = 0;
+ int status = 0, credits;
handle_t *handle = ctxt->handle;
enum ocfs2_alloc_restarted why;
u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters);
@@ -719,38 +719,54 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode,
ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb);
- status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ while (clusters_to_add) {
+ status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ break;
+ }
- prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
- status = ocfs2_add_clusters_in_btree(handle,
- &et,
- &logical_start,
- clusters_to_add,
- 0,
- ctxt->data_ac,
- ctxt->meta_ac,
- &why);
- if (status < 0) {
- mlog_errno(status);
- goto leave;
- }
+ prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters);
+ status = ocfs2_add_clusters_in_btree(handle,
+ &et,
+ &logical_start,
+ clusters_to_add,
+ 0,
+ ctxt->data_ac,
+ ctxt->meta_ac,
+ &why);
+ if ((status < 0) && (status != -EAGAIN)) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ break;
+ }
- ocfs2_journal_dirty(handle, vb->vb_bh);
+ ocfs2_journal_dirty(handle, vb->vb_bh);
- clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) - prev_clusters;
+ clusters_to_add -= le32_to_cpu(vb->vb_xv->xr_clusters) -
+ prev_clusters;
- /*
- * We should have already allocated enough space before the transaction,
- * so no need to restart.
- */
- BUG_ON(why != RESTART_NONE || clusters_to_add);
-
-leave:
+ if (why != RESTART_NONE && clusters_to_add) {
+ /*
+ * We can only fail here if the allocator file doesn't
+ * give us enough clusters.
+ */
+ BUG_ON(why == RESTART_META);
+
+ mlog(0, "restarting xattr value extension for %u"
+ " clusters,.\n", clusters_to_add);
+ credits = ocfs2_calc_extend_credits(inode->i_sb,
+ &vb->vb_xv->xr_list,
+ clusters_to_add);
+ status = ocfs2_extend_trans(handle, credits);
+ if (status < 0) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ break;
+ }
+ }
+ }
return status;
}
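
The rewrite replaces the old BUG_ON with a real restart path: when the allocator reports that more work remains, the handle's credits are extended and the loop goes around again. A hedged sketch of the generic shape, with do_alloc_step() and calc_more_credits() as hypothetical stand-ins for ocfs2_add_clusters_in_btree() and ocfs2_calc_extend_credits():

static int extend_with_restarts(handle_t *handle, u32 clusters_to_add)
{
	int status = 0, credits;
	enum ocfs2_alloc_restarted why;

	while (clusters_to_add) {
		status = do_alloc_step(handle, &clusters_to_add, &why);
		if (status < 0 && status != -EAGAIN)
			break;				/* hard failure */

		if (why != RESTART_NONE && clusters_to_add) {
			/* Grow the transaction, then try again. */
			credits = calc_more_credits(clusters_to_add);
			status = ocfs2_extend_trans(handle, credits);
			if (status < 0)
				break;
		}
	}
	return status;
}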
@@ -6788,16 +6804,15 @@ out:
return ret;
}
-static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+static int ocfs2_reflink_xattr_bucket(handle_t *handle,
u64 blkno, u64 new_blkno, u32 clusters,
+ u32 *cpos, int num_buckets,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_alloc_context *data_ac,
struct ocfs2_reflink_xattr_tree_args *args)
{
int i, j, ret = 0;
struct super_block *sb = args->reflink->old_inode->i_sb;
- u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
- u32 num_buckets = clusters * bpc;
int bpb = args->old_bucket->bu_blocks;
struct ocfs2_xattr_value_buf vb = {
.vb_access = ocfs2_journal_access,
@@ -6816,14 +6831,6 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
break;
}
- /*
- * The real bucket num in this series of blocks is stored
- * in the 1st bucket.
- */
- if (i == 0)
- num_buckets = le16_to_cpu(
- bucket_xh(args->old_bucket)->xh_num_buckets);
-
ret = ocfs2_xattr_bucket_journal_access(handle,
args->new_bucket,
OCFS2_JOURNAL_ACCESS_CREATE);
@@ -6837,6 +6844,18 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
bucket_block(args->old_bucket, j),
sb->s_blocksize);
+ /*
+ * Record the start cpos so that we can use it to initialize
+ * our xattr tree. We also set xh_num_buckets for the new
+ * bucket.
+ */
+ if (i == 0) {
+ *cpos = le32_to_cpu(bucket_xh(args->new_bucket)->
+ xh_entries[0].xe_name_hash);
+ bucket_xh(args->new_bucket)->xh_num_buckets =
+ cpu_to_le16(num_buckets);
+ }
+
ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
ret = ocfs2_reflink_xattr_header(handle, args->reflink,
@@ -6866,6 +6885,7 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
}
ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+
ocfs2_xattr_bucket_relse(args->old_bucket);
ocfs2_xattr_bucket_relse(args->new_bucket);
}
@@ -6874,6 +6894,75 @@ static int ocfs2_reflink_xattr_buckets(handle_t *handle,
ocfs2_xattr_bucket_relse(args->new_bucket);
return ret;
}
+
+static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+ struct inode *inode,
+ struct ocfs2_reflink_xattr_tree_args *args,
+ struct ocfs2_extent_tree *et,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_alloc_context *data_ac,
+ u64 blkno, u32 cpos, u32 len)
+{
+ int ret, first_inserted = 0;
+ u32 p_cluster, num_clusters, reflink_cpos = 0;
+ u64 new_blkno;
+ unsigned int num_buckets, reflink_buckets;
+ unsigned int bpc =
+ ocfs2_xattr_buckets_per_cluster(OCFS2_SB(inode->i_sb));
+
+ ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ num_buckets = le16_to_cpu(bucket_xh(args->old_bucket)->xh_num_buckets);
+ ocfs2_xattr_bucket_relse(args->old_bucket);
+
+ while (len && num_buckets) {
+ ret = ocfs2_claim_clusters(handle, data_ac,
+ 1, &p_cluster, &num_clusters);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ new_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
+ reflink_buckets = min(num_buckets, bpc * num_clusters);
+
+ ret = ocfs2_reflink_xattr_bucket(handle, blkno,
+ new_blkno, num_clusters,
+ &reflink_cpos, reflink_buckets,
+ meta_ac, data_ac, args);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * For the 1st allocated cluster, we make it use the same cpos
+ * so that the xattr tree looks the same as the original one
+ * in most cases.
+ */
+ if (!first_inserted) {
+ reflink_cpos = cpos;
+ first_inserted = 1;
+ }
+ ret = ocfs2_insert_extent(handle, et, reflink_cpos, new_blkno,
+ num_clusters, 0, meta_ac);
+ if (ret)
+ mlog_errno(ret);
+
+ mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
+ (unsigned long long)new_blkno, num_clusters, reflink_cpos);
+
+ len -= num_clusters;
+ blkno += ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
+ num_buckets -= reflink_buckets;
+ }
+out:
+ return ret;
+}
+
/*
* Create the same xattr extent record in the new inode's xattr tree.
*/
@@ -6885,8 +6974,6 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
void *para)
{
int ret, credits = 0;
- u32 p_cluster, num_clusters;
- u64 new_blkno;
handle_t *handle;
struct ocfs2_reflink_xattr_tree_args *args =
(struct ocfs2_reflink_xattr_tree_args *)para;
@@ -6895,6 +6982,9 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
struct ocfs2_alloc_context *data_ac = NULL;
struct ocfs2_extent_tree et;
+ mlog(0, "reflink xattr buckets %llu len %u\n",
+ (unsigned long long)blkno, len);
+
ocfs2_init_xattr_tree_extent_tree(&et,
INODE_CACHE(args->reflink->new_inode),
args->new_blk_bh);
@@ -6914,32 +7004,12 @@ static int ocfs2_reflink_xattr_rec(struct inode *inode,
goto out;
}
- ret = ocfs2_claim_clusters(handle, data_ac,
- len, &p_cluster, &num_clusters);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
-
- new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);
-
- mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
- (unsigned long long)blkno, (unsigned long long)new_blkno, len);
- ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
- meta_ac, data_ac, args);
- if (ret) {
- mlog_errno(ret);
- goto out_commit;
- }
-
- mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
- (unsigned long long)new_blkno, len, cpos);
- ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
- len, 0, meta_ac);
+ ret = ocfs2_reflink_xattr_buckets(handle, inode, args, &et,
+ meta_ac, data_ac,
+ blkno, cpos, len);
if (ret)
mlog_errno(ret);
-out_commit:
ocfs2_commit_trans(osb, handle);
out:
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index b42d6241903..393f3f659da 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -25,11 +25,10 @@ static struct buffer_head *omfs_get_bucket(struct inode *dir,
const char *name, int namelen, int *ofs)
{
int nbuckets = (dir->i_size - OMFS_DIR_START)/8;
- int block = clus_to_blk(OMFS_SB(dir->i_sb), dir->i_ino);
int bucket = omfs_hash(name, namelen, nbuckets);
*ofs = OMFS_DIR_START + bucket * 8;
- return sb_bread(dir->i_sb, block);
+ return omfs_bread(dir->i_sb, dir->i_ino);
}
static struct buffer_head *omfs_scan_list(struct inode *dir, u64 block,
@@ -42,8 +41,7 @@ static struct buffer_head *omfs_scan_list(struct inode *dir, u64 block,
*prev_block = ~0;
while (block != ~0) {
- bh = sb_bread(dir->i_sb,
- clus_to_blk(OMFS_SB(dir->i_sb), block));
+ bh = omfs_bread(dir->i_sb, block);
if (!bh) {
err = -EIO;
goto err;
@@ -86,11 +84,10 @@ static struct buffer_head *omfs_find_entry(struct inode *dir,
int omfs_make_empty(struct inode *inode, struct super_block *sb)
{
struct omfs_sb_info *sbi = OMFS_SB(sb);
- int block = clus_to_blk(sbi, inode->i_ino);
struct buffer_head *bh;
struct omfs_inode *oi;
- bh = sb_bread(sb, block);
+ bh = omfs_bread(sb, inode->i_ino);
if (!bh)
return -ENOMEM;
@@ -134,7 +131,7 @@ static int omfs_add_link(struct dentry *dentry, struct inode *inode)
brelse(bh);
/* now set the sibling and parent pointers on the new inode */
- bh = sb_bread(dir->i_sb, clus_to_blk(OMFS_SB(dir->i_sb), inode->i_ino));
+ bh = omfs_bread(dir->i_sb, inode->i_ino);
if (!bh)
goto out;
@@ -190,8 +187,7 @@ static int omfs_delete_entry(struct dentry *dentry)
if (prev != ~0) {
/* found in middle of list, get list ptr */
brelse(bh);
- bh = sb_bread(dir->i_sb,
- clus_to_blk(OMFS_SB(dir->i_sb), prev));
+ bh = omfs_bread(dir->i_sb, prev);
if (!bh)
goto out;
@@ -224,8 +220,7 @@ static int omfs_dir_is_empty(struct inode *inode)
u64 *ptr;
int i;
- bh = sb_bread(inode->i_sb, clus_to_blk(OMFS_SB(inode->i_sb),
- inode->i_ino));
+ bh = omfs_bread(inode->i_sb, inode->i_ino);
if (!bh)
return 0;
@@ -353,8 +348,7 @@ static int omfs_fill_chain(struct file *filp, void *dirent, filldir_t filldir,
/* follow chain in this bucket */
while (fsblock != ~0) {
- bh = sb_bread(dir->i_sb, clus_to_blk(OMFS_SB(dir->i_sb),
- fsblock));
+ bh = omfs_bread(dir->i_sb, fsblock);
if (!bh)
goto out;
@@ -466,7 +460,7 @@ static int omfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
hchain = (filp->f_pos >> 20) - 1;
hindex = filp->f_pos & 0xfffff;
- bh = sb_bread(dir->i_sb, clus_to_blk(OMFS_SB(dir->i_sb), dir->i_ino));
+ bh = omfs_bread(dir->i_sb, dir->i_ino);
if (!bh)
goto out;
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 6e7a3291bbe..8a6d34fa668 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -50,7 +50,7 @@ int omfs_shrink_inode(struct inode *inode)
if (inode->i_size != 0)
goto out;
- bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
+ bh = omfs_bread(inode->i_sb, next);
if (!bh)
goto out;
@@ -90,7 +90,7 @@ int omfs_shrink_inode(struct inode *inode)
if (next == ~0)
break;
- bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
+ bh = omfs_bread(inode->i_sb, next);
if (!bh)
goto out;
oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
@@ -222,7 +222,7 @@ static int omfs_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh;
sector_t next, offset;
int ret;
- u64 new_block;
+ u64 uninitialized_var(new_block);
u32 max_extents;
int extent_count;
struct omfs_extent *oe;
@@ -232,7 +232,7 @@ static int omfs_get_block(struct inode *inode, sector_t block,
int remain;
ret = -EIO;
- bh = sb_bread(inode->i_sb, clus_to_blk(sbi, inode->i_ino));
+ bh = omfs_bread(inode->i_sb, inode->i_ino);
if (!bh)
goto out;
@@ -265,7 +265,7 @@ static int omfs_get_block(struct inode *inode, sector_t block,
break;
brelse(bh);
- bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
+ bh = omfs_bread(inode->i_sb, next);
if (!bh)
goto out;
oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
@@ -312,9 +312,17 @@ static int omfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- *pagep = NULL;
- return block_write_begin(file, mapping, pos, len, flags,
- pagep, fsdata, omfs_get_block);
+ int ret;
+
+ ret = block_write_begin(mapping, pos, len, flags, pagep,
+ omfs_get_block);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
@@ -333,7 +341,29 @@ const struct file_operations omfs_file_operations = {
.splice_read = generic_file_splice_read,
};
+static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ error = inode_change_ok(inode, attr);
+ if (error)
+ return error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
const struct inode_operations omfs_file_inops = {
+ .setattr = omfs_setattr,
.truncate = omfs_truncate
};
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 089839a6cc6..14a22863291 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -19,6 +19,15 @@ MODULE_AUTHOR("Bob Copeland <me@bobcopeland.com>");
MODULE_DESCRIPTION("OMFS (ReplayTV/Karma) Filesystem for Linux");
MODULE_LICENSE("GPL");
+struct buffer_head *omfs_bread(struct super_block *sb, sector_t block)
+{
+ struct omfs_sb_info *sbi = OMFS_SB(sb);
+ if (block >= sbi->s_num_blocks)
+ return NULL;
+
+ return sb_bread(sb, clus_to_blk(sbi, block));
+}
+
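
omfs_bread() centralizes the cluster-to-block conversion and adds the bounds check that every former sb_bread() call site lacked, so a corrupt block number now fails cleanly instead of reading past the device. A hedged usage sketch ('dir' is a hypothetical inode pointer):

struct buffer_head *bh = omfs_bread(dir->i_sb, dir->i_ino);
if (!bh)		/* NULL also covers block >= sbi->s_num_blocks */
	return -EIO;
/* ... read bh->b_data ... */
brelse(bh);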
struct inode *omfs_new_inode(struct inode *dir, int mode)
{
struct inode *inode;
@@ -93,15 +102,13 @@ static int __omfs_write_inode(struct inode *inode, int wait)
struct omfs_inode *oi;
struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
struct buffer_head *bh, *bh2;
- unsigned int block;
u64 ctime;
int i;
int ret = -EIO;
int sync_failed = 0;
/* get current inode since we may have written sibling ptrs etc. */
- block = clus_to_blk(sbi, inode->i_ino);
- bh = sb_bread(inode->i_sb, block);
+ bh = omfs_bread(inode->i_sb, inode->i_ino);
if (!bh)
goto out;
@@ -140,8 +147,7 @@ static int __omfs_write_inode(struct inode *inode, int wait)
/* if mirroring writes, copy to next fsblock */
for (i = 1; i < sbi->s_mirrors; i++) {
- bh2 = sb_bread(inode->i_sb, block + i *
- (sbi->s_blocksize / sbi->s_sys_blocksize));
+ bh2 = omfs_bread(inode->i_sb, inode->i_ino + i);
if (!bh2)
goto out_brelse;
@@ -175,9 +181,13 @@ int omfs_sync_inode(struct inode *inode)
* called when an entry is deleted; we need to clear the bits in the
* bitmaps.
*/
-static void omfs_delete_inode(struct inode *inode)
+static void omfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+
+ if (inode->i_nlink)
+ return;
if (S_ISREG(inode->i_mode)) {
inode->i_size = 0;
@@ -185,7 +195,6 @@ static void omfs_delete_inode(struct inode *inode)
}
omfs_clear_range(inode->i_sb, inode->i_ino, 2);
- clear_inode(inode);
}
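
This conversion follows the 2.6.36 VFS change that folds ->clear_inode and ->delete_inode into a single ->evict_inode, which runs for every inode leaving the cache and must check i_nlink itself. A minimal sketch of the convention; demo_free_on_disk() is a hypothetical per-filesystem helper:

static void demo_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);		/* takes over from clear_inode() */

	if (inode->i_nlink)
		return;			/* inode merely dropped from cache */

	/* i_nlink == 0: this replaces the old ->delete_inode path */
	demo_free_on_disk(inode);
}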
struct inode *omfs_iget(struct super_block *sb, ino_t ino)
@@ -193,7 +202,6 @@ struct inode *omfs_iget(struct super_block *sb, ino_t ino)
struct omfs_sb_info *sbi = OMFS_SB(sb);
struct omfs_inode *oi;
struct buffer_head *bh;
- unsigned int block;
u64 ctime;
unsigned long nsecs;
struct inode *inode;
@@ -204,8 +212,7 @@ struct inode *omfs_iget(struct super_block *sb, ino_t ino)
if (!(inode->i_state & I_NEW))
return inode;
- block = clus_to_blk(sbi, ino);
- bh = sb_bread(inode->i_sb, block);
+ bh = omfs_bread(inode->i_sb, ino);
if (!bh)
goto iget_failed;
@@ -284,7 +291,7 @@ static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf)
static const struct super_operations omfs_sops = {
.write_inode = omfs_write_inode,
- .delete_inode = omfs_delete_inode,
+ .evict_inode = omfs_evict_inode,
.put_super = omfs_put_super,
.statfs = omfs_statfs,
.show_options = generic_show_options,
@@ -319,6 +326,9 @@ static int omfs_get_imap(struct super_block *sb)
goto nomem;
block = clus_to_blk(sbi, sbi->s_bitmap_ino);
+ if (block >= sbi->s_num_blocks)
+ goto nomem;
+
ptr = sbi->s_imap;
for (count = bitmap_size; count > 0; count -= sb->s_blocksize) {
bh = sb_bread(sb, block++);
@@ -417,7 +427,6 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
struct omfs_root_block *omfs_rb;
struct omfs_sb_info *sbi;
struct inode *root;
- sector_t start;
int ret = -EINVAL;
save_mount_options(sb, (char *) data);
@@ -486,8 +495,7 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_block_shift = get_bitmask_order(sbi->s_blocksize) -
get_bitmask_order(sbi->s_sys_blocksize);
- start = clus_to_blk(sbi, be64_to_cpu(omfs_sb->s_root_block));
- bh2 = sb_bread(sb, start);
+ bh2 = omfs_bread(sb, be64_to_cpu(omfs_sb->s_root_block));
if (!bh2)
goto out_brelse_bh;
@@ -504,6 +512,21 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
goto out_brelse_bh2;
}
+ if (sbi->s_bitmap_ino != ~0ULL &&
+ sbi->s_bitmap_ino > sbi->s_num_blocks) {
+ printk(KERN_ERR "omfs: free space bitmap location is corrupt "
+ "(%llx, total blocks %llx)\n",
+ (unsigned long long) sbi->s_bitmap_ino,
+ (unsigned long long) sbi->s_num_blocks);
+ goto out_brelse_bh2;
+ }
+ if (sbi->s_clustersize < 1 ||
+ sbi->s_clustersize > OMFS_MAX_CLUSTER_SIZE) {
+ printk(KERN_ERR "omfs: cluster size out of range (%d)",
+ sbi->s_clustersize);
+ goto out_brelse_bh2;
+ }
+
ret = omfs_get_imap(sb);
if (ret)
goto out_brelse_bh2;
@@ -529,6 +552,8 @@ out_brelse_bh2:
out_brelse_bh:
brelse(bh);
end:
+ if (ret)
+ kfree(sbi);
return ret;
}
diff --git a/fs/omfs/omfs.h b/fs/omfs/omfs.h
index ebe2fdbe535..7d414fef501 100644
--- a/fs/omfs/omfs.h
+++ b/fs/omfs/omfs.h
@@ -58,6 +58,7 @@ extern void omfs_make_empty_table(struct buffer_head *bh, int offset);
extern int omfs_shrink_inode(struct inode *inode);
/* inode.c */
+extern struct buffer_head *omfs_bread(struct super_block *sb, sector_t block);
extern struct inode *omfs_iget(struct super_block *sb, ino_t inode);
extern struct inode *omfs_new_inode(struct inode *dir, int mode);
extern int omfs_reserve_block(struct super_block *sb, sector_t block);
diff --git a/fs/omfs/omfs_fs.h b/fs/omfs/omfs_fs.h
index 12cca245d6e..ee5e4327de9 100644
--- a/fs/omfs/omfs_fs.h
+++ b/fs/omfs/omfs_fs.h
@@ -17,6 +17,7 @@
#define OMFS_EXTENT_CONT 0x40
#define OMFS_XOR_COUNT 19
#define OMFS_MAX_BLOCK_SIZE 8192
+#define OMFS_MAX_CLUSTER_SIZE 8
struct omfs_super_block {
char s_fill1[256];
diff --git a/fs/open.c b/fs/open.c
index 5463266db9e..630715f9f73 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -29,6 +29,7 @@
#include <linux/falloc.h>
#include <linux/fs_struct.h>
#include <linux/ima.h>
+#include <linux/dnotify.h>
#include "internal.h"
@@ -110,7 +111,7 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
error = locks_verify_truncate(inode, NULL, length);
if (!error)
- error = security_path_truncate(&path, length, 0);
+ error = security_path_truncate(&path);
if (!error)
error = do_truncate(path.dentry, length, 0, NULL);
@@ -165,8 +166,7 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
error = locks_verify_truncate(inode, file, length);
if (!error)
- error = security_path_truncate(&file->f_path, length,
- ATTR_MTIME|ATTR_CTIME);
+ error = security_path_truncate(&file->f_path);
if (!error)
error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
out_putf:
@@ -367,7 +367,7 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
if (error)
goto out;
- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
+ error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
@@ -396,7 +396,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
if (!S_ISDIR(inode->i_mode))
goto out_putf;
- error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
+ error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
if (!error)
set_fs_pwd(current->fs, &file->f_path);
out_putf:
@@ -414,7 +414,7 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
if (error)
goto out;
- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
+ error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
if (error)
goto dput_and_out;
@@ -888,7 +888,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
put_unused_fd(fd);
fd = PTR_ERR(f);
} else {
- fsnotify_open(f->f_path.dentry);
+ fsnotify_open(f);
fd_install(fd, f);
}
}
@@ -1031,7 +1031,9 @@ EXPORT_SYMBOL(generic_file_open);
/*
* This is used by subsystems that don't want seekable
- * file descriptors
+ * file descriptors. The function is not supposed to ever fail; the
+ * only reason it returns an 'int' rather than 'void' is so that it
+ * can be plugged directly into the file_operations structure.
*/
int nonseekable_open(struct inode *inode, struct file *filp)
{
diff --git a/fs/partitions/acorn.c b/fs/partitions/acorn.c
index 6921e7890be..fbeb697374d 100644
--- a/fs/partitions/acorn.c
+++ b/fs/partitions/acorn.c
@@ -45,8 +45,11 @@ adfs_partition(struct parsed_partitions *state, char *name, char *data,
nr_sects = (le32_to_cpu(dr->disc_size_high) << 23) |
(le32_to_cpu(dr->disc_size) >> 9);
- if (name)
- printk(" [%s]", name);
+ if (name) {
+ strlcat(state->pp_buf, " [", PAGE_SIZE);
+ strlcat(state->pp_buf, name, PAGE_SIZE);
+ strlcat(state->pp_buf, "]", PAGE_SIZE);
+ }
put_partition(state, slot, first_sector, nr_sects);
return dr;
}
@@ -81,14 +84,14 @@ static int riscix_partition(struct parsed_partitions *state,
if (!rr)
return -1;
- printk(" [RISCiX]");
+ strlcat(state->pp_buf, " [RISCiX]", PAGE_SIZE);
if (rr->magic == RISCIX_MAGIC) {
unsigned long size = nr_sects > 2 ? 2 : nr_sects;
int part;
- printk(" <");
+ strlcat(state->pp_buf, " <", PAGE_SIZE);
put_partition(state, slot++, first_sect, size);
for (part = 0; part < 8; part++) {
@@ -97,11 +100,13 @@ static int riscix_partition(struct parsed_partitions *state,
put_partition(state, slot++,
le32_to_cpu(rr->part[part].start),
le32_to_cpu(rr->part[part].length));
- printk("(%s)", rr->part[part].name);
+ strlcat(state->pp_buf, "(", PAGE_SIZE);
+ strlcat(state->pp_buf, rr->part[part].name, PAGE_SIZE);
+ strlcat(state->pp_buf, ")", PAGE_SIZE);
}
}
- printk(" >\n");
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
} else {
put_partition(state, slot++, first_sect, nr_sects);
}
@@ -131,7 +136,7 @@ static int linux_partition(struct parsed_partitions *state,
struct linux_part *linuxp;
unsigned long size = nr_sects > 2 ? 2 : nr_sects;
- printk(" [Linux]");
+ strlcat(state->pp_buf, " [Linux]", PAGE_SIZE);
put_partition(state, slot++, first_sect, size);
@@ -139,7 +144,7 @@ static int linux_partition(struct parsed_partitions *state,
if (!linuxp)
return -1;
- printk(" <");
+ strlcat(state->pp_buf, " <", PAGE_SIZE);
while (linuxp->magic == cpu_to_le32(LINUX_NATIVE_MAGIC) ||
linuxp->magic == cpu_to_le32(LINUX_SWAP_MAGIC)) {
if (slot == state->limit)
@@ -149,7 +154,7 @@ static int linux_partition(struct parsed_partitions *state,
le32_to_cpu(linuxp->nr_sects));
linuxp ++;
}
- printk(" >");
+ strlcat(state->pp_buf, " >", PAGE_SIZE);
put_dev_sector(sect);
return slot;
@@ -294,7 +299,7 @@ int adfspart_check_ADFS(struct parsed_partitions *state)
break;
}
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
return 1;
}
#endif
@@ -367,7 +372,7 @@ int adfspart_check_ICS(struct parsed_partitions *state)
return 0;
}
- printk(" [ICS]");
+ strlcat(state->pp_buf, " [ICS]", PAGE_SIZE);
for (slot = 1, p = (const struct ics_part *)data; p->size; p++) {
u32 start = le32_to_cpu(p->start);
@@ -401,7 +406,7 @@ int adfspart_check_ICS(struct parsed_partitions *state)
}
put_dev_sector(sect);
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
return 1;
}
#endif
@@ -461,7 +466,7 @@ int adfspart_check_POWERTEC(struct parsed_partitions *state)
return 0;
}
- printk(" [POWERTEC]");
+ strlcat(state->pp_buf, " [POWERTEC]", PAGE_SIZE);
for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) {
u32 start = le32_to_cpu(p->start);
@@ -472,7 +477,7 @@ int adfspart_check_POWERTEC(struct parsed_partitions *state)
}
put_dev_sector(sect);
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
return 1;
}
#endif
@@ -543,7 +548,7 @@ int adfspart_check_EESOX(struct parsed_partitions *state)
size = get_capacity(state->bdev->bd_disk);
put_partition(state, slot++, start, size - start);
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
}
return i ? 1 : 0;
diff --git a/fs/partitions/amiga.c b/fs/partitions/amiga.c
index ba443d4229f..70cbf44a156 100644
--- a/fs/partitions/amiga.c
+++ b/fs/partitions/amiga.c
@@ -69,7 +69,13 @@ int amiga_partition(struct parsed_partitions *state)
/* blksize is blocks per 512 byte standard block */
blksize = be32_to_cpu( rdb->rdb_BlockBytes ) / 512;
- printk(" RDSK (%d)", blksize * 512); /* Be more informative */
+ {
+ char tmp[7 + 10 + 1 + 1];
+
+ /* Be more informative */
+ snprintf(tmp, sizeof(tmp), " RDSK (%d)", blksize * 512);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
blk = be32_to_cpu(rdb->rdb_PartitionList);
put_dev_sector(sect);
for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
@@ -106,23 +112,27 @@ int amiga_partition(struct parsed_partitions *state)
{
/* Be even more informative to aid mounting */
char dostype[4];
+ char tmp[42];
+
__be32 *dt = (__be32 *)dostype;
*dt = pb->pb_Environment[16];
if (dostype[3] < ' ')
- printk(" (%c%c%c^%c)",
+ snprintf(tmp, sizeof(tmp), " (%c%c%c^%c)",
dostype[0], dostype[1],
dostype[2], dostype[3] + '@' );
else
- printk(" (%c%c%c%c)",
+ snprintf(tmp, sizeof(tmp), " (%c%c%c%c)",
dostype[0], dostype[1],
dostype[2], dostype[3]);
- printk("(res %d spb %d)",
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ snprintf(tmp, sizeof(tmp), "(res %d spb %d)",
be32_to_cpu(pb->pb_Environment[6]),
be32_to_cpu(pb->pb_Environment[4]));
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
}
res = 1;
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
rdb_done:
return res;
diff --git a/fs/partitions/atari.c b/fs/partitions/atari.c
index 4439ff1b6ce..9875b05e80a 100644
--- a/fs/partitions/atari.c
+++ b/fs/partitions/atari.c
@@ -62,7 +62,7 @@ int atari_partition(struct parsed_partitions *state)
}
pi = &rs->part[0];
- printk (" AHDI");
+ strlcat(state->pp_buf, " AHDI", PAGE_SIZE);
for (slot = 1; pi < &rs->part[4] && slot < state->limit; slot++, pi++) {
struct rootsector *xrs;
Sector sect2;
@@ -81,7 +81,7 @@ int atari_partition(struct parsed_partitions *state)
#ifdef ICD_PARTS
part_fmt = 1;
#endif
- printk(" XGM<");
+ strlcat(state->pp_buf, " XGM<", PAGE_SIZE);
partsect = extensect = be32_to_cpu(pi->st);
while (1) {
xrs = read_part_sector(state, partsect, &sect2);
@@ -120,14 +120,14 @@ int atari_partition(struct parsed_partitions *state)
break;
}
}
- printk(" >");
+ strlcat(state->pp_buf, " >", PAGE_SIZE);
}
#ifdef ICD_PARTS
if ( part_fmt!=1 ) { /* no extended partitions -> test ICD-format */
pi = &rs->icdpart[0];
/* sanity check: no ICD format if first partition invalid */
if (OK_id(pi->id)) {
- printk(" ICD<");
+ strlcat(state->pp_buf, " ICD<", PAGE_SIZE);
for (; pi < &rs->icdpart[8] && slot < state->limit; slot++, pi++) {
/* accept only GEM,BGM,RAW,LNX,SWP partitions */
if (!((pi->flg & 1) && OK_id(pi->id)))
@@ -137,13 +137,13 @@ int atari_partition(struct parsed_partitions *state)
be32_to_cpu(pi->st),
be32_to_cpu(pi->siz));
}
- printk(" >");
+ strlcat(state->pp_buf, " >", PAGE_SIZE);
}
}
#endif
put_dev_sector(sect);
- printk ("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
return 1;
}
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 5dcd4b0c553..79fbf3f390f 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -164,10 +164,16 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
if (!state)
return NULL;
+ state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!state->pp_buf) {
+ kfree(state);
+ return NULL;
+ }
+ state->pp_buf[0] = '\0';
state->bdev = bdev;
disk_name(hd, 0, state->name);
- printk(KERN_INFO " %s:", state->name);
+ snprintf(state->pp_buf, PAGE_SIZE, " %s:", state->name);
if (isdigit(state->name[strlen(state->name)-1]))
sprintf(state->name, "p");
@@ -185,17 +191,25 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
}
}
- if (res > 0)
+ if (res > 0) {
+ printk(KERN_INFO "%s", state->pp_buf);
+
+ free_page((unsigned long)state->pp_buf);
return state;
+ }
if (state->access_beyond_eod)
err = -ENOSPC;
if (err)
/* The partition is unrecognized. So report I/O errors if there were any */
res = err;
if (!res)
- printk(" unknown partition table\n");
+ strlcat(state->pp_buf, " unknown partition table\n", PAGE_SIZE);
else if (warn_no_part)
- printk(" unable to read partition table\n");
+ strlcat(state->pp_buf, " unable to read partition table\n", PAGE_SIZE);
+
+ printk(KERN_INFO "%s", state->pp_buf);
+
+ free_page((unsigned long)state->pp_buf);
kfree(state);
return ERR_PTR(res);
}
@@ -459,7 +473,6 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
}
/* everything is up and running, commence */
- INIT_RCU_HEAD(&p->rcu_head);
rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */
diff --git a/fs/partitions/check.h b/fs/partitions/check.h
index 52f8bd39939..8e4e103ba21 100644
--- a/fs/partitions/check.h
+++ b/fs/partitions/check.h
@@ -16,6 +16,7 @@ struct parsed_partitions {
int next;
int limit;
bool access_beyond_eod;
+ char *pp_buf;
};
static inline void *read_part_sector(struct parsed_partitions *state,
@@ -32,9 +33,12 @@ static inline void
put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)
{
if (n < p->limit) {
+ char tmp[1 + BDEVNAME_SIZE + 10 + 1];
+
p->parts[n].from = from;
p->parts[n].size = size;
- printk(" %s%d", p->name, n);
+ snprintf(tmp, sizeof(tmp), " %s%d", p->name, n);
+ strlcat(p->pp_buf, tmp, PAGE_SIZE);
}
}
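
put_partition() now appends through a bounded scratch buffer into the shared pp_buf, and check_partition() prints the page once, so a partition line can no longer be interleaved with unrelated printk traffic. A self-contained userspace sketch of the accumulate-then-print pattern (a simplified strlcat is written out inline since glibc does not provide one):

#include <stdio.h>
#include <string.h>

static void my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst);
	if (size > dlen)
		snprintf(dst + dlen, size - dlen, "%s", src);
}

int main(void)
{
	char pp_buf[4096] = "";		/* stands in for the PAGE_SIZE buffer */
	char tmp[32];
	int n;

	my_strlcat(pp_buf, " sda:", sizeof(pp_buf));
	for (n = 1; n <= 3; n++) {
		snprintf(tmp, sizeof(tmp), " sda%d", n);
		my_strlcat(pp_buf, tmp, sizeof(pp_buf));
	}
	my_strlcat(pp_buf, "\n", sizeof(pp_buf));

	fputs(pp_buf, stdout);		/* one print at the end, as in check.c */
	return 0;
}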
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
index 9efb2cfe241..dbb44d4bb8a 100644
--- a/fs/partitions/efi.c
+++ b/fs/partitions/efi.c
@@ -630,6 +630,6 @@ int efi_partition(struct parsed_partitions *state)
}
kfree(ptes);
kfree(gpt);
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
return 1;
}
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index 3e73de5967f..d513a07f44b 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -74,6 +74,8 @@ int ibm_partition(struct parsed_partitions *state)
} *label;
unsigned char *data;
Sector sect;
+ sector_t labelsect;
+ char tmp[64];
res = 0;
blocksize = bdev_logical_block_size(bdev);
@@ -98,10 +100,19 @@ int ibm_partition(struct parsed_partitions *state)
goto out_freeall;
/*
+ * Special case for FBA disks: label sector does not depend on
+ * blocksize.
+ */
+ if ((info->cu_type == 0x6310 && info->dev_type == 0x9336) ||
+ (info->cu_type == 0x3880 && info->dev_type == 0x3370))
+ labelsect = info->label_block;
+ else
+ labelsect = info->label_block * (blocksize >> 9);
+
+ /*
* Get volume label, extract name and type.
*/
- data = read_part_sector(state, info->label_block*(blocksize/512),
- &sect);
+ data = read_part_sector(state, labelsect, &sect);
if (data == NULL)
goto out_readerr;
@@ -134,13 +145,15 @@ int ibm_partition(struct parsed_partitions *state)
*/
blocksize = label->cms.block_size;
if (label->cms.disk_offset != 0) {
- printk("CMS1/%8s(MDSK):", name);
+ snprintf(tmp, sizeof(tmp), "CMS1/%8s(MDSK):", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
/* disk is reserved minidisk */
offset = label->cms.disk_offset;
size = (label->cms.block_count - 1)
* (blocksize >> 9);
} else {
- printk("CMS1/%8s:", name);
+ snprintf(tmp, sizeof(tmp), "CMS1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
offset = (info->label_block + 1);
size = label->cms.block_count
* (blocksize >> 9);
@@ -149,7 +162,8 @@ int ibm_partition(struct parsed_partitions *state)
size-offset*(blocksize >> 9));
} else {
if (strncmp(type, "LNX1", 4) == 0) {
- printk("LNX1/%8s:", name);
+ snprintf(tmp, sizeof(tmp), "LNX1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
if (label->lnx.ldl_version == 0xf2) {
fmt_size = label->lnx.formatted_blocks
* (blocksize >> 9);
@@ -168,7 +182,7 @@ int ibm_partition(struct parsed_partitions *state)
offset = (info->label_block + 1);
} else {
/* unlabeled disk */
- printk("(nonl)");
+ strlcat(state->pp_buf, "(nonl)", PAGE_SIZE);
size = i_size >> 9;
offset = (info->label_block + 1);
}
@@ -187,7 +201,8 @@ int ibm_partition(struct parsed_partitions *state)
* if not, something is wrong, so skip partition detection
*/
if (strncmp(type, "VOL1", 4) == 0) {
- printk("VOL1/%8s:", name);
+ snprintf(tmp, sizeof(tmp), "VOL1/%8s:", name);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
/*
* get block number and read then go through format1
* labels
@@ -243,7 +258,7 @@ int ibm_partition(struct parsed_partitions *state)
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
goto out_freeall;
diff --git a/fs/partitions/karma.c b/fs/partitions/karma.c
index 1cc928bb762..0ea19312706 100644
--- a/fs/partitions/karma.c
+++ b/fs/partitions/karma.c
@@ -50,7 +50,7 @@ int karma_partition(struct parsed_partitions *state)
}
slot++;
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
put_dev_sector(sect);
return 1;
}
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 648c9d8f335..5bf8a04b5d9 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -643,7 +643,7 @@ static bool ldm_create_data_partitions (struct parsed_partitions *pp,
return false;
}
- printk (" [LDM]");
+ strlcat(pp->pp_buf, " [LDM]", PAGE_SIZE);
/* Create the data partitions */
list_for_each (item, &ldb->v_part) {
@@ -658,7 +658,7 @@ static bool ldm_create_data_partitions (struct parsed_partitions *pp,
part_num++;
}
- printk ("\n");
+ strlcat(pp->pp_buf, "\n", PAGE_SIZE);
return true;
}
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
index 74465ff7c26..68d6a216ee7 100644
--- a/fs/partitions/mac.c
+++ b/fs/partitions/mac.c
@@ -59,7 +59,7 @@ int mac_partition(struct parsed_partitions *state)
put_dev_sector(sect);
return 0; /* not a MacOS disk */
}
- printk(" [mac]");
+ strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
blocks_in_map = be32_to_cpu(part->map_count);
for (blk = 1; blk <= blocks_in_map; ++blk) {
int pos = blk * secsize;
@@ -128,6 +128,6 @@ int mac_partition(struct parsed_partitions *state)
#endif
put_dev_sector(sect);
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
return 1;
}
diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c
index 15bfb7b1e04..5f79a6677c6 100644
--- a/fs/partitions/msdos.c
+++ b/fs/partitions/msdos.c
@@ -213,10 +213,18 @@ static void parse_solaris_x86(struct parsed_partitions *state,
put_dev_sector(sect);
return;
}
- printk(" %s%d: <solaris:", state->name, origin);
+ {
+ char tmp[1 + BDEVNAME_SIZE + 10 + 11 + 1];
+
+ snprintf(tmp, sizeof(tmp), " %s%d: <solaris:", state->name, origin);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
if (le32_to_cpu(v->v_version) != 1) {
- printk(" cannot handle version %d vtoc>\n",
- le32_to_cpu(v->v_version));
+ char tmp[64];
+
+ snprintf(tmp, sizeof(tmp), " cannot handle version %d vtoc>\n",
+ le32_to_cpu(v->v_version));
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
put_dev_sector(sect);
return;
}
@@ -224,9 +232,12 @@ static void parse_solaris_x86(struct parsed_partitions *state,
max_nparts = le16_to_cpu (v->v_nparts) > 8 ? SOLARIS_X86_NUMSLICE : 8;
for (i=0; i<max_nparts && state->next<state->limit; i++) {
struct solaris_x86_slice *s = &v->v_slice[i];
+ char tmp[3 + 10 + 1 + 1];
+
if (s->s_size == 0)
continue;
- printk(" [s%d]", i);
+ snprintf(tmp, sizeof(tmp), " [s%d]", i);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
/* solaris partitions are relative to current MS-DOS
* one; must add the offset of the current partition */
put_partition(state, state->next++,
@@ -234,7 +245,7 @@ static void parse_solaris_x86(struct parsed_partitions *state,
le32_to_cpu(s->s_size));
}
put_dev_sector(sect);
- printk(" >\n");
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
#endif
}
@@ -250,6 +261,7 @@ static void parse_bsd(struct parsed_partitions *state,
Sector sect;
struct bsd_disklabel *l;
struct bsd_partition *p;
+ char tmp[64];
l = read_part_sector(state, offset + 1, &sect);
if (!l)
@@ -258,7 +270,9 @@ static void parse_bsd(struct parsed_partitions *state,
put_dev_sector(sect);
return;
}
- printk(" %s%d: <%s:", state->name, origin, flavour);
+
+ snprintf(tmp, sizeof(tmp), " %s%d: <%s:", state->name, origin, flavour);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
if (le16_to_cpu(l->d_npartitions) < max_partitions)
max_partitions = le16_to_cpu(l->d_npartitions);
@@ -275,16 +289,18 @@ static void parse_bsd(struct parsed_partitions *state,
/* full parent partition, we have it already */
continue;
if (offset > bsd_start || offset+size < bsd_start+bsd_size) {
- printk("bad subpartition - ignored\n");
+ strlcat(state->pp_buf, "bad subpartition - ignored\n", PAGE_SIZE);
continue;
}
put_partition(state, state->next++, bsd_start, bsd_size);
}
put_dev_sector(sect);
- if (le16_to_cpu(l->d_npartitions) > max_partitions)
- printk(" (ignored %d more)",
- le16_to_cpu(l->d_npartitions) - max_partitions);
- printk(" >\n");
+ if (le16_to_cpu(l->d_npartitions) > max_partitions) {
+ snprintf(tmp, sizeof(tmp), " (ignored %d more)",
+ le16_to_cpu(l->d_npartitions) - max_partitions);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
}
#endif
@@ -333,7 +349,12 @@ static void parse_unixware(struct parsed_partitions *state,
put_dev_sector(sect);
return;
}
- printk(" %s%d: <unixware:", state->name, origin);
+ {
+ char tmp[1 + BDEVNAME_SIZE + 10 + 12 + 1];
+
+ snprintf(tmp, sizeof(tmp), " %s%d: <unixware:", state->name, origin);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
+ }
p = &l->vtoc.v_slice[1];
/* I omit the 0th slice as it is the same as the whole disk. */
while (p - &l->vtoc.v_slice[0] < UNIXWARE_NUMSLICE) {
@@ -347,7 +368,7 @@ static void parse_unixware(struct parsed_partitions *state,
p++;
}
put_dev_sector(sect);
- printk(" >\n");
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
#endif
}
@@ -376,8 +397,10 @@ static void parse_minix(struct parsed_partitions *state,
* the normal boot sector. */
if (msdos_magic_present (data + 510) &&
SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */
+ char tmp[1 + BDEVNAME_SIZE + 10 + 9 + 1];
- printk(" %s%d: <minix:", state->name, origin);
+ snprintf(tmp, sizeof(tmp), " %s%d: <minix:", state->name, origin);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
for (i = 0; i < MINIX_NR_SUBPARTITIONS; i++, p++) {
if (state->next == state->limit)
break;
@@ -386,7 +409,7 @@ static void parse_minix(struct parsed_partitions *state,
put_partition(state, state->next++,
start_sect(p), nr_sects(p));
}
- printk(" >\n");
+ strlcat(state->pp_buf, " >\n", PAGE_SIZE);
}
put_dev_sector(sect);
#endif /* CONFIG_MINIX_SUBPARTITION */
@@ -425,7 +448,7 @@ int msdos_partition(struct parsed_partitions *state)
if (aix_magic_present(state, data)) {
put_dev_sector(sect);
- printk( " [AIX]");
+ strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
return 0;
}
@@ -446,7 +469,7 @@ int msdos_partition(struct parsed_partitions *state)
fb = (struct fat_boot_sector *) data;
if (slot == 1 && fb->reserved && fb->fats
&& fat_valid_media(fb->media)) {
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
put_dev_sector(sect);
return 1;
} else {
@@ -491,21 +514,21 @@ int msdos_partition(struct parsed_partitions *state)
n = min(size, max(sector_size, n));
put_partition(state, slot, start, n);
- printk(" <");
+ strlcat(state->pp_buf, " <", PAGE_SIZE);
parse_extended(state, start, size);
- printk(" >");
+ strlcat(state->pp_buf, " >", PAGE_SIZE);
continue;
}
put_partition(state, slot, start, size);
if (SYS_IND(p) == LINUX_RAID_PARTITION)
state->parts[slot].flags = ADDPART_FLAG_RAID;
if (SYS_IND(p) == DM6_PARTITION)
- printk("[DM]");
+ strlcat(state->pp_buf, "[DM]", PAGE_SIZE);
if (SYS_IND(p) == EZD_PARTITION)
- printk("[EZD]");
+ strlcat(state->pp_buf, "[EZD]", PAGE_SIZE);
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
/* second pass - output for each on a separate line */
p = (struct partition *) (0x1be + data);
diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c
index fc22b85d436..48cec7cbca1 100644
--- a/fs/partitions/osf.c
+++ b/fs/partitions/osf.c
@@ -72,7 +72,7 @@ int osf_partition(struct parsed_partitions *state)
le32_to_cpu(partition->p_size));
slot++;
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
put_dev_sector(sect);
return 1;
}
diff --git a/fs/partitions/sgi.c b/fs/partitions/sgi.c
index 43b1df9aa16..ea8a86dceaf 100644
--- a/fs/partitions/sgi.c
+++ b/fs/partitions/sgi.c
@@ -76,7 +76,7 @@ int sgi_partition(struct parsed_partitions *state)
}
slot++;
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
put_dev_sector(sect);
return 1;
}
diff --git a/fs/partitions/sun.c b/fs/partitions/sun.c
index a32660e25f7..b5b6fcfb3d3 100644
--- a/fs/partitions/sun.c
+++ b/fs/partitions/sun.c
@@ -116,7 +116,7 @@ int sun_partition(struct parsed_partitions *state)
}
slot++;
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
put_dev_sector(sect);
return 1;
}
diff --git a/fs/partitions/sysv68.c b/fs/partitions/sysv68.c
index 9030c864428..9627ccffc1c 100644
--- a/fs/partitions/sysv68.c
+++ b/fs/partitions/sysv68.c
@@ -54,6 +54,7 @@ int sysv68_partition(struct parsed_partitions *state)
unsigned char *data;
struct dkblk0 *b;
struct slice *slice;
+ char tmp[64];
data = read_part_sector(state, 0, &sect);
if (!data)
@@ -73,7 +74,8 @@ int sysv68_partition(struct parsed_partitions *state)
return -1;
slices -= 1; /* last slice is the whole disk */
- printk("sysV68: %s(s%u)", state->name, slices);
+ snprintf(tmp, sizeof(tmp), "sysV68: %s(s%u)", state->name, slices);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
slice = (struct slice *)data;
for (i = 0; i < slices; i++, slice++) {
if (slot == state->limit)
@@ -82,11 +84,12 @@ int sysv68_partition(struct parsed_partitions *state)
put_partition(state, slot,
be32_to_cpu(slice->blkoff),
be32_to_cpu(slice->nblocks));
- printk("(s%u)", i);
+ snprintf(tmp, sizeof(tmp), "(s%u)", i);
+ strlcat(state->pp_buf, tmp, PAGE_SIZE);
}
slot++;
}
- printk("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
put_dev_sector(sect);
return 1;
}
diff --git a/fs/partitions/ultrix.c b/fs/partitions/ultrix.c
index db9eef26036..8dbaf9f77a9 100644
--- a/fs/partitions/ultrix.c
+++ b/fs/partitions/ultrix.c
@@ -39,7 +39,7 @@ int ultrix_partition(struct parsed_partitions *state)
label->pt_part[i].pi_blkoff,
label->pt_part[i].pi_nblocks);
put_dev_sector(sect);
- printk ("\n");
+ strlcat(state->pp_buf, "\n", PAGE_SIZE);
return 1;
} else {
put_dev_sector(sect);
diff --git a/fs/pipe.c b/fs/pipe.c
index db6eaaba0dd..279eef96c51 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -26,9 +26,14 @@
/*
* The max size that a non-root user is allowed to grow the pipe. Can
- * be set by root in /proc/sys/fs/pipe-max-pages
+ * be set by root in /proc/sys/fs/pipe-max-size
*/
-unsigned int pipe_max_pages = PIPE_DEF_BUFFERS * 16;
+unsigned int pipe_max_size = 1048576;
+
+/*
+ * Minimum pipe size, as required by POSIX
+ */
+unsigned int pipe_min_size = PAGE_SIZE;
/*
* We use a start+len construction, which provides full use of the
@@ -1118,26 +1123,20 @@ SYSCALL_DEFINE1(pipe, int __user *, fildes)
* Allocate a new array of pipe buffers and copy the info over. Returns the
* pipe size if successful, or -ERROR on error.
*/
-static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
struct pipe_buffer *bufs;
/*
- * Must be a power-of-2 currently
- */
- if (!is_power_of_2(arg))
- return -EINVAL;
-
- /*
* We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
* expect a lot of shrink+grow operations, just free and allocate
* again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
- if (arg < pipe->nrbufs)
+ if (nr_pages < pipe->nrbufs)
return -EBUSY;
- bufs = kcalloc(arg, sizeof(struct pipe_buffer), GFP_KERNEL);
+ bufs = kcalloc(nr_pages, sizeof(struct pipe_buffer), GFP_KERNEL);
if (unlikely(!bufs))
return -ENOMEM;
@@ -1146,20 +1145,56 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
* and adjust the indexes.
*/
if (pipe->nrbufs) {
- const unsigned int tail = pipe->nrbufs & (pipe->buffers - 1);
- const unsigned int head = pipe->nrbufs - tail;
+ unsigned int tail;
+ unsigned int head;
+
+ tail = pipe->curbuf + pipe->nrbufs;
+ if (tail < pipe->buffers)
+ tail = 0;
+ else
+ tail &= (pipe->buffers - 1);
+ head = pipe->nrbufs - tail;
if (head)
memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
if (tail)
- memcpy(bufs + head, pipe->bufs + pipe->curbuf, tail * sizeof(struct pipe_buffer));
+ memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
}
pipe->curbuf = 0;
kfree(pipe->bufs);
pipe->bufs = bufs;
- pipe->buffers = arg;
- return arg;
+ pipe->buffers = nr_pages;
+ return nr_pages * PAGE_SIZE;
+}
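
The fixed head/tail computation is the subtle part of this hunk: the live slots occupy [curbuf, curbuf + nrbufs) of a power-of-two ring, so the copy must first work out how many slots wrap past the end (the old code copied from the wrong offset). A self-contained sketch with illustrative values:

#include <stdio.h>
#include <string.h>

int main(void)
{
	int ring[8], out[8], i;
	unsigned int buffers = 8, curbuf = 6, nrbufs = 5;
	unsigned int tail, head;

	for (i = 0; i < 8; i++)
		ring[i] = i;

	tail = curbuf + nrbufs;		/* 11: past the end of the ring */
	if (tail < buffers)
		tail = 0;		/* nothing wrapped */
	else
		tail &= (buffers - 1);	/* 3 slots wrapped to the front */
	head = nrbufs - tail;		/* 2 slots before the end */

	memcpy(out, ring + curbuf, head * sizeof(int));
	memcpy(out + head, ring, tail * sizeof(int));

	for (i = 0; i < (int)nrbufs; i++)
		printf("%d ", out[i]);	/* prints: 6 7 0 1 2 */
	putchar('\n');
	return 0;
}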
+
+/*
+ * Currently we rely on the pipe array holding a power-of-2 number
+ * of pages.
+ */
+static inline unsigned int round_pipe_size(unsigned int size)
+{
+ unsigned long nr_pages;
+
+ nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+}
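
round_pipe_size() turns an arbitrary byte count into a power-of-two number of pages, which the ring indexing above relies on. A userspace sketch of the same arithmetic, assuming 4 KB pages for the demo:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_SHIFT)

static unsigned int roundup_pow2(unsigned int x)
{
	unsigned int r = 1;
	while (r < x)
		r <<= 1;
	return r;
}

static unsigned int demo_round_pipe_size(unsigned int size)
{
	unsigned int nr_pages = (size + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
	return roundup_pow2(nr_pages) << DEMO_PAGE_SHIFT;
}

int main(void)
{
	/* prints: 4096 8192 1048576 */
	printf("%u %u %u\n", demo_round_pipe_size(1),
	       demo_round_pipe_size(5000), demo_round_pipe_size(1048576));
	return 0;
}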
+
+/*
+ * This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
+ * will return an error.
+ */
+int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
+ size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
+ if (ret < 0 || !write)
+ return ret;
+
+ pipe_max_size = round_pipe_size(pipe_max_size);
+ return ret;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -1174,23 +1209,25 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&pipe->inode->i_mutex);
switch (cmd) {
- case F_SETPIPE_SZ:
- if (!capable(CAP_SYS_ADMIN) && arg > pipe_max_pages) {
- ret = -EINVAL;
+ case F_SETPIPE_SZ: {
+ unsigned int size, nr_pages;
+
+ size = round_pipe_size(arg);
+ nr_pages = size >> PAGE_SHIFT;
+
+ ret = -EINVAL;
+ if (!nr_pages)
goto out;
- }
- /*
- * The pipe needs to be at least 2 pages large to
- * guarantee POSIX behaviour.
- */
- if (arg < 2) {
- ret = -EINVAL;
+
+ if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
+ ret = -EPERM;
goto out;
}
- ret = pipe_set_size(pipe, arg);
+ ret = pipe_set_size(pipe, nr_pages);
break;
+ }
case F_GETPIPE_SZ:
- ret = pipe->buffers;
+ ret = pipe->buffers * PAGE_SIZE;
break;
default:
ret = -EINVAL;
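
After this hunk, F_SETPIPE_SZ and F_GETPIPE_SZ deal in bytes rather than buffer counts, requests are rounded up to a power-of-two number of pages, and growing past pipe-max-size needs CAP_SYS_RESOURCE rather than CAP_SYS_ADMIN. A small Linux-specific userspace demo of the byte semantics (the F_*PIPE_SZ names need _GNU_SOURCE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	long sz;

	if (pipe(fds))
		return 1;

	/* Ask for 5000 bytes; the kernel rounds up to 8192 (two pages). */
	sz = fcntl(fds[0], F_SETPIPE_SZ, 5000);
	printf("F_SETPIPE_SZ(5000) -> %ld\n", sz);

	sz = fcntl(fds[0], F_GETPIPE_SZ);
	printf("F_GETPIPE_SZ -> %ld bytes\n", sz);

	close(fds[0]);
	close(fds[1]);
	return 0;
}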
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 11a7b5c6815..2758e2afc51 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -2,7 +2,7 @@
# Makefile for the Linux proc filesystem routines.
#
-obj-$(CONFIG_PROC_FS) += proc.o
+obj-y += proc.o
proc-y := nommu.o task_nommu.o
proc-$(CONFIG_MMU) := mmu.o task_mmu.o
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9b58d38bc91..fff6572676a 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -176,7 +176,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
if (tracer)
tpid = task_pid_nr_ns(tracer, ns);
}
- cred = get_cred((struct cred *) __task_cred(p));
+ cred = get_task_cred(p);
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
diff --git a/fs/proc/base.c b/fs/proc/base.c
index acb7ef80ea4..a1c43e7c8a7 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -63,6 +63,7 @@
#include <linux/namei.h>
#include <linux/mnt_namespace.h>
#include <linux/mm.h>
+#include <linux/swap.h>
#include <linux/rcupdate.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
@@ -148,18 +149,13 @@ static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
return count;
}
-static int get_fs_path(struct task_struct *task, struct path *path, bool root)
+static int get_task_root(struct task_struct *task, struct path *root)
{
- struct fs_struct *fs;
int result = -ENOENT;
task_lock(task);
- fs = task->fs;
- if (fs) {
- read_lock(&fs->lock);
- *path = root ? fs->root : fs->pwd;
- path_get(path);
- read_unlock(&fs->lock);
+ if (task->fs) {
+ get_fs_root(task->fs, root);
result = 0;
}
task_unlock(task);
@@ -172,7 +168,12 @@ static int proc_cwd_link(struct inode *inode, struct path *path)
int result = -ENOENT;
if (task) {
- result = get_fs_path(task, path, 0);
+ task_lock(task);
+ if (task->fs) {
+ get_fs_pwd(task->fs, path);
+ result = 0;
+ }
+ task_unlock(task);
put_task_struct(task);
}
return result;
@@ -184,7 +185,7 @@ static int proc_root_link(struct inode *inode, struct path *path)
int result = -ENOENT;
if (task) {
- result = get_fs_path(task, path, 1);
+ result = get_task_root(task, path);
put_task_struct(task);
}
return result;
@@ -427,17 +428,14 @@ static const struct file_operations proc_lstats_operations = {
#endif
-/* The badness from the OOM killer */
-unsigned long badness(struct task_struct *p, unsigned long uptime);
static int proc_oom_score(struct task_struct *task, char *buffer)
{
unsigned long points = 0;
- struct timespec uptime;
- do_posix_clock_monotonic_gettime(&uptime);
read_lock(&tasklist_lock);
if (pid_alive(task))
- points = badness(task, uptime.tv_sec);
+ points = oom_badness(task, NULL, NULL,
+ totalram_pages + total_swap_pages);
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
@@ -561,9 +559,19 @@ static int proc_setattr(struct dentry *dentry, struct iattr *attr)
return -EPERM;
error = inode_change_ok(inode, attr);
- if (!error)
- error = inode_setattr(inode, attr);
- return error;
+ if (error)
+ return error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
}
static const struct inode_operations proc_def_inode_operations = {
@@ -589,7 +597,7 @@ static int mounts_open_common(struct inode *inode, struct file *file,
get_mnt_ns(ns);
}
rcu_read_unlock();
- if (ns && get_fs_path(task, &root, 1) == 0)
+ if (ns && get_task_root(task, &root) == 0)
ret = 0;
put_task_struct(task);
}
@@ -1039,8 +1047,24 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
return -EACCES;
}
+ /*
+ * Warn that /proc/pid/oom_adj is deprecated, see
+ * Documentation/feature-removal-schedule.txt.
+ */
+ printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, "
+ "please use /proc/%d/oom_score_adj instead.\n",
+ current->comm, task_pid_nr(current),
+ task_pid_nr(task), task_pid_nr(task));
task->signal->oom_adj = oom_adjust;
-
+ /*
+ * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
+ * value is always attainable.
+ */
+ if (task->signal->oom_adj == OOM_ADJUST_MAX)
+ task->signal->oom_score_adj = OOM_SCORE_ADJ_MAX;
+ else
+ task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) /
+ -OOM_DISABLE;
unlock_task_sighand(task, &flags);
put_task_struct(task);
@@ -1053,6 +1077,82 @@ static const struct file_operations proc_oom_adjust_operations = {
.llseek = generic_file_llseek,
};
+static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ char buffer[PROC_NUMBUF];
+ int oom_score_adj = OOM_SCORE_ADJ_MIN;
+ unsigned long flags;
+ size_t len;
+
+ if (!task)
+ return -ESRCH;
+ if (lock_task_sighand(task, &flags)) {
+ oom_score_adj = task->signal->oom_score_adj;
+ unlock_task_sighand(task, &flags);
+ }
+ put_task_struct(task);
+ len = snprintf(buffer, sizeof(buffer), "%d\n", oom_score_adj);
+ return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task;
+ char buffer[PROC_NUMBUF];
+ unsigned long flags;
+ long oom_score_adj;
+ int err;
+
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+
+ err = strict_strtol(strstrip(buffer), 0, &oom_score_adj);
+ if (err)
+ return -EINVAL;
+ if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
+ oom_score_adj > OOM_SCORE_ADJ_MAX)
+ return -EINVAL;
+
+ task = get_proc_task(file->f_path.dentry->d_inode);
+ if (!task)
+ return -ESRCH;
+ if (!lock_task_sighand(task, &flags)) {
+ put_task_struct(task);
+ return -ESRCH;
+ }
+ if (oom_score_adj < task->signal->oom_score_adj &&
+ !capable(CAP_SYS_RESOURCE)) {
+ unlock_task_sighand(task, &flags);
+ put_task_struct(task);
+ return -EACCES;
+ }
+
+ task->signal->oom_score_adj = oom_score_adj;
+ /*
+ * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is
+ * always attainable.
+ */
+ if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+ task->signal->oom_adj = OOM_DISABLE;
+ else
+ task->signal->oom_adj = (oom_score_adj * OOM_ADJUST_MAX) /
+ OOM_SCORE_ADJ_MAX;
+ unlock_task_sighand(task, &flags);
+ put_task_struct(task);
+ return count;
+}
+
+static const struct file_operations proc_oom_score_adj_operations = {
+ .read = oom_score_adj_read,
+ .write = oom_score_adj_write,
+};
+
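The two interfaces are kept coherent by linear scaling between the legacy oom_adj range [OOM_DISABLE, OOM_ADJUST_MAX] = [-17, 15] and the new oom_score_adj range [-1000, 1000], with the two "disable" endpoints pinned to each other. A stand-alone illustration of the arithmetic (constants copied from include/linux/oom.h):

#include <stdio.h>

#define OOM_DISABLE		(-17)
#define OOM_ADJUST_MAX		15
#define OOM_SCORE_ADJ_MIN	(-1000)
#define OOM_SCORE_ADJ_MAX	1000

int main(void)
{
	int oom_adj;

	for (oom_adj = OOM_DISABLE; oom_adj <= OOM_ADJUST_MAX; oom_adj++) {
		/* writing oom_adj: scale up, pinning the maximum */
		int score = (oom_adj == OOM_ADJUST_MAX) ? OOM_SCORE_ADJ_MAX :
			    oom_adj * OOM_SCORE_ADJ_MAX / -OOM_DISABLE;
		/* writing oom_score_adj: scale down, pinning OOM_DISABLE */
		int back = (score == OOM_SCORE_ADJ_MIN) ? OOM_DISABLE :
			   score * OOM_ADJUST_MAX / OOM_SCORE_ADJ_MAX;
		printf("oom_adj %3d -> oom_score_adj %5d -> oom_adj %3d\n",
		       oom_adj, score, back);
	}
	return 0;
}

The round trip is exact only at the endpoints and zero; intermediate values are lossy, which is one reason oom_adj is deprecated in favour of writing oom_score_adj directly.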
#ifdef CONFIG_AUDITSYSCALL
#define TMPBUFLEN 21
static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
@@ -1426,7 +1526,7 @@ static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
if (!tmp)
return -ENOMEM;
- pathname = d_path(path, tmp, PAGE_SIZE);
+ pathname = d_path_with_unreachable(path, tmp, PAGE_SIZE);
len = PTR_ERR(pathname);
if (IS_ERR(pathname))
goto out;
@@ -2625,6 +2725,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
+ REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
REG("sessionid", S_IRUGO, proc_sessionid_operations),
@@ -2959,6 +3060,7 @@ static const struct pid_entry tid_base_stuff[] = {
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
+ REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
REG("sessionid", S_IRUSR, proc_sessionid_operations),
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 2791907744e..dd29f033766 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -12,6 +12,7 @@
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mount.h>
@@ -258,17 +259,22 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
error = inode_change_ok(inode, iattr);
if (error)
- goto out;
+ return error;
- error = inode_setattr(inode, iattr);
- if (error)
- goto out;
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ iattr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, iattr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, iattr);
+ mark_inode_dirty(inode);
de->uid = inode->i_uid;
de->gid = inode->i_gid;
de->mode = inode->i_mode;
-out:
- return error;
+ return 0;
}
static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
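The same open-coded sequence replaces inode_setattr() here, in proc_setattr() above and in proc_sys_setattr() below: validate, truncate if the size changes, then copy the remaining attributes and dirty the inode. Isolated from the proc-specific bookkeeping, the new boilerplate for a filesystem with no private attribute state looks roughly like this (example_setattr is a placeholder name):

static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);	/* permission/sanity checks */
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = vmtruncate(inode, attr->ia_size);	/* size first */
		if (error)
			return error;
	}

	setattr_copy(inode, attr);	/* copy the rest into the in-core inode */
	mark_inode_dirty(inode);
	return 0;
}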
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index aea8502e58a..9c2b5f48487 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -25,11 +25,12 @@
#include "internal.h"
-static void proc_delete_inode(struct inode *inode)
+static void proc_evict_inode(struct inode *inode)
{
struct proc_dir_entry *de;
truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
/* Stop tracking associated processes */
put_pid(PROC_I(inode)->pid);
@@ -40,7 +41,6 @@ static void proc_delete_inode(struct inode *inode)
pde_put(de);
if (PROC_I(inode)->sysctl)
sysctl_head_put(PROC_I(inode)->sysctl);
- clear_inode(inode);
}
struct vfsmount *proc_mnt;
@@ -91,7 +91,7 @@ static const struct super_operations proc_sops = {
.alloc_inode = proc_alloc_inode,
.destroy_inode = proc_destroy_inode,
.drop_inode = generic_delete_inode,
- .delete_inode = proc_delete_inode,
+ .evict_inode = proc_evict_inode,
.statfs = simple_statfs,
};
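.delete_inode (paired elsewhere with a separate .clear_inode) is replaced by the single .evict_inode method from the 2.6.36 inode-eviction rework; the new method must call end_writeback() itself once the page cache is truncated, in place of the old clear_inode(). A minimal sketch of the shape the converted filesystems in this merge follow:

static void example_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);	/* drop page cache */
	end_writeback(inode);		/* was implicit via clear_inode() */
	/* ...then release filesystem-private state, as proc does with
	   pde_put() and sysctl_head_put() above... */
}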
@@ -214,8 +214,7 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
{
struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
long rv = -ENOTTY;
- long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);
- int (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long);
+ long (*ioctl)(struct file *, unsigned int, unsigned long);
spin_lock(&pde->pde_unload_lock);
if (!pde->proc_fops) {
@@ -223,19 +222,11 @@ static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigne
return rv;
}
pde->pde_users++;
- unlocked_ioctl = pde->proc_fops->unlocked_ioctl;
- ioctl = pde->proc_fops->ioctl;
+ ioctl = pde->proc_fops->unlocked_ioctl;
spin_unlock(&pde->pde_unload_lock);
- if (unlocked_ioctl) {
- rv = unlocked_ioctl(file, cmd, arg);
- if (rv == -ENOIOCTLCMD)
- rv = -EINVAL;
- } else if (ioctl) {
- WARN_ONCE(1, "Procfs ioctl handlers must use unlocked_ioctl, "
- "%pf will be called without the Bkl held\n", ioctl);
- rv = ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
- }
+ if (ioctl)
+ rv = ioctl(file, cmd, arg);
pde_users_dec(pde);
return rv;
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index ce94801f48c..d9396a4fc7f 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -209,6 +209,9 @@ void proc_device_tree_add_node(struct device_node *np,
for (pp = np->properties; pp != NULL; pp = pp->next) {
p = pp->name;
+ if (strchr(p, '/'))
+ continue;
+
if (duplicate_name(de, p))
p = fixup_name(np, de, p);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 6ff9981f0a1..5be436ea088 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -329,10 +329,19 @@ static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
return -EPERM;
error = inode_change_ok(inode, attr);
- if (!error)
- error = inode_setattr(inode, attr);
+ if (error)
+ return error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
- return error;
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
}
static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index aea1d3f1ffb..439fc1f1c1c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -210,6 +210,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
int flags = vma->vm_flags;
unsigned long ino = 0;
unsigned long long pgoff = 0;
+ unsigned long start;
dev_t dev = 0;
int len;
@@ -220,8 +221,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
}
+ /* We don't show the stack guard page in /proc/maps */
+ start = vma->vm_start;
+ if (vma->vm_flags & VM_GROWSDOWN)
+ start += PAGE_SIZE;
+
seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
- vma->vm_start,
+ start,
vma->vm_end,
flags & VM_READ ? 'r' : '-',
flags & VM_WRITE ? 'w' : '-',
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 46d4b5d72bd..cb6306e6384 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -122,11 +122,20 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
return size;
}
+static void pad_len_spaces(struct seq_file *m, int len)
+{
+ len = 25 + sizeof(void*) * 6 - len;
+ if (len < 1)
+ len = 1;
+ seq_printf(m, "%*c", len, ' ');
+}
+
/*
* display a single VMA to a sequenced file
*/
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
+ struct mm_struct *mm = vma->vm_mm;
unsigned long ino = 0;
struct file *file;
dev_t dev = 0;
@@ -155,11 +164,14 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
MAJOR(dev), MINOR(dev), ino, &len);
if (file) {
- len = 25 + sizeof(void *) * 6 - len;
- if (len < 1)
- len = 1;
- seq_printf(m, "%*c", len, ' ');
+ pad_len_spaces(m, len);
seq_path(m, &file->f_path, "");
+ } else if (mm) {
+ if (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack) {
+ pad_len_spaces(m, len);
+ seq_puts(m, "[stack]");
+ }
}
seq_putc(m, '\n');
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 277575ddc05..16829722be9 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -320,10 +320,19 @@ static int qnx4_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata)
{
struct qnx4_inode_info *qnx4_inode = qnx4_i(mapping->host);
+ int ret;
+
*pagep = NULL;
- return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+ ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
qnx4_get_block,
&qnx4_inode->mmu_private);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
{
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 12c233da1b6..aad1316a977 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -132,6 +132,22 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
EXPORT_SYMBOL(dq_data_lock);
+void __quota_error(struct super_block *sb, const char *func,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ if (printk_ratelimit()) {
+ va_start(args, fmt);
+ printk(KERN_ERR "Quota error (device %s): %s: ",
+ sb->s_id, func);
+ vprintk(fmt, args);
+ printk("\n");
+ va_end(args);
+ }
+}
+EXPORT_SYMBOL(__quota_error);
+
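All the printk/q_warn call sites converted below funnel through this one helper; the call sites use a quota_error() wrapper macro (added elsewhere in this series) that supplies __func__ automatically. A user-space analogue of the varargs pattern, with the printk_ratelimit() gate elided:

#include <stdarg.h>
#include <stdio.h>

static void quota_error(const char *dev, const char *func,
			const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fprintf(stderr, "Quota error (device %s): %s: ", dev, func);
	vfprintf(stderr, fmt, args);	/* forward the caller's arguments */
	fputc('\n', stderr);
	va_end(args);
}

int main(void)
{
	quota_error("sda1", "demo", "Can't read quota data block %u", 42u);
	return 0;
}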
#if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
static char *quotatypes[] = INITQFNAMES;
#endif
@@ -676,7 +692,7 @@ static void prune_dqcache(int count)
* This is called from kswapd when we think we need some
* more memory
*/
-static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
+static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
if (nr) {
spin_lock(&dq_list_lock);
@@ -705,11 +721,8 @@ void dqput(struct dquot *dquot)
return;
#ifdef CONFIG_QUOTA_DEBUG
if (!atomic_read(&dquot->dq_count)) {
- printk("VFS: dqput: trying to free free dquot\n");
- printk("VFS: device %s, dquot of %s %d\n",
- dquot->dq_sb->s_id,
- quotatypes[dquot->dq_type],
- dquot->dq_id);
+ quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
+ quotatypes[dquot->dq_type], dquot->dq_id);
BUG();
}
#endif
@@ -732,9 +745,9 @@ we_slept:
/* Commit dquot before releasing */
ret = dquot->dq_sb->dq_op->write_dquot(dquot);
if (ret < 0) {
- printk(KERN_ERR "VFS: cannot write quota structure on "
- "device %s (error %d). Quota may get out of "
- "sync!\n", dquot->dq_sb->s_id, ret);
+ quota_error(dquot->dq_sb, "Can't write quota structure"
+ " (error %d). Quota may get out of sync!",
+ ret);
/*
* We clear dirty bit anyway, so that we avoid
* infinite loop here
@@ -885,7 +898,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
continue;
#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
@@ -914,9 +927,9 @@ static void add_dquot_ref(struct super_block *sb, int type)
#ifdef CONFIG_QUOTA_DEBUG
if (reserved) {
- printk(KERN_WARNING "VFS (%s): Writes happened before quota"
- " was turned on thus quota information is probably "
- "inconsistent. Please run quotacheck(8).\n", sb->s_id);
+ quota_error(sb, "Writes happened before quota was turned on "
+ "thus quota information is probably inconsistent. "
+ "Please run quotacheck(8)");
}
#endif
}
@@ -947,7 +960,9 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
if (dqput_blocks(dquot)) {
#ifdef CONFIG_QUOTA_DEBUG
if (atomic_read(&dquot->dq_count) != 1)
- printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
+ quota_error(inode->i_sb, "Adding dquot with "
+ "dq_count %d to dispose list",
+ atomic_read(&dquot->dq_count));
#endif
spin_lock(&dq_list_lock);
/* As dquot must have currently users it can't be on
@@ -986,6 +1001,7 @@ static void remove_dquot_ref(struct super_block *sb, int type,
struct list_head *tofree_head)
{
struct inode *inode;
+ int reserved = 0;
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
@@ -995,10 +1011,20 @@ static void remove_dquot_ref(struct super_block *sb, int type,
* only quota pointers and these have separate locking
* (dqptr_sem).
*/
- if (!IS_NOQUOTA(inode))
+ if (!IS_NOQUOTA(inode)) {
+ if (unlikely(inode_get_rsv_space(inode) > 0))
+ reserved = 1;
remove_inode_dquot_ref(inode, type, tofree_head);
+ }
}
spin_unlock(&inode_lock);
+#ifdef CONFIG_QUOTA_DEBUG
+ if (reserved) {
+ printk(KERN_WARNING "VFS (%s): Writes happened after quota"
+ " was disabled thus quota information is probably "
+ "inconsistent. Please run quotacheck(8).\n", sb->s_id);
+ }
+#endif
}
/* Gather all references from inodes and drop them */
@@ -1304,6 +1330,15 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
return QUOTA_NL_NOWARN;
}
+static int dquot_active(const struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ if (IS_NOQUOTA(inode))
+ return 0;
+ return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
+}
+
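sb_any_quota_loaded() and sb_any_quota_suspended() each return a bitmask with one bit per quota type, so the bitwise "loaded & ~suspended" (note: not a logical &&) is nonzero exactly when at least one type is both loaded and not suspended. For illustration, with USRQUOTA as bit 0 and GRPQUOTA as bit 1:

#include <stdio.h>

int main(void)
{
	unsigned int loaded    = 0x3;	/* user + group quota loaded */
	unsigned int suspended = 0x1;	/* user quota suspended */

	/* group quota is still live, so quota is considered active */
	printf("active: %s\n", (loaded & ~suspended) ? "yes" : "no");

	suspended = 0x3;		/* both types suspended */
	printf("active: %s\n", (loaded & ~suspended) ? "yes" : "no");
	return 0;
}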
/*
* Initialize quota pointers in inode
*
@@ -1323,7 +1358,7 @@ static void __dquot_initialize(struct inode *inode, int type)
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
- if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
+ if (!dquot_active(inode))
return;
/* First get references to structures we might need. */
@@ -1507,7 +1542,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex
*/
- if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
+ if (!dquot_active(inode)) {
inode_incr_space(inode, number, reserve);
goto out;
}
@@ -1559,7 +1594,7 @@ int dquot_alloc_inode(const struct inode *inode)
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
- if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
+ if (!dquot_active(inode))
return 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
@@ -1596,7 +1631,7 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
int cnt;
- if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
+ if (!dquot_active(inode)) {
inode_claim_rsv_space(inode, number);
return 0;
}
@@ -1629,7 +1664,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
- if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) {
+ if (!dquot_active(inode)) {
inode_decr_space(inode, number, reserve);
return;
}
@@ -1667,7 +1702,7 @@ void dquot_free_inode(const struct inode *inode)
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
- if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode))
+ if (!dquot_active(inode))
return;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
@@ -1790,7 +1825,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
struct super_block *sb = inode->i_sb;
int ret;
- if (!sb_any_quota_active(sb) || IS_NOQUOTA(inode))
+ if (!dquot_active(inode))
return 0;
if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
@@ -1957,7 +1992,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
truncate_inode_pages(&toputinode[cnt]->i_data,
0);
mutex_unlock(&toputinode[cnt]->i_mutex);
- mark_inode_dirty(toputinode[cnt]);
+ mark_inode_dirty_sync(toputinode[cnt]);
}
mutex_unlock(&dqopt->dqonoff_mutex);
}
@@ -2270,7 +2305,7 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
memset(di, 0, sizeof(*di));
di->d_version = FS_DQUOT_VERSION;
di->d_flags = dquot->dq_type == USRQUOTA ?
- XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+ FS_USER_QUOTA : FS_GROUP_QUOTA;
di->d_id = dquot->dq_id;
spin_lock(&dq_data_lock);
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 24f03407eeb..9e48874eabc 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -65,8 +65,7 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
info->dqi_usable_bs, blk << info->dqi_blocksize_bits);
if (ret != info->dqi_usable_bs) {
- q_warn(KERN_WARNING "VFS: dquota write failed on "
- "dev %s\n", sb->s_id);
+ quota_error(sb, "dquota write failed");
if (ret >= 0)
ret = -EIO;
}
@@ -160,9 +159,8 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
/* No matter whether write succeeds block is out of list */
if (write_blk(info, blk, buf) < 0)
- q_warn(KERN_ERR
- "VFS: Can't write block (%u) with free entries.\n",
- blk);
+ quota_error(info->dqi_sb, "Can't write block (%u) "
+ "with free entries", blk);
return 0;
out_buf:
kfree(tmpbuf);
@@ -252,9 +250,8 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
*err = remove_free_dqentry(info, buf, blk);
if (*err < 0) {
- q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't "
- "remove block (%u) from entry free list.\n",
- blk);
+ quota_error(dquot->dq_sb, "Can't remove block (%u) "
+ "from entry free list", blk);
goto out_buf;
}
}
@@ -268,16 +265,15 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
}
#ifdef __QUOTA_QT_PARANOIA
if (i == qtree_dqstr_in_blk(info)) {
- printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
- "but it shouldn't.\n");
+ quota_error(dquot->dq_sb, "Data block full but it shouldn't");
*err = -EIO;
goto out_buf;
}
#endif
*err = write_blk(info, blk, buf);
if (*err < 0) {
- q_warn(KERN_ERR "VFS: find_free_dqentry(): Can't write quota "
- "data block %u.\n", blk);
+ quota_error(dquot->dq_sb, "Can't write quota data block %u",
+ blk);
goto out_buf;
}
dquot->dq_off = (blk << info->dqi_blocksize_bits) +
@@ -311,8 +307,8 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
} else {
ret = read_blk(info, *treeblk, buf);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Can't read tree quota block "
- "%u.\n", *treeblk);
+ quota_error(dquot->dq_sb, "Can't read tree quota "
+ "block %u", *treeblk);
goto out_buf;
}
}
@@ -323,9 +319,9 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
if (newblk) {
- printk(KERN_ERR "VFS: Inserting already present quota "
- "entry (block %u).\n",
- le32_to_cpu(ref[get_index(info,
+ quota_error(dquot->dq_sb, "Inserting already present "
+ "quota entry (block %u)",
+ le32_to_cpu(ref[get_index(info,
dquot->dq_id, depth)]));
ret = -EIO;
goto out_buf;
@@ -373,8 +369,8 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
if (!dquot->dq_off) {
ret = dq_insert_tree(info, dquot);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Error %zd occurred while "
- "creating quota.\n", ret);
+ quota_error(sb, "Error %zd occurred while creating "
+ "quota", ret);
kfree(ddquot);
return ret;
}
@@ -385,8 +381,7 @@ int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
dquot->dq_off);
if (ret != info->dqi_entry_size) {
- q_warn(KERN_WARNING "VFS: dquota write failed on dev %s\n",
- sb->s_id);
+ quota_error(sb, "dquota write failed");
if (ret >= 0)
ret = -ENOSPC;
} else {
@@ -410,14 +405,15 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
if (!buf)
return -ENOMEM;
if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
- q_warn(KERN_ERR "VFS: Quota structure has offset to other "
- "block (%u) than it should (%u).\n", blk,
- (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
+ quota_error(dquot->dq_sb, "Quota structure has offset to "
+ "other block (%u) than it should (%u)", blk,
+ (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
goto out_buf;
}
ret = read_blk(info, blk, buf);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", blk);
+ quota_error(dquot->dq_sb, "Can't read quota data block %u",
+ blk);
goto out_buf;
}
dh = (struct qt_disk_dqdbheader *)buf;
@@ -427,8 +423,8 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
if (ret >= 0)
ret = put_free_dqblk(info, buf, blk);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Can't move quota data block (%u) "
- "to free list.\n", blk);
+ quota_error(dquot->dq_sb, "Can't move quota data block "
+ "(%u) to free list", blk);
goto out_buf;
}
} else {
@@ -440,15 +436,15 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
/* Insert will write block itself */
ret = insert_free_dqentry(info, buf, blk);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Can't insert quota data "
- "block (%u) to free entry list.\n", blk);
+ quota_error(dquot->dq_sb, "Can't insert quota "
+ "data block (%u) to free entry list", blk);
goto out_buf;
}
} else {
ret = write_blk(info, blk, buf);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Can't write quota data "
- "block %u\n", blk);
+ quota_error(dquot->dq_sb, "Can't write quota "
+ "data block %u", blk);
goto out_buf;
}
}
@@ -472,7 +468,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
return -ENOMEM;
ret = read_blk(info, *blk, buf);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Can't read quota data block %u\n", *blk);
+ quota_error(dquot->dq_sb, "Can't read quota data "
+ "block %u", blk);
goto out_buf;
}
newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -496,8 +493,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
} else {
ret = write_blk(info, *blk, buf);
if (ret < 0)
- q_warn(KERN_ERR "VFS: Can't write quota tree "
- "block %u.\n", *blk);
+ quota_error(dquot->dq_sb, "Can't write quota "
+ "tree block %u", blk);
}
}
out_buf:
@@ -529,7 +526,8 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
return -ENOMEM;
ret = read_blk(info, blk, buf);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+ quota_error(dquot->dq_sb, "Can't read quota tree "
+ "block %u", blk);
goto out_buf;
}
ddquot = buf + sizeof(struct qt_disk_dqdbheader);
@@ -539,8 +537,8 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
ddquot += info->dqi_entry_size;
}
if (i == qtree_dqstr_in_blk(info)) {
- q_warn(KERN_ERR "VFS: Quota for id %u referenced "
- "but not present.\n", dquot->dq_id);
+ quota_error(dquot->dq_sb, "Quota for id %u referenced "
+ "but not present", dquot->dq_id);
ret = -EIO;
goto out_buf;
} else {
@@ -564,7 +562,8 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
return -ENOMEM;
ret = read_blk(info, blk, buf);
if (ret < 0) {
- q_warn(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
+ quota_error(dquot->dq_sb, "Can't read quota tree block %u",
+ blk);
goto out_buf;
}
ret = 0;
@@ -598,7 +597,7 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
#ifdef __QUOTA_QT_PARANOIA
/* Invalidated quota? */
if (!sb_dqopt(dquot->dq_sb)->files[type]) {
- printk(KERN_ERR "VFS: Quota invalidated while reading!\n");
+ quota_error(sb, "Quota invalidated while reading!");
return -EIO;
}
#endif
@@ -607,8 +606,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
offset = find_dqentry(info, dquot);
if (offset <= 0) { /* Entry not present? */
if (offset < 0)
- q_warn(KERN_ERR "VFS: Can't read quota "
- "structure for id %u.\n", dquot->dq_id);
+ quota_error(sb, "Can't read quota structure "
+ "for id %u", dquot->dq_id);
dquot->dq_off = 0;
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
@@ -625,8 +624,8 @@ int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
if (ret != info->dqi_entry_size) {
if (ret >= 0)
ret = -EIO;
- q_warn(KERN_ERR "VFS: Error while reading quota "
- "structure for id %u.\n", dquot->dq_id);
+ quota_error(sb, "Error while reading quota structure for id %u",
+ dquot->dq_id);
set_bit(DQ_FAKE_B, &dquot->dq_flags);
memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
kfree(ddquot);
diff --git a/fs/quota/quota_tree.h b/fs/quota/quota_tree.h
index ccc3e71fb1d..a1ab8db81a5 100644
--- a/fs/quota/quota_tree.h
+++ b/fs/quota/quota_tree.h
@@ -22,10 +22,4 @@ struct qt_disk_dqdbheader {
#define QT_TREEOFF 1 /* Offset of tree in file in blocks */
-#define q_warn(fmt, args...) \
-do { \
- if (printk_ratelimit()) \
- printk(fmt, ## args); \
-} while(0)
-
#endif /* _LINUX_QUOTAIO_TREE_H */
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 4af344c5852..34b37a67bb1 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -95,8 +95,7 @@ static int v1_commit_dqblk(struct dquot *dquot)
(char *)&dqblk, sizeof(struct v1_disk_dqblk),
v1_dqoff(dquot->dq_id));
if (ret != sizeof(struct v1_disk_dqblk)) {
- printk(KERN_WARNING "VFS: dquota write failed on dev %s\n",
- dquot->dq_sb->s_id);
+ quota_error(dquot->dq_sb, "dquota write failed");
if (ret >= 0)
ret = -EIO;
goto out;
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 135206af145..65444d29406 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -63,9 +63,8 @@ static int v2_read_header(struct super_block *sb, int type,
size = sb->s_op->quota_read(sb, type, (char *)dqhead,
sizeof(struct v2_disk_dqheader), 0);
if (size != sizeof(struct v2_disk_dqheader)) {
- q_warn(KERN_WARNING "quota_v2: Failed header read:"
- " expected=%zd got=%zd\n",
- sizeof(struct v2_disk_dqheader), size);
+ quota_error(sb, "Failed header read: expected=%zd got=%zd",
+ sizeof(struct v2_disk_dqheader), size);
return 0;
}
return 1;
@@ -106,8 +105,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
size = sb->s_op->quota_read(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
if (size != sizeof(struct v2_disk_dqinfo)) {
- q_warn(KERN_WARNING "quota_v2: Can't read info structure on device %s.\n",
- sb->s_id);
+ quota_error(sb, "Can't read info structure");
return -1;
}
info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS);
@@ -167,8 +165,7 @@ static int v2_write_file_info(struct super_block *sb, int type)
size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF);
if (size != sizeof(struct v2_disk_dqinfo)) {
- q_warn(KERN_WARNING "Can't write info structure on device %s.\n",
- sb->s_id);
+ quota_error(sb, "Can't write info structure");
return -1;
}
return 0;
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index d532c20fc17..9eead2c796b 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -146,9 +146,8 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
return ret;
}
- ret = simple_setsize(inode, newsize);
-
- return ret;
+ truncate_setsize(inode, newsize);
+ return 0;
}
/*****************************************************************************/
@@ -183,7 +182,7 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
}
}
- generic_setattr(inode, ia);
+ setattr_copy(inode, ia);
out:
ia->ia_valid = old_ia_valid;
return ret;
diff --git a/fs/read_write.c b/fs/read_write.c
index 9c0485236e6..74e36586e4d 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -311,7 +311,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
else
ret = do_sync_read(file, buf, count, pos);
if (ret > 0) {
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
add_rchar(current, ret);
}
inc_syscr(current);
@@ -367,7 +367,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
else
ret = do_sync_write(file, buf, count, pos);
if (ret > 0) {
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
add_wchar(current, ret);
}
inc_syscw(current);
@@ -675,9 +675,9 @@ out:
kfree(iov);
if ((ret + (type == READ)) > 0) {
if (type == READ)
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
else
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
}
return ret;
}
diff --git a/fs/readdir.c b/fs/readdir.c
index 7723401f8d8..356f71528ad 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -4,6 +4,7 @@
* Copyright (C) 1995 Linus Torvalds
*/
+#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
@@ -54,7 +55,6 @@ EXPORT_SYMBOL(vfs_readdir);
* anyway. Thus the special "fillonedir()" function for that
* case (the low-level handlers don't need to care about this).
*/
-#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
#ifdef __ARCH_WANT_OLD_READDIR
@@ -152,7 +152,8 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
struct linux_dirent __user * dirent;
struct getdents_callback * buf = (struct getdents_callback *) __buf;
unsigned long d_ino;
- int reclen = ALIGN(NAME_OFFSET(dirent) + namlen + 2, sizeof(long));
+ int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
+ sizeof(long));
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
@@ -237,7 +238,8 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
{
struct linux_dirent64 __user *dirent;
struct getdents_callback64 * buf = (struct getdents_callback64 *) __buf;
- int reclen = ALIGN(NAME_OFFSET(dirent) + namlen + 1, sizeof(u64));
+ int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
+ sizeof(u64));
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
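The removed NAME_OFFSET macro derived the d_name offset by pointer arithmetic on a dirent pointer that was still uninitialized at the point of use; offsetof() computes the same constant without the undefined behaviour. A stand-alone illustration of the record-length arithmetic (the struct mimics the linux_dirent64 layout for demonstration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct linux_dirent64 {
	uint64_t	d_ino;
	int64_t		d_off;
	uint16_t	d_reclen;
	uint8_t		d_type;
	char		d_name[];
};

int main(void)
{
	size_t namlen = strlen("example.txt");
	size_t reclen = ALIGN(offsetof(struct linux_dirent64, d_name)
			      + namlen + 1, sizeof(uint64_t));

	/* 19-byte header + 11-byte name + NUL, rounded up to 32 */
	printf("header %zu, reclen %zu\n",
	       offsetof(struct linux_dirent64, d_name), reclen);
	return 0;
}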
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index b82cdd8a45d..6846371498b 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -38,20 +38,24 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
BUG_ON(!S_ISREG(inode->i_mode));
+ if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
+ return 0;
+
+ mutex_lock(&(REISERFS_I(inode)->tailpack));
+
+ if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
+ mutex_unlock(&(REISERFS_I(inode)->tailpack));
+ return 0;
+ }
+
/* fast out for when nothing needs to be done */
- if ((atomic_read(&inode->i_count) > 1 ||
- !(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
+ if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
!tail_has_to_be_packed(inode)) &&
REISERFS_I(inode)->i_prealloc_count <= 0) {
+ mutex_unlock(&(REISERFS_I(inode)->tailpack));
return 0;
}
- mutex_lock(&inode->i_mutex);
-
- mutex_lock(&(REISERFS_I(inode)->i_mmap));
- if (REISERFS_I(inode)->i_flags & i_ever_mapped)
- REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
-
reiserfs_write_lock(inode->i_sb);
/* freeing preallocation only involves relogging blocks that
* are already in the current transaction. preallocation gets
@@ -94,9 +98,10 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
if (!err)
err = jbegin_failure;
- if (!err && atomic_read(&inode->i_count) <= 1 &&
+ if (!err &&
(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
tail_has_to_be_packed(inode)) {
+
/* if regular file is released by last holder and it has been
appended (we append by unformatted node only) or its direct
item(s) had to be converted, then it may have to be
@@ -104,27 +109,28 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
err = reiserfs_truncate_file(inode, 0);
}
out:
- mutex_unlock(&(REISERFS_I(inode)->i_mmap));
- mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
+ mutex_unlock(&(REISERFS_I(inode)->tailpack));
return err;
}
-static int reiserfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int reiserfs_file_open(struct inode *inode, struct file *file)
{
- struct inode *inode;
-
- inode = file->f_path.dentry->d_inode;
- mutex_lock(&(REISERFS_I(inode)->i_mmap));
- REISERFS_I(inode)->i_flags |= i_ever_mapped;
- mutex_unlock(&(REISERFS_I(inode)->i_mmap));
-
- return generic_file_mmap(file, vma);
+ int err = dquot_file_open(inode, file);
+ if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
+ /* somebody might be tailpacking on final close; wait for it */
+ mutex_lock(&(REISERFS_I(inode)->tailpack));
+ atomic_inc(&REISERFS_I(inode)->openers);
+ mutex_unlock(&(REISERFS_I(inode)->tailpack));
+ }
+ return err;
}
static void reiserfs_vfs_truncate_file(struct inode *inode)
{
+ mutex_lock(&(REISERFS_I(inode)->tailpack));
reiserfs_truncate_file(inode, 1);
+ mutex_unlock(&(REISERFS_I(inode)->tailpack));
}
/* Sync a reiserfs file. */
@@ -288,8 +294,8 @@ const struct file_operations reiserfs_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = reiserfs_compat_ioctl,
#endif
- .mmap = reiserfs_file_mmap,
- .open = dquot_file_open,
+ .mmap = generic_file_mmap,
+ .open = reiserfs_file_open,
.release = reiserfs_file_release,
.fsync = reiserfs_sync_file,
.aio_read = generic_file_aio_read,
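The old code keyed "last close" off i_count and serialized mmap against tail packing with i_mmap; the new scheme counts opens explicitly and lets the last closer do the packing under the tailpack mutex, with the open-vs-final-close race handled by atomic_inc_not_zero(). A user-space analogue of the idiom using C11 atomics (names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int openers;
static pthread_mutex_t tailpack = PTHREAD_MUTEX_INITIALIZER;

static void file_open(void)
{
	int old = atomic_load(&openers);

	/* atomic_inc_not_zero(): never resurrect a count a closer owns */
	while (old != 0 &&
	       !atomic_compare_exchange_weak(&openers, &old, old + 1))
		;
	if (old == 0) {
		/* somebody might be tailpacking on final close; wait */
		pthread_mutex_lock(&tailpack);
		atomic_fetch_add(&openers, 1);
		pthread_mutex_unlock(&tailpack);
	}
}

static void file_release(void)
{
	int old = atomic_load(&openers);

	/* fast out, mirroring atomic_add_unless(&openers, -1, 1) */
	while (old != 1 &&
	       !atomic_compare_exchange_weak(&openers, &old, old - 1))
		;
	if (old != 1)
		return;

	pthread_mutex_lock(&tailpack);
	if (atomic_fetch_sub(&openers, 1) == 1)
		printf("last close: pack the tail here\n");
	pthread_mutex_unlock(&tailpack);
}

int main(void)
{
	file_open();
	file_open();
	file_release();
	file_release();		/* triggers the final-close work */
	return 0;
}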
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 0f22fdaf54a..ae35413dcbe 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -25,7 +25,7 @@ int reiserfs_commit_write(struct file *f, struct page *page,
int reiserfs_prepare_write(struct file *f, struct page *page,
unsigned from, unsigned to);
-void reiserfs_delete_inode(struct inode *inode)
+void reiserfs_evict_inode(struct inode *inode)
{
/* We need blocks for transaction + (user+group) quota update (possibly delete) */
int jbegin_count =
@@ -35,10 +35,12 @@ void reiserfs_delete_inode(struct inode *inode)
int depth;
int err;
- if (!is_bad_inode(inode))
+ if (!inode->i_nlink && !is_bad_inode(inode))
dquot_initialize(inode);
truncate_inode_pages(&inode->i_data, 0);
+ if (inode->i_nlink)
+ goto no_delete;
depth = reiserfs_write_lock_once(inode->i_sb);
@@ -77,9 +79,14 @@ void reiserfs_delete_inode(struct inode *inode)
;
}
out:
- clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
+ end_writeback(inode); /* note this must go after the journal_end to prevent deadlock */
+ dquot_drop(inode);
inode->i_blocks = 0;
reiserfs_write_unlock_once(inode->i_sb, depth);
+
+no_delete:
+ end_writeback(inode);
+ dquot_drop(inode);
}
static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
@@ -1138,7 +1145,6 @@ static void init_inode(struct inode *inode, struct treepath *path)
REISERFS_I(inode)->i_prealloc_count = 0;
REISERFS_I(inode)->i_trans_id = 0;
REISERFS_I(inode)->i_jl = NULL;
- mutex_init(&(REISERFS_I(inode)->i_mmap));
reiserfs_init_xattr_rwsem(inode);
if (stat_data_v1(ih)) {
@@ -1221,7 +1227,7 @@ static void init_inode(struct inode *inode, struct treepath *path)
inode_set_bytes(inode,
to_real_used_space(inode, inode->i_blocks,
SD_V2_SIZE));
- /* read persistent inode attributes from sd and initalise
+ /* read persistent inode attributes from sd and initialise
generic inode flags from them */
REISERFS_I(inode)->i_attrs = sd_v2_attrs(sd);
sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
@@ -1841,7 +1847,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
REISERFS_I(inode)->i_attrs =
REISERFS_I(dir)->i_attrs & REISERFS_INHERIT_MASK;
sd_attrs_to_i_attrs(REISERFS_I(inode)->i_attrs, inode);
- mutex_init(&(REISERFS_I(inode)->i_mmap));
reiserfs_init_xattr_rwsem(inode);
/* key to search for correct place for new stat data */
@@ -2587,8 +2592,7 @@ static int reiserfs_write_begin(struct file *file,
old_ref = th->t_refcount;
th->t_refcount++;
}
- ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- reiserfs_get_block);
+ ret = __block_write_begin(page, pos, len, reiserfs_get_block);
if (ret && reiserfs_transaction_running(inode->i_sb)) {
struct reiserfs_transaction_handle *th = current->journal_info;
/* this gets a little ugly. If reiserfs_get_block returned an
@@ -3059,10 +3063,25 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
- return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs,
reiserfs_get_blocks_direct_io, NULL);
+
+ /*
+ * In case of error extending write may have instantiated a few
+ * blocks outside i_size. Trim these off again.
+ */
+ if (unlikely((rw & WRITE) && ret < 0)) {
+ loff_t isize = i_size_read(inode);
+ loff_t end = offset + iov_length(iov, nr_segs);
+
+ if (end > isize)
+ vmtruncate(inode, isize);
+ }
+
+ return ret;
}
int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
@@ -3072,6 +3091,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
int depth;
int error;
+ error = inode_change_ok(inode, attr);
+ if (error)
+ return error;
+
/* must be turned off for recursive notify_change calls */
ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
@@ -3121,55 +3144,58 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
goto out;
}
- error = inode_change_ok(inode, attr);
- if (!error) {
- if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
- error = reiserfs_chown_xattrs(inode, attr);
+ if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+ struct reiserfs_transaction_handle th;
+ int jbegin_count =
+ 2 *
+ (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
+ REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
+ 2;
- if (!error) {
- struct reiserfs_transaction_handle th;
- int jbegin_count =
- 2 *
- (REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb) +
- REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb)) +
- 2;
-
- /* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */
- error =
- journal_begin(&th, inode->i_sb,
- jbegin_count);
- if (error)
- goto out;
- error = dquot_transfer(inode, attr);
- if (error) {
- journal_end(&th, inode->i_sb,
- jbegin_count);
- goto out;
- }
- /* Update corresponding info in inode so that everything is in
- * one transaction */
- if (attr->ia_valid & ATTR_UID)
- inode->i_uid = attr->ia_uid;
- if (attr->ia_valid & ATTR_GID)
- inode->i_gid = attr->ia_gid;
- mark_inode_dirty(inode);
- error =
- journal_end(&th, inode->i_sb, jbegin_count);
- }
- }
- if (!error) {
- /*
- * Relax the lock here, as it might truncate the
- * inode pages and wait for inode pages locks.
- * To release such page lock, the owner needs the
- * reiserfs lock
- */
- reiserfs_write_unlock_once(inode->i_sb, depth);
- error = inode_setattr(inode, attr);
- depth = reiserfs_write_lock_once(inode->i_sb);
+ error = reiserfs_chown_xattrs(inode, attr);
+
+ if (error)
+ return error;
+
+ /* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */
+ error = journal_begin(&th, inode->i_sb, jbegin_count);
+ if (error)
+ goto out;
+ error = dquot_transfer(inode, attr);
+ if (error) {
+ journal_end(&th, inode->i_sb, jbegin_count);
+ goto out;
}
+
+ /* Update corresponding info in inode so that everything is in
+ * one transaction */
+ if (attr->ia_valid & ATTR_UID)
+ inode->i_uid = attr->ia_uid;
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = attr->ia_gid;
+ mark_inode_dirty(inode);
+ error = journal_end(&th, inode->i_sb, jbegin_count);
+ if (error)
+ goto out;
+ }
+
+ /*
+ * Relax the lock here, as it might truncate the
+ * inode pages and wait for inode pages locks.
+ * To release such page lock, the owner needs the
+ * reiserfs lock
+ */
+ reiserfs_write_unlock_once(inode->i_sb, depth);
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode))
+ error = vmtruncate(inode, attr->ia_size);
+
+ if (!error) {
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
}
+ depth = reiserfs_write_lock_once(inode->i_sb);
if (!error && reiserfs_posixacl(inode->i_sb)) {
if (attr->ia_valid & ATTR_MODE)
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 19fbc810e8e..1ec952b1f03 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -983,7 +983,6 @@ static int flush_older_commits(struct super_block *s,
static int reiserfs_async_progress_wait(struct super_block *s)
{
- DEFINE_WAIT(wait);
struct reiserfs_journal *j = SB_JOURNAL(s);
if (atomic_read(&j->j_async_throttle)) {
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 9822fa15118..e15ff612002 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -525,6 +525,8 @@ static struct inode *reiserfs_alloc_inode(struct super_block *sb)
kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
+ atomic_set(&ei->openers, 0);
+ mutex_init(&ei->tailpack);
return &ei->vfs_inode;
}
@@ -589,11 +591,6 @@ out:
reiserfs_write_unlock_once(inode->i_sb, lock_depth);
}
-static void reiserfs_clear_inode(struct inode *inode)
-{
- dquot_drop(inode);
-}
-
#ifdef CONFIG_QUOTA
static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
size_t, loff_t);
@@ -606,8 +603,7 @@ static const struct super_operations reiserfs_sops = {
.destroy_inode = reiserfs_destroy_inode,
.write_inode = reiserfs_write_inode,
.dirty_inode = reiserfs_dirty_inode,
- .clear_inode = reiserfs_clear_inode,
- .delete_inode = reiserfs_delete_inode,
+ .evict_inode = reiserfs_evict_inode,
.put_super = reiserfs_put_super,
.write_super = reiserfs_write_super,
.sync_fs = reiserfs_sync_fs,
diff --git a/fs/signalfd.c b/fs/signalfd.c
index f329849ce3c..1c5a6add779 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -88,6 +88,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid);
err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun);
err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
+ err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
break;
case __SI_POLL:
err |= __put_user(kinfo->si_band, &uinfo->ssi_band);
@@ -111,6 +112,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid);
err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid);
err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
+ err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
break;
default:
/*
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 9551cb6f7fe..450c9194198 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -46,7 +46,7 @@
#define SMB_TTL_DEFAULT 1000
-static void smb_delete_inode(struct inode *);
+static void smb_evict_inode(struct inode *);
static void smb_put_super(struct super_block *);
static int smb_statfs(struct dentry *, struct kstatfs *);
static int smb_show_options(struct seq_file *, struct vfsmount *);
@@ -102,7 +102,7 @@ static const struct super_operations smb_sops =
.alloc_inode = smb_alloc_inode,
.destroy_inode = smb_destroy_inode,
.drop_inode = generic_delete_inode,
- .delete_inode = smb_delete_inode,
+ .evict_inode = smb_evict_inode,
.put_super = smb_put_super,
.statfs = smb_statfs,
.show_options = smb_show_options,
@@ -324,15 +324,15 @@ out:
* All blocking cleanup operations need to go here to avoid races.
*/
static void
-smb_delete_inode(struct inode *ino)
+smb_evict_inode(struct inode *ino)
{
DEBUG1("ino=%ld\n", ino->i_ino);
truncate_inode_pages(&ino->i_data, 0);
+ end_writeback(ino);
lock_kernel();
if (smb_close(ino))
PARANOIA("could not close inode %ld\n", ino->i_ino);
unlock_kernel();
- clear_inode(ino);
}
static struct option opts[] = {
@@ -714,9 +714,7 @@ smb_notify_change(struct dentry *dentry, struct iattr *attr)
error = server->ops->truncate(inode, attr->ia_size);
if (error)
goto out;
- error = simple_setsize(inode, attr->ia_size);
- if (error)
- goto out;
+ truncate_setsize(inode, attr->ia_size);
refresh = 1;
}
diff --git a/fs/splice.c b/fs/splice.c
index ac22b00d86c..8f1dfaecc8f 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -354,7 +354,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
break;
error = add_to_page_cache_lru(page, mapping, index,
- mapping_gfp_mask(mapping));
+ GFP_KERNEL);
if (unlikely(error)) {
page_cache_release(page);
if (error == -EEXIST)
@@ -399,17 +399,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
* If the page isn't uptodate, we may need to start io on it
*/
if (!PageUptodate(page)) {
- /*
- * If in nonblock mode then dont block on waiting
- * for an in-flight io page
- */
- if (flags & SPLICE_F_NONBLOCK) {
- if (!trylock_page(page)) {
- error = -EAGAIN;
- break;
- }
- } else
- lock_page(page);
+ lock_page(page);
/*
* Page was truncated, or invalidated by the
@@ -597,7 +587,6 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
struct iovec *vec, __vec[PIPE_DEF_BUFFERS];
- pgoff_t index;
ssize_t res;
size_t this_len;
int error;
@@ -621,7 +610,6 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
goto shrink_ret;
}
- index = *ppos >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
@@ -1282,7 +1270,8 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
{
struct file *file = sd->u.file;
- return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
+ return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
+ sd->flags);
}
/**
@@ -1371,8 +1360,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
if (off_in)
return -ESPIPE;
if (off_out) {
- if (!out->f_op || !out->f_op->llseek ||
- out->f_op->llseek == no_llseek)
+ if (!(out->f_mode & FMODE_PWRITE))
return -EINVAL;
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
@@ -1392,8 +1380,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
if (off_out)
return -ESPIPE;
if (off_in) {
- if (!in->f_op || !in->f_op->llseek ||
- in->f_op->llseek == no_llseek)
+ if (!(in->f_mode & FMODE_PREAD))
return -EINVAL;
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
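The offset-capability checks now test FMODE_PREAD/FMODE_PWRITE directly (can this file do positional I/O?) instead of inferring it from a usable llseek method. From user space: splice(2) with a non-NULL offset still works against ordinary files and still fails with ESPIPE when the offset is on the pipe side, but a descriptor that cannot pread/pwrite now gets EINVAL. A small sketch of the common success path:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	int fds[2], fd;
	loff_t off = 0;
	ssize_t n;

	if (pipe(fds) < 0)
		return 1;
	fd = open("/tmp/splice-demo", O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (fd < 0)
		return 1;
	if (write(fds[1], "hello", 5) != 5)
		return 1;

	/* regular files have FMODE_PWRITE, so an explicit offset is allowed */
	n = splice(fds[0], NULL, fd, &off, 5, 0);
	printf("spliced %zd bytes, file offset now %lld\n", n, (long long)off);
	return 0;
}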
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index cc6ce8a84c2..e5f63da64d0 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -5,13 +5,13 @@ config SQUASHFS
help
Saying Y here includes support for SquashFS 4.0 (a Compressed
Read-Only File System). Squashfs is a highly compressed read-only
- filesystem for Linux. It uses zlib compression to compress both
+ filesystem for Linux. It uses zlib/lzo compression to compress both
files, inodes and directories. Inodes in the system are very small
and all blocks are packed to minimise data overhead. Block sizes
greater than 4K are supported up to a maximum of 1 Mbytes (default
block size 128K). SquashFS 4.0 supports 64 bit filesystems and files
(larger than 4GB), full uid/gid information, hard links and
- timestamps.
+ timestamps.
Squashfs is intended for general read-only filesystem use, for
archival use (i.e. in cases where a .tar.gz file may be used), and in
@@ -26,7 +26,7 @@ config SQUASHFS
If unsure, say N.
-config SQUASHFS_XATTRS
+config SQUASHFS_XATTR
bool "Squashfs XATTR support"
depends on SQUASHFS
default n
@@ -37,9 +37,24 @@ config SQUASHFS_XATTRS
If unsure, say N.
-config SQUASHFS_EMBEDDED
+config SQUASHFS_LZO
+ bool "Include support for LZO compressed file systems"
+ depends on SQUASHFS
+ default n
+ select LZO_DECOMPRESS
+ help
+ Saying Y here includes support for reading Squashfs file systems
+ compressed with LZO compression. LZO compression is mainly
+ aimed at embedded systems with slower CPUs where the overheads
+ of zlib are too high.
- bool "Additional option for memory-constrained systems"
+ LZO is not the standard compression used in Squashfs and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
+config SQUASHFS_EMBEDDED
+ bool "Additional option for memory-constrained systems"
depends on SQUASHFS
default n
help
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 2cee3e9fa45..7672bac8d32 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,5 +5,5 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o
-squashfs-$(CONFIG_SQUASHFS_XATTRS) += xattr.o xattr_id.o
-
+squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o
+squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index 157478da6ac..24af9ce9722 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -40,9 +40,11 @@ static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
};
+#ifndef CONFIG_SQUASHFS_LZO
static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = {
NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
};
+#endif
static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
NULL, NULL, NULL, 0, "unknown", 0
@@ -51,7 +53,11 @@ static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
static const struct squashfs_decompressor *decompressor[] = {
&squashfs_zlib_comp_ops,
&squashfs_lzma_unsupported_comp_ops,
+#ifdef CONFIG_SQUASHFS_LZO
+ &squashfs_lzo_comp_ops,
+#else
&squashfs_lzo_unsupported_comp_ops,
+#endif
&squashfs_unknown_comp_ops
};
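For context, the lookup helper in this file (untouched by this patch) walks the table by the compression id recorded in the superblock, stopping at the terminating "unknown" entry whose id is 0; mounting is then refused when the matched entry has .supported == 0, which is exactly what the unsupported stubs above provide:

const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
{
	int i;

	for (i = 0; decompressor[i]->id; i++)
		if (id == decompressor[i]->id)
			break;

	return decompressor[i];
}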
diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c
new file mode 100644
index 00000000000..5d87789bf1c
--- /dev/null
+++ b/fs/squashfs/lzo_wrapper.c
@@ -0,0 +1,136 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2010 LG Electronics
+ * Chan Jeong <chan.jeong@lge.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * lzo_wrapper.c
+ */
+
+#include <linux/mutex.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/lzo.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs_fs_i.h"
+#include "squashfs.h"
+#include "decompressor.h"
+
+struct squashfs_lzo {
+ void *input;
+ void *output;
+};
+
+static void *lzo_init(struct squashfs_sb_info *msblk)
+{
+ int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
+
+ struct squashfs_lzo *stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ goto failed;
+ stream->input = vmalloc(block_size);
+ if (stream->input == NULL)
+ goto failed;
+ stream->output = vmalloc(block_size);
+ if (stream->output == NULL)
+ goto failed2;
+
+ return stream;
+
+failed2:
+ vfree(stream->input);
+failed:
+ ERROR("Failed to allocate lzo workspace\n");
+ kfree(stream);
+ return NULL;
+}
+
+
+static void lzo_free(void *strm)
+{
+ struct squashfs_lzo *stream = strm;
+
+ if (stream) {
+ vfree(stream->input);
+ vfree(stream->output);
+ }
+ kfree(stream);
+}
+
+
+static int lzo_uncompress(struct squashfs_sb_info *msblk, void **buffer,
+ struct buffer_head **bh, int b, int offset, int length, int srclength,
+ int pages)
+{
+ struct squashfs_lzo *stream = msblk->stream;
+ void *buff = stream->input;
+ int avail, i, bytes = length, res;
+ size_t out_len = srclength;
+
+ mutex_lock(&msblk->read_data_mutex);
+
+ for (i = 0; i < b; i++) {
+ wait_on_buffer(bh[i]);
+ if (!buffer_uptodate(bh[i]))
+ goto block_release;
+
+ avail = min(bytes, msblk->devblksize - offset);
+ memcpy(buff, bh[i]->b_data + offset, avail);
+ buff += avail;
+ bytes -= avail;
+ offset = 0;
+ put_bh(bh[i]);
+ }
+
+ res = lzo1x_decompress_safe(stream->input, (size_t)length,
+ stream->output, &out_len);
+ if (res != LZO_E_OK)
+ goto failed;
+
+ res = bytes = (int)out_len;
+ for (i = 0, buff = stream->output; bytes && i < pages; i++) {
+ avail = min_t(int, bytes, PAGE_CACHE_SIZE);
+ memcpy(buffer[i], buff, avail);
+ buff += avail;
+ bytes -= avail;
+ }
+
+ mutex_unlock(&msblk->read_data_mutex);
+ return res;
+
+block_release:
+ for (; i < b; i++)
+ put_bh(bh[i]);
+
+failed:
+ mutex_unlock(&msblk->read_data_mutex);
+
+ ERROR("lzo decompression failed, data probably corrupt\n");
+ return -EIO;
+}
+
+const struct squashfs_decompressor squashfs_lzo_comp_ops = {
+ .init = lzo_init,
+ .free = lzo_free,
+ .decompress = lzo_uncompress,
+ .id = LZO_COMPRESSION,
+ .name = "lzo",
+ .supported = 1
+};
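
For reference, lzo_uncompress() above leans on the lzo1x_decompress_safe() contract: the size_t passed by reference carries the output capacity going in and the decompressed size coming out, and the "safe" variant bounds-checks against it. A minimal sketch of that contract with hypothetical src/dst buffers (not part of the patch):

	size_t out_len = dst_size;	/* in: capacity of dst; out: bytes written */
	int res = lzo1x_decompress_safe(src, src_len, dst, &out_len);

	if (res != LZO_E_OK)		/* corrupt input never overruns dst */
		return -EIO;
	/* out_len now holds the actual decompressed length */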
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 733a17c4294..5d45569d5f7 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -104,3 +104,6 @@ extern const struct xattr_handler *squashfs_xattr_handlers[];
/* zlib_wrapper.c */
extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
+
+/* lzo_wrapper.c */
+extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 8eabb808b78..c5137fc9ab1 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -274,7 +274,7 @@ struct squashfs_base_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
};
struct squashfs_ipc_inode {
@@ -283,7 +283,7 @@ struct squashfs_ipc_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le32 nlink;
};
@@ -293,7 +293,7 @@ struct squashfs_lipc_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le32 nlink;
__le32 xattr;
};
@@ -304,7 +304,7 @@ struct squashfs_dev_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le32 nlink;
__le32 rdev;
};
@@ -315,7 +315,7 @@ struct squashfs_ldev_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le32 nlink;
__le32 rdev;
__le32 xattr;
@@ -327,7 +327,7 @@ struct squashfs_symlink_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le32 nlink;
__le32 symlink_size;
char symlink[0];
@@ -339,7 +339,7 @@ struct squashfs_reg_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le32 start_block;
__le32 fragment;
__le32 offset;
@@ -353,7 +353,7 @@ struct squashfs_lreg_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le64 start_block;
__le64 file_size;
__le64 sparse;
@@ -370,7 +370,7 @@ struct squashfs_dir_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le32 start_block;
__le32 nlink;
__le16 file_size;
@@ -384,7 +384,7 @@ struct squashfs_ldir_inode {
__le16 uid;
__le16 guid;
__le32 mtime;
- __le32 inode_number;
+ __le32 inode_number;
__le32 nlink;
__le32 file_size;
__le32 start_block;
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
index c7655e8b31c..652b8541f9c 100644
--- a/fs/squashfs/xattr.c
+++ b/fs/squashfs/xattr.c
@@ -18,7 +18,7 @@
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
- * xattr_id.c
+ * xattr.c
*/
#include <linux/init.h>
@@ -295,7 +295,7 @@ static const struct xattr_handler squashfs_xattr_security_handler = {
.get = squashfs_security_get
};
-static inline const struct xattr_handler *squashfs_xattr_handler(int type)
+static const struct xattr_handler *squashfs_xattr_handler(int type)
{
if (type & ~(SQUASHFS_XATTR_PREFIX_MASK | SQUASHFS_XATTR_VALUE_OOL))
/* ignore unrecognised type */
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
index 9da071ae181..49fe0d719fb 100644
--- a/fs/squashfs/xattr.h
+++ b/fs/squashfs/xattr.h
@@ -21,7 +21,7 @@
* xattr.h
*/
-#ifdef CONFIG_SQUASHFS_XATTRS
+#ifdef CONFIG_SQUASHFS_XATTR
extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
u64 *, int *);
extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
diff --git a/fs/stat.c b/fs/stat.c
index c4ecd52c573..12e90e21390 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -68,7 +68,8 @@ int vfs_fstat(unsigned int fd, struct kstat *stat)
}
EXPORT_SYMBOL(vfs_fstat);
-int vfs_fstatat(int dfd, char __user *filename, struct kstat *stat, int flag)
+int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
+ int flag)
{
struct path path;
int error = -EINVAL;
@@ -91,13 +92,13 @@ out:
}
EXPORT_SYMBOL(vfs_fstatat);
-int vfs_stat(char __user *name, struct kstat *stat)
+int vfs_stat(const char __user *name, struct kstat *stat)
{
return vfs_fstatat(AT_FDCWD, name, stat, 0);
}
EXPORT_SYMBOL(vfs_stat);
-int vfs_lstat(char __user *name, struct kstat *stat)
+int vfs_lstat(const char __user *name, struct kstat *stat)
{
return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
}
@@ -147,7 +148,8 @@ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * sta
return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
-SYSCALL_DEFINE2(stat, char __user *, filename, struct __old_kernel_stat __user *, statbuf)
+SYSCALL_DEFINE2(stat, const char __user *, filename,
+ struct __old_kernel_stat __user *, statbuf)
{
struct kstat stat;
int error;
@@ -159,7 +161,8 @@ SYSCALL_DEFINE2(stat, char __user *, filename, struct __old_kernel_stat __user *
return cp_old_stat(&stat, statbuf);
}
-SYSCALL_DEFINE2(lstat, char __user *, filename, struct __old_kernel_stat __user *, statbuf)
+SYSCALL_DEFINE2(lstat, const char __user *, filename,
+ struct __old_kernel_stat __user *, statbuf)
{
struct kstat stat;
int error;
@@ -234,7 +237,8 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
-SYSCALL_DEFINE2(newstat, char __user *, filename, struct stat __user *, statbuf)
+SYSCALL_DEFINE2(newstat, const char __user *, filename,
+ struct stat __user *, statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
@@ -244,7 +248,8 @@ SYSCALL_DEFINE2(newstat, char __user *, filename, struct stat __user *, statbuf)
return cp_new_stat(&stat, statbuf);
}
-SYSCALL_DEFINE2(newlstat, char __user *, filename, struct stat __user *, statbuf)
+SYSCALL_DEFINE2(newlstat, const char __user *, filename,
+ struct stat __user *, statbuf)
{
struct kstat stat;
int error;
@@ -257,7 +262,7 @@ SYSCALL_DEFINE2(newlstat, char __user *, filename, struct stat __user *, statbuf
}
#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
-SYSCALL_DEFINE4(newfstatat, int, dfd, char __user *, filename,
+SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
struct stat __user *, statbuf, int, flag)
{
struct kstat stat;
@@ -355,7 +360,8 @@ static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
-SYSCALL_DEFINE2(stat64, char __user *, filename, struct stat64 __user *, statbuf)
+SYSCALL_DEFINE2(stat64, const char __user *, filename,
+ struct stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
@@ -366,7 +372,8 @@ SYSCALL_DEFINE2(stat64, char __user *, filename, struct stat64 __user *, statbuf
return error;
}
-SYSCALL_DEFINE2(lstat64, char __user *, filename, struct stat64 __user *, statbuf)
+SYSCALL_DEFINE2(lstat64, const char __user *, filename,
+ struct stat64 __user *, statbuf)
{
struct kstat stat;
int error = vfs_lstat(filename, &stat);
@@ -388,7 +395,7 @@ SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
return error;
}
-SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename,
+SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
struct stat64 __user *, statbuf, int, flag)
{
struct kstat stat;
diff --git a/fs/statfs.c b/fs/statfs.c
index 4ef021f3b61..30ea8c8a996 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -2,38 +2,83 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
+#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/uaccess.h>
-int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+static int flags_by_mnt(int mnt_flags)
{
- int retval = -ENODEV;
-
- if (dentry) {
- retval = -ENOSYS;
- if (dentry->d_sb->s_op->statfs) {
- memset(buf, 0, sizeof(*buf));
- retval = security_sb_statfs(dentry);
- if (retval)
- return retval;
- retval = dentry->d_sb->s_op->statfs(dentry, buf);
- if (retval == 0 && buf->f_frsize == 0)
- buf->f_frsize = buf->f_bsize;
- }
- }
+ int flags = 0;
+
+ if (mnt_flags & MNT_READONLY)
+ flags |= ST_RDONLY;
+ if (mnt_flags & MNT_NOSUID)
+ flags |= ST_NOSUID;
+ if (mnt_flags & MNT_NODEV)
+ flags |= ST_NODEV;
+ if (mnt_flags & MNT_NOEXEC)
+ flags |= ST_NOEXEC;
+ if (mnt_flags & MNT_NOATIME)
+ flags |= ST_NOATIME;
+ if (mnt_flags & MNT_NODIRATIME)
+ flags |= ST_NODIRATIME;
+ if (mnt_flags & MNT_RELATIME)
+ flags |= ST_RELATIME;
+ return flags;
+}
+
+static int flags_by_sb(int s_flags)
+{
+ int flags = 0;
+ if (s_flags & MS_SYNCHRONOUS)
+ flags |= ST_SYNCHRONOUS;
+ if (s_flags & MS_MANDLOCK)
+ flags |= ST_MANDLOCK;
+ return flags;
+}
+
+static int calculate_f_flags(struct vfsmount *mnt)
+{
+ return ST_VALID | flags_by_mnt(mnt->mnt_flags) |
+ flags_by_sb(mnt->mnt_sb->s_flags);
+}
+
+int statfs_by_dentry(struct dentry *dentry, struct kstatfs *buf)
+{
+ int retval;
+
+ if (!dentry->d_sb->s_op->statfs)
+ return -ENOSYS;
+
+ memset(buf, 0, sizeof(*buf));
+ retval = security_sb_statfs(dentry);
+ if (retval)
+ return retval;
+ retval = dentry->d_sb->s_op->statfs(dentry, buf);
+ if (retval == 0 && buf->f_frsize == 0)
+ buf->f_frsize = buf->f_bsize;
return retval;
}
+int vfs_statfs(struct path *path, struct kstatfs *buf)
+{
+ int error;
+
+ error = statfs_by_dentry(path->dentry, buf);
+ if (!error)
+ buf->f_flags = calculate_f_flags(path->mnt);
+ return error;
+}
EXPORT_SYMBOL(vfs_statfs);
-static int vfs_statfs_native(struct dentry *dentry, struct statfs *buf)
+static int do_statfs_native(struct path *path, struct statfs *buf)
{
struct kstatfs st;
int retval;
- retval = vfs_statfs(dentry, &st);
+ retval = vfs_statfs(path, &st);
if (retval)
return retval;
@@ -67,17 +112,18 @@ static int vfs_statfs_native(struct dentry *dentry, struct statfs *buf)
buf->f_fsid = st.f_fsid;
buf->f_namelen = st.f_namelen;
buf->f_frsize = st.f_frsize;
+ buf->f_flags = st.f_flags;
memset(buf->f_spare, 0, sizeof(buf->f_spare));
}
return 0;
}
-static int vfs_statfs64(struct dentry *dentry, struct statfs64 *buf)
+static int do_statfs64(struct path *path, struct statfs64 *buf)
{
struct kstatfs st;
int retval;
- retval = vfs_statfs(dentry, &st);
+ retval = vfs_statfs(path, &st);
if (retval)
return retval;
@@ -94,6 +140,7 @@ static int vfs_statfs64(struct dentry *dentry, struct statfs64 *buf)
buf->f_fsid = st.f_fsid;
buf->f_namelen = st.f_namelen;
buf->f_frsize = st.f_frsize;
+ buf->f_flags = st.f_flags;
memset(buf->f_spare, 0, sizeof(buf->f_spare));
}
return 0;
@@ -107,7 +154,7 @@ SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct statfs __user *, b
error = user_path(pathname, &path);
if (!error) {
struct statfs tmp;
- error = vfs_statfs_native(path.dentry, &tmp);
+ error = do_statfs_native(&path, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
path_put(&path);
@@ -125,7 +172,7 @@ SYSCALL_DEFINE3(statfs64, const char __user *, pathname, size_t, sz, struct stat
error = user_path(pathname, &path);
if (!error) {
struct statfs64 tmp;
- error = vfs_statfs64(path.dentry, &tmp);
+ error = do_statfs64(&path, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
path_put(&path);
@@ -143,7 +190,7 @@ SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf)
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs_native(file->f_path.dentry, &tmp);
+ error = do_statfs_native(&file->f_path, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
fput(file);
@@ -164,7 +211,7 @@ SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user
file = fget(fd);
if (!file)
goto out;
- error = vfs_statfs64(file->f_path.dentry, &tmp);
+ error = do_statfs64(&file->f_path, &tmp);
if (!error && copy_to_user(buf, &tmp, sizeof(tmp)))
error = -EFAULT;
fput(file);
@@ -183,7 +230,7 @@ SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf)
if (!s)
return -EINVAL;
- err = vfs_statfs(s->s_root, &sbuf);
+ err = statfs_by_dentry(s->s_root, &sbuf);
drop_super(s);
if (err)
return err;
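
Since vfs_statfs() now takes a struct path rather than a bare dentry, the per-mount state is in reach and f_flags comes back filled in. A minimal caller sketch, assuming a struct path reference is already held (illustrative only, not from the patch):

	struct kstatfs st;
	int err;

	err = vfs_statfs(&path, &st);
	if (!err) {
		/* st.f_flags carries ST_VALID plus ST_RDONLY, ST_NOSUID,
		 * etc., derived from the vfsmount and superblock flags */
	}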
diff --git a/fs/super.c b/fs/super.c
index 5c35bc7a499..9674ab2c871 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -305,8 +305,13 @@ retry:
if (s) {
up_write(&s->s_umount);
destroy_super(s);
+ s = NULL;
}
down_write(&old->s_umount);
+ if (unlikely(!(old->s_flags & MS_BORN))) {
+ deactivate_locked_super(old);
+ goto retry;
+ }
return old;
}
}
@@ -358,10 +363,10 @@ EXPORT_SYMBOL(drop_super);
*/
void sync_supers(void)
{
- struct super_block *sb, *n;
+ struct super_block *sb, *p = NULL;
spin_lock(&sb_lock);
- list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
+ list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
if (sb->s_op->write_super && sb->s_dirt) {
@@ -374,9 +379,13 @@ void sync_supers(void)
up_read(&sb->s_umount);
spin_lock(&sb_lock);
- __put_super(sb);
+ if (p)
+ __put_super(p);
+ p = sb;
}
}
+ if (p)
+ __put_super(p);
spin_unlock(&sb_lock);
}
@@ -390,10 +399,10 @@ void sync_supers(void)
*/
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
- struct super_block *sb, *n;
+ struct super_block *sb, *p = NULL;
spin_lock(&sb_lock);
- list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
+ list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
sb->s_count++;
@@ -405,8 +414,12 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
up_read(&sb->s_umount);
spin_lock(&sb_lock);
- __put_super(sb);
+ if (p)
+ __put_super(p);
+ p = sb;
}
+ if (p)
+ __put_super(p);
spin_unlock(&sb_lock);
}
@@ -568,10 +581,10 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
static void do_emergency_remount(struct work_struct *work)
{
- struct super_block *sb, *n;
+ struct super_block *sb, *p = NULL;
spin_lock(&sb_lock);
- list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
+ list_for_each_entry(sb, &super_blocks, s_list) {
if (list_empty(&sb->s_instances))
continue;
sb->s_count++;
@@ -585,8 +598,12 @@ static void do_emergency_remount(struct work_struct *work)
}
up_write(&sb->s_umount);
spin_lock(&sb_lock);
- __put_super(sb);
+ if (p)
+ __put_super(p);
+ p = sb;
}
+ if (p)
+ __put_super(p);
spin_unlock(&sb_lock);
kfree(work);
printk("Emergency Remount complete\n");
@@ -767,7 +784,16 @@ int get_sb_bdev(struct file_system_type *fs_type,
goto error_bdev;
}
+ /*
+ * s_umount nests inside bd_mutex during
+ * __invalidate_device(). close_bdev_exclusive()
+ * acquires bd_mutex and can't be called under
+ * s_umount. Drop s_umount temporarily. This is safe
+ * as we're holding an active reference.
+ */
+ up_write(&s->s_umount);
close_bdev_exclusive(bdev, mode);
+ down_write(&s->s_umount);
} else {
char b[BDEVNAME_SIZE];
@@ -903,6 +929,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
goto out_free_secdata;
BUG_ON(!mnt->mnt_sb);
WARN_ON(!mnt->mnt_sb->s_bdi);
+ mnt->mnt_sb->s_flags |= MS_BORN;
error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
if (error)
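
The sync_supers(), iterate_supers() and do_emergency_remount() hunks above all switch from list_for_each_entry_safe() to the same trailing-reference idiom: the next pointer cached by the _safe variant could be freed while sb_lock is dropped, so instead the current entry is pinned with s_count and the previous one is released a step behind. The shared shape, as a sketch (not literal code from any one loop):

	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;			/* pin current entry */
		spin_unlock(&sb_lock);

		/* ... per-superblock work, sb_lock dropped ... */

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);		/* release the previous one */
		p = sb;				/* keep current pinned */
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);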
diff --git a/fs/sync.c b/fs/sync.c
index c9f83f480ec..ba76b9623e7 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -42,7 +42,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
if (wait)
sync_inodes_sb(sb);
else
- writeback_inodes_sb_locked(sb);
+ writeback_inodes_sb(sb);
if (sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, wait);
@@ -128,31 +128,6 @@ void emergency_sync(void)
}
}
-/*
- * Generic function to fsync a file.
- */
-int file_fsync(struct file *filp, int datasync)
-{
- struct inode *inode = filp->f_mapping->host;
- struct super_block * sb;
- int ret, err;
-
- /* sync the inode to buffers */
- ret = write_inode_now(inode, 0);
-
- /* sync the superblock to buffers */
- sb = inode->i_sb;
- if (sb->s_dirt && sb->s_op->write_super)
- sb->s_op->write_super(sb);
-
- /* .. finally sync the buffers to disk */
- err = sync_blockdev(sb->s_bdev);
- if (!ret)
- ret = err;
- return ret;
-}
-EXPORT_SYMBOL(file_fsync);
-
/**
* vfs_fsync_range - helper to sync a range of data & metadata to disk
* @file: file to sync
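
file_fsync() goes away here with its in-tree users converted; simple block-based filesystems typically point ->fsync at the libfs helper instead. A hedged sketch of the replacement wiring (the foo_* name is hypothetical):

	#include <linux/fs.h>	/* generic_file_fsync() */

	const struct file_operations foo_file_operations = {
		.read	= do_sync_read,
		.write	= do_sync_write,
		.fsync	= generic_file_fsync,	/* syncs mapping buffers + inode */
	};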
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 1beaa739d0a..1b27b5688f6 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -593,7 +593,8 @@ EXPORT_SYMBOL_GPL(sysfs_add_file_to_group);
* @mode: file permissions.
*
*/
-int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
+int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
+ mode_t mode)
{
struct sysfs_dirent *sd;
struct iattr newattrs;
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index bde1a4c3679..cffb1fd8ba3 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -117,10 +117,12 @@ int sysfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (error)
goto out;
- /* this ignores size changes */
- generic_setattr(inode, iattr);
-
error = sysfs_sd_setattr(sd, iattr);
+ if (error)
+ goto out;
+
+ /* this ignores size changes */
+ setattr_copy(inode, iattr);
out:
mutex_unlock(&sysfs_mutex);
@@ -310,15 +312,15 @@ struct inode * sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd)
* The sysfs_dirent serves as both an inode and a directory entry for sysfs.
* To prevent the sysfs inode numbers from being freed prematurely we take a
* reference to sysfs_dirent from the sysfs inode. A
- * super_operations.delete_inode() implementation is needed to drop that
+ * super_operations.evict_inode() implementation is needed to drop that
* reference upon inode destruction.
*/
-void sysfs_delete_inode(struct inode *inode)
+void sysfs_evict_inode(struct inode *inode)
{
struct sysfs_dirent *sd = inode->i_private;
truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
+ end_writeback(inode);
sysfs_put(sd);
}
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 281c0c9bc39..f2af22574c5 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -29,7 +29,7 @@ struct kmem_cache *sysfs_dir_cachep;
static const struct super_operations sysfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
- .delete_inode = sysfs_delete_inode,
+ .evict_inode = sysfs_evict_inode,
};
struct sysfs_dirent sysfs_root = {
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index f71246bebfe..a7ac78f8e67 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -28,6 +28,7 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
struct sysfs_dirent *target_sd = NULL;
struct sysfs_dirent *sd = NULL;
struct sysfs_addrm_cxt acxt;
+ enum kobj_ns_type ns_type;
int error;
BUG_ON(!name);
@@ -58,16 +59,29 @@ static int sysfs_do_create_link(struct kobject *kobj, struct kobject *target,
if (!sd)
goto out_put;
- if (sysfs_ns_type(parent_sd))
+ ns_type = sysfs_ns_type(parent_sd);
+ if (ns_type)
sd->s_ns = target->ktype->namespace(target);
sd->s_symlink.target_sd = target_sd;
target_sd = NULL; /* reference is now owned by the symlink */
sysfs_addrm_start(&acxt, parent_sd);
- if (warn)
- error = sysfs_add_one(&acxt, sd);
- else
- error = __sysfs_add_one(&acxt, sd);
+ /* Symlinks must be between directories with the same ns_type */
+ if (!ns_type ||
+ (ns_type == sysfs_ns_type(sd->s_symlink.target_sd->s_parent))) {
+ if (warn)
+ error = sysfs_add_one(&acxt, sd);
+ else
+ error = __sysfs_add_one(&acxt, sd);
+ } else {
+ error = -EINVAL;
+ WARN(1, KERN_WARNING
+ "sysfs: symlink across ns_types %s/%s -> %s/%s\n",
+ parent_sd->s_name,
+ sd->s_name,
+ sd->s_symlink.target_sd->s_parent->s_name,
+ sd->s_symlink.target_sd->s_name);
+ }
sysfs_addrm_finish(&acxt);
if (error)
@@ -122,7 +136,7 @@ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
{
const void *ns = NULL;
spin_lock(&sysfs_assoc_lock);
- if (targ->sd)
+ if (targ->sd && sysfs_ns_type(kobj->sd))
ns = targ->sd->s_ns;
spin_unlock(&sysfs_assoc_lock);
sysfs_hash_and_remove(kobj->sd, ns, name);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 6a13105b559..d9be60a2e95 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -198,7 +198,7 @@ static inline void __sysfs_put(struct sysfs_dirent *sd)
* inode.c
*/
struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd);
-void sysfs_delete_inode(struct inode *inode);
+void sysfs_evict_inode(struct inode *inode);
int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr);
int sysfs_permission(struct inode *inode, int mask);
int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 79941e4964a..a77c4215762 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -218,8 +218,7 @@ got_it:
pos = page_offset(page) +
(char*)de - (char*)page_address(page);
lock_page(page);
- err = __sysv_write_begin(NULL, page->mapping, pos, SYSV_DIRSIZE,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
if (err)
goto out_unlock;
memcpy (de->name, name, namelen);
@@ -239,15 +238,13 @@ out_unlock:
int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
{
- struct address_space *mapping = page->mapping;
- struct inode *inode = (struct inode*)mapping->host;
+ struct inode *inode = page->mapping->host;
char *kaddr = (char*)page_address(page);
loff_t pos = page_offset(page) + (char *)de - kaddr;
int err;
lock_page(page);
- err = __sysv_write_begin(NULL, mapping, pos, SYSV_DIRSIZE,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
BUG_ON(err);
de->inode = 0;
err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
@@ -259,16 +256,14 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
int sysv_make_empty(struct inode *inode, struct inode *dir)
{
- struct address_space *mapping = inode->i_mapping;
- struct page *page = grab_cache_page(mapping, 0);
+ struct page *page = grab_cache_page(inode->i_mapping, 0);
struct sysv_dir_entry * de;
char *base;
int err;
if (!page)
return -ENOMEM;
- err = __sysv_write_begin(NULL, mapping, 0, 2 * SYSV_DIRSIZE,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = sysv_prepare_chunk(page, 0, 2 * SYSV_DIRSIZE);
if (err) {
unlock_page(page);
goto fail;
@@ -341,15 +336,13 @@ not_empty:
void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
struct inode *inode)
{
- struct address_space *mapping = page->mapping;
- struct inode *dir = mapping->host;
+ struct inode *dir = page->mapping->host;
loff_t pos = page_offset(page) +
(char *)de-(char*)page_address(page);
int err;
lock_page(page);
- err = __sysv_write_begin(NULL, mapping, pos, SYSV_DIRSIZE,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
BUG_ON(err);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
index 750cc22349b..0a65939508e 100644
--- a/fs/sysv/file.c
+++ b/fs/sysv/file.c
@@ -30,7 +30,29 @@ const struct file_operations sysv_file_operations = {
.splice_read = generic_file_splice_read,
};
+static int sysv_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ error = inode_change_ok(inode, attr);
+ if (error)
+ return error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
const struct inode_operations sysv_file_inode_operations = {
.truncate = sysv_truncate,
+ .setattr = sysv_setattr,
.getattr = sysv_getattr,
};
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index bbd69bdb0fa..0c96c98bd1d 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -25,6 +25,7 @@
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
+#include <linux/writeback.h>
#include "sysv.h"
/* We don't trust the value of
@@ -112,7 +113,6 @@ void sysv_free_inode(struct inode * inode)
return;
}
raw_inode = sysv_raw_inode(sb, ino, &bh);
- clear_inode(inode);
if (!raw_inode) {
printk("sysv_free_inode: unable to read inode block on device "
"%s\n", inode->i_sb->s_id);
@@ -139,6 +139,9 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
struct inode *inode;
sysv_ino_t ino;
unsigned count;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_NONE
+ };
inode = new_inode(sb);
if (!inode)
@@ -168,7 +171,7 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode)
insert_inode_hash(inode);
mark_inode_dirty(inode);
- sysv_write_inode(inode, 0); /* ensure inode not allocated again */
+ sysv_write_inode(inode, &wbc); /* ensure inode not allocated again */
mark_inode_dirty(inode); /* cleared by sysv_write_inode() */
/* That's it. */
unlock_super(sb);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index d4a5380b566..de44d067b9e 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -71,8 +71,8 @@ static int sysv_remount(struct super_block *sb, int *flags, char *data)
lock_super(sb);
if (sbi->s_forced_ro)
*flags |= MS_RDONLY;
- if (!(*flags & MS_RDONLY))
- sb->s_dirt = 1;
+ if (*flags & MS_RDONLY)
+ sysv_write_super(sb);
unlock_super(sb);
return 0;
}
@@ -308,12 +308,17 @@ int sysv_sync_inode(struct inode *inode)
return __sysv_write_inode(inode, 1);
}
-static void sysv_delete_inode(struct inode *inode)
+static void sysv_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- inode->i_size = 0;
- sysv_truncate(inode);
- sysv_free_inode(inode);
+ if (!inode->i_nlink) {
+ inode->i_size = 0;
+ sysv_truncate(inode);
+ }
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
+ if (!inode->i_nlink)
+ sysv_free_inode(inode);
}
static struct kmem_cache *sysv_inode_cachep;
@@ -344,7 +349,7 @@ const struct super_operations sysv_sops = {
.alloc_inode = sysv_alloc_inode,
.destroy_inode = sysv_destroy_inode,
.write_inode = sysv_write_inode,
- .delete_inode = sysv_delete_inode,
+ .evict_inode = sysv_evict_inode,
.put_super = sysv_put_super,
.write_super = sysv_write_super,
.sync_fs = sysv_sync_fs,
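
sysv_evict_inode() above follows the shape shared by the other ->evict_inode conversions in this merge (udf and ufs below): decide up front whether the inode is truly going away, release pages and on-disk data, end writeback in place of the old clear_inode(), and only then free the on-disk inode. As a generic sketch (foo_* names hypothetical):

	static void foo_evict_inode(struct inode *inode)
	{
		int want_delete = !inode->i_nlink && !is_bad_inode(inode);

		truncate_inode_pages(&inode->i_data, 0);
		if (want_delete) {
			inode->i_size = 0;
			foo_truncate(inode);	/* drop on-disk blocks */
		}
		invalidate_inode_buffers(inode);
		end_writeback(inode);		/* replaces clear_inode() */
		if (want_delete)
			foo_free_inode(inode);	/* on-disk inode last */
	}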
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index f042eec464c..9ca66276315 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -459,20 +459,25 @@ static int sysv_readpage(struct file *file, struct page *page)
return block_read_full_page(page,get_block);
}
-int __sysv_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
- return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- get_block);
+ return __block_write_begin(page, pos, len, get_block);
}
static int sysv_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- *pagep = NULL;
- return __sysv_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
+ int ret;
+
+ ret = block_write_begin(mapping, pos, len, flags, pagep, get_block);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
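
The sysv_write_begin() change above (repeated for udf and ufs further down) adds the standard rollback for the reworked block_write_begin(): on failure the helper may already have instantiated blocks beyond the old i_size, so the caller trims back to isize rather than leaving them exposed. Schematically:

	ret = block_write_begin(mapping, pos, len, flags, pagep, get_block);
	if (unlikely(ret)) {
		loff_t isize = mapping->host->i_size;
		/* blocks past i_size may already be allocated; trim them */
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);
	}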
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 5a903da5455..a0b0cda6927 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -347,7 +347,6 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
sb->s_flags |= MS_RDONLY;
if (sbi->s_truncate)
sb->s_root->d_op = &sysv_dentry_operations;
- sb->s_dirt = 1;
return 1;
}
@@ -435,12 +434,46 @@ Ebadsize:
goto failed;
}
-static int v7_fill_super(struct super_block *sb, void *data, int silent)
+static int v7_sanity_check(struct super_block *sb, struct buffer_head *bh)
{
- struct sysv_sb_info *sbi;
- struct buffer_head *bh, *bh2 = NULL;
struct v7_super_block *v7sb;
struct sysv_inode *v7i;
+ struct buffer_head *bh2;
+ struct sysv_sb_info *sbi;
+
+ sbi = sb->s_fs_info;
+
+ /* plausibility check on superblock */
+ v7sb = (struct v7_super_block *) bh->b_data;
+ if (fs16_to_cpu(sbi, v7sb->s_nfree) > V7_NICFREE ||
+ fs16_to_cpu(sbi, v7sb->s_ninode) > V7_NICINOD ||
+ fs32_to_cpu(sbi, v7sb->s_fsize) > V7_MAXSIZE)
+ return 0;
+
+ /* plausibility check on root inode: it is a directory,
+ with a nonzero size that is a multiple of 16 */
+ bh2 = sb_bread(sb, 2);
+ if (bh2 == NULL)
+ return 0;
+
+ v7i = (struct sysv_inode *)(bh2->b_data + 64);
+ if ((fs16_to_cpu(sbi, v7i->i_mode) & ~0777) != S_IFDIR ||
+ (fs32_to_cpu(sbi, v7i->i_size) == 0) ||
+ (fs32_to_cpu(sbi, v7i->i_size) & 017) ||
+ (fs32_to_cpu(sbi, v7i->i_size) > V7_NFILES *
+ sizeof(struct sysv_dir_entry))) {
+ brelse(bh2);
+ return 0;
+ }
+
+ brelse(bh2);
+ return 1;
+}
+
+static int v7_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct sysv_sb_info *sbi;
+ struct buffer_head *bh;
if (440 != sizeof (struct v7_super_block))
panic("V7 FS: bad super-block size");
@@ -454,7 +487,6 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_sb = sb;
sbi->s_block_base = 0;
sbi->s_type = FSTYPE_V7;
- sbi->s_bytesex = BYTESEX_PDP;
sb->s_fs_info = sbi;
sb_set_blocksize(sb, 512);
@@ -466,32 +498,27 @@ static int v7_fill_super(struct super_block *sb, void *data, int silent)
goto failed;
}
- /* plausibility check on superblock */
- v7sb = (struct v7_super_block *) bh->b_data;
- if (fs16_to_cpu(sbi, v7sb->s_nfree) > V7_NICFREE ||
- fs16_to_cpu(sbi, v7sb->s_ninode) > V7_NICINOD ||
- fs32_to_cpu(sbi, v7sb->s_time) == 0)
- goto failed;
+ /* Try PDP-11 UNIX */
+ sbi->s_bytesex = BYTESEX_PDP;
+ if (v7_sanity_check(sb, bh))
+ goto detected;
- /* plausibility check on root inode: it is a directory,
- with a nonzero size that is a multiple of 16 */
- if ((bh2 = sb_bread(sb, 2)) == NULL)
- goto failed;
- v7i = (struct sysv_inode *)(bh2->b_data + 64);
- if ((fs16_to_cpu(sbi, v7i->i_mode) & ~0777) != S_IFDIR ||
- (fs32_to_cpu(sbi, v7i->i_size) == 0) ||
- (fs32_to_cpu(sbi, v7i->i_size) & 017) != 0)
- goto failed;
- brelse(bh2);
- bh2 = NULL;
+ /* Try PC/IX, v7/x86 */
+ sbi->s_bytesex = BYTESEX_LE;
+ if (v7_sanity_check(sb, bh))
+ goto detected;
+ goto failed;
+
+detected:
sbi->s_bh1 = bh;
sbi->s_bh2 = bh;
if (complete_read_super(sb, silent, 1))
return 0;
failed:
- brelse(bh2);
+ printk(KERN_ERR "VFS: could not find a valid V7 on %s.\n",
+ sb->s_id);
brelse(bh);
kfree(sbi);
return -EINVAL;
@@ -560,4 +587,5 @@ static void __exit exit_sysv_fs(void)
module_init(init_sysv_fs)
module_exit(exit_sysv_fs)
+MODULE_ALIAS("v7");
MODULE_LICENSE("GPL");
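
The v7_fill_super() rework above reads the superblock once and then reinterprets the same buffer under each byte order via v7_sanity_check(); condensed, the detection amounts to (names from the patch, control flow compacted):

	sbi->s_bytesex = BYTESEX_PDP;		/* classic PDP-11 V7 */
	if (!v7_sanity_check(sb, bh)) {
		sbi->s_bytesex = BYTESEX_LE;	/* PC/IX, v7/x86 ports */
		if (!v7_sanity_check(sb, bh))
			goto failed;
	}
	/* detected: bh holds a plausible superblock in this byte order */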
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 94cb9b4d76c..bb55cdb394b 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -136,9 +136,7 @@ extern unsigned long sysv_count_free_blocks(struct super_block *);
/* itree.c */
extern void sysv_truncate(struct inode *);
-extern int __sysv_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
+extern int sysv_prepare_chunk(struct page *page, loff_t pos, unsigned len);
/* inode.c */
extern struct inode *sysv_iget(struct super_block *, unsigned int);
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 076ca50e993..c8ff0d1ae5d 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -62,7 +62,9 @@
*/
static void shrink_liability(struct ubifs_info *c, int nr_to_write)
{
+ down_read(&c->vfs_sb->s_umount);
writeback_inodes_sb(c->vfs_sb);
+ up_read(&c->vfs_sb->s_umount);
}
/**
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 12f445cee9f..03ae894c45d 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -967,14 +967,15 @@ static int do_writepage(struct page *page, int len)
* the page locked, and it locks @ui_mutex. However, write-back does take inode
* @i_mutex, which means other VFS operations may be run on this inode at the
* same time. And the problematic one is truncation to smaller size, from where
- * we have to call 'simple_setsize()', which first changes @inode->i_size, then
+ * we have to call 'truncate_setsize()', which first changes @inode->i_size, then
* drops the truncated pages. And while dropping the pages, it takes the page
- * lock. This means that 'do_truncation()' cannot call 'simple_setsize()' with
+ * lock. This means that 'do_truncation()' cannot call 'truncate_setsize()' with
* @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'. This
* means that @inode->i_size is changed while @ui_mutex is unlocked.
*
- * XXX: with the new truncate the above is not true anymore, the simple_setsize
- * calls can be replaced with the individual components.
+ * XXX(truncate): with the new truncate sequence this is not true anymore,
+ * and the calls to truncate_setsize can be moved around freely. They should
+ * be moved to the very end of the truncate sequence.
*
* But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
* inode size. How do we do this if @inode->i_size may become smaller while we
@@ -1128,9 +1129,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
budgeted = 0;
}
- err = simple_setsize(inode, new_size);
- if (err)
- goto out_budg;
+ truncate_setsize(inode, new_size);
if (offset) {
pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
@@ -1217,16 +1216,14 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
if (attr->ia_valid & ATTR_SIZE) {
dbg_gen("size %lld -> %lld", inode->i_size, new_size);
- err = simple_setsize(inode, new_size);
- if (err)
- goto out;
+ truncate_setsize(inode, new_size);
}
mutex_lock(&ui->ui_mutex);
if (attr->ia_valid & ATTR_SIZE) {
/* Truncation changes inode [mc]time */
inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
- /* 'simple_setsize()' changed @i_size, update @ui_size */
+ /* 'truncate_setsize()' changed @i_size, update @ui_size */
ui->ui_size = inode->i_size;
}
@@ -1248,10 +1245,6 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
if (IS_SYNC(inode))
err = inode->i_sb->s_op->write_inode(inode, NULL);
return err;
-
-out:
- ubifs_release_budget(c, &req);
- return err;
}
int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
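
truncate_setsize(), which replaces simple_setsize() throughout this merge, cannot fail, which is why the error paths above disappear. It is roughly the following two steps (a sketch of the equivalent open-coded sequence; the helper itself lives in mm/truncate.c):

	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);			/* publish new size */
	truncate_pagecache(inode, oldsize, newsize);	/* then drop pages */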
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index ad7f67b827e..0084a33c4c6 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1457,13 +1457,13 @@ struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum)
shft -= UBIFS_LPT_FANOUT_SHIFT;
nnode = ubifs_get_nnode(c, nnode, iip);
if (IS_ERR(nnode))
- return ERR_PTR(PTR_ERR(nnode));
+ return ERR_CAST(nnode);
}
iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
shft -= UBIFS_LPT_FANOUT_SHIFT;
pnode = ubifs_get_pnode(c, nnode, iip);
if (IS_ERR(pnode))
- return ERR_PTR(PTR_ERR(pnode));
+ return ERR_CAST(pnode);
iip = (i & (UBIFS_LPT_FANOUT - 1));
dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
pnode->lprops[iip].free, pnode->lprops[iip].dirty,
@@ -1586,7 +1586,7 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum)
nnode = c->nroot;
nnode = dirty_cow_nnode(c, nnode);
if (IS_ERR(nnode))
- return ERR_PTR(PTR_ERR(nnode));
+ return ERR_CAST(nnode);
i = lnum - c->main_first;
shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
for (h = 1; h < c->lpt_hght; h++) {
@@ -1594,19 +1594,19 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum)
shft -= UBIFS_LPT_FANOUT_SHIFT;
nnode = ubifs_get_nnode(c, nnode, iip);
if (IS_ERR(nnode))
- return ERR_PTR(PTR_ERR(nnode));
+ return ERR_CAST(nnode);
nnode = dirty_cow_nnode(c, nnode);
if (IS_ERR(nnode))
- return ERR_PTR(PTR_ERR(nnode));
+ return ERR_CAST(nnode);
}
iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
shft -= UBIFS_LPT_FANOUT_SHIFT;
pnode = ubifs_get_pnode(c, nnode, iip);
if (IS_ERR(pnode))
- return ERR_PTR(PTR_ERR(pnode));
+ return ERR_CAST(pnode);
pnode = dirty_cow_pnode(c, pnode);
if (IS_ERR(pnode))
- return ERR_PTR(PTR_ERR(pnode));
+ return ERR_CAST(pnode);
iip = (i & (UBIFS_LPT_FANOUT - 1));
dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
pnode->lprops[iip].free, pnode->lprops[iip].dirty,
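
The ERR_CAST() conversions above are purely cosmetic: ERR_PTR(PTR_ERR(x)) decodes and re-encodes the same error value, while ERR_CAST() simply casts the tagged pointer across types. A minimal sketch (hypothetical caller):

	struct ubifs_nnode *nnode = ubifs_get_nnode(c, parent, iip);

	if (IS_ERR(nnode))
		return ERR_CAST(nnode);	/* same encoded errno, new type */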
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 13cb7a4237b..d12535b7fc7 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -646,7 +646,7 @@ static struct ubifs_pnode *pnode_lookup(struct ubifs_info *c, int i)
shft -= UBIFS_LPT_FANOUT_SHIFT;
nnode = ubifs_get_nnode(c, nnode, iip);
if (IS_ERR(nnode))
- return ERR_PTR(PTR_ERR(nnode));
+ return ERR_CAST(nnode);
}
iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
return ubifs_get_pnode(c, nnode, iip);
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 109c6ea03bb..daae9e1f538 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -24,7 +24,7 @@
* This file implements functions needed to recover from unclean un-mounts.
* When UBIFS is mounted, it checks a flag on the master node to determine if
* an un-mount was completed successfully. If not, the process of mounting
- * incorparates additional checking and fixing of on-flash data structures.
+ * incorporates additional checking and fixing of on-flash data structures.
* UBIFS always cleans away all remnants of an unclean un-mount, so that
* errors do not accumulate. However UBIFS defers recovery if it is mounted
* read-only, and the flash is not modified in that case.
@@ -1063,8 +1063,21 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
}
err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2);
if (err) {
- if (err == -ENOSPC)
- dbg_err("could not find a dirty LEB");
+ /*
+	 * There are no dirty LEBs, and no empty LEB can be taken while
+	 * still leaving enough for the index. Try to use
+ * 'ubifs_find_free_leb_for_idx()', which will return any empty
+ * LEBs (ignoring index requirements). If the index then
+ * doesn't have enough LEBs the recovery commit will fail -
+ * which is the same result anyway i.e. recovery fails. So
+ * there is no problem ignoring index requirements and just
+ * grabbing a free LEB since we have already established there
+ * is not a dirty LEB we could have used instead.
+ */
+ if (err == -ENOSPC) {
+ dbg_rcvry("could not find a dirty LEB");
+ goto find_free;
+ }
return err;
}
ubifs_assert(!(lp.flags & LPROPS_INDEX));
@@ -1139,8 +1152,8 @@ int ubifs_rcvry_gc_commit(struct ubifs_info *c)
find_free:
/*
* There is no GC head LEB or the free space in the GC head LEB is too
- * small. Allocate gc_lnum by calling 'ubifs_find_free_leb_for_idx()' so
- * GC is not run.
+ * small, or there are no dirty LEBs. Allocate gc_lnum by calling
+ * 'ubifs_find_free_leb_for_idx()' so GC is not run.
*/
lnum = ubifs_find_free_leb_for_idx(c);
if (lnum < 0) {
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 02feb59cefc..0b201114a5a 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -277,7 +277,7 @@ static int kick_a_thread(void)
return 0;
}
-int ubifs_shrinker(int nr, gfp_t gfp_mask)
+int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
int freed, contention = 0;
long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
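
The extra struct shrinker argument reflects the 2.6.36 shrinker API, where the callback receives the shrinker it was registered through. A sketch of the matching registration (field layout as of this kernel; the foo_ name is hypothetical, ubifs keeps its own instance in super.c):

	static struct shrinker foo_shrinker = {
		.shrink	= ubifs_shrinker,	/* now takes struct shrinker * */
		.seeks	= DEFAULT_SEEKS,
	};

	register_shrinker(&foo_shrinker);
	/* ... */
	unregister_shrinker(&foo_shrinker);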
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 4d2f2157dd3..cd5900b85d3 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -327,7 +327,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
-static void ubifs_delete_inode(struct inode *inode)
+static void ubifs_evict_inode(struct inode *inode)
{
int err;
struct ubifs_info *c = inode->i_sb->s_fs_info;
@@ -343,9 +343,12 @@ static void ubifs_delete_inode(struct inode *inode)
dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode);
ubifs_assert(!atomic_read(&inode->i_count));
- ubifs_assert(inode->i_nlink == 0);
truncate_inode_pages(&inode->i_data, 0);
+
+ if (inode->i_nlink)
+ goto done;
+
if (is_bad_inode(inode))
goto out;
@@ -367,7 +370,8 @@ out:
c->nospace = c->nospace_rp = 0;
smp_wmb();
}
- clear_inode(inode);
+done:
+ end_writeback(inode);
}
static void ubifs_dirty_inode(struct inode *inode)
@@ -1307,6 +1311,8 @@ static int mount_ubifs(struct ubifs_info *c)
if (err)
goto out_orphans;
err = ubifs_rcvry_gc_commit(c);
+ if (err)
+ goto out_orphans;
} else {
err = take_gc_lnum(c);
if (err)
@@ -1318,7 +1324,7 @@ static int mount_ubifs(struct ubifs_info *c)
*/
err = ubifs_leb_unmap(c, c->gc_lnum);
if (err)
- return err;
+ goto out_orphans;
}
err = dbg_check_lprops(c);
@@ -1824,7 +1830,7 @@ const struct super_operations ubifs_super_operations = {
.destroy_inode = ubifs_destroy_inode,
.put_super = ubifs_put_super,
.write_inode = ubifs_write_inode,
- .delete_inode = ubifs_delete_inode,
+ .evict_inode = ubifs_evict_inode,
.statfs = ubifs_statfs,
.dirty_inode = ubifs_dirty_inode,
.remount_fs = ubifs_remount_fs,
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 2eef553d50c..0c9876b396d 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -379,7 +379,7 @@ struct ubifs_gced_idx_leb {
* The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses
* @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot
* make sure @inode->i_size is always changed under @ui_mutex, because it
- * cannot call 'simple_setsize()' with @ui_mutex locked, because it would deadlock
+ * cannot call 'truncate_setsize()' with @ui_mutex locked, because it would deadlock
* with 'ubifs_writepage()' (see file.c). All the other inode fields are
* changed under @ui_mutex, so they do not need "shadow" fields. Note, one
* could consider to rework locking and base it on "shadow" fields.
@@ -1575,7 +1575,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
int ubifs_tnc_end_commit(struct ubifs_info *c);
/* shrinker.c */
-int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask);
+int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);
/* commit.c */
int ubifs_bg_thread(void *info);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 94e06d6bddb..66b9e7e7e4c 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -36,7 +36,6 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/aio.h>
-#include <linux/smp_lock.h>
#include "udf_i.h"
#include "udf_sb.h"
@@ -228,6 +227,28 @@ const struct file_operations udf_file_operations = {
.llseek = generic_file_llseek,
};
+static int udf_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ error = inode_change_ok(inode, attr);
+ if (error)
+ return error;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ attr->ia_size != i_size_read(inode)) {
+ error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
+ }
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
const struct inode_operations udf_file_inode_operations = {
+ .setattr = udf_setattr,
.truncate = udf_truncate,
};
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 18cd7111185..75d9304d0dc 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -31,8 +31,6 @@ void udf_free_inode(struct inode *inode)
struct super_block *sb = inode->i_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
- clear_inode(inode);
-
mutex_lock(&sbi->s_alloc_mutex);
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDescImpUse *lvidiu =
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 124852bcf6f..fc48f37aa2d 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -68,37 +68,23 @@ static void udf_update_extents(struct inode *,
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
-void udf_delete_inode(struct inode *inode)
-{
- truncate_inode_pages(&inode->i_data, 0);
-
- if (is_bad_inode(inode))
- goto no_delete;
-
- inode->i_size = 0;
- udf_truncate(inode);
- lock_kernel();
-
- udf_update_inode(inode, IS_SYNC(inode));
- udf_free_inode(inode);
-
- unlock_kernel();
- return;
-
-no_delete:
- clear_inode(inode);
-}
-
-/*
- * If we are going to release inode from memory, we truncate last inode extent
- * to proper length. We could use drop_inode() but it's called under inode_lock
- * and thus we cannot mark inode dirty there. We use clear_inode() but we have
- * to make sure to write inode as it's not written automatically.
- */
-void udf_clear_inode(struct inode *inode)
+void udf_evict_inode(struct inode *inode)
{
struct udf_inode_info *iinfo = UDF_I(inode);
+ int want_delete = 0;
+
+ truncate_inode_pages(&inode->i_data, 0);
+ if (!inode->i_nlink && !is_bad_inode(inode)) {
+ want_delete = 1;
+ inode->i_size = 0;
+ udf_truncate(inode);
+ lock_kernel();
+ udf_update_inode(inode, IS_SYNC(inode));
+ unlock_kernel();
+ }
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
inode->i_size != iinfo->i_lenExtents) {
printk(KERN_WARNING "UDF-fs (%s): Inode %lu (mode %o) has "
@@ -108,9 +94,13 @@ void udf_clear_inode(struct inode *inode)
(unsigned long long)inode->i_size,
(unsigned long long)iinfo->i_lenExtents);
}
-
kfree(iinfo->i_ext.i_data);
iinfo->i_ext.i_data = NULL;
+ if (want_delete) {
+ lock_kernel();
+ udf_free_inode(inode);
+ unlock_kernel();
+ }
}
static int udf_writepage(struct page *page, struct writeback_control *wbc)
@@ -127,9 +117,16 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- *pagep = NULL;
- return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- udf_get_block);
+ int ret;
+
+ ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t udf_bmap(struct address_space *mapping, sector_t block)
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 612d1e2e285..65412d84a45 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -175,8 +175,7 @@ static const struct super_operations udf_sb_ops = {
.alloc_inode = udf_alloc_inode,
.destroy_inode = udf_destroy_inode,
.write_inode = udf_write_inode,
- .delete_inode = udf_delete_inode,
- .clear_inode = udf_clear_inode,
+ .evict_inode = udf_evict_inode,
.put_super = udf_put_super,
.sync_fs = udf_sync_fs,
.statfs = udf_statfs,
@@ -1579,9 +1578,7 @@ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
{
struct anchorVolDescPtr *anchor;
long main_s, main_e, reserve_s, reserve_e;
- struct udf_sb_info *sbi;
- sbi = UDF_SB(sb);
anchor = (struct anchorVolDescPtr *)bh->b_data;
/* Locate the main sequence */
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 2bac0354891..6995ab1f430 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -139,8 +139,7 @@ extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
extern void udf_truncate(struct inode *);
extern void udf_read_inode(struct inode *);
-extern void udf_delete_inode(struct inode *);
-extern void udf_clear_inode(struct inode *);
+extern void udf_evict_inode(struct inode *);
extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
extern long udf_block_map(struct inode *, sector_t);
extern int udf_extend_file(struct inode *, struct extent_position *,
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index ec784756dc6..dbc90994715 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -95,8 +95,7 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
int err;
lock_page(page);
- err = __ufs_write_begin(NULL, page->mapping, pos, len,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = ufs_prepare_chunk(page, pos, len);
BUG_ON(err);
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
@@ -381,8 +380,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
got_it:
pos = page_offset(page) +
(char*)de - (char*)page_address(page);
- err = __ufs_write_begin(NULL, page->mapping, pos, rec_len,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = ufs_prepare_chunk(page, pos, rec_len);
if (err)
goto out_unlock;
if (de->d_ino) {
@@ -518,7 +516,6 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
struct page * page)
{
struct super_block *sb = inode->i_sb;
- struct address_space *mapping = page->mapping;
char *kaddr = page_address(page);
unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
unsigned to = ((char*)dir - kaddr) + fs16_to_cpu(sb, dir->d_reclen);
@@ -549,8 +546,7 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
pos = page_offset(page) + from;
lock_page(page);
- err = __ufs_write_begin(NULL, mapping, pos, to - from,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = ufs_prepare_chunk(page, pos, to - from);
BUG_ON(err);
if (pde)
pde->d_reclen = cpu_to_fs16(sb, to - from);
@@ -577,8 +573,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir)
if (!page)
return -ENOMEM;
- err = __ufs_write_begin(NULL, mapping, 0, chunk_size,
- AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+ err = ufs_prepare_chunk(page, 0, chunk_size);
if (err) {
unlock_page(page);
goto fail;
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 594480e537d..428017e018f 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -94,8 +94,6 @@ void ufs_free_inode (struct inode * inode)
is_directory = S_ISDIR(inode->i_mode);
- clear_inode (inode);
-
if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
else {
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 73fe773aa03..2b251f2093a 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -558,20 +558,26 @@ static int ufs_readpage(struct file *file, struct page *page)
return block_read_full_page(page,ufs_getfrag_block);
}
-int __ufs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
- return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- ufs_getfrag_block);
+ return __block_write_begin(page, pos, len, ufs_getfrag_block);
}
static int ufs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- *pagep = NULL;
- return __ufs_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
+ int ret;
+
+ ret = block_write_begin(mapping, pos, len, flags, pagep,
+ ufs_getfrag_block);
+ if (unlikely(ret)) {
+ loff_t isize = mapping->host->i_size;
+ if (pos + len > isize)
+ vmtruncate(mapping->host, isize);
+ }
+
+ return ret;
}
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
@@ -905,24 +911,33 @@ int ufs_sync_inode (struct inode *inode)
return ufs_update_inode (inode, 1);
}
-void ufs_delete_inode (struct inode * inode)
+void ufs_evict_inode(struct inode * inode)
{
- loff_t old_i_size;
+ int want_delete = 0;
+
+ if (!inode->i_nlink && !is_bad_inode(inode))
+ want_delete = 1;
truncate_inode_pages(&inode->i_data, 0);
- if (is_bad_inode(inode))
- goto no_delete;
- /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
- lock_kernel();
- mark_inode_dirty(inode);
- ufs_update_inode(inode, IS_SYNC(inode));
- old_i_size = inode->i_size;
- inode->i_size = 0;
- if (inode->i_blocks && ufs_truncate(inode, old_i_size))
- ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n");
- ufs_free_inode (inode);
- unlock_kernel();
- return;
-no_delete:
- clear_inode(inode); /* We must guarantee clearing of inode... */
+ if (want_delete) {
+ loff_t old_i_size;
+ /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
+ lock_kernel();
+ mark_inode_dirty(inode);
+ ufs_update_inode(inode, IS_SYNC(inode));
+ old_i_size = inode->i_size;
+ inode->i_size = 0;
+ if (inode->i_blocks && ufs_truncate(inode, old_i_size))
+ ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n");
+ unlock_kernel();
+ }
+
+ invalidate_inode_buffers(inode);
+ end_writeback(inode);
+
+ if (want_delete) {
+ lock_kernel();
+ ufs_free_inode (inode);
+ unlock_kernel();
+ }
}
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 3ec5a9eb6ef..d510c1b9181 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1440,7 +1440,7 @@ static const struct super_operations ufs_super_ops = {
.alloc_inode = ufs_alloc_inode,
.destroy_inode = ufs_destroy_inode,
.write_inode = ufs_write_inode,
- .delete_inode = ufs_delete_inode,
+ .evict_inode = ufs_evict_inode,
.put_super = ufs_put_super,
.write_super = ufs_write_super,
.sync_fs = ufs_sync_fs,
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index 589e01a465b..34d5cb13532 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -500,11 +500,6 @@ out:
return err;
}
-/*
- * TODO:
- * - truncate case should use proper ordering instead of using
- * simple_setsize
- */
int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
@@ -518,14 +513,17 @@ int ufs_setattr(struct dentry *dentry, struct iattr *attr)
if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
loff_t old_i_size = inode->i_size;
- error = simple_setsize(inode, attr->ia_size);
- if (error)
- return error;
+ /* XXX(truncate): truncate_setsize should be called last */
+ truncate_setsize(inode, attr->ia_size);
+
error = ufs_truncate(inode, old_i_size);
if (error)
return error;
}
- return inode_setattr(inode, attr);
+
+ setattr_copy(inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
}
const struct inode_operations ufs_file_inode_operations = {
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 179ae6b3180..c08782e1b48 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -108,7 +108,7 @@ extern struct inode * ufs_new_inode (struct inode *, int);
extern struct inode *ufs_iget(struct super_block *, unsigned long);
extern int ufs_write_inode (struct inode *, struct writeback_control *);
extern int ufs_sync_inode (struct inode *);
-extern void ufs_delete_inode (struct inode *);
+extern void ufs_evict_inode (struct inode *);
extern struct buffer_head * ufs_bread (struct inode *, unsigned, int, int *);
extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create);
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 23ceed8c8fb..0466036912f 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -257,9 +257,7 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
-extern int __ufs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
+extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len);
/*
* These functions manipulate ufs buffers
diff --git a/fs/utimes.c b/fs/utimes.c
index e4c75db5d37..179b5869065 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -126,7 +126,8 @@ out:
* must be owner or have write permission.
* Else, update from *times, must be owner or super user.
*/
-long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags)
+long do_utimes(int dfd, const char __user *filename, struct timespec *times,
+ int flags)
{
int error = -EINVAL;
@@ -170,7 +171,7 @@ out:
return error;
}
-SYSCALL_DEFINE4(utimensat, int, dfd, char __user *, filename,
+SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
struct timespec __user *, utimes, int, flags)
{
struct timespec tstimes[2];
@@ -188,7 +189,7 @@ SYSCALL_DEFINE4(utimensat, int, dfd, char __user *, filename,
return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags);
}
-SYSCALL_DEFINE3(futimesat, int, dfd, char __user *, filename,
+SYSCALL_DEFINE3(futimesat, int, dfd, const char __user *, filename,
struct timeval __user *, utimes)
{
struct timeval times[2];
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index c8fb13f83b3..0dce969d6ca 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -87,11 +87,9 @@ xfs-y += xfs_alloc.o \
xfs_trans_buf.o \
xfs_trans_extfree.o \
xfs_trans_inode.o \
- xfs_trans_item.o \
xfs_utils.o \
xfs_vnodeops.o \
- xfs_rw.o \
- xfs_dmops.o
+ xfs_rw.o
xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 9f769b5b38f..b2771862fd3 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -225,7 +225,7 @@ xfs_check_acl(struct inode *inode, int mask)
struct posix_acl *acl;
int error = -EAGAIN;
- xfs_itrace_entry(ip);
+ trace_xfs_check_acl(ip);
/*
* If there is no attribute fork no ACL exists on this inode and
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 089eaca860b..15412fe15c3 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -21,19 +21,12 @@
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_trans.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
-#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
@@ -92,18 +85,15 @@ void
xfs_count_page_state(
struct page *page,
int *delalloc,
- int *unmapped,
int *unwritten)
{
struct buffer_head *bh, *head;
- *delalloc = *unmapped = *unwritten = 0;
+ *delalloc = *unwritten = 0;
bh = head = page_buffers(page);
do {
- if (buffer_uptodate(bh) && !buffer_mapped(bh))
- (*unmapped) = 1;
- else if (buffer_unwritten(bh))
+ if (buffer_unwritten(bh))
(*unwritten) = 1;
else if (buffer_delay(bh))
(*delalloc) = 1;
@@ -212,23 +202,17 @@ xfs_setfilesize(
}
/*
- * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend. If we are asked to wait,
- * flush the workqueue.
+ * Schedule IO completion handling on the final put of an ioend.
*/
STATIC void
xfs_finish_ioend(
- xfs_ioend_t *ioend,
- int wait)
+ struct xfs_ioend *ioend)
{
if (atomic_dec_and_test(&ioend->io_remaining)) {
- struct workqueue_struct *wq;
-
- wq = (ioend->io_type == IO_UNWRITTEN) ?
- xfsconvertd_workqueue : xfsdatad_workqueue;
- queue_work(wq, &ioend->io_work);
- if (wait)
- flush_workqueue(wq);
+ if (ioend->io_type == IO_UNWRITTEN)
+ queue_work(xfsconvertd_workqueue, &ioend->io_work);
+ else
+ queue_work(xfsdatad_workqueue, &ioend->io_work);
}
}
@@ -272,11 +256,25 @@ xfs_end_io(
*/
if (error == EAGAIN) {
atomic_inc(&ioend->io_remaining);
- xfs_finish_ioend(ioend, 0);
+ xfs_finish_ioend(ioend);
/* ensure we don't spin on blocked ioends */
delay(1);
- } else
+ } else {
+ if (ioend->io_iocb)
+ aio_complete(ioend->io_iocb, ioend->io_result, 0);
xfs_destroy_ioend(ioend);
+ }
+}
+
+/*
+ * Call IO completion handling in caller context on the final put of an ioend.
+ */
+STATIC void
+xfs_finish_ioend_sync(
+ struct xfs_ioend *ioend)
+{
+ if (atomic_dec_and_test(&ioend->io_remaining))
+ xfs_end_io(&ioend->io_work);
}
/*
@@ -309,6 +307,8 @@ xfs_alloc_ioend(
atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
ioend->io_offset = 0;
ioend->io_size = 0;
+ ioend->io_iocb = NULL;
+ ioend->io_result = 0;
INIT_WORK(&ioend->io_work, xfs_end_io);
return ioend;
@@ -358,7 +358,7 @@ xfs_end_bio(
bio->bi_end_io = NULL;
bio_put(bio);
- xfs_finish_ioend(ioend, 0);
+ xfs_finish_ioend(ioend);
}
STATIC void
@@ -500,7 +500,7 @@ xfs_submit_ioend(
}
if (bio)
xfs_submit_ioend_bio(wbc, ioend, bio);
- xfs_finish_ioend(ioend, 0);
+ xfs_finish_ioend(ioend);
} while ((ioend = next) != NULL);
}
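
The xfs_finish_ioend() rework above replaces the wait flag with a separate synchronous variant. A minimal sketch of the underlying idiom, using demo names rather than the real XFS types (demo_end_io is a stand-in completion handler):

	#include <linux/workqueue.h>

	struct demo_ioend {
		atomic_t		io_remaining;	/* outstanding I/O count */
		struct work_struct	io_work;	/* deferred completion */
	};

	static void demo_end_io(struct work_struct *work);	/* stand-in handler */

	/* Only the final put schedules completion, so it runs exactly once. */
	static void demo_finish_ioend(struct demo_ioend *ioend,
				      struct workqueue_struct *wq)
	{
		if (atomic_dec_and_test(&ioend->io_remaining))
			queue_work(wq, &ioend->io_work);
	}

	/* Synchronous variant: run the handler directly in caller context. */
	static void demo_finish_ioend_sync(struct demo_ioend *ioend)
	{
		if (atomic_dec_and_test(&ioend->io_remaining))
			demo_end_io(&ioend->io_work);
	}
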
@@ -614,31 +614,30 @@ xfs_map_at_offset(
STATIC unsigned int
xfs_probe_page(
struct page *page,
- unsigned int pg_offset,
- int mapped)
+ unsigned int pg_offset)
{
+ struct buffer_head *bh, *head;
int ret = 0;
if (PageWriteback(page))
return 0;
+ if (!PageDirty(page))
+ return 0;
+ if (!page->mapping)
+ return 0;
+ if (!page_has_buffers(page))
+ return 0;
- if (page->mapping && PageDirty(page)) {
- if (page_has_buffers(page)) {
- struct buffer_head *bh, *head;
-
- bh = head = page_buffers(page);
- do {
- if (!buffer_uptodate(bh))
- break;
- if (mapped != buffer_mapped(bh))
- break;
- ret += bh->b_size;
- if (ret >= pg_offset)
- break;
- } while ((bh = bh->b_this_page) != head);
- } else
- ret = mapped ? 0 : PAGE_CACHE_SIZE;
- }
+ bh = head = page_buffers(page);
+ do {
+ if (!buffer_uptodate(bh))
+ break;
+ if (!buffer_mapped(bh))
+ break;
+ ret += bh->b_size;
+ if (ret >= pg_offset)
+ break;
+ } while ((bh = bh->b_this_page) != head);
return ret;
}
@@ -648,8 +647,7 @@ xfs_probe_cluster(
struct inode *inode,
struct page *startpage,
struct buffer_head *bh,
- struct buffer_head *head,
- int mapped)
+ struct buffer_head *head)
{
struct pagevec pvec;
pgoff_t tindex, tlast, tloff;
@@ -658,7 +656,7 @@ xfs_probe_cluster(
/* First sum forwards in this page */
do {
- if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
+ if (!buffer_uptodate(bh) || !buffer_mapped(bh))
return total;
total += bh->b_size;
} while ((bh = bh->b_this_page) != head);
@@ -692,7 +690,7 @@ xfs_probe_cluster(
pg_offset = PAGE_CACHE_SIZE;
if (page->index == tindex && trylock_page(page)) {
- pg_len = xfs_probe_page(page, pg_offset, mapped);
+ pg_len = xfs_probe_page(page, pg_offset);
unlock_page(page);
}
@@ -761,7 +759,6 @@ xfs_convert_page(
struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
- int startio,
int all_bh)
{
struct buffer_head *bh, *head;
@@ -832,19 +829,14 @@ xfs_convert_page(
ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
xfs_map_at_offset(inode, bh, imap, offset);
- if (startio) {
- xfs_add_to_ioend(inode, bh, offset,
- type, ioendp, done);
- } else {
- set_buffer_dirty(bh);
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- }
+ xfs_add_to_ioend(inode, bh, offset, type,
+ ioendp, done);
+
page_dirty--;
count++;
} else {
type = IO_NEW;
- if (buffer_mapped(bh) && all_bh && startio) {
+ if (buffer_mapped(bh) && all_bh) {
lock_buffer(bh);
xfs_add_to_ioend(inode, bh, offset,
type, ioendp, done);
@@ -859,14 +851,12 @@ xfs_convert_page(
if (uptodate && bh == head)
SetPageUptodate(page);
- if (startio) {
- if (count) {
- wbc->nr_to_write--;
- if (wbc->nr_to_write <= 0)
- done = 1;
- }
- xfs_start_page_writeback(page, !page_dirty, count);
+ if (count) {
+ wbc->nr_to_write--;
+ if (wbc->nr_to_write <= 0)
+ done = 1;
}
+ xfs_start_page_writeback(page, !page_dirty, count);
return done;
fail_unlock_page:
@@ -886,7 +876,6 @@ xfs_cluster_write(
struct xfs_bmbt_irec *imap,
xfs_ioend_t **ioendp,
struct writeback_control *wbc,
- int startio,
int all_bh,
pgoff_t tlast)
{
@@ -902,7 +891,7 @@ xfs_cluster_write(
for (i = 0; i < pagevec_count(&pvec); i++) {
done = xfs_convert_page(inode, pvec.pages[i], tindex++,
- imap, ioendp, wbc, startio, all_bh);
+ imap, ioendp, wbc, all_bh);
if (done)
break;
}
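
xfs_cluster_write() drives additional pages into the ioend via a pagevec gang lookup. A stripped-down sketch of that loop shape, with demo names and the per-page conversion elided:

	#include <linux/mm.h>
	#include <linux/pagevec.h>

	static void demo_walk_range(struct address_space *mapping,
				    pgoff_t index, pgoff_t last)
	{
		struct pagevec pvec;
		unsigned int i, found;

		pagevec_init(&pvec, 0);
		while (index <= last) {
			found = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
			if (!found)
				break;
			for (i = 0; i < found; i++) {
				struct page *page = pvec.pages[i];

				if (page->index > last) {
					index = last + 1;	/* past the range */
					break;
				}
				/* try to convert/queue this page here */
				index = page->index + 1;
			}
			pagevec_release(&pvec);
		}
	}
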
@@ -981,7 +970,7 @@ xfs_aops_discard_page(
*/
error = xfs_bmapi(NULL, ip, offset_fsb, 1,
XFS_BMAPI_ENTIRE, NULL, 0, &imap,
- &nimaps, NULL, NULL);
+ &nimaps, NULL);
if (error) {
/* something screwed, just bail */
@@ -1009,7 +998,7 @@ xfs_aops_discard_page(
*/
xfs_bmap_init(&flist, &firstblock);
error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
- &flist, NULL, &done);
+ &flist, &done);
ASSERT(!flist.xbf_count && !flist.xbf_first);
if (error) {
@@ -1032,50 +1021,66 @@ out_invalidate:
}
/*
- * Calling this without startio set means we are being asked to make a dirty
- * page ready for freeing it's buffers. When called with startio set then
- * we are coming from writepage.
+ * Write out a dirty page.
+ *
+ * For delalloc space on the page we need to allocate space and flush it.
+ * For unwritten space on the page we need to start the conversion to
+ * regular allocated space.
+ * For any other dirty buffer heads on the page we should flush them.
*
- * When called with startio set it is important that we write the WHOLE
- * page if possible.
- * The bh->b_state's cannot know if any of the blocks or which block for
- * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only valid if the page itself isn't completely uptodate. Some layers
- * may clear the page dirty flag prior to calling write page, under the
- * assumption the entire page will be written out; by not writing out the
- * whole page the page can be reused before all valid dirty data is
- * written out. Note: in the case of a page that has been dirty'd by
- * mapwrite and but partially setup by block_prepare_write the
- * bh->b_states's will not agree and only ones setup by BPW/BCW will have
- * valid state, thus the whole page must be written out thing.
+ * If we detect that a transaction would be required to flush the page, we
+ * have to check the process flags first: if we are already in a transaction,
+ * or disk I/O during allocation is off, we need to fail the writepage and
+ * redirty the page.
*/
-
STATIC int
-xfs_page_state_convert(
- struct inode *inode,
- struct page *page,
- struct writeback_control *wbc,
- int startio,
- int unmapped) /* also implies page uptodate */
+xfs_vm_writepage(
+ struct page *page,
+ struct writeback_control *wbc)
{
+ struct inode *inode = page->mapping->host;
+ int delalloc, unwritten;
struct buffer_head *bh, *head;
struct xfs_bmbt_irec imap;
xfs_ioend_t *ioend = NULL, *iohead = NULL;
loff_t offset;
- unsigned long p_offset = 0;
unsigned int type;
__uint64_t end_offset;
pgoff_t end_index, last_index;
ssize_t size, len;
int flags, err, imap_valid = 0, uptodate = 1;
- int page_dirty, count = 0;
- int trylock = 0;
- int all_bh = unmapped;
+ int count = 0;
+ int all_bh = 0;
- if (startio) {
- if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
- trylock |= BMAPI_TRYLOCK;
- }
+ trace_xfs_writepage(inode, page, 0);
+
+ ASSERT(page_has_buffers(page));
+
+ /*
+ * Refuse to write the page out if we are called from reclaim context.
+ *
+ * This avoids stack overflows when we are called from deep call chains
+ * in random callers doing direct reclaim or memcg reclaim. We explicitly
+ * allow reclaim from kswapd as the stack usage there is relatively low.
+ *
+ * This should really be done by the core VM, but until that happens
+ * filesystems like XFS, btrfs and ext4 have to take care of this
+ * by themselves.
+ */
+ if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
+ goto out_fail;
+
+ /*
+ * We need a transaction if there are delalloc or unwritten buffers
+ * on the page.
+ *
+ * If we need a transaction and the process flags say we are already
+ * in a transaction, or no IO is allowed, then mark the page dirty
+ * again and leave the page as is.
+ */
+ xfs_count_page_state(page, &delalloc, &unwritten);
+ if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
+ goto out_fail;
/* Is this page beyond the end of the file? */
offset = i_size_read(inode);
@@ -1084,50 +1089,33 @@ xfs_page_state_convert(
if (page->index >= end_index) {
if ((page->index >= end_index + 1) ||
!(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
- if (startio)
- unlock_page(page);
+ unlock_page(page);
return 0;
}
}
- /*
- * page_dirty is initially a count of buffers on the page before
- * EOF and is decremented as we move each into a cleanable state.
- *
- * Derivation:
- *
- * End offset is the highest offset that this page should represent.
- * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
- * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
- * hence give us the correct page_dirty count. On any other page,
- * it will be zero and in that case we need page_dirty to be the
- * count of buffers on the page.
- */
end_offset = min_t(unsigned long long,
- (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
+ (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+ offset);
len = 1 << inode->i_blkbits;
- p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
- PAGE_CACHE_SIZE);
- p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
- page_dirty = p_offset / len;
bh = head = page_buffers(page);
offset = page_offset(page);
flags = BMAPI_READ;
type = IO_NEW;
- /* TODO: cleanup count and page_dirty */
-
do {
if (offset >= end_offset)
break;
if (!buffer_uptodate(bh))
uptodate = 0;
- if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
- /*
- * the iomap is actually still valid, but the ioend
- * isn't. shouldn't happen too often.
- */
+
+ /*
+ * A hole may still be marked uptodate because discard_buffer
+ * leaves the flag set.
+ */
+ if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
+ ASSERT(!buffer_dirty(bh));
imap_valid = 0;
continue;
}
@@ -1135,19 +1123,7 @@ xfs_page_state_convert(
if (imap_valid)
imap_valid = xfs_imap_valid(inode, &imap, offset);
- /*
- * First case, map an unwritten extent and prepare for
- * extent state conversion transaction on completion.
- *
- * Second case, allocate space for a delalloc buffer.
- * We can return EAGAIN here in the release page case.
- *
- * Third case, an unmapped buffer was found, and we are
- * in a path where we need to write the whole page out.
- */
- if (buffer_unwritten(bh) || buffer_delay(bh) ||
- ((buffer_uptodate(bh) || PageUptodate(page)) &&
- !buffer_mapped(bh) && (unmapped || startio))) {
+ if (buffer_unwritten(bh) || buffer_delay(bh)) {
int new_ioend = 0;
/*
@@ -1161,15 +1137,16 @@ xfs_page_state_convert(
flags = BMAPI_WRITE | BMAPI_IGNSTATE;
} else if (buffer_delay(bh)) {
type = IO_DELAY;
- flags = BMAPI_ALLOCATE | trylock;
- } else {
- type = IO_NEW;
- flags = BMAPI_WRITE | BMAPI_MMAP;
+ flags = BMAPI_ALLOCATE;
+
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+ wbc->nonblocking)
+ flags |= BMAPI_TRYLOCK;
}
if (!imap_valid) {
/*
- * if we didn't have a valid mapping then we
+ * If we didn't have a valid mapping then we
* need to ensure that we put the new mapping
* in a new ioend structure. This needs to be
* done to ensure that the ioends correctly
@@ -1177,14 +1154,7 @@ xfs_page_state_convert(
* for unwritten extent conversion.
*/
new_ioend = 1;
- if (type == IO_NEW) {
- size = xfs_probe_cluster(inode,
- page, bh, head, 0);
- } else {
- size = len;
- }
-
- err = xfs_map_blocks(inode, offset, size,
+ err = xfs_map_blocks(inode, offset, len,
&imap, flags);
if (err)
goto error;
@@ -1193,19 +1163,11 @@ xfs_page_state_convert(
}
if (imap_valid) {
xfs_map_at_offset(inode, bh, &imap, offset);
- if (startio) {
- xfs_add_to_ioend(inode, bh, offset,
- type, &ioend,
- new_ioend);
- } else {
- set_buffer_dirty(bh);
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- }
- page_dirty--;
+ xfs_add_to_ioend(inode, bh, offset, type,
+ &ioend, new_ioend);
count++;
}
- } else if (buffer_uptodate(bh) && startio) {
+ } else if (buffer_uptodate(bh)) {
/*
* we got here because the buffer is already mapped.
* That means it must already have extents allocated
@@ -1213,8 +1175,7 @@ xfs_page_state_convert(
*/
if (!imap_valid || flags != BMAPI_READ) {
flags = BMAPI_READ;
- size = xfs_probe_cluster(inode, page, bh,
- head, 1);
+ size = xfs_probe_cluster(inode, page, bh, head);
err = xfs_map_blocks(inode, offset, size,
&imap, flags);
if (err)
@@ -1233,18 +1194,16 @@ xfs_page_state_convert(
*/
type = IO_NEW;
if (trylock_buffer(bh)) {
- ASSERT(buffer_mapped(bh));
if (imap_valid)
all_bh = 1;
xfs_add_to_ioend(inode, bh, offset, type,
&ioend, !imap_valid);
- page_dirty--;
count++;
} else {
imap_valid = 0;
}
- } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
- (unmapped || startio)) {
+ } else if (PageUptodate(page)) {
+ ASSERT(buffer_mapped(bh));
imap_valid = 0;
}
@@ -1256,8 +1215,7 @@ xfs_page_state_convert(
if (uptodate && bh == head)
SetPageUptodate(page);
- if (startio)
- xfs_start_page_writeback(page, 1, count);
+ xfs_start_page_writeback(page, 1, count);
if (ioend && imap_valid) {
xfs_off_t end_index;
@@ -1275,124 +1233,27 @@ xfs_page_state_convert(
end_index = last_index;
xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
- wbc, startio, all_bh, end_index);
+ wbc, all_bh, end_index);
}
if (iohead)
xfs_submit_ioend(wbc, iohead);
- return page_dirty;
+ return 0;
error:
if (iohead)
xfs_cancel_ioend(iohead);
- /*
- * If it's delalloc and we have nowhere to put it,
- * throw it away, unless the lower layers told
- * us to try again.
- */
- if (err != -EAGAIN) {
- if (!unmapped)
- xfs_aops_discard_page(page);
- ClearPageUptodate(page);
- }
+ xfs_aops_discard_page(page);
+ ClearPageUptodate(page);
+ unlock_page(page);
return err;
-}
-
-/*
- * writepage: Called from one of two places:
- *
- * 1. we are flushing a delalloc buffer head.
- *
- * 2. we are writing out a dirty page. Typically the page dirty
- * state is cleared before we get here. In this case is it
- * conceivable we have no buffer heads.
- *
- * For delalloc space on the page we need to allocate space and
- * flush it. For unmapped buffer heads on the page we should
- * allocate space if the page is uptodate. For any other dirty
- * buffer heads on the page we should flush them.
- *
- * If we detect that a transaction would be required to flush
- * the page, we have to check the process flags first, if we
- * are already in a transaction or disk I/O during allocations
- * is off, we need to fail the writepage and redirty the page.
- */
-
-STATIC int
-xfs_vm_writepage(
- struct page *page,
- struct writeback_control *wbc)
-{
- int error;
- int need_trans;
- int delalloc, unmapped, unwritten;
- struct inode *inode = page->mapping->host;
-
- trace_xfs_writepage(inode, page, 0);
-
- /*
- * We need a transaction if:
- * 1. There are delalloc buffers on the page
- * 2. The page is uptodate and we have unmapped buffers
- * 3. The page is uptodate and we have no buffers
- * 4. There are unwritten buffers on the page
- */
-
- if (!page_has_buffers(page)) {
- unmapped = 1;
- need_trans = 1;
- } else {
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- if (!PageUptodate(page))
- unmapped = 0;
- need_trans = delalloc + unmapped + unwritten;
- }
-
- /*
- * If we need a transaction and the process flags say
- * we are already in a transaction, or no IO is allowed
- * then mark the page dirty again and leave the page
- * as is.
- */
- if (current_test_flags(PF_FSTRANS) && need_trans)
- goto out_fail;
-
- /*
- * Delay hooking up buffer heads until we have
- * made our go/no-go decision.
- */
- if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
-
-
- /*
- * VM calculation for nr_to_write seems off. Bump it way
- * up, this gets simple streaming writes zippy again.
- * To be reviewed again after Jens' writeback changes.
- */
- wbc->nr_to_write *= 4;
-
- /*
- * Convert delayed allocate, unwritten or unmapped space
- * to real space and flush out to disk.
- */
- error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
- if (error == -EAGAIN)
- goto out_fail;
- if (unlikely(error < 0))
- goto out_unlock;
-
- return 0;
out_fail:
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
-out_unlock:
- unlock_page(page);
- return error;
}
STATIC int
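
The reclaim-context guard in the new xfs_vm_writepage() is a masked compare that is easy to misread; spelled out as a demo helper, it is true only for direct (and memcg) reclaim, while kswapd, which sets both flags, is still allowed to write back:

	#include <linux/sched.h>

	static inline bool demo_in_direct_reclaim(void)
	{
		/* PF_MEMALLOC set and PF_KSWAPD clear: direct reclaim only. */
		return (current->flags & (PF_MEMALLOC | PF_KSWAPD)) == PF_MEMALLOC;
	}
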
@@ -1406,65 +1267,27 @@ xfs_vm_writepages(
/*
* Called to move a page into cleanable state - and from there
- * to be released. Possibly the page is already clean. We always
+ * to be released. The page should already be clean. We always
* have buffer heads in this call.
*
- * Returns 0 if the page is ok to release, 1 otherwise.
- *
- * Possible scenarios are:
- *
- * 1. We are being called to release a page which has been written
- * to via regular I/O. buffer heads will be dirty and possibly
- * delalloc. If no delalloc buffer heads in this case then we
- * can just return zero.
- *
- * 2. We are called to release a page which has been written via
- * mmap, all we need to do is ensure there is no delalloc
- * state in the buffer heads, if not we can let the caller
- * free them and we should come back later via writepage.
+ * Returns 1 if the page is ok to release, 0 otherwise.
*/
STATIC int
xfs_vm_releasepage(
struct page *page,
gfp_t gfp_mask)
{
- struct inode *inode = page->mapping->host;
- int dirty, delalloc, unmapped, unwritten;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 1,
- };
-
- trace_xfs_releasepage(inode, page, 0);
+ int delalloc, unwritten;
- if (!page_has_buffers(page))
- return 0;
+ trace_xfs_releasepage(page->mapping->host, page, 0);
- xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- if (!delalloc && !unwritten)
- goto free_buffers;
+ xfs_count_page_state(page, &delalloc, &unwritten);
- if (!(gfp_mask & __GFP_FS))
+ if (WARN_ON(delalloc))
return 0;
-
- /* If we are already inside a transaction or the thread cannot
- * do I/O, we cannot release this page.
- */
- if (current_test_flags(PF_FSTRANS))
+ if (WARN_ON(unwritten))
return 0;
- /*
- * Convert delalloc space to real space, do not flush the
- * data out to disk, that will be done by the caller.
- * Never need to allocate space here - we will always
- * come back to writepage in that case.
- */
- dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
- if (dirty == 0 && !unwritten)
- goto free_buffers;
- return 0;
-
-free_buffers:
return try_to_free_buffers(page);
}
@@ -1474,9 +1297,9 @@ __xfs_get_blocks(
sector_t iblock,
struct buffer_head *bh_result,
int create,
- int direct,
- bmapi_flags_t flags)
+ int direct)
{
+ int flags = create ? BMAPI_WRITE : BMAPI_READ;
struct xfs_bmbt_irec imap;
xfs_off_t offset;
ssize_t size;
@@ -1491,8 +1314,11 @@ __xfs_get_blocks(
if (!create && direct && offset >= i_size_read(inode))
return 0;
- error = xfs_iomap(XFS_I(inode), offset, size,
- create ? flags : BMAPI_READ, &imap, &nimap, &new);
+ if (direct && create)
+ flags |= BMAPI_DIRECT;
+
+ error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
+ &new);
if (error)
return -error;
if (nimap == 0)
@@ -1572,8 +1398,7 @@ xfs_get_blocks(
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_blocks(inode, iblock,
- bh_result, create, 0, BMAPI_WRITE);
+ return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}
STATIC int
@@ -1583,61 +1408,59 @@ xfs_get_blocks_direct(
struct buffer_head *bh_result,
int create)
{
- return __xfs_get_blocks(inode, iblock,
- bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+ return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}
+/*
+ * Complete a direct I/O write request.
+ *
+ * If the private argument is non-NULL, __xfs_get_blocks signals us that we
+ * need to issue a transaction to convert the range from unwritten to written
+ * extents. In case this is regular synchronous I/O we just call xfs_end_io
+ * to do this and we are done. But in case this was a successful AIO
+ * request this handler is called from interrupt context, from which we
+ * can't start transactions. In that case offload the I/O completion to
+ * the workqueues we also use for buffered I/O completion.
+ */
STATIC void
-xfs_end_io_direct(
- struct kiocb *iocb,
- loff_t offset,
- ssize_t size,
- void *private)
+xfs_end_io_direct_write(
+ struct kiocb *iocb,
+ loff_t offset,
+ ssize_t size,
+ void *private,
+ int ret,
+ bool is_async)
{
- xfs_ioend_t *ioend = iocb->private;
+ struct xfs_ioend *ioend = iocb->private;
/*
- * Non-NULL private data means we need to issue a transaction to
- * convert a range from unwritten to written extents. This needs
- * to happen from process context but aio+dio I/O completion
- * happens from irq context so we need to defer it to a workqueue.
- * This is not necessary for synchronous direct I/O, but we do
- * it anyway to keep the code uniform and simpler.
- *
- * Well, if only it were that simple. Because synchronous direct I/O
- * requires extent conversion to occur *before* we return to userspace,
- * we have to wait for extent conversion to complete. Look at the
- * iocb that has been passed to us to determine if this is AIO or
- * not. If it is synchronous, tell xfs_finish_ioend() to kick the
- * workqueue and wait for it to complete.
- *
- * The core direct I/O code might be changed to always call the
- * completion handler in the future, in which case all this can
- * go away.
+ * blockdev_direct_IO can return an error even after the I/O
+ * completion handler was called. Thus we need to protect
+ * against double-freeing.
*/
+ iocb->private = NULL;
+
ioend->io_offset = offset;
ioend->io_size = size;
- if (ioend->io_type == IO_READ) {
- xfs_finish_ioend(ioend, 0);
- } else if (private && size > 0) {
- xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
- } else {
+ if (private && size > 0)
+ ioend->io_type = IO_UNWRITTEN;
+
+ if (is_async) {
/*
- * A direct I/O write ioend starts it's life in unwritten
- * state in case they map an unwritten extent. This write
- * didn't map an unwritten extent so switch it's completion
- * handler.
+ * If we are converting an unwritten extent we need to delay
+ * the AIO completion until after the unwritten extent
+ * conversion has completed, otherwise do it ASAP.
*/
- ioend->io_type = IO_NEW;
- xfs_finish_ioend(ioend, 0);
+ if (ioend->io_type == IO_UNWRITTEN) {
+ ioend->io_iocb = iocb;
+ ioend->io_result = ret;
+ } else {
+ aio_complete(iocb, ret, 0);
+ }
+ xfs_finish_ioend(ioend);
+ } else {
+ xfs_finish_ioend_sync(ioend);
}
-
- /*
- * blockdev_direct_IO can return an error even after the I/O
- * completion handler was called. Thus we need to protect
- * against double-freeing.
- */
- iocb->private = NULL;
}
STATIC ssize_t
@@ -1648,26 +1471,45 @@ xfs_vm_direct_IO(
loff_t offset,
unsigned long nr_segs)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct block_device *bdev;
- ssize_t ret;
-
- bdev = xfs_find_bdev_for_inode(inode);
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct block_device *bdev = xfs_find_bdev_for_inode(inode);
+ ssize_t ret;
- iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
- IO_UNWRITTEN : IO_READ);
+ if (rw & WRITE) {
+ iocb->private = xfs_alloc_ioend(inode, IO_NEW);
- ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
+ ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
offset, nr_segs,
xfs_get_blocks_direct,
- xfs_end_io_direct);
+ xfs_end_io_direct_write, NULL, 0);
+ if (ret != -EIOCBQUEUED && iocb->private)
+ xfs_destroy_ioend(iocb->private);
+ } else {
+ ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
+ offset, nr_segs,
+ xfs_get_blocks_direct,
+ NULL, NULL, 0);
+ }
- if (unlikely(ret != -EIOCBQUEUED && iocb->private))
- xfs_destroy_ioend(iocb->private);
return ret;
}
+STATIC void
+xfs_vm_write_failed(
+ struct address_space *mapping,
+ loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ struct iattr ia = {
+ .ia_valid = ATTR_SIZE | ATTR_FORCE,
+ .ia_size = inode->i_size,
+ };
+ xfs_setattr(XFS_I(inode), &ia, XFS_ATTR_NOLOCK);
+ }
+}
+
STATIC int
xfs_vm_write_begin(
struct file *file,
@@ -1678,9 +1520,31 @@ xfs_vm_write_begin(
struct page **pagep,
void **fsdata)
{
- *pagep = NULL;
- return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
- xfs_get_blocks);
+ int ret;
+
+ ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
+ pagep, xfs_get_blocks);
+ if (unlikely(ret))
+ xfs_vm_write_failed(mapping, pos + len);
+ return ret;
+}
+
+STATIC int
+xfs_vm_write_end(
+ struct file *file,
+ struct address_space *mapping,
+ loff_t pos,
+ unsigned len,
+ unsigned copied,
+ struct page *page,
+ void *fsdata)
+{
+ int ret;
+
+ ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+ if (unlikely(ret < len))
+ xfs_vm_write_failed(mapping, pos + len);
+ return ret;
}
STATIC sector_t
@@ -1691,7 +1555,7 @@ xfs_vm_bmap(
struct inode *inode = (struct inode *)mapping->host;
struct xfs_inode *ip = XFS_I(inode);
- xfs_itrace_entry(XFS_I(inode));
+ trace_xfs_vm_bmap(XFS_I(inode));
xfs_ilock(ip, XFS_IOLOCK_SHARED);
xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -1725,7 +1589,7 @@ const struct address_space_operations xfs_address_space_operations = {
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
.write_begin = xfs_vm_write_begin,
- .write_end = generic_write_end,
+ .write_end = xfs_vm_write_end,
.bmap = xfs_vm_bmap,
.direct_IO = xfs_vm_direct_IO,
.migratepage = buffer_migrate_page,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 4cfc6ea87df..c5057fb6237 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -37,6 +37,8 @@ typedef struct xfs_ioend {
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
struct work_struct io_work; /* xfsdatad work queue */
+ struct kiocb *io_iocb;
+ int io_result;
} xfs_ioend_t;
extern const struct address_space_operations xfs_address_space_operations;
@@ -45,6 +47,6 @@ extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
extern void xfs_ioend_init(void);
extern void xfs_ioend_wait(struct xfs_inode *);
-extern void xfs_count_page_state(struct page *, int *, int *, int *);
+extern void xfs_count_page_state(struct page *, int *, int *);
#endif /* __XFS_AOPS_H__ */
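
The two fields added to xfs_ioend exist so an AIO completion arriving in interrupt context can be parked and finished later from the workqueue. A sketch of the consuming side; everything except aio_complete() is a demo name, and demo_convert_unwritten() is hypothetical:

	#include <linux/aio.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct demo_ioend {
		struct work_struct	io_work;
		struct kiocb		*io_iocb;	/* deferred AIO request, if any */
		int			io_result;
	};

	static void demo_convert_unwritten(struct demo_ioend *ioend);	/* hypothetical */

	static void demo_end_io_work(struct work_struct *work)
	{
		struct demo_ioend *ioend =
			container_of(work, struct demo_ioend, io_work);

		/* Needs process context, hence the workqueue. */
		demo_convert_unwritten(ioend);

		/* Now finish the AIO request deferred from irq context. */
		if (ioend->io_iocb)
			aio_complete(ioend->io_iocb, ioend->io_result, 0);
	}
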
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 649ade8ef59..ea79072f521 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -39,13 +39,12 @@
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(int, gfp_t);
+STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
static struct shrinker xfs_buf_shake = {
.shrink = xfsbufd_wakeup,
@@ -340,7 +339,7 @@ _xfs_buf_lookup_pages(
__func__, gfp_mask);
XFS_STATS_INC(xb_page_retries);
- xfsbufd_wakeup(0, gfp_mask);
+ xfsbufd_wakeup(NULL, 0, gfp_mask);
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
}
@@ -579,9 +578,9 @@ _xfs_buf_read(
XBF_READ_AHEAD | _XBF_RUN_QUEUES);
status = xfs_buf_iorequest(bp);
- if (!status && !(flags & XBF_ASYNC))
- status = xfs_buf_iowait(bp);
- return status;
+ if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
+ return status;
+ return xfs_buf_iowait(bp);
}
xfs_buf_t *
@@ -897,36 +896,6 @@ xfs_buf_unlock(
trace_xfs_buf_unlock(bp, _RET_IP_);
}
-
-/*
- * Pinning Buffer Storage in Memory
- * Ensure that no attempt to force a buffer to disk will succeed.
- */
-void
-xfs_buf_pin(
- xfs_buf_t *bp)
-{
- trace_xfs_buf_pin(bp, _RET_IP_);
- atomic_inc(&bp->b_pin_count);
-}
-
-void
-xfs_buf_unpin(
- xfs_buf_t *bp)
-{
- trace_xfs_buf_unpin(bp, _RET_IP_);
-
- if (atomic_dec_and_test(&bp->b_pin_count))
- wake_up_all(&bp->b_waiters);
-}
-
-int
-xfs_buf_ispin(
- xfs_buf_t *bp)
-{
- return atomic_read(&bp->b_pin_count);
-}
-
STATIC void
xfs_buf_wait_unpin(
xfs_buf_t *bp)
@@ -1018,13 +987,12 @@ xfs_bwrite(
{
int error;
- bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
xfs_buf_delwri_dequeue(bp);
- xfs_buf_iostrategy(bp);
+ xfs_bdstrat_cb(bp);
error = xfs_buf_iowait(bp);
if (error)
@@ -1040,7 +1008,6 @@ xfs_bdwrite(
{
trace_xfs_buf_bdwrite(bp, _RET_IP_);
- bp->b_strat = xfs_bdstrat_cb;
bp->b_mount = mp;
bp->b_flags &= ~XBF_READ;
@@ -1075,7 +1042,6 @@ xfs_bioerror(
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
- XFS_BUF_CLR_BDSTRAT_FUNC(bp);
xfs_biodone(bp);
return EIO;
@@ -1105,7 +1071,6 @@ xfs_bioerror_relse(
XFS_BUF_DONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_CLR_IODONE_FUNC(bp);
- XFS_BUF_CLR_BDSTRAT_FUNC(bp);
if (!(fl & XBF_ASYNC)) {
/*
* Mark b_error and B_ERROR _both_.
@@ -1311,8 +1276,19 @@ submit_io:
if (size)
goto next_chunk;
} else {
- bio_put(bio);
+ /*
+ * if we get here, no pages were added to the bio. However,
+ * we can't just error out here - if the pages are locked then
+ * we have to unlock them otherwise we can hang on a later
+ * access to the page.
+ */
xfs_buf_ioerror(bp, EIO);
+ if (bp->b_flags & _XBF_PAGE_LOCKED) {
+ int i;
+ for (i = 0; i < bp->b_page_count; i++)
+ unlock_page(bp->b_pages[i]);
+ }
+ bio_put(bio);
}
}
@@ -1762,6 +1738,7 @@ xfs_buf_runall_queues(
STATIC int
xfsbufd_wakeup(
+ struct shrinker *shrink,
int priority,
gfp_t mask)
{
@@ -1803,7 +1780,7 @@ xfs_buf_delwri_split(
trace_xfs_buf_delwri_split(bp, _RET_IP_);
ASSERT(bp->b_flags & XBF_DELWRI);
- if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
+ if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
if (!force &&
time_before(jiffies, bp->b_queuetime + age)) {
xfs_buf_unlock(bp);
@@ -1888,7 +1865,7 @@ xfsbufd(
struct xfs_buf *bp;
bp = list_first_entry(&tmp, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
- xfs_buf_iostrategy(bp);
+ xfs_bdstrat_cb(bp);
count++;
}
if (count)
@@ -1935,7 +1912,7 @@ xfs_flush_buftarg(
bp->b_flags &= ~XBF_ASYNC;
list_add(&bp->b_list, &wait_list);
}
- xfs_buf_iostrategy(bp);
+ xfs_bdstrat_cb(bp);
}
if (wait) {
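
xfsbufd_wakeup() grows a struct shrinker argument to match this kernel's updated shrinker callback signature. A minimal registration sketch under that interface, with demo names and no real cache behind it:

	#include <linux/mm.h>

	static int demo_shrink(struct shrinker *shrink, int nr_to_scan,
			       gfp_t gfp_mask)
	{
		if (nr_to_scan == 0)
			return 0;	/* query only: report cache size (empty demo) */
		/* reclaim up to nr_to_scan cached objects here */
		return 0;
	}

	static struct shrinker demo_shrinker = {
		.shrink	= demo_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&demo_shrinker) at init,
	 * unregister_shrinker(&demo_shrinker) at teardown. */
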
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 5fbecefa5df..d072e5ff923 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -44,57 +44,57 @@ typedef enum {
XBRW_ZERO = 3, /* Zero target memory */
} xfs_buf_rw_t;
-typedef enum {
- XBF_READ = (1 << 0), /* buffer intended for reading from device */
- XBF_WRITE = (1 << 1), /* buffer intended for writing to device */
- XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */
- XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
- XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
- XBF_DELWRI = (1 << 6), /* buffer has dirty pages */
- XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
- XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
- XBF_ORDERED = (1 << 11), /* use ordered writes */
- XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
- XBF_LOG_BUFFER = (1 << 13), /* this is a buffer used for the log */
-
- /* flags used only as arguments to access routines */
- XBF_LOCK = (1 << 14), /* lock requested */
- XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
- XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
-
- /* flags used only internally */
- _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
- _XBF_PAGES = (1 << 18), /* backed by refcounted pages */
- _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
- _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
-
- /*
- * Special flag for supporting metadata blocks smaller than a FSB.
- *
- * In this case we can have multiple xfs_buf_t on a single page and
- * need to lock out concurrent xfs_buf_t readers as they only
- * serialise access to the buffer.
- *
- * If the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
- * between reads of the page. Hence we can have one thread read the
- * page and modify it, but then race with another thread that thinks
- * the page is not up-to-date and hence reads it again.
- *
- * The result is that the first modifcation to the page is lost.
- * This sort of AGF/AGI reading race can happen when unlinking inodes
- * that require truncation and results in the AGI unlinked list
- * modifications being lost.
- */
- _XBF_PAGE_LOCKED = (1 << 22),
-
- /*
- * If we try a barrier write, but it fails we have to communicate
- * this to the upper layers. Unfortunately b_error gets overwritten
- * when the buffer is re-issued so we have to add another flag to
- * keep this information.
- */
- _XFS_BARRIER_FAILED = (1 << 23),
-} xfs_buf_flags_t;
+#define XBF_READ (1 << 0) /* buffer intended for reading from device */
+#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
+#define XBF_MAPPED (1 << 2) /* buffer mapped (b_addr valid) */
+#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
+#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
+#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
+#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
+#define XBF_FS_MANAGED (1 << 8) /* filesystem controls freeing memory */
+#define XBF_ORDERED (1 << 11)/* use ordered writes */
+#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
+#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
+
+/* flags used only as arguments to access routines */
+#define XBF_LOCK (1 << 14)/* lock requested */
+#define XBF_TRYLOCK (1 << 15)/* lock requested, but do not wait */
+#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
+
+/* flags used only internally */
+#define _XBF_PAGE_CACHE (1 << 17)/* backed by pagecache */
+#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */
+#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
+#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */
+
+/*
+ * Special flag for supporting metadata blocks smaller than a FSB.
+ *
+ * In this case we can have multiple xfs_buf_t on a single page and
+ * need to lock out concurrent xfs_buf_t readers as they only
+ * serialise access to the buffer.
+ *
+ * In the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
+ * between reads of the page. Hence we can have one thread read the
+ * page and modify it, but then race with another thread that thinks
+ * the page is not up-to-date and hence reads it again.
+ *
+ * The result is that the first modification to the page is lost.
+ * This sort of AGF/AGI reading race can happen when unlinking inodes
+ * that require truncation and results in the AGI unlinked list
+ * modifications being lost.
+ */
+#define _XBF_PAGE_LOCKED (1 << 22)
+
+/*
+ * If we try a barrier write, but it fails we have to communicate
+ * this to the upper layers. Unfortunately b_error gets overwritten
+ * when the buffer is re-issued so we have to add another flag to
+ * keep this information.
+ */
+#define _XFS_BARRIER_FAILED (1 << 23)
+
+typedef unsigned int xfs_buf_flags_t;
#define XFS_BUF_FLAGS \
{ XBF_READ, "READ" }, \
@@ -187,7 +187,6 @@ typedef struct xfs_buf {
atomic_t b_io_remaining; /* #outstanding I/O requests */
xfs_buf_iodone_t b_iodone; /* I/O completion function */
xfs_buf_relse_t b_relse; /* releasing function */
- xfs_buf_bdstrat_t b_strat; /* pre-write function */
struct completion b_iowait; /* queue for I/O waiters */
void *b_fspriv;
void *b_fspriv2;
@@ -245,11 +244,6 @@ extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
-static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
-{
- return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
-}
-
static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
return bp ? bp->b_error : ENOMEM;
@@ -258,11 +252,6 @@ static inline int xfs_buf_geterror(xfs_buf_t *bp)
/* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
-/* Pinning Buffer Storage in Memory */
-extern void xfs_buf_pin(xfs_buf_t *);
-extern void xfs_buf_unpin(xfs_buf_t *);
-extern int xfs_buf_ispin(xfs_buf_t *);
-
/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
extern void xfs_buf_delwri_promote(xfs_buf_t *);
@@ -326,8 +315,6 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
-#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func))
-#define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL)
#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
@@ -351,7 +338,7 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
-#define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp)
+#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count))
#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
@@ -370,8 +357,6 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
xfs_buf_rele(bp);
}
-#define xfs_bpin(bp) xfs_buf_pin(bp)
-#define xfs_bunpin(bp) xfs_buf_unpin(bp)
#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
#define xfs_biomove(bp, off, len, data, rw) \
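
One common motivation for converting a typedef enum of bit flags into #defines with a plain integer typedef, as done above, is that an OR of two enumerators is not itself a value of the enum type, which static checkers such as sparse complain about. A toy illustration:

	#define DEMO_READ	(1 << 0)
	#define DEMO_ASYNC	(1 << 1)

	typedef unsigned int demo_flags_t;	/* any OR of the flags is valid */

	static void demo_submit(demo_flags_t flags)
	{
		/* ... */
	}

	static void demo_caller(void)
	{
		/* Fine: the combination is just an unsigned int. With a
		 * typedef enum, DEMO_READ | DEMO_ASYNC names no enumerator. */
		demo_submit(DEMO_READ | DEMO_ASYNC);
	}
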
diff --git a/fs/xfs/linux-2.6/xfs_dmapi_priv.h b/fs/xfs/linux-2.6/xfs_dmapi_priv.h
deleted file mode 100644
index a8b0b1685ee..00000000000
--- a/fs/xfs/linux-2.6/xfs_dmapi_priv.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2000-2006 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DMAPI_PRIV_H__
-#define __XFS_DMAPI_PRIV_H__
-
-/*
- * Based on IO_ISDIRECT, decide which i_ flag is set.
- */
-#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
- DM_FLAGS_IMUX : 0)
-#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
-
-#endif /*__XFS_DMAPI_PRIV_H__*/
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 846b75aeb2a..3764d74790e 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -23,13 +23,13 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_export.h"
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
+#include "xfs_trace.h"
/*
* Note that we only accept fileids which are long enough rather than allow
@@ -128,13 +128,11 @@ xfs_nfs_get_inode(
return ERR_PTR(-ESTALE);
/*
- * The XFS_IGET_BULKSTAT means that an invalid inode number is just
- * fine and not an indication of a corrupted filesystem. Because
- * clients can send any kind of invalid file handle, e.g. after
- * a restore on the server we have to deal with this case gracefully.
+ * The XFS_IGET_UNTRUSTED flag means that an invalid inode number is just
+ * fine and not an indication of a corrupted filesystem, as clients can
+ * send invalid file handles and we have to handle them gracefully.
*/
- error = xfs_iget(mp, NULL, ino, XFS_IGET_BULKSTAT,
- XFS_ILOCK_SHARED, &ip, 0);
+ error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
if (error) {
/*
* EINVAL means the inode cluster doesn't exist anymore.
@@ -149,11 +147,10 @@ xfs_nfs_get_inode(
}
if (ip->i_d.di_gen != generation) {
- xfs_iput_new(ip, XFS_ILOCK_SHARED);
+ IRELE(ip);
return ERR_PTR(-ENOENT);
}
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
return VFS_I(ip);
}
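
The export-ops rework keeps the generation check: if the inode number from a file handle has been reused since the handle was issued, the lookup must fail rather than serve the wrong file. A condensed sketch of the pattern; demo_iget_untrusted() is hypothetical and error conventions are simplified:

	#include <linux/err.h>
	#include <linux/fs.h>

	static struct inode *demo_iget_untrusted(struct super_block *sb, u64 ino);

	static struct inode *demo_nfs_get_inode(struct super_block *sb,
						u64 ino, u32 generation)
	{
		struct inode *inode = demo_iget_untrusted(sb, ino);

		if (IS_ERR(inode))
			return inode;

		/* A stale generation means the inode was reused: refuse
		 * the handle instead of serving the wrong file. */
		if (inode->i_generation != generation) {
			iput(inode);
			return ERR_PTR(-ENOENT);
		}
		return inode;
	}
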
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 257a56b127c..ba8ad422a16 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -22,23 +22,15 @@
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_trans.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
@@ -108,7 +100,7 @@ xfs_file_fsync(
int error = 0;
int log_flushed = 0;
- xfs_itrace_entry(ip);
+ trace_xfs_file_fsync(ip);
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -XFS_ERROR(EIO);
@@ -166,8 +158,7 @@ xfs_file_fsync(
* transaction. So we play it safe and fire off the
* transaction anyway.
*/
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_set_sync(tp);
error = _xfs_trans_commit(tp, 0, &log_flushed);
@@ -275,20 +266,6 @@ xfs_file_aio_read(
mutex_lock(&inode->i_mutex);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
- if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
- int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
- int iolock = XFS_IOLOCK_SHARED;
-
- ret = -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, iocb->ki_pos, size,
- dmflags, &iolock);
- if (ret) {
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- if (unlikely(ioflags & IO_ISDIRECT))
- mutex_unlock(&inode->i_mutex);
- return ret;
- }
- }
-
if (unlikely(ioflags & IO_ISDIRECT)) {
if (inode->i_mapping->nrpages) {
ret = -xfs_flushinval_pages(ip,
@@ -321,7 +298,6 @@ xfs_file_splice_read(
unsigned int flags)
{
struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
- struct xfs_mount *mp = ip->i_mount;
int ioflags = 0;
ssize_t ret;
@@ -335,18 +311,6 @@ xfs_file_splice_read(
xfs_ilock(ip, XFS_IOLOCK_SHARED);
- if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
- int iolock = XFS_IOLOCK_SHARED;
- int error;
-
- error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
- FILP_DELAY_FLAG(infilp), &iolock);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- return -error;
- }
- }
-
trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
@@ -367,7 +331,6 @@ xfs_file_splice_write(
{
struct inode *inode = outfilp->f_mapping->host;
struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
xfs_fsize_t isize, new_size;
int ioflags = 0;
ssize_t ret;
@@ -382,18 +345,6 @@ xfs_file_splice_write(
xfs_ilock(ip, XFS_IOLOCK_EXCL);
- if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) && !(ioflags & IO_INVIS)) {
- int iolock = XFS_IOLOCK_EXCL;
- int error;
-
- error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, *ppos, count,
- FILP_DELAY_FLAG(outfilp), &iolock);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return -error;
- }
- }
-
new_size = *ppos + count;
xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -463,7 +414,7 @@ xfs_zero_last_block(
last_fsb = XFS_B_TO_FSBT(mp, isize);
nimaps = 1;
error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
- &nimaps, NULL, NULL);
+ &nimaps, NULL);
if (error) {
return error;
}
@@ -558,7 +509,7 @@ xfs_zero_eof(
nimaps = 1;
zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
- 0, NULL, 0, &imap, &nimaps, NULL, NULL);
+ 0, NULL, 0, &imap, &nimaps, NULL);
if (error) {
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
return error;
@@ -627,7 +578,6 @@ xfs_file_aio_write(
int ioflags = 0;
xfs_fsize_t isize, new_size;
int iolock;
- int eventsent = 0;
size_t ocount = 0, count;
int need_i_mutex;
@@ -673,33 +623,6 @@ start:
goto out_unlock_mutex;
}
- if ((DM_EVENT_ENABLED(ip, DM_EVENT_WRITE) &&
- !(ioflags & IO_INVIS) && !eventsent)) {
- int dmflags = FILP_DELAY_FLAG(file);
-
- if (need_i_mutex)
- dmflags |= DM_FLAGS_IMUX;
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- error = XFS_SEND_DATA(ip->i_mount, DM_EVENT_WRITE, ip,
- pos, count, dmflags, &iolock);
- if (error) {
- goto out_unlock_internal;
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- eventsent = 1;
-
- /*
- * The iolock was dropped and reacquired in XFS_SEND_DATA
- * so we have to recheck the size when appending.
- * We will only "goto start;" once, since having sent the
- * event prevents another call to XFS_SEND_DATA, which is
- * what allows the size to change in the first place.
- */
- if ((file->f_flags & O_APPEND) && pos != ip->i_size)
- goto start;
- }
-
if (ioflags & IO_ISDIRECT) {
xfs_buftarg_t *target =
XFS_IS_REALTIME_INODE(ip) ?
@@ -830,22 +753,6 @@ write_retry:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
- if (ret == -ENOSPC &&
- DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) {
- xfs_iunlock(ip, iolock);
- if (need_i_mutex)
- mutex_unlock(&inode->i_mutex);
- error = XFS_SEND_NAMESP(ip->i_mount, DM_EVENT_NOSPACE, ip,
- DM_RIGHT_NULL, ip, DM_RIGHT_NULL, NULL, NULL,
- 0, 0, 0); /* Delay flag intentionally unused */
- if (need_i_mutex)
- mutex_lock(&inode->i_mutex);
- xfs_ilock(ip, iolock);
- if (error)
- goto out_unlock_internal;
- goto start;
- }
-
error = -ret;
if (ret <= 0)
goto out_unlock_internal;
@@ -1014,9 +921,6 @@ const struct file_operations xfs_file_operations = {
.open = xfs_file_open,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
-#ifdef HAVE_FOP_OPEN_EXEC
- .open_exec = xfs_file_open_exec,
-#endif
};
const struct file_operations xfs_dir_file_operations = {
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index b6918d76bc7..1f279b012f9 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -21,10 +21,6 @@
#include "xfs_inode.h"
#include "xfs_trace.h"
-int fs_noerr(void) { return 0; }
-int fs_nosys(void) { return ENOSYS; }
-void fs_noval(void) { return; }
-
/*
* note: all filemap functions return negative error codes. These
* need to be inverted before returning to the xfs core functions.
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.h b/fs/xfs/linux-2.6/xfs_fs_subr.h
deleted file mode 100644
index 82bb19b2599..00000000000
--- a/fs/xfs/linux-2.6/xfs_fs_subr.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_FS_SUBR_H__
-#define __XFS_FS_SUBR_H__
-
-extern int fs_noerr(void);
-extern int fs_nosys(void);
-extern void fs_noval(void);
-
-#endif /* __XFS_FS_SUBR_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 699b60cbab9..237f5ffb2ee 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -23,24 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ioctl.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_buf_item.h"
@@ -679,10 +670,9 @@ xfs_ioc_bulkstat(
error = xfs_bulkstat_single(mp, &inlast,
bulkreq.ubuffer, &done);
else /* XFS_IOC_FSBULKSTAT */
- error = xfs_bulkstat(mp, &inlast, &count,
- (bulkstat_one_pf)xfs_bulkstat_one, NULL,
- sizeof(xfs_bstat_t), bulkreq.ubuffer,
- BULKSTAT_FG_QUICK, &done);
+ error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one,
+ sizeof(xfs_bstat_t), bulkreq.ubuffer,
+ &done);
if (error)
return -error;
@@ -909,7 +899,7 @@ xfs_ioctl_setattr(
struct xfs_dquot *olddquot = NULL;
int code;
- xfs_itrace_entry(ip);
+ trace_xfs_ioctl_setattr(ip);
if (mp->m_flags & XFS_MOUNT_RDONLY)
return XFS_ERROR(EROFS);
@@ -1044,8 +1034,7 @@ xfs_ioctl_setattr(
}
}
- xfs_trans_ijoin(tp, ip, lock_flags);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
/*
* Change file ownership. Must be the owner or privileged.
@@ -1117,16 +1106,7 @@ xfs_ioctl_setattr(
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
- if (code)
- return code;
-
- if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE)) {
- XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
- NULL, DM_RIGHT_NULL, NULL, NULL, 0, 0,
- (mask & FSX_NONBLOCK) ? DM_FLAGS_NDELAY : 0);
- }
-
- return 0;
+ return code;
error_return:
xfs_qm_dqrele(udqp);
@@ -1302,7 +1282,7 @@ xfs_file_ioctl(
if (filp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
- xfs_itrace_entry(ip);
+ trace_xfs_file_ioctl(ip);
switch (cmd) {
case XFS_IOC_ALLOCSP:
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 9287135e9bf..6c83f7f62dc 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -28,12 +28,8 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
#include "xfs_vnode.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@@ -237,15 +233,12 @@ xfs_bulkstat_one_compat(
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* buffer to place output in */
int ubsize, /* size of buffer */
- void *private_data, /* my private data */
- xfs_daddr_t bno, /* starting bno of inode cluster */
int *ubused, /* bytes used by me */
- void *dibuff, /* on-disk inode buffer */
int *stat) /* BULKSTAT_RV_... */
{
return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
- xfs_bulkstat_one_fmt_compat, bno,
- ubused, dibuff, stat);
+ xfs_bulkstat_one_fmt_compat,
+ ubused, stat);
}
/* copied from xfs_ioctl.c */
@@ -298,13 +291,11 @@ xfs_compat_ioc_bulkstat(
int res;
error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
- sizeof(compat_xfs_bstat_t),
- NULL, 0, NULL, NULL, &res);
+ sizeof(compat_xfs_bstat_t), 0, &res);
} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
error = xfs_bulkstat(mp, &inlast, &count,
- xfs_bulkstat_one_compat, NULL,
- sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
- BULKSTAT_FG_QUICK, &done);
+ xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
+ bulkreq.ubuffer, &done);
} else
error = XFS_ERROR(EINVAL);
if (error)
@@ -549,7 +540,7 @@ xfs_file_compat_ioctl(
if (filp->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
- xfs_itrace_entry(ip);
+ trace_xfs_file_compat_ioctl(ip);
switch (cmd) {
/* No size or alignment issues on any arch */
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 9c8019c78c9..68be25dcd30 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -24,21 +24,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
@@ -88,7 +80,7 @@ xfs_mark_inode_dirty_sync(
{
struct inode *inode = VFS_I(ip);
- if (!(inode->i_state & (I_WILL_FREE|I_FREEING|I_CLEAR)))
+ if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
mark_inode_dirty_sync(inode);
}
@@ -98,7 +90,7 @@ xfs_mark_inode_dirty(
{
struct inode *inode = VFS_I(ip);
- if (!(inode->i_state & (I_WILL_FREE|I_FREEING|I_CLEAR)))
+ if (!(inode->i_state & (I_WILL_FREE|I_FREEING)))
mark_inode_dirty(inode);
}
@@ -496,7 +488,7 @@ xfs_vn_getattr(
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- xfs_itrace_entry(ip);
+ trace_xfs_getattr(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@@ -548,21 +540,6 @@ xfs_vn_setattr(
return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
}
-/*
- * block_truncate_page can return an error, but we can't propagate it
- * at all here. Leave a complaint + stack trace in the syslog because
- * this could be bad. If it is bad, we need to propagate the error further.
- */
-STATIC void
-xfs_vn_truncate(
- struct inode *inode)
-{
- int error;
- error = block_truncate_page(inode->i_mapping, inode->i_size,
- xfs_get_blocks);
- WARN_ON(error);
-}
-
STATIC long
xfs_vn_fallocate(
struct inode *inode,
@@ -585,11 +562,20 @@ xfs_vn_fallocate(
bf.l_len = len;
xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+ /* check the new inode size is valid before allocating */
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ offset + len > i_size_read(inode)) {
+ new_size = offset + len;
+ error = inode_newsize_ok(inode, new_size);
+ if (error)
+ goto out_unlock;
+ }
+
error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
0, XFS_ATTR_NOLOCK);
- if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
- offset + len > i_size_read(inode))
- new_size = offset + len;
+ if (error)
+ goto out_unlock;
/* Change file size if needed */
if (new_size) {
@@ -600,6 +586,7 @@ xfs_vn_fallocate(
error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
}
+out_unlock:
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
out_error:
return error;
@@ -692,7 +679,6 @@ xfs_vn_fiemap(
static const struct inode_operations xfs_inode_operations = {
.check_acl = xfs_check_acl,
- .truncate = xfs_vn_truncate,
.getattr = xfs_vn_getattr,
.setattr = xfs_vn_setattr,
.setxattr = generic_setxattr,
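
The fallocate change above validates the prospective size before allocating anything, so no cleanup is needed when the request is over the limit. A condensed sketch of that check; demo_check_grow() is a demo wrapper around the real inode_newsize_ok() helper:

	#include <linux/fs.h>

	static int demo_check_grow(struct inode *inode, loff_t offset, loff_t len)
	{
		if (offset + len <= i_size_read(inode))
			return 0;	/* not growing past the current EOF */

		/* Checks sb->s_maxbytes and the RLIMIT_FSIZE limit;
		 * returns 0 or a negative errno such as -EFBIG. */
		return inode_newsize_ok(inode, offset + len);
	}
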
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index facfb323a70..2fa0bd9ebc7 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -87,7 +87,6 @@
#include <xfs_aops.h>
#include <xfs_super.h>
#include <xfs_globals.h>
-#include <xfs_fs_subr.h>
#include <xfs_buf.h>
/*
@@ -157,8 +156,6 @@
*/
#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
#define xfs_stack_trace() dump_stack()
-#define xfs_itruncate_data(ip, off) \
- (-vmtruncate(VFS_I(ip), (off)))
/* Move the kernel do_div definition off to one side */
diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c
index 9ac8aea9152..29b9d642e93 100644
--- a/fs/xfs/linux-2.6/xfs_quotaops.c
+++ b/fs/xfs/linux-2.6/xfs_quotaops.c
@@ -16,14 +16,12 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
-#include "xfs_dmapi.h"
#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_quota.h"
-#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
@@ -70,15 +68,15 @@ xfs_fs_set_xstate(
if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
return -ENOSYS;
- if (uflags & XFS_QUOTA_UDQ_ACCT)
+ if (uflags & FS_QUOTA_UDQ_ACCT)
flags |= XFS_UQUOTA_ACCT;
- if (uflags & XFS_QUOTA_PDQ_ACCT)
+ if (uflags & FS_QUOTA_PDQ_ACCT)
flags |= XFS_PQUOTA_ACCT;
- if (uflags & XFS_QUOTA_GDQ_ACCT)
+ if (uflags & FS_QUOTA_GDQ_ACCT)
flags |= XFS_GQUOTA_ACCT;
- if (uflags & XFS_QUOTA_UDQ_ENFD)
+ if (uflags & FS_QUOTA_UDQ_ENFD)
flags |= XFS_UQUOTA_ENFD;
- if (uflags & (XFS_QUOTA_PDQ_ENFD|XFS_QUOTA_GDQ_ENFD))
+ if (uflags & (FS_QUOTA_PDQ_ENFD|FS_QUOTA_GDQ_ENFD))
flags |= XFS_OQUOTA_ENFD;
switch (op) {
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f2d1718c916..15c35b62ff1 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -25,14 +25,11 @@
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -43,7 +40,6 @@
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
@@ -94,7 +90,6 @@ mempool_t *xfs_ioend_pool;
#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
* unwritten extent conversion */
#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
-#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */
@@ -116,9 +111,6 @@ mempool_t *xfs_ioend_pool;
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
-#define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */
-#define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */
-#define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_DELAYLOG "delaylog" /* Delayed loging enabled */
#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed loging disabled */
@@ -172,15 +164,13 @@ suffix_strtoul(char *s, char **endp, unsigned int base)
STATIC int
xfs_parseargs(
struct xfs_mount *mp,
- char *options,
- char **mtpt)
+ char *options)
{
struct super_block *sb = mp->m_super;
char *this_char, *value, *eov;
int dsunit = 0;
int dswidth = 0;
int iosize = 0;
- int dmapi_implies_ikeep = 1;
__uint8_t iosizelog = 0;
/*
@@ -243,15 +233,10 @@ xfs_parseargs(
if (!mp->m_logname)
return ENOMEM;
} else if (!strcmp(this_char, MNTOPT_MTPT)) {
- if (!value || !*value) {
- cmn_err(CE_WARN,
- "XFS: %s option requires an argument",
- this_char);
- return EINVAL;
- }
- *mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
- if (!*mtpt)
- return ENOMEM;
+ cmn_err(CE_WARN,
+ "XFS: %s option not allowed on this system",
+ this_char);
+ return EINVAL;
} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
if (!value || !*value) {
cmn_err(CE_WARN,
@@ -288,8 +273,6 @@ xfs_parseargs(
mp->m_flags &= ~XFS_MOUNT_GRPID;
} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
mp->m_flags |= XFS_MOUNT_WSYNC;
- } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
- mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
mp->m_flags |= XFS_MOUNT_NORECOVERY;
} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
@@ -329,7 +312,6 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
mp->m_flags |= XFS_MOUNT_IKEEP;
} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
- dmapi_implies_ikeep = 0;
mp->m_flags &= ~XFS_MOUNT_IKEEP;
} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
@@ -370,12 +352,6 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
mp->m_qflags &= ~XFS_OQUOTA_ENFD;
- } else if (!strcmp(this_char, MNTOPT_DMAPI)) {
- mp->m_flags |= XFS_MOUNT_DMAPI;
- } else if (!strcmp(this_char, MNTOPT_XDSM)) {
- mp->m_flags |= XFS_MOUNT_DMAPI;
- } else if (!strcmp(this_char, MNTOPT_DMI)) {
- mp->m_flags |= XFS_MOUNT_DMAPI;
} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
mp->m_flags |= XFS_MOUNT_DELAYLOG;
cmn_err(CE_WARN,
@@ -387,9 +363,11 @@ xfs_parseargs(
cmn_err(CE_WARN,
"XFS: ihashsize no longer used, option is deprecated.");
} else if (!strcmp(this_char, "osyncisdsync")) {
- /* no-op, this is now the default */
cmn_err(CE_WARN,
- "XFS: osyncisdsync is now the default, option is deprecated.");
+ "XFS: osyncisdsync has no effect, option is deprecated.");
+ } else if (!strcmp(this_char, "osyncisosync")) {
+ cmn_err(CE_WARN,
+ "XFS: osyncisosync has no effect, option is deprecated.");
} else if (!strcmp(this_char, "irixsgid")) {
cmn_err(CE_WARN,
"XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
@@ -430,12 +408,6 @@ xfs_parseargs(
return EINVAL;
}
- if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) {
- printk("XFS: %s option needs the mount point option as well\n",
- MNTOPT_DMAPI);
- return EINVAL;
- }
-
if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
cmn_err(CE_WARN,
"XFS: sunit and swidth must be specified together");
@@ -449,18 +421,6 @@ xfs_parseargs(
return EINVAL;
}
- /*
- * Applications using DMI filesystems often expect the
- * inode generation number to be monotonically increasing.
- * If we delete inode chunks we break this assumption, so
- * keep unused inode chunks on disk for DMI filesystems
- * until we come up with a better solution.
- * Note that if "ikeep" or "noikeep" mount options are
- * supplied, then they are honored.
- */
- if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep)
- mp->m_flags |= XFS_MOUNT_IKEEP;
-
done:
if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
/*
@@ -539,10 +499,8 @@ xfs_showargs(
{ XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
{ XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
{ XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
- { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
{ XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 },
{ XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
- { XFS_MOUNT_DMAPI, "," MNTOPT_DMAPI },
{ XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
{ XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG },
{ 0, NULL }
@@ -947,7 +905,7 @@ xfs_fs_destroy_inode(
{
struct xfs_inode *ip = XFS_I(inode);
- xfs_itrace_entry(ip);
+ trace_xfs_destroy_inode(ip);
XFS_STATS_INC(vn_reclaim);
@@ -1063,10 +1021,8 @@ xfs_log_inode(
* an inode in another recent transaction. So we play it safe and
* fire off the transaction anyway.
*/
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
@@ -1082,27 +1038,18 @@ xfs_fs_write_inode(
struct xfs_mount *mp = ip->i_mount;
int error = EAGAIN;
- xfs_itrace_entry(ip);
+ trace_xfs_write_inode(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
if (wbc->sync_mode == WB_SYNC_ALL) {
/*
- * Make sure the inode has hit stable storage. By using the
- * log and the fsync transactions we reduce the IOs we have
- * to do here from two (log and inode) to just the log.
- *
- * Note: We still need to do a delwri write of the inode after
- * this to flush it to the backing buffer so that bulkstat
- * works properly if this is the first time the inode has been
- * written. Because we hold the ilock atomically over the
- * transaction commit and the inode flush we are guaranteed
- * that the inode is not pinned when it returns. If the flush
- * lock is already held, then the inode has already been
- * flushed once and we don't need to flush it again. Hence
- * the code will only flush the inode if it isn't already
- * being flushed.
+ * Make sure the inode has made it into the log. Instead
+ * of forcing it all the way to stable storage using a
+ * synchronous transaction we let the log force inside the
+ * ->sync_fs call do that for us, which reduces the number
+ * of synchronous log forces dramatically.
*/
xfs_ioend_wait(ip);
xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -1116,27 +1063,29 @@ xfs_fs_write_inode(
* We make this non-blocking if the inode is contended, return
* EAGAIN to indicate to the caller that they did not succeed.
* This prevents the flush path from blocking on inodes inside
- * another operation right now, they get caught later by xfs_sync.
+ * another operation right now; they get caught later by
+ * xfs_sync.
*/
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
goto out;
- }
- if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
- goto out_unlock;
+ if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
+ goto out_unlock;
- /*
- * Now we have the flush lock and the inode is not pinned, we can check
- * if the inode is really clean as we know that there are no pending
- * transaction completions, it is not waiting on the delayed write
- * queue and there is no IO in progress.
- */
- if (xfs_inode_clean(ip)) {
- xfs_ifunlock(ip);
- error = 0;
- goto out_unlock;
+ /*
+ * Now that we have the flush lock and the inode is not pinned,
+ * we can check whether the inode is really clean: we know there
+ * are no pending transaction completions, it is not waiting on
+ * the delayed write queue, and there is no I/O in progress.
+ */
+ if (xfs_inode_clean(ip)) {
+ xfs_ifunlock(ip);
+ error = 0;
+ goto out_unlock;
+ }
+ error = xfs_iflush(ip, 0);
}
- error = xfs_iflush(ip, 0);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -1151,12 +1100,15 @@ xfs_fs_write_inode(
}
STATIC void
-xfs_fs_clear_inode(
+xfs_fs_evict_inode(
struct inode *inode)
{
xfs_inode_t *ip = XFS_I(inode);
- xfs_itrace_entry(ip);
+ trace_xfs_evict_inode(ip);
+
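+ /*
+ * ->evict_inode replaces ->clear_inode and must tear down the
+ * page cache and end writeback itself.
+ */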
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
XFS_STATS_INC(vn_rele);
XFS_STATS_INC(vn_remove);
XFS_STATS_DEC(vn_active);
@@ -1193,22 +1145,13 @@ xfs_fs_put_super(
{
struct xfs_mount *mp = XFS_M(sb);
+ /*
+ * Unregister the memory shrinker before we tear down the mount
+ * structure so we don't have memory reclaim racing with us here.
+ */
+ xfs_inode_shrinker_unregister(mp);
xfs_syncd_stop(mp);
- if (!(sb->s_flags & MS_RDONLY)) {
- /*
- * XXX(hch): this should be SYNC_WAIT.
- *
- * Or more likely not needed at all because the VFS is already
- * calling ->sync_fs after shutting down all filestem
- * operations and just before calling ->put_super.
- */
- xfs_sync_data(mp, 0);
- xfs_sync_attr(mp, 0);
- }
-
- XFS_SEND_PREUNMOUNT(mp);
-
/*
* Blow away any referenced inode in the filestreams cache.
* This can and will cause log traffic as inodes go inactive
@@ -1218,14 +1161,10 @@ xfs_fs_put_super(
XFS_bflush(mp->m_ddev_targp);
- XFS_SEND_UNMOUNT(mp);
-
xfs_unmountfs(mp);
xfs_freesb(mp);
- xfs_inode_shrinker_unregister(mp);
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
- xfs_dmops_put(mp);
xfs_free_fsname(mp);
kfree(mp);
}
@@ -1543,7 +1482,6 @@ xfs_fs_fill_super(
struct inode *root;
struct xfs_mount *mp = NULL;
int flags = 0, error = ENOMEM;
- char *mtpt = NULL;
mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
if (!mp)
@@ -1559,7 +1497,7 @@ xfs_fs_fill_super(
mp->m_super = sb;
sb->s_fs_info = mp;
- error = xfs_parseargs(mp, (char *)data, &mtpt);
+ error = xfs_parseargs(mp, (char *)data);
if (error)
goto out_free_fsname;
@@ -1571,16 +1509,12 @@ xfs_fs_fill_super(
#endif
sb->s_op = &xfs_super_operations;
- error = xfs_dmops_get(mp);
- if (error)
- goto out_free_fsname;
-
if (silent)
flags |= XFS_MFSI_QUIET;
error = xfs_open_devices(mp);
if (error)
- goto out_put_dmops;
+ goto out_free_fsname;
if (xfs_icsb_init_counters(mp))
mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
@@ -1608,8 +1542,6 @@ xfs_fs_fill_super(
if (error)
goto out_filestream_unmount;
- XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);
-
sb->s_magic = XFS_SB_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
@@ -1638,7 +1570,6 @@ xfs_fs_fill_super(
xfs_inode_shrinker_register(mp);
- kfree(mtpt);
return 0;
out_filestream_unmount:
@@ -1648,11 +1579,8 @@ xfs_fs_fill_super(
out_destroy_counters:
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
- out_put_dmops:
- xfs_dmops_put(mp);
out_free_fsname:
xfs_free_fsname(mp);
- kfree(mtpt);
kfree(mp);
out:
return -error;
@@ -1696,7 +1624,7 @@ static const struct super_operations xfs_super_operations = {
.destroy_inode = xfs_fs_destroy_inode,
.dirty_inode = xfs_fs_dirty_inode,
.write_inode = xfs_fs_write_inode,
- .clear_inode = xfs_fs_clear_inode,
+ .evict_inode = xfs_fs_evict_inode,
.put_super = xfs_fs_put_super,
.sync_fs = xfs_fs_sync_fs,
.freeze_fs = xfs_fs_freeze,
@@ -1759,6 +1687,12 @@ xfs_init_zones(void)
if (!xfs_trans_zone)
goto out_destroy_ifork_zone;
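+ /* Log item descriptors are now allocated from their own zone. */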
+ xfs_log_item_desc_zone =
+ kmem_zone_init(sizeof(struct xfs_log_item_desc),
+ "xfs_log_item_desc");
+ if (!xfs_log_item_desc_zone)
+ goto out_destroy_trans_zone;
+
/*
* The size of the zone allocated buf log item is the maximum
* size possible under XFS. This wastes a little bit of memory,
@@ -1768,7 +1702,7 @@ xfs_init_zones(void)
(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
NBWORD) * sizeof(int))), "xfs_buf_item");
if (!xfs_buf_item_zone)
- goto out_destroy_trans_zone;
+ goto out_destroy_log_item_desc_zone;
xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
((XFS_EFD_MAX_FAST_EXTENTS - 1) *
@@ -1805,6 +1739,8 @@ xfs_init_zones(void)
kmem_zone_destroy(xfs_efd_zone);
out_destroy_buf_item_zone:
kmem_zone_destroy(xfs_buf_item_zone);
+ out_destroy_log_item_desc_zone:
+ kmem_zone_destroy(xfs_log_item_desc_zone);
out_destroy_trans_zone:
kmem_zone_destroy(xfs_trans_zone);
out_destroy_ifork_zone:
@@ -1835,6 +1771,7 @@ xfs_destroy_zones(void)
kmem_zone_destroy(xfs_efi_zone);
kmem_zone_destroy(xfs_efd_zone);
kmem_zone_destroy(xfs_buf_item_zone);
+ kmem_zone_destroy(xfs_log_item_desc_zone);
kmem_zone_destroy(xfs_trans_zone);
kmem_zone_destroy(xfs_ifork_zone);
kmem_zone_destroy(xfs_dabuf_zone);
@@ -1883,7 +1820,6 @@ init_xfs_fs(void)
goto out_cleanup_procfs;
vfs_initquota();
- xfs_inode_shrinker_init();
error = register_filesystem(&xfs_fs_type);
if (error)
@@ -1911,7 +1847,6 @@ exit_xfs_fs(void)
{
vfs_exitquota();
unregister_filesystem(&xfs_fs_type);
- xfs_inode_shrinker_destroy();
xfs_sysctl_unregister();
xfs_cleanup_procfs();
xfs_buf_terminate();
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 519618e9279..1ef4a4d2d99 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -56,12 +56,6 @@ extern void xfs_qm_exit(void);
# define XFS_BIGFS_STRING
#endif
-#ifdef CONFIG_XFS_DMAPI
-# define XFS_DMAPI_STRING "dmapi support, "
-#else
-# define XFS_DMAPI_STRING
-#endif
-
#ifdef DEBUG
# define XFS_DBG_STRING "debug"
#else
@@ -72,7 +66,6 @@ extern void xfs_qm_exit(void);
XFS_SECURITY_STRING \
XFS_REALTIME_STRING \
XFS_BIGFS_STRING \
- XFS_DMAPI_STRING \
XFS_DBG_STRING /* DBG must be last */
struct xfs_inode;
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 3884e20bc14..dfcbd98d159 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -24,25 +24,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
-#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
-#include "xfs_utils.h"
-#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
-#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
@@ -144,6 +133,41 @@ restart:
return last_error;
}
+/*
+ * Select the next per-ag structure to iterate during the walk. The reclaim
+ * walk is optimised only to walk AGs with reclaimable inodes in them.
+ */
+static struct xfs_perag *
+xfs_inode_ag_iter_next_pag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t *first,
+ int tag)
+{
+ struct xfs_perag *pag = NULL;
+
+ if (tag == XFS_ICI_RECLAIM_TAG) {
+ int found;
+ int ref;
+
+ spin_lock(&mp->m_perag_lock);
+ found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
+ (void **)&pag, *first, 1, tag);
+ if (found <= 0) {
+ spin_unlock(&mp->m_perag_lock);
+ return NULL;
+ }
+ *first = pag->pag_agno + 1;
+ /* open coded pag reference increment */
+ ref = atomic_inc_return(&pag->pag_ref);
+ spin_unlock(&mp->m_perag_lock);
+ trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
+ } else {
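+ /* Other walks simply visit every AG in ascending order. */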
+ pag = xfs_perag_get(mp, *first);
+ (*first)++;
+ }
+ return pag;
+}
+
int
xfs_inode_ag_iterator(
struct xfs_mount *mp,
@@ -154,20 +178,15 @@ xfs_inode_ag_iterator(
int exclusive,
int *nr_to_scan)
{
+ struct xfs_perag *pag;
int error = 0;
int last_error = 0;
xfs_agnumber_t ag;
int nr;
nr = nr_to_scan ? *nr_to_scan : INT_MAX;
- for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
- struct xfs_perag *pag;
-
- pag = xfs_perag_get(mp, ag);
- if (!pag->pag_ici_init) {
- xfs_perag_put(pag);
- continue;
- }
+ ag = 0;
+ while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
exclusive, &nr);
xfs_perag_put(pag);
@@ -289,7 +308,7 @@ xfs_sync_inode_attr(
/*
* Write out pagecache data for the whole filesystem.
*/
-int
+STATIC int
xfs_sync_data(
struct xfs_mount *mp,
int flags)
@@ -310,7 +329,7 @@ xfs_sync_data(
/*
* Write out inode metadata (attributes) for the whole filesystem.
*/
-int
+STATIC int
xfs_sync_attr(
struct xfs_mount *mp,
int flags)
@@ -343,8 +362,7 @@ xfs_commit_dummy_trans(
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_trans_commit(tp, 0);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -644,6 +662,17 @@ __xfs_inode_set_reclaim_tag(
radix_tree_tag_set(&pag->pag_ici_root,
XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
+
+ if (!pag->pag_ici_reclaimable) {
+ /* propagate the reclaim tag up into the perag radix tree */
+ spin_lock(&ip->i_mount->m_perag_lock);
+ radix_tree_tag_set(&ip->i_mount->m_perag_tree,
+ XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
+ spin_unlock(&ip->i_mount->m_perag_lock);
+ trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
+ -1, _RET_IP_);
+ }
pag->pag_ici_reclaimable++;
}
@@ -678,6 +707,16 @@ __xfs_inode_clear_reclaim_tag(
radix_tree_tag_clear(&pag->pag_ici_root,
XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
pag->pag_ici_reclaimable--;
+ if (!pag->pag_ici_reclaimable) {
+ /* clear the reclaim tag from the perag radix tree */
+ spin_lock(&ip->i_mount->m_perag_lock);
+ radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
+ XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
+ spin_unlock(&ip->i_mount->m_perag_lock);
+ trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
+ -1, _RET_IP_);
+ }
}
/*
@@ -816,7 +855,36 @@ out:
reclaim:
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_ireclaim(ip);
+
+ XFS_STATS_INC(xs_ig_reclaims);
+ /*
+ * Remove the inode from the per-AG radix tree.
+ *
+ * Because radix_tree_delete won't complain even if the item was never
+ * added to the tree, assert that it was there before, to catch
+ * problems with the inode lifetime early on.
+ */
+ write_lock(&pag->pag_ici_lock);
+ if (!radix_tree_delete(&pag->pag_ici_root,
+ XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
+ ASSERT(0);
+ write_unlock(&pag->pag_ici_lock);
+
+ /*
+ * Here we do an (almost) spurious inode lock in order to coordinate
+ * with inode cache radix tree lookups. This is because the lookup
+ * can reference the inodes in the cache without taking references.
+ *
+ * We make that OK here by ensuring that we wait until the inode is
+ * unlocked after the lookup before we go ahead and free it. We get
+ * both the ilock and the iolock because the code may need to drop
+ * the ilock, but will still hold the iolock.
+ */
+ xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ xfs_qm_dqdetach(ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+ xfs_inode_free(ip);
return error;
}
@@ -832,88 +900,52 @@ xfs_reclaim_inodes(
/*
* Shrinker infrastructure.
- *
- * This is all far more complex than it needs to be. It adds a global list of
- * mounts because the shrinkers can only call a global context. We need to make
- * the shrinkers pass a context to avoid the need for global state.
*/
-static LIST_HEAD(xfs_mount_list);
-static struct rw_semaphore xfs_mount_list_lock;
-
static int
xfs_reclaim_inode_shrink(
+ struct shrinker *shrink,
int nr_to_scan,
gfp_t gfp_mask)
{
struct xfs_mount *mp;
struct xfs_perag *pag;
xfs_agnumber_t ag;
- int reclaimable = 0;
+ int reclaimable;
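+
+ /*
+ * The shrinker is embedded in the xfs_mount, so recover the mount
+ * from it instead of walking a global mount list.
+ */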
+ mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
if (nr_to_scan) {
if (!(gfp_mask & __GFP_FS))
return -1;
- down_read(&xfs_mount_list_lock);
- list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
- xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
+ xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
- if (nr_to_scan <= 0)
- break;
- }
- up_read(&xfs_mount_list_lock);
- }
-
- down_read(&xfs_mount_list_lock);
- list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
- for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
+ /* if we don't exhaust the scan, don't bother coming back */
+ if (nr_to_scan > 0)
+ return -1;
+ }
- pag = xfs_perag_get(mp, ag);
- if (!pag->pag_ici_init) {
- xfs_perag_put(pag);
- continue;
- }
- reclaimable += pag->pag_ici_reclaimable;
- xfs_perag_put(pag);
- }
+ reclaimable = 0;
+ ag = 0;
+ while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
+ XFS_ICI_RECLAIM_TAG))) {
+ reclaimable += pag->pag_ici_reclaimable;
+ xfs_perag_put(pag);
}
- up_read(&xfs_mount_list_lock);
return reclaimable;
}
-static struct shrinker xfs_inode_shrinker = {
- .shrink = xfs_reclaim_inode_shrink,
- .seeks = DEFAULT_SEEKS,
-};
-
-void __init
-xfs_inode_shrinker_init(void)
-{
- init_rwsem(&xfs_mount_list_lock);
- register_shrinker(&xfs_inode_shrinker);
-}
-
-void
-xfs_inode_shrinker_destroy(void)
-{
- ASSERT(list_empty(&xfs_mount_list));
- unregister_shrinker(&xfs_inode_shrinker);
-}
-
void
xfs_inode_shrinker_register(
struct xfs_mount *mp)
{
- down_write(&xfs_mount_list_lock);
- list_add_tail(&mp->m_mplist, &xfs_mount_list);
- up_write(&xfs_mount_list_lock);
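+ /* Set up and register the per-mount inode reclaim shrinker. */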
+ mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
+ mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
+ register_shrinker(&mp->m_inode_shrink);
}
void
xfs_inode_shrinker_unregister(
struct xfs_mount *mp)
{
- down_write(&xfs_mount_list_lock);
- list_del(&mp->m_mplist);
- up_write(&xfs_mount_list_lock);
+ unregister_shrinker(&mp->m_inode_shrink);
}
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index cdcbaaca988..fe78726196f 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -35,9 +35,6 @@ typedef struct xfs_sync_work {
int xfs_syncd_init(struct xfs_mount *mp);
void xfs_syncd_stop(struct xfs_mount *mp);
-int xfs_sync_attr(struct xfs_mount *mp, int flags);
-int xfs_sync_data(struct xfs_mount *mp, int flags);
-
int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp);
@@ -55,8 +52,6 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
int flags, int tag, int write_lock, int *nr_to_scan);
-void xfs_inode_shrinker_init(void);
-void xfs_inode_shrinker_destroy(void);
void xfs_inode_shrinker_register(struct xfs_mount *mp);
void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c
index 207fa77f63a..88d25d4aa56 100644
--- a/fs/xfs/linux-2.6/xfs_trace.c
+++ b/fs/xfs/linux-2.6/xfs_trace.c
@@ -24,17 +24,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
@@ -50,7 +46,6 @@
#include "quota/xfs_dquot_item.h"
#include "quota/xfs_dquot.h"
#include "xfs_log_recover.h"
-#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
/*
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index ff6bc797baf..be5dffd282a 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -82,33 +82,6 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class,
)
)
-#define DEFINE_PERAG_REF_EVENT(name) \
-TRACE_EVENT(name, \
- TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
- unsigned long caller_ip), \
- TP_ARGS(mp, agno, refcount, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_agnumber_t, agno) \
- __field(int, refcount) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = mp->m_super->s_dev; \
- __entry->agno = agno; \
- __entry->refcount = refcount; \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d agno %u refcount %d caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->agno, \
- __entry->refcount, \
- (char *)__entry->caller_ip) \
-);
-
-DEFINE_PERAG_REF_EVENT(xfs_perag_get)
-DEFINE_PERAG_REF_EVENT(xfs_perag_put)
-
#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
TP_PROTO(struct xfs_attr_list_context *ctx), \
@@ -122,6 +95,40 @@ DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound);
+DECLARE_EVENT_CLASS(xfs_perag_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount,
+ unsigned long caller_ip),
+ TP_ARGS(mp, agno, refcount, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, refcount)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->refcount = refcount;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d agno %u refcount %d caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->refcount,
+ (char *)__entry->caller_ip)
+);
+
+#define DEFINE_PERAG_REF_EVENT(name) \
+DEFINE_EVENT(xfs_perag_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, agno, refcount, caller_ip))
+DEFINE_PERAG_REF_EVENT(xfs_perag_get);
+DEFINE_PERAG_REF_EVENT(xfs_perag_get_reclaim);
+DEFINE_PERAG_REF_EVENT(xfs_perag_put);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
+
TRACE_EVENT(xfs_attr_list_node_descend,
TP_PROTO(struct xfs_attr_list_context *ctx,
struct xfs_da_node_entry *btree),
@@ -310,8 +317,6 @@ DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele);
-DEFINE_BUF_EVENT(xfs_buf_pin);
-DEFINE_BUF_EVENT(xfs_buf_unpin);
DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_iorequest);
DEFINE_BUF_EVENT(xfs_buf_bawrite);
@@ -534,7 +539,7 @@ DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);
-DECLARE_EVENT_CLASS(xfs_iget_class,
+DECLARE_EVENT_CLASS(xfs_inode_class,
TP_PROTO(struct xfs_inode *ip),
TP_ARGS(ip),
TP_STRUCT__entry(
@@ -550,16 +555,38 @@ DECLARE_EVENT_CLASS(xfs_iget_class,
__entry->ino)
)
-#define DEFINE_IGET_EVENT(name) \
-DEFINE_EVENT(xfs_iget_class, name, \
+#define DEFINE_INODE_EVENT(name) \
+DEFINE_EVENT(xfs_inode_class, name, \
TP_PROTO(struct xfs_inode *ip), \
TP_ARGS(ip))
-DEFINE_IGET_EVENT(xfs_iget_skip);
-DEFINE_IGET_EVENT(xfs_iget_reclaim);
-DEFINE_IGET_EVENT(xfs_iget_found);
-DEFINE_IGET_EVENT(xfs_iget_alloc);
-
-DECLARE_EVENT_CLASS(xfs_inode_class,
+DEFINE_INODE_EVENT(xfs_iget_skip);
+DEFINE_INODE_EVENT(xfs_iget_reclaim);
+DEFINE_INODE_EVENT(xfs_iget_reclaim_fail);
+DEFINE_INODE_EVENT(xfs_iget_hit);
+DEFINE_INODE_EVENT(xfs_iget_miss);
+
+DEFINE_INODE_EVENT(xfs_getattr);
+DEFINE_INODE_EVENT(xfs_setattr);
+DEFINE_INODE_EVENT(xfs_readlink);
+DEFINE_INODE_EVENT(xfs_alloc_file_space);
+DEFINE_INODE_EVENT(xfs_free_file_space);
+DEFINE_INODE_EVENT(xfs_readdir);
+#ifdef CONFIG_XFS_POSIX_ACL
+DEFINE_INODE_EVENT(xfs_check_acl);
+#endif
+DEFINE_INODE_EVENT(xfs_vm_bmap);
+DEFINE_INODE_EVENT(xfs_file_ioctl);
+DEFINE_INODE_EVENT(xfs_file_compat_ioctl);
+DEFINE_INODE_EVENT(xfs_ioctl_setattr);
+DEFINE_INODE_EVENT(xfs_file_fsync);
+DEFINE_INODE_EVENT(xfs_destroy_inode);
+DEFINE_INODE_EVENT(xfs_write_inode);
+DEFINE_INODE_EVENT(xfs_evict_inode);
+
+DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
+DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
+
+DECLARE_EVENT_CLASS(xfs_iref_class,
TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
TP_ARGS(ip, caller_ip),
TP_STRUCT__entry(
@@ -584,20 +611,71 @@ DECLARE_EVENT_CLASS(xfs_inode_class,
(char *)__entry->caller_ip)
)
-#define DEFINE_INODE_EVENT(name) \
-DEFINE_EVENT(xfs_inode_class, name, \
+#define DEFINE_IREF_EVENT(name) \
+DEFINE_EVENT(xfs_iref_class, name, \
TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
TP_ARGS(ip, caller_ip))
-DEFINE_INODE_EVENT(xfs_ihold);
-DEFINE_INODE_EVENT(xfs_irele);
-DEFINE_INODE_EVENT(xfs_inode_pin);
-DEFINE_INODE_EVENT(xfs_inode_unpin);
-DEFINE_INODE_EVENT(xfs_inode_unpin_nowait);
+DEFINE_IREF_EVENT(xfs_ihold);
+DEFINE_IREF_EVENT(xfs_irele);
+DEFINE_IREF_EVENT(xfs_inode_pin);
+DEFINE_IREF_EVENT(xfs_inode_unpin);
+DEFINE_IREF_EVENT(xfs_inode_unpin_nowait);
+
+DECLARE_EVENT_CLASS(xfs_namespace_class,
+ TP_PROTO(struct xfs_inode *dp, struct xfs_name *name),
+ TP_ARGS(dp, name),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, dp_ino)
+ __dynamic_array(char, name, name->len)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(dp)->i_sb->s_dev;
+ __entry->dp_ino = dp->i_ino;
+ memcpy(__get_str(name), name->name, name->len);
+ ),
+ TP_printk("dev %d:%d dp ino 0x%llx name %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->dp_ino,
+ __get_str(name))
+)
-/* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */
-DEFINE_INODE_EVENT(xfs_inode);
-#define xfs_itrace_entry(ip) \
- trace_xfs_inode(ip, _THIS_IP_)
+#define DEFINE_NAMESPACE_EVENT(name) \
+DEFINE_EVENT(xfs_namespace_class, name, \
+ TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \
+ TP_ARGS(dp, name))
+DEFINE_NAMESPACE_EVENT(xfs_remove);
+DEFINE_NAMESPACE_EVENT(xfs_link);
+DEFINE_NAMESPACE_EVENT(xfs_lookup);
+DEFINE_NAMESPACE_EVENT(xfs_create);
+DEFINE_NAMESPACE_EVENT(xfs_symlink);
+
+TRACE_EVENT(xfs_rename,
+ TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp,
+ struct xfs_name *src_name, struct xfs_name *target_name),
+ TP_ARGS(src_dp, target_dp, src_name, target_name),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, src_dp_ino)
+ __field(xfs_ino_t, target_dp_ino)
+ __dynamic_array(char, src_name, src_name->len)
+ __dynamic_array(char, target_name, target_name->len)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(src_dp)->i_sb->s_dev;
+ __entry->src_dp_ino = src_dp->i_ino;
+ __entry->target_dp_ino = target_dp->i_ino;
+ memcpy(__get_str(src_name), src_name->name, src_name->len);
+ memcpy(__get_str(target_name), target_name->name, target_name->len);
+ ),
+ TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx"
+ " src name %s target name %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->src_dp_ino,
+ __entry->target_dp_ino,
+ __get_str(src_name),
+ __get_str(target_name))
+)
DECLARE_EVENT_CLASS(xfs_dquot_class,
TP_PROTO(struct xfs_dquot *dqp),
@@ -677,9 +755,6 @@ DEFINE_DQUOT_EVENT(xfs_dqrele);
DEFINE_DQUOT_EVENT(xfs_dqflush);
DEFINE_DQUOT_EVENT(xfs_dqflush_force);
DEFINE_DQUOT_EVENT(xfs_dqflush_done);
-/* not really iget events, but we re-use the format */
-DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
-DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
DECLARE_EVENT_CLASS(xfs_loggrant_class,
TP_PROTO(struct log *log, struct xlog_ticket *tic),
@@ -775,165 +850,177 @@ DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub);
-#define DEFINE_RW_EVENT(name) \
-TRACE_EVENT(name, \
- TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
- TP_ARGS(ip, count, offset, flags), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(xfs_fsize_t, size) \
- __field(xfs_fsize_t, new_size) \
- __field(loff_t, offset) \
- __field(size_t, count) \
- __field(int, flags) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->size = ip->i_d.di_size; \
- __entry->new_size = ip->i_new_size; \
- __entry->offset = offset; \
- __entry->count = count; \
- __entry->flags = flags; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
- "offset 0x%llx count 0x%zx ioflags %s", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->size, \
- __entry->new_size, \
- __entry->offset, \
- __entry->count, \
- __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) \
+DECLARE_EVENT_CLASS(xfs_file_class,
+ TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
+ TP_ARGS(ip, count, offset, flags),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_fsize_t, new_size)
+ __field(loff_t, offset)
+ __field(size_t, count)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->new_size = ip->i_new_size;
+ __entry->offset = offset;
+ __entry->count = count;
+ __entry->flags = flags;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
+ "offset 0x%llx count 0x%zx ioflags %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->new_size,
+ __entry->offset,
+ __entry->count,
+ __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
)
+
+#define DEFINE_RW_EVENT(name) \
+DEFINE_EVENT(xfs_file_class, name, \
+ TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
+ TP_ARGS(ip, count, offset, flags))
DEFINE_RW_EVENT(xfs_file_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
DEFINE_RW_EVENT(xfs_file_splice_read);
DEFINE_RW_EVENT(xfs_file_splice_write);
-
-#define DEFINE_PAGE_EVENT(name) \
-TRACE_EVENT(name, \
- TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
- TP_ARGS(inode, page, off), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(pgoff_t, pgoff) \
- __field(loff_t, size) \
- __field(unsigned long, offset) \
- __field(int, delalloc) \
- __field(int, unmapped) \
- __field(int, unwritten) \
- ), \
- TP_fast_assign( \
- int delalloc = -1, unmapped = -1, unwritten = -1; \
- \
- if (page_has_buffers(page)) \
- xfs_count_page_state(page, &delalloc, \
- &unmapped, &unwritten); \
- __entry->dev = inode->i_sb->s_dev; \
- __entry->ino = XFS_I(inode)->i_ino; \
- __entry->pgoff = page_offset(page); \
- __entry->size = i_size_read(inode); \
- __entry->offset = off; \
- __entry->delalloc = delalloc; \
- __entry->unmapped = unmapped; \
- __entry->unwritten = unwritten; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " \
- "delalloc %d unmapped %d unwritten %d", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->pgoff, \
- __entry->size, \
- __entry->offset, \
- __entry->delalloc, \
- __entry->unmapped, \
- __entry->unwritten) \
+DECLARE_EVENT_CLASS(xfs_page_class,
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off),
+ TP_ARGS(inode, page, off),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(pgoff_t, pgoff)
+ __field(loff_t, size)
+ __field(unsigned long, offset)
+ __field(int, delalloc)
+ __field(int, unwritten)
+ ),
+ TP_fast_assign(
+ int delalloc = -1, unwritten = -1;
+
+ if (page_has_buffers(page))
+ xfs_count_page_state(page, &delalloc, &unwritten);
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = XFS_I(inode)->i_ino;
+ __entry->pgoff = page_offset(page);
+ __entry->size = i_size_read(inode);
+ __entry->offset = off;
+ __entry->delalloc = delalloc;
+ __entry->unwritten = unwritten;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx "
+ "delalloc %d unwritten %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->pgoff,
+ __entry->size,
+ __entry->offset,
+ __entry->delalloc,
+ __entry->unwritten)
)
+
+#define DEFINE_PAGE_EVENT(name) \
+DEFINE_EVENT(xfs_page_class, name, \
+ TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \
+ TP_ARGS(inode, page, off))
DEFINE_PAGE_EVENT(xfs_writepage);
DEFINE_PAGE_EVENT(xfs_releasepage);
DEFINE_PAGE_EVENT(xfs_invalidatepage);
-#define DEFINE_IOMAP_EVENT(name) \
-TRACE_EVENT(name, \
- TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
- int flags, struct xfs_bmbt_irec *irec), \
- TP_ARGS(ip, offset, count, flags, irec), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(loff_t, size) \
- __field(loff_t, new_size) \
- __field(loff_t, offset) \
- __field(size_t, count) \
- __field(int, flags) \
- __field(xfs_fileoff_t, startoff) \
- __field(xfs_fsblock_t, startblock) \
- __field(xfs_filblks_t, blockcount) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->size = ip->i_d.di_size; \
- __entry->new_size = ip->i_new_size; \
- __entry->offset = offset; \
- __entry->count = count; \
- __entry->flags = flags; \
- __entry->startoff = irec ? irec->br_startoff : 0; \
- __entry->startblock = irec ? irec->br_startblock : 0; \
- __entry->blockcount = irec ? irec->br_blockcount : 0; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
- "offset 0x%llx count %zd flags %s " \
- "startoff 0x%llx startblock %lld blockcount 0x%llx", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->size, \
- __entry->new_size, \
- __entry->offset, \
- __entry->count, \
- __print_flags(__entry->flags, "|", BMAPI_FLAGS), \
- __entry->startoff, \
- (__int64_t)__entry->startblock, \
- __entry->blockcount) \
+DECLARE_EVENT_CLASS(xfs_iomap_class,
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
+ int flags, struct xfs_bmbt_irec *irec),
+ TP_ARGS(ip, offset, count, flags, irec),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(loff_t, size)
+ __field(loff_t, new_size)
+ __field(loff_t, offset)
+ __field(size_t, count)
+ __field(int, flags)
+ __field(xfs_fileoff_t, startoff)
+ __field(xfs_fsblock_t, startblock)
+ __field(xfs_filblks_t, blockcount)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->new_size = ip->i_new_size;
+ __entry->offset = offset;
+ __entry->count = count;
+ __entry->flags = flags;
+ __entry->startoff = irec ? irec->br_startoff : 0;
+ __entry->startblock = irec ? irec->br_startblock : 0;
+ __entry->blockcount = irec ? irec->br_blockcount : 0;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
+ "offset 0x%llx count %zd flags %s "
+ "startoff 0x%llx startblock %lld blockcount 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->new_size,
+ __entry->offset,
+ __entry->count,
+ __print_flags(__entry->flags, "|", BMAPI_FLAGS),
+ __entry->startoff,
+ (__int64_t)__entry->startblock,
+ __entry->blockcount)
)
+
+#define DEFINE_IOMAP_EVENT(name) \
+DEFINE_EVENT(xfs_iomap_class, name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
+ int flags, struct xfs_bmbt_irec *irec), \
+ TP_ARGS(ip, offset, count, flags, irec))
DEFINE_IOMAP_EVENT(xfs_iomap_enter);
DEFINE_IOMAP_EVENT(xfs_iomap_found);
DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
-#define DEFINE_SIMPLE_IO_EVENT(name) \
-TRACE_EVENT(name, \
- TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
- TP_ARGS(ip, offset, count), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(loff_t, size) \
- __field(loff_t, new_size) \
- __field(loff_t, offset) \
- __field(size_t, count) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->size = ip->i_d.di_size; \
- __entry->new_size = ip->i_new_size; \
- __entry->offset = offset; \
- __entry->count = count; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
- "offset 0x%llx count %zd", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->size, \
- __entry->new_size, \
- __entry->offset, \
- __entry->count) \
+DECLARE_EVENT_CLASS(xfs_simple_io_class,
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
+ TP_ARGS(ip, offset, count),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(loff_t, size)
+ __field(loff_t, new_size)
+ __field(loff_t, offset)
+ __field(size_t, count)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->new_size = ip->i_new_size;
+ __entry->offset = offset;
+ __entry->count = count;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
+ "offset 0x%llx count %zd",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->new_size,
+ __entry->offset,
+ __entry->count)
);
+
+#define DEFINE_SIMPLE_IO_EVENT(name) \
+DEFINE_EVENT(xfs_simple_io_class, name, \
+ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \
+ TP_ARGS(ip, offset, count))
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 585e7633dfc..e1a2f6800e0 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -23,25 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
@@ -64,8 +54,6 @@
flush lock - ditto.
*/
-STATIC void xfs_qm_dqflush_done(xfs_buf_t *, xfs_dq_logitem_t *);
-
#ifdef DEBUG
xfs_buftarg_t *xfs_dqerror_target;
int xfs_do_dqerror;
@@ -390,21 +378,14 @@ xfs_qm_dqalloc(
return (ESRCH);
}
- /*
- * xfs_trans_commit normally decrements the vnode ref count
- * when it unlocks the inode. Since we want to keep the quota
- * inode around, we bump the vnode ref count now.
- */
- IHOLD(quotip);
-
- xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
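+ /*
+ * The _ref variant keeps a reference to the quota inode across
+ * the transaction commit, replacing the explicit IHOLD.
+ */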
+ xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL);
nmaps = 1;
if ((error = xfs_bmapi(tp, quotip,
offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB,
XFS_BMAPI_METADATA | XFS_BMAPI_WRITE,
&firstblock,
XFS_QM_DQALLOC_SPACE_RES(mp),
- &map, &nmaps, &flist, NULL))) {
+ &map, &nmaps, &flist))) {
goto error0;
}
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -520,7 +501,7 @@ xfs_qm_dqtobp(
error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset,
XFS_DQUOT_CLUSTER_SIZE_FSB,
XFS_BMAPI_METADATA,
- NULL, 0, &map, &nmaps, NULL, NULL);
+ NULL, 0, &map, &nmaps, NULL);
xfs_iunlock(quotip, XFS_ILOCK_SHARED);
if (error)
@@ -1141,6 +1122,46 @@ xfs_qm_dqrele(
xfs_qm_dqput(dqp);
}
+/*
+ * This is the dquot flushing I/O completion routine. It is called
+ * from interrupt level when the buffer containing the dquot is
+ * flushed to disk. It is responsible for removing the dquot logitem
+ * from the AIL if it has not been re-logged, and unlocking the dquot's
+ * flush lock. This behavior is very similar to that of inodes.
+ */
+STATIC void
+xfs_qm_dqflush_done(
+ struct xfs_buf *bp,
+ struct xfs_log_item *lip)
+{
+ xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip;
+ xfs_dquot_t *dqp = qip->qli_dquot;
+ struct xfs_ail *ailp = lip->li_ailp;
+
+ /*
+ * We only want to pull the item from the AIL if its
+ * location in the log has not changed since we started the flush.
+ * Thus, we only bother if the dquot's lsn has
+ * not changed. First we check the lsn outside the lock
+ * since it's cheaper, and then we recheck while
+ * holding the lock before removing the dquot from the AIL.
+ */
+ if ((lip->li_flags & XFS_LI_IN_AIL) &&
+ lip->li_lsn == qip->qli_flush_lsn) {
+
+ /* xfs_trans_ail_delete() drops the AIL lock. */
+ spin_lock(&ailp->xa_lock);
+ if (lip->li_lsn == qip->qli_flush_lsn)
+ xfs_trans_ail_delete(ailp, lip);
+ else
+ spin_unlock(&ailp->xa_lock);
+ }
+
+ /*
+ * Release the dq's flush lock since we're done with it.
+ */
+ xfs_dqfunlock(dqp);
+}
/*
* Write a modified dquot to disk.
@@ -1222,8 +1243,9 @@ xfs_qm_dqflush(
* Attach an iodone routine so that we can remove this dquot from the
* AIL and release the flush lock once the dquot is synced to disk.
*/
- xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t *, xfs_log_item_t *))
- xfs_qm_dqflush_done, &(dqp->q_logitem.qli_item));
+ xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
+ &dqp->q_logitem.qli_item);
+
/*
* If the buffer is pinned then push on the log so we won't
* get stuck waiting in the write for too long.
@@ -1247,50 +1269,6 @@ xfs_qm_dqflush(
}
-/*
- * This is the dquot flushing I/O completion routine. It is called
- * from interrupt level when the buffer containing the dquot is
- * flushed to disk. It is responsible for removing the dquot logitem
- * from the AIL if it has not been re-logged, and unlocking the dquot's
- * flush lock. This behavior is very similar to that of inodes..
- */
-/*ARGSUSED*/
-STATIC void
-xfs_qm_dqflush_done(
- xfs_buf_t *bp,
- xfs_dq_logitem_t *qip)
-{
- xfs_dquot_t *dqp;
- struct xfs_ail *ailp;
-
- dqp = qip->qli_dquot;
- ailp = qip->qli_item.li_ailp;
-
- /*
- * We only want to pull the item from the AIL if its
- * location in the log has not changed since we started the flush.
- * Thus, we only bother if the dquot's lsn has
- * not changed. First we check the lsn outside the lock
- * since it's cheaper, and then we recheck while
- * holding the lock before removing the dquot from the AIL.
- */
- if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
- qip->qli_item.li_lsn == qip->qli_flush_lsn) {
-
- /* xfs_trans_ail_delete() drops the AIL lock. */
- spin_lock(&ailp->xa_lock);
- if (qip->qli_item.li_lsn == qip->qli_flush_lsn)
- xfs_trans_ail_delete(ailp, (xfs_log_item_t*)qip);
- else
- spin_unlock(&ailp->xa_lock);
- }
-
- /*
- * Release the dq's flush lock since we're done with it.
- */
- xfs_dqfunlock(dqp);
-}
-
int
xfs_qm_dqlock_nowait(
xfs_dquot_t *dqp)
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 8d89a24ae32..2a1f3dc10a0 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -23,42 +23,36 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
+static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_dq_logitem, qli_item);
+}
+
/*
* returns the number of iovecs needed to log the given dquot item.
*/
-/* ARGSUSED */
STATIC uint
xfs_qm_dquot_logitem_size(
- xfs_dq_logitem_t *logitem)
+ struct xfs_log_item *lip)
{
/*
* we need only two iovecs, one for the format, one for the real thing
*/
- return (2);
+ return 2;
}
/*
@@ -66,22 +60,21 @@ xfs_qm_dquot_logitem_size(
*/
STATIC void
xfs_qm_dquot_logitem_format(
- xfs_dq_logitem_t *logitem,
- xfs_log_iovec_t *logvec)
+ struct xfs_log_item *lip,
+ struct xfs_log_iovec *logvec)
{
- ASSERT(logitem);
- ASSERT(logitem->qli_dquot);
+ struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
- logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
+ logvec->i_addr = &qlip->qli_format;
logvec->i_len = sizeof(xfs_dq_logformat_t);
logvec->i_type = XLOG_REG_TYPE_QFORMAT;
logvec++;
- logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
+ logvec->i_addr = &qlip->qli_dquot->q_core;
logvec->i_len = sizeof(xfs_disk_dquot_t);
logvec->i_type = XLOG_REG_TYPE_DQUOT;
- ASSERT(2 == logitem->qli_item.li_desc->lid_size);
- logitem->qli_format.qlf_size = 2;
+ ASSERT(2 == lip->li_desc->lid_size);
+ qlip->qli_format.qlf_size = 2;
}
@@ -90,9 +83,9 @@ xfs_qm_dquot_logitem_format(
*/
STATIC void
xfs_qm_dquot_logitem_pin(
- xfs_dq_logitem_t *logitem)
+ struct xfs_log_item *lip)
{
- xfs_dquot_t *dqp = logitem->qli_dquot;
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
atomic_inc(&dqp->q_pincount);
@@ -104,27 +97,18 @@ xfs_qm_dquot_logitem_pin(
* dquot must have been previously pinned with a call to
* xfs_qm_dquot_logitem_pin().
*/
-/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin(
- xfs_dq_logitem_t *logitem)
+ struct xfs_log_item *lip,
+ int remove)
{
- xfs_dquot_t *dqp = logitem->qli_dquot;
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
ASSERT(atomic_read(&dqp->q_pincount) > 0);
if (atomic_dec_and_test(&dqp->q_pincount))
wake_up(&dqp->q_pinwait);
}
-/* ARGSUSED */
-STATIC void
-xfs_qm_dquot_logitem_unpin_remove(
- xfs_dq_logitem_t *logitem,
- xfs_trans_t *tp)
-{
- xfs_qm_dquot_logitem_unpin(logitem);
-}
-
/*
* Given the logitem, this writes the corresponding dquot entry to disk
* asynchronously. This is called with the dquot entry securely locked;
@@ -133,12 +117,10 @@ xfs_qm_dquot_logitem_unpin_remove(
*/
STATIC void
xfs_qm_dquot_logitem_push(
- xfs_dq_logitem_t *logitem)
+ struct xfs_log_item *lip)
{
- xfs_dquot_t *dqp;
- int error;
-
- dqp = logitem->qli_dquot;
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
+ int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
ASSERT(!completion_done(&dqp->q_flush));
@@ -160,27 +142,25 @@ xfs_qm_dquot_logitem_push(
xfs_dqunlock(dqp);
}
-/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
- xfs_dq_logitem_t *l,
+ struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
/*
* We always re-log the entire dquot when it becomes dirty,
* so, the latest copy _is_ the only one that matters.
*/
- return (lsn);
+ return lsn;
}
-
/*
* This is called to wait for the given dquot to be unpinned.
* Most of these pin/unpin routines are plagiarized from inode code.
*/
void
xfs_qm_dqunpin_wait(
- xfs_dquot_t *dqp)
+ struct xfs_dquot *dqp)
{
ASSERT(XFS_DQ_IS_LOCKED(dqp));
if (atomic_read(&dqp->q_pincount) == 0)
@@ -206,13 +186,12 @@ xfs_qm_dqunpin_wait(
*/
STATIC void
xfs_qm_dquot_logitem_pushbuf(
- xfs_dq_logitem_t *qip)
+ struct xfs_log_item *lip)
{
- xfs_dquot_t *dqp;
- xfs_mount_t *mp;
- xfs_buf_t *bp;
+ struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
+ struct xfs_dquot *dqp = qlip->qli_dquot;
+ struct xfs_buf *bp;
- dqp = qip->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
/*
@@ -220,22 +199,20 @@ xfs_qm_dquot_logitem_pushbuf(
* inode flush completed and the inode was taken off the AIL.
* So, just get out.
*/
- if (completion_done(&dqp->q_flush) ||
- ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
+ if (completion_done(&dqp->q_flush) ||
+ !(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_dqunlock(dqp);
return;
}
- mp = dqp->q_mount;
- bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
- mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
+
+ bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
+ dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
return;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
- return;
-
}
/*
@@ -250,15 +227,14 @@ xfs_qm_dquot_logitem_pushbuf(
*/
STATIC uint
xfs_qm_dquot_logitem_trylock(
- xfs_dq_logitem_t *qip)
+ struct xfs_log_item *lip)
{
- xfs_dquot_t *dqp;
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
- dqp = qip->qli_dquot;
if (atomic_read(&dqp->q_pincount) > 0)
return XFS_ITEM_PINNED;
- if (! xfs_qm_dqlock_nowait(dqp))
+ if (!xfs_qm_dqlock_nowait(dqp))
return XFS_ITEM_LOCKED;
if (!xfs_dqflock_nowait(dqp)) {
@@ -269,11 +245,10 @@ xfs_qm_dquot_logitem_trylock(
return XFS_ITEM_PUSHBUF;
}
- ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
+ ASSERT(lip->li_flags & XFS_LI_IN_AIL);
return XFS_ITEM_SUCCESS;
}
-
/*
* Unlock the dquot associated with the log item.
* Clear the fields of the dquot and dquot log item that
@@ -282,12 +257,10 @@ xfs_qm_dquot_logitem_trylock(
*/
STATIC void
xfs_qm_dquot_logitem_unlock(
- xfs_dq_logitem_t *ql)
+ struct xfs_log_item *lip)
{
- xfs_dquot_t *dqp;
+ struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
- ASSERT(ql != NULL);
- dqp = ql->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
/*
@@ -304,43 +277,32 @@ xfs_qm_dquot_logitem_unlock(
xfs_dqunlock(dqp);
}
-
/*
* this needs to stamp an lsn into the dquot, I think.
* rpc's that look at user dquot's would then have to
* push on the dependency recorded in the dquot
*/
-/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_committing(
- xfs_dq_logitem_t *l,
+ struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
- return;
}
-
/*
* This is the ops vector for dquots
*/
static struct xfs_item_ops xfs_dquot_item_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_qm_dquot_logitem_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
- xfs_qm_dquot_logitem_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))
- xfs_qm_dquot_logitem_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_dquot_logitem_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
- .iop_pushbuf = (void(*)(xfs_log_item_t*))
- xfs_qm_dquot_logitem_pushbuf,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_dquot_logitem_committing
+ .iop_size = xfs_qm_dquot_logitem_size,
+ .iop_format = xfs_qm_dquot_logitem_format,
+ .iop_pin = xfs_qm_dquot_logitem_pin,
+ .iop_unpin = xfs_qm_dquot_logitem_unpin,
+ .iop_trylock = xfs_qm_dquot_logitem_trylock,
+ .iop_unlock = xfs_qm_dquot_logitem_unlock,
+ .iop_committed = xfs_qm_dquot_logitem_committed,
+ .iop_push = xfs_qm_dquot_logitem_push,
+ .iop_pushbuf = xfs_qm_dquot_logitem_pushbuf,
+ .iop_committing = xfs_qm_dquot_logitem_committing
};
/*
@@ -350,10 +312,9 @@ static struct xfs_item_ops xfs_dquot_item_ops = {
*/
void
xfs_qm_dquot_logitem_init(
- struct xfs_dquot *dqp)
+ struct xfs_dquot *dqp)
{
- xfs_dq_logitem_t *lp;
- lp = &dqp->q_logitem;
+ struct xfs_dq_logitem *lp = &dqp->q_logitem;
xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
&xfs_dquot_item_ops);
@@ -374,16 +335,22 @@ xfs_qm_dquot_logitem_init(
/*------------------ QUOTAOFF LOG ITEMS -------------------*/
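+/* Recover the quotaoff log item from the generic log item. */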
+static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_qoff_logitem, qql_item);
+}
+
+
/*
* This returns the number of iovecs needed to log the given quotaoff item.
* We only need 1 iovec for an quotaoff item. It just logs the
* quotaoff_log_format structure.
*/
-/*ARGSUSED*/
STATIC uint
-xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_size(
+ struct xfs_log_item *lip)
{
- return (1);
+ return 1;
}
/*
@@ -394,53 +361,46 @@ xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
* slots in the quotaoff item have been filled.
*/
STATIC void
-xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
- xfs_log_iovec_t *log_vector)
+xfs_qm_qoff_logitem_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_iovec *log_vector)
{
- ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF);
+ struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip);
+
+ ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);
- log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
+ log_vector->i_addr = &qflip->qql_format;
log_vector->i_len = sizeof(xfs_qoff_logitem_t);
log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
- qf->qql_format.qf_size = 1;
+ qflip->qql_format.qf_size = 1;
}
-
/*
* Pinning has no meaning for a quotaoff item, so just return.
*/
-/*ARGSUSED*/
STATIC void
-xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_pin(
+ struct xfs_log_item *lip)
{
- return;
}
-
/*
* Since pinning has no meaning for a quotaoff item, unpinning does
* not either.
*/
-/*ARGSUSED*/
STATIC void
-xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_unpin(
+ struct xfs_log_item *lip,
+ int remove)
{
- return;
-}
-
-/*ARGSUSED*/
-STATIC void
-xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
-{
- return;
}
/*
* Quotaoff items have no locking, so report the item as locked.
*/
-/*ARGSUSED*/
STATIC uint
-xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_trylock(
+ struct xfs_log_item *lip)
{
return XFS_ITEM_LOCKED;
}
@@ -449,53 +409,51 @@ xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
* Quotaoff items have no locking, so unlocking them is a no-op.
*/
-/*ARGSUSED*/
STATIC void
-xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_unlock(
+ struct xfs_log_item *lip)
{
- return;
}
/*
* The quotaoff-start-item is logged only once and cannot be moved in the log,
* so simply return the lsn at which it's been logged.
*/
-/*ARGSUSED*/
STATIC xfs_lsn_t
-xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
+xfs_qm_qoff_logitem_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
{
- return (lsn);
+ return lsn;
}
/*
* There isn't much you can do to push on a quotaoff item. It is simply
* stuck waiting for the log to be flushed to disk.
*/
-/*ARGSUSED*/
STATIC void
-xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf)
+xfs_qm_qoff_logitem_push(
+ struct xfs_log_item *lip)
{
- return;
}
-/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
- xfs_qoff_logitem_t *qfe,
- xfs_lsn_t lsn)
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
{
- xfs_qoff_logitem_t *qfs;
- struct xfs_ail *ailp;
+ struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip);
+ struct xfs_qoff_logitem *qfs = qfe->qql_start_lip;
+ struct xfs_ail *ailp = qfs->qql_item.li_ailp;
- qfs = qfe->qql_start_lip;
- ailp = qfs->qql_item.li_ailp;
- spin_lock(&ailp->xa_lock);
/*
* Delete the qoff-start logitem from the AIL.
* xfs_trans_ail_delete() drops the AIL lock.
*/
+ spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);
+
kmem_free(qfs);
kmem_free(qfe);
return (xfs_lsn_t)-1;
@@ -515,71 +473,52 @@ xfs_qm_qoffend_logitem_committed(
* (truly makes the quotaoff irrevocable). If we do something else,
* then maybe we don't need two.
*/
-/* ARGSUSED */
-STATIC void
-xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
-{
- return;
-}
-
-/* ARGSUSED */
STATIC void
-xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
+xfs_qm_qoff_logitem_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t commit_lsn)
{
- return;
}
static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_qm_qoff_logitem_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
- xfs_qm_qoff_logitem_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_qoffend_logitem_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
- .iop_pushbuf = NULL,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_qoffend_logitem_committing
+ .iop_size = xfs_qm_qoff_logitem_size,
+ .iop_format = xfs_qm_qoff_logitem_format,
+ .iop_pin = xfs_qm_qoff_logitem_pin,
+ .iop_unpin = xfs_qm_qoff_logitem_unpin,
+ .iop_trylock = xfs_qm_qoff_logitem_trylock,
+ .iop_unlock = xfs_qm_qoff_logitem_unlock,
+ .iop_committed = xfs_qm_qoffend_logitem_committed,
+ .iop_push = xfs_qm_qoff_logitem_push,
+ .iop_committing = xfs_qm_qoff_logitem_committing
};
/*
* This is the ops vector shared by all quotaoff-start log items.
*/
static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_qm_qoff_logitem_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*,xfs_trans_t*))
- xfs_qm_qoff_logitem_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_qoff_logitem_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
- .iop_pushbuf = NULL,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_qm_qoff_logitem_committing
+ .iop_size = xfs_qm_qoff_logitem_size,
+ .iop_format = xfs_qm_qoff_logitem_format,
+ .iop_pin = xfs_qm_qoff_logitem_pin,
+ .iop_unpin = xfs_qm_qoff_logitem_unpin,
+ .iop_trylock = xfs_qm_qoff_logitem_trylock,
+ .iop_unlock = xfs_qm_qoff_logitem_unlock,
+ .iop_committed = xfs_qm_qoff_logitem_committed,
+ .iop_push = xfs_qm_qoff_logitem_push,
+ .iop_committing = xfs_qm_qoff_logitem_committing
};
/*
* Allocate and initialize a quotaoff item of the correct quota type(s).
*/
-xfs_qoff_logitem_t *
+struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
- struct xfs_mount *mp,
- xfs_qoff_logitem_t *start,
- uint flags)
+ struct xfs_mount *mp,
+ struct xfs_qoff_logitem *start,
+ uint flags)
{
- xfs_qoff_logitem_t *qf;
+ struct xfs_qoff_logitem *qf;
- qf = (xfs_qoff_logitem_t*) kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);
+ qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);
xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
@@ -587,5 +526,5 @@ xfs_qm_qoff_logitem_init(
qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
qf->qql_format.qf_flags = flags;
qf->qql_start_lip = start;
- return (qf);
+ return qf;
}
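The DQUOT_ITEM() and QOFF_ITEM() helpers introduced above carry the whole refactor in this file: every log-item callback now takes the generic struct xfs_log_item and recovers the containing quota structure with container_of(), so the casted function pointers in the ops vectors can go away and the compiler can type-check the tables. A minimal standalone sketch of the embedded-base-item pattern; the names log_item, my_item and MY_ITEM are illustrative, not from the patch:

/* Sketch only: recover a concrete item from its embedded base. */
#include <stddef.h>

struct log_item {
	int	li_type;
};

struct my_item {
	struct log_item	li;	/* embedded, like qql_item above */
	int		payload;
};

/* Same construction as the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct my_item *MY_ITEM(struct log_item *lip)
{
	return container_of(lip, struct my_item, li);
}

Because every callback now shares one prototype, adding or reordering ops no longer requires touching a cast at each table entry.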
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 38e76414664..9a92407109a 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -23,25 +23,18 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
@@ -69,7 +62,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
-STATIC int xfs_qm_shake(int, gfp_t);
+STATIC int xfs_qm_shake(struct shrinker *, int, gfp_t);
static struct shrinker xfs_qm_shaker = {
.shrink = xfs_qm_shake,
@@ -249,8 +242,10 @@ xfs_qm_hold_quotafs_ref(
if (!xfs_Gqm) {
xfs_Gqm = xfs_Gqm_init();
- if (!xfs_Gqm)
+ if (!xfs_Gqm) {
+ mutex_unlock(&xfs_Gqm_lock);
return ENOMEM;
+ }
}
/*
@@ -1495,7 +1490,7 @@ xfs_qm_dqiterate(
maxlblkcnt - lblkno,
XFS_BMAPI_METADATA,
NULL,
- 0, map, &nmaps, NULL, NULL);
+ 0, map, &nmaps, NULL);
xfs_iunlock(qip, XFS_ILOCK_SHARED);
if (error)
break;
@@ -1630,10 +1625,7 @@ xfs_qm_dqusage_adjust(
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* not used */
int ubsize, /* not used */
- void *private_data, /* not used */
- xfs_daddr_t bno, /* starting block of inode cluster */
int *ubused, /* not used */
- void *dip, /* on-disk inode pointer (not used) */
int *res) /* result code value */
{
xfs_inode_t *ip;
@@ -1658,7 +1650,7 @@ xfs_qm_dqusage_adjust(
* the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time.
*/
- if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
+ if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip))) {
*res = BULKSTAT_RV_NOTHING;
return error;
}
@@ -1670,7 +1662,8 @@ xfs_qm_dqusage_adjust(
* making us disable quotas for the file system.
*/
if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
- xfs_iput(ip, XFS_ILOCK_EXCL);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ IRELE(ip);
*res = BULKSTAT_RV_GIVEUP;
return error;
}
@@ -1683,7 +1676,8 @@ xfs_qm_dqusage_adjust(
* Walk through the extent list and count the realtime blocks.
*/
if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
- xfs_iput(ip, XFS_ILOCK_EXCL);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ IRELE(ip);
if (udqp)
xfs_qm_dqput(udqp);
if (gdqp)
@@ -1794,12 +1788,13 @@ xfs_qm_quotacheck(
* Iterate through all the inodes in the file system,
* adjusting the corresponding dquot counters in core.
*/
- if ((error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_dqusage_adjust, NULL,
- structsz, NULL, BULKSTAT_FG_IGET, &done)))
+ error = xfs_bulkstat(mp, &lastino, &count,
+ xfs_qm_dqusage_adjust,
+ structsz, NULL, &done);
+ if (error)
break;
- } while (! done);
+ } while (!done);
/*
* We've made all the changes that we need to make incore.
@@ -1887,14 +1882,14 @@ xfs_qm_init_quotainos(
mp->m_sb.sb_uquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_uquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip, 0)))
+ 0, 0, &uip)))
return XFS_ERROR(error);
}
if (XFS_IS_OQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip, 0))) {
+ 0, 0, &gip))) {
if (uip)
IRELE(uip);
return XFS_ERROR(error);
@@ -2117,7 +2112,10 @@ xfs_qm_shake_freelist(
*/
/* ARGSUSED */
STATIC int
-xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
+xfs_qm_shake(
+ struct shrinker *shrink,
+ int nr_to_scan,
+ gfp_t gfp_mask)
{
int ndqused, nfree, n;
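The new xfs_qm_shake() prototype tracks a core shrinker API change in this kernel series: the callback now receives the registered struct shrinker itself, so one callback can serve several shrinker instances. A hedged sketch of registration under that signature; my_shake and my_shaker are illustrative names:

#include <linux/init.h>
#include <linux/mm.h>

/* Sketch: return the number of reclaimable objects, or -1 to abort. */
static int my_shake(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	return 0;
}

static struct shrinker my_shaker = {
	.shrink	= my_shake,
	.seeks	= DEFAULT_SEEKS,
};

static int __init my_init(void)
{
	register_shrinker(&my_shaker);
	return 0;
}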
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index 97b410c1279..bea02d786c5 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -23,25 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_ialloc.h"
#include "xfs_itable.h"
-#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_qm.h"
diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c
index 3d1fc79532e..8671a0b3264 100644
--- a/fs/xfs/quota/xfs_qm_stats.c
+++ b/fs/xfs/quota/xfs_qm_stats.c
@@ -23,25 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
-#include "xfs_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_qm.h"
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 92b002f1805..45e5849df23 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -26,25 +26,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
-#include "xfs_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
@@ -248,40 +238,74 @@ out_unlock:
return error;
}
+STATIC int
+xfs_qm_scall_trunc_qfile(
+ struct xfs_mount *mp,
+ xfs_ino_t ino)
+{
+ struct xfs_inode *ip;
+ struct xfs_trans *tp;
+ int error;
+
+ if (ino == NULLFSINO)
+ return 0;
+
+ error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
+ error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_ITRUNCATE_LOG_COUNT);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ goto out_put;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip);
+
+ error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
+ if (error) {
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
+ XFS_TRANS_ABORT);
+ goto out_unlock;
+ }
+
+ xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+out_put:
+ IRELE(ip);
+ return error;
+}
+
int
xfs_qm_scall_trunc_qfiles(
xfs_mount_t *mp,
uint flags)
{
int error = 0, error2 = 0;
- xfs_inode_t *qip;
if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
return XFS_ERROR(EINVAL);
}
- if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
- error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
- if (!error) {
- error = xfs_truncate_file(mp, qip);
- IRELE(qip);
- }
- }
-
- if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
- mp->m_sb.sb_gquotino != NULLFSINO) {
- error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
- if (!error2) {
- error2 = xfs_truncate_file(mp, qip);
- IRELE(qip);
- }
- }
+ if (flags & XFS_DQ_USER)
+ error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
+ if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
+ error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
return error ? error : error2;
}
-
/*
* Switch on (a given) quota enforcement for a filesystem. This takes
* effect immediately.
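The new xfs_qm_scall_trunc_qfile() helper above follows the canonical XFS transaction shape: allocate, reserve log space, lock and join the inode, do the work, then commit, or cancel with XFS_TRANS_ABORT once the transaction may be dirty. A condensed sketch of that control flow, using only the calls visible in the hunk; do_the_update() is a placeholder:

tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
			  XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT);
if (error) {
	xfs_trans_cancel(tp, 0);	/* nothing joined or dirty yet */
	return error;
}

xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip);

error = do_the_update(&tp, ip);		/* placeholder for the real work */
if (error) {
	/* the transaction may be dirty: abort as well as cancel */
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
} else {
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
}
/* the caller still drops its inode locks and reference, as above */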
@@ -417,12 +441,12 @@ xfs_qm_scall_getqstat(
}
if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
- 0, 0, &uip, 0) == 0)
+ 0, 0, &uip) == 0)
tempuqip = B_TRUE;
}
if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
- 0, 0, &gip, 0) == 0)
+ 0, 0, &gip) == 0)
tempgqip = B_TRUE;
}
if (uip) {
@@ -786,9 +810,9 @@ xfs_qm_export_dquot(
}
#ifdef DEBUG
- if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == XFS_USER_QUOTA) ||
+ if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
(XFS_IS_OQUOTA_ENFORCED(mp) &&
- (dst->d_flags & (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA)))) &&
+ (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
dst->d_id != 0) {
if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
(dst->d_blk_softlimit > 0)) {
@@ -809,17 +833,17 @@ xfs_qm_export_qtype_flags(
/*
* Exactly one quota type flag must be set: no more, no fewer.
*/
- ASSERT((flags & (XFS_PROJ_QUOTA | XFS_USER_QUOTA)) !=
- (XFS_PROJ_QUOTA | XFS_USER_QUOTA));
- ASSERT((flags & (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA)) !=
- (XFS_PROJ_QUOTA | XFS_GROUP_QUOTA));
- ASSERT((flags & (XFS_USER_QUOTA | XFS_GROUP_QUOTA)) !=
- (XFS_USER_QUOTA | XFS_GROUP_QUOTA));
- ASSERT((flags & (XFS_PROJ_QUOTA|XFS_USER_QUOTA|XFS_GROUP_QUOTA)) != 0);
+ ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
+ (FS_PROJ_QUOTA | FS_USER_QUOTA));
+ ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
+ (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
+ ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
+ (FS_USER_QUOTA | FS_GROUP_QUOTA));
+ ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
return (flags & XFS_DQ_USER) ?
- XFS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
- XFS_PROJ_QUOTA : XFS_GROUP_QUOTA;
+ FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
+ FS_PROJ_QUOTA : FS_GROUP_QUOTA;
}
STATIC uint
@@ -830,16 +854,16 @@ xfs_qm_export_flags(
uflags = 0;
if (flags & XFS_UQUOTA_ACCT)
- uflags |= XFS_QUOTA_UDQ_ACCT;
+ uflags |= FS_QUOTA_UDQ_ACCT;
if (flags & XFS_PQUOTA_ACCT)
- uflags |= XFS_QUOTA_PDQ_ACCT;
+ uflags |= FS_QUOTA_PDQ_ACCT;
if (flags & XFS_GQUOTA_ACCT)
- uflags |= XFS_QUOTA_GDQ_ACCT;
+ uflags |= FS_QUOTA_GDQ_ACCT;
if (flags & XFS_UQUOTA_ENFD)
- uflags |= XFS_QUOTA_UDQ_ENFD;
+ uflags |= FS_QUOTA_UDQ_ENFD;
if (flags & (XFS_OQUOTA_ENFD)) {
uflags |= (flags & XFS_GQUOTA_ACCT) ?
- XFS_QUOTA_GDQ_ENFD : XFS_QUOTA_PDQ_ENFD;
+ FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
}
return (uflags);
}
@@ -875,8 +899,9 @@ xfs_dqrele_inode(
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
- xfs_iput(ip, XFS_ILOCK_EXCL);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ IRELE(ip);
return 0;
}
@@ -1109,10 +1134,7 @@ xfs_qm_internalqcheck_adjust(
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* not used */
int ubsize, /* not used */
- void *private_data, /* not used */
- xfs_daddr_t bno, /* starting block of inode cluster */
int *ubused, /* not used */
- void *dip, /* not used */
int *res) /* bulkstat result code */
{
xfs_inode_t *ip;
@@ -1134,7 +1156,7 @@ xfs_qm_internalqcheck_adjust(
ipreleased = B_FALSE;
again:
lock_flags = XFS_ILOCK_SHARED;
- if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
+ if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
*res = BULKSTAT_RV_NOTHING;
return (error);
}
@@ -1146,7 +1168,8 @@ xfs_qm_internalqcheck_adjust(
* of those now.
*/
if (! ipreleased) {
- xfs_iput(ip, lock_flags);
+ xfs_iunlock(ip, lock_flags);
+ IRELE(ip);
ipreleased = B_TRUE;
goto again;
}
@@ -1163,7 +1186,8 @@ xfs_qm_internalqcheck_adjust(
ASSERT(gd);
xfs_qm_internalqcheck_dqadjust(ip, gd);
}
- xfs_iput(ip, lock_flags);
+ xfs_iunlock(ip, lock_flags);
+ IRELE(ip);
*res = BULKSTAT_RV_DIDONE;
return (0);
}
@@ -1205,15 +1229,15 @@ xfs_qm_internalqcheck(
* Iterate through all the inodes in the file system,
* adjusting the corresponding dquot counters
*/
- if ((error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_internalqcheck_adjust, NULL,
- 0, NULL, BULKSTAT_FG_IGET, &done))) {
+ error = xfs_bulkstat(mp, &lastino, &count,
+ xfs_qm_internalqcheck_adjust,
+ 0, NULL, &done);
+ if (error) {
+ cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
break;
}
- } while (! done);
- if (error) {
- cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
- }
+ } while (!done);
+
cmn_err(CE_DEBUG, "Checking results against system dquots");
for (i = 0; i < qmtest_hashmask; i++) {
xfs_dqtest_t *d, *n;
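A pattern repeated throughout this file: the old xfs_iput(ip, flags) helper, which dropped the inode lock and the inode reference in one call, is open-coded as its two constituent steps, making both lifetimes explicit at each call site. A sketch of the replacement pairing, assuming only the helpers shown in these hunks:

/* Sketch: release an inode obtained from xfs_iget() with a lock held. */
static void put_locked_inode(struct xfs_inode *ip, uint lock_flags)
{
	xfs_iunlock(ip, lock_flags);	/* drop the inode lock first */
	IRELE(ip);			/* then drop the xfs_iget() reference */
}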
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 061d827da33..7de91d1b75c 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -23,25 +23,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_ialloc.h"
#include "xfs_itable.h"
-#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
@@ -59,16 +49,14 @@ xfs_trans_dqjoin(
xfs_trans_t *tp,
xfs_dquot_t *dqp)
{
- xfs_dq_logitem_t *lp = &dqp->q_logitem;
-
ASSERT(dqp->q_transp != tp);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- ASSERT(lp->qli_dquot == dqp);
+ ASSERT(dqp->q_logitem.qli_dquot == dqp);
/*
* Get a log_item_desc to point at the new item.
*/
- (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp));
+ xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
/*
* Initialize i_transp so we can later determine if this dquot is
@@ -93,16 +81,11 @@ xfs_trans_log_dquot(
xfs_trans_t *tp,
xfs_dquot_t *dqp)
{
- xfs_log_item_desc_t *lidp;
-
ASSERT(dqp->q_transp == tp);
ASSERT(XFS_DQ_IS_LOCKED(dqp));
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
- ASSERT(lidp != NULL);
-
tp->t_flags |= XFS_TRANS_DIRTY;
- lidp->lid_flags |= XFS_LID_DIRTY;
+ dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
/*
@@ -874,9 +857,8 @@ xfs_trans_get_qoff_item(
/*
* Get a log_item_desc to point at the new item.
*/
- (void) xfs_trans_add_item(tp, (xfs_log_item_t*)q);
-
- return (q);
+ xfs_trans_add_item(tp, &q->qql_item);
+ return q;
}
@@ -890,13 +872,8 @@ xfs_trans_log_quotaoff_item(
xfs_trans_t *tp,
xfs_qoff_logitem_t *qlp)
{
- xfs_log_item_desc_t *lidp;
-
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp);
- ASSERT(lidp != NULL);
-
tp->t_flags |= XFS_TRANS_DIRTY;
- lidp->lid_flags |= XFS_LID_DIRTY;
+ qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
STATIC void
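Both hunks above delete an xfs_trans_find_item() search: each log item now carries a pointer to its descriptor in li_desc, so marking an item dirty becomes a direct dereference instead of a walk over the transaction's items. Side by side, using only fields shown in these hunks:

/* Before (sketch): look the descriptor up in the transaction. */
lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)&dqp->q_logitem);
ASSERT(lidp != NULL);
lidp->lid_flags |= XFS_LID_DIRTY;

/* After: the log item links straight to its descriptor. */
dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;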
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index 3f3610a7ee0..975aa10e1a4 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -22,7 +22,6 @@
#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 401f364ad36..4917d4eed4e 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -227,7 +227,6 @@ typedef struct xfs_perag {
atomic_t pagf_fstrms; /* # of filestreams active in this AG */
- int pag_ici_init; /* incore inode cache initialised */
rwlock_t pag_ici_lock; /* incore inode lock */
struct radix_tree_root pag_ici_root; /* incore inode cache root */
int pag_ici_reclaimable; /* reclaimable inodes */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index a7fbe8a99b1..af168faccc7 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -24,18 +24,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -688,8 +683,6 @@ xfs_alloc_ag_vextent_near(
xfs_agblock_t ltbno; /* start bno of left side entry */
xfs_agblock_t ltbnoa; /* aligned ... */
xfs_extlen_t ltdiff; /* difference to left side entry */
- /*REFERENCED*/
- xfs_agblock_t ltend; /* end bno of left side entry */
xfs_extlen_t ltlen; /* length of left side entry */
xfs_extlen_t ltlena; /* aligned ... */
xfs_agblock_t ltnew; /* useful start bno of left side */
@@ -814,8 +807,7 @@ xfs_alloc_ag_vextent_near(
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
- ltend = ltbno + ltlen;
- ASSERT(ltend <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+ ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->len = blen;
if (!xfs_alloc_fix_minleft(args)) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
@@ -828,7 +820,7 @@ xfs_alloc_ag_vextent_near(
*/
args->agbno = bnew;
ASSERT(bnew >= ltbno);
- ASSERT(bnew + blen <= ltend);
+ ASSERT(bnew + blen <= ltbno + ltlen);
/*
* Set up a cursor for the by-bno tree.
*/
@@ -1157,7 +1149,6 @@ xfs_alloc_ag_vextent_near(
/*
* Fix up the length and compute the useful address.
*/
- ltend = ltbno + ltlen;
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args)) {
@@ -1170,7 +1161,7 @@ xfs_alloc_ag_vextent_near(
(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, ltbno,
ltlen, &ltnew);
ASSERT(ltnew >= ltbno);
- ASSERT(ltnew + rlen <= ltend);
+ ASSERT(ltnew + rlen <= ltbno + ltlen);
ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->agbno = ltnew;
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 6d05199b667..895009a9727 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -27,16 +27,16 @@ struct xfs_busy_extent;
/*
* Freespace allocation types. Argument to xfs_alloc_[v]extent.
*/
-typedef enum xfs_alloctype
-{
- XFS_ALLOCTYPE_ANY_AG, /* allocate anywhere, use rotor */
- XFS_ALLOCTYPE_FIRST_AG, /* ... start at ag 0 */
- XFS_ALLOCTYPE_START_AG, /* anywhere, start in this a.g. */
- XFS_ALLOCTYPE_THIS_AG, /* anywhere in this a.g. */
- XFS_ALLOCTYPE_START_BNO, /* near this block else anywhere */
- XFS_ALLOCTYPE_NEAR_BNO, /* in this a.g. and near this block */
- XFS_ALLOCTYPE_THIS_BNO /* at exactly this block */
-} xfs_alloctype_t;
+#define XFS_ALLOCTYPE_ANY_AG 0x01 /* allocate anywhere, use rotor */
+#define XFS_ALLOCTYPE_FIRST_AG 0x02 /* ... start at ag 0 */
+#define XFS_ALLOCTYPE_START_AG 0x04 /* anywhere, start in this a.g. */
+#define XFS_ALLOCTYPE_THIS_AG 0x08 /* anywhere in this a.g. */
+#define XFS_ALLOCTYPE_START_BNO 0x10 /* near this block else anywhere */
+#define XFS_ALLOCTYPE_NEAR_BNO 0x20 /* in this a.g. and near this block */
+#define XFS_ALLOCTYPE_THIS_BNO 0x40 /* at exactly this block */
+
+/* this should become an enum again when the tracing code is fixed */
+typedef unsigned int xfs_alloctype_t;
#define XFS_ALLOC_TYPES \
{ XFS_ALLOCTYPE_ANY_AG, "ANY_AG" }, \
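The comment about becoming "an enum again when the tracing code is fixed" is the clue to this hunk: the ftrace macros decode values through { value, "name" } tables like XFS_ALLOC_TYPES, and __print_flags() wants preprocessor-visible bit values it can test individually. A hedged sketch of how such a table is consumed inside a tracepoint's TP_printk(); this is not the actual XFS tracepoint:

TP_printk("alloctype %s",
	  __print_flags(__entry->alloctype, "|", XFS_ALLOC_TYPES))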
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 83f49421875..97f7328967f 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -24,19 +24,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
-#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index b9c196a53c4..c2568242a90 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -25,19 +25,13 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
-#include "xfs_btree.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
@@ -325,8 +319,7 @@ xfs_attr_set_int(
return (error);
}
- xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args.trans, dp);
+ xfs_trans_ijoin(args.trans, dp);
/*
* If the attribute list is non-existent or a shortform list,
@@ -396,10 +389,8 @@ xfs_attr_set_int(
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args.trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args.trans, dp);
/*
* Commit the leaf transformation. We'll need another (linked)
@@ -544,8 +535,7 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
* No need to make quota reservations here. We expect to release some
* blocks, not allocate, in the common case.
*/
- xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args.trans, dp);
+ xfs_trans_ijoin(args.trans, dp);
/*
* Decide on what work routines to call based on the inode size.
@@ -821,8 +811,7 @@ xfs_attr_inactive(xfs_inode_t *dp)
* No need to make quota reservations here. We expect to release some
* blocks, not allocate, in the common case.
*/
- xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(trans, dp);
+ xfs_trans_ijoin(trans, dp);
/*
* Decide on what work routines to call based on the inode size.
@@ -981,10 +970,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
/*
* Commit the current trans (including the inode) and start
@@ -1085,10 +1072,8 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* and started a new one. We need the inode to be
* in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
} else
xfs_da_buf_done(bp);
@@ -1161,10 +1146,8 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
} else
xfs_da_buf_done(bp);
return(0);
@@ -1317,10 +1300,8 @@ restart:
* and started a new one. We need the inode to be
* in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
/*
* Commit the node conversion and start the next
@@ -1356,10 +1337,8 @@ restart:
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
} else {
/*
* Addition succeeded, update Btree hashvals.
@@ -1470,10 +1449,8 @@ restart:
* and started a new one. We need the inode to be
* in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
}
/*
@@ -1604,10 +1581,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
/*
* Commit the Btree join operation and start a new trans.
@@ -1658,10 +1633,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* and started a new one. We need the inode to be
* in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
} else
xfs_da_brelse(args->trans, bp);
}
@@ -2004,7 +1977,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
error = xfs_bmapi(args->trans, args->dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- NULL, 0, map, &nmap, NULL, NULL);
+ NULL, 0, map, &nmap, NULL);
if (error)
return(error);
ASSERT(nmap >= 1);
@@ -2083,7 +2056,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA |
XFS_BMAPI_WRITE,
args->firstblock, args->total, &map, &nmap,
- args->flist, NULL);
+ args->flist);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
@@ -2099,10 +2072,8 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, dp);
ASSERT(nmap == 1);
ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
@@ -2136,7 +2107,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
args->firstblock, 0, &map, &nmap,
- NULL, NULL);
+ NULL);
if (error) {
return(error);
}
@@ -2201,7 +2172,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
args->rmtblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
args->firstblock, 0, &map, &nmap,
- args->flist, NULL);
+ args->flist);
if (error) {
return(error);
}
@@ -2239,7 +2210,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
1, args->firstblock, args->flist,
- NULL, &done);
+ &done);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
&committed);
@@ -2255,10 +2226,8 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
* bmap_finish() may have committed the last trans and started
* a new one. We need the inode to be in all transactions.
*/
- if (committed) {
- xfs_trans_ijoin(args->trans, args->dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(args->trans, args->dp);
- }
+ if (committed)
+ xfs_trans_ijoin(args->trans, args->dp);
/*
* Close out trans and start the next one in the chain.
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index a90ce74fc25..a6cff8edcdb 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -24,8 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
@@ -33,7 +31,6 @@
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@@ -2931,7 +2928,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
nmap = 1;
error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
- NULL, 0, &map, &nmap, NULL, NULL);
+ NULL, 0, &map, &nmap, NULL);
if (error) {
return(error);
}
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 99587ded043..23f14e595c1 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -30,13 +30,10 @@
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
-#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
@@ -104,7 +101,6 @@ xfs_bmap_add_extent(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd); /* OK to allocate reserved blocks */
@@ -122,7 +118,6 @@ xfs_bmap_add_extent_delay_real(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd); /* OK to allocate reserved blocks */
/*
@@ -135,7 +130,6 @@ xfs_bmap_add_extent_hole_delay(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp,/* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd); /* OK to allocate reserved blocks */
/*
@@ -149,7 +143,6 @@ xfs_bmap_add_extent_hole_real(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork); /* data or attr fork */
/*
@@ -162,8 +155,7 @@ xfs_bmap_add_extent_unwritten_real(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta); /* Change made to incore extents */
+ int *logflagsp); /* inode logging flags */
/*
* xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
@@ -200,7 +192,6 @@ xfs_bmap_del_extent(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp,/* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd); /* OK to allocate reserved blocks */
@@ -489,7 +480,6 @@ xfs_bmap_add_extent(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd) /* OK to use reserved data blocks */
{
@@ -524,15 +514,6 @@ xfs_bmap_add_extent(
logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else
logflags = 0;
- /* DELTA: single new extent */
- if (delta) {
- if (delta->xed_startoff > new->br_startoff)
- delta->xed_startoff = new->br_startoff;
- if (delta->xed_blockcount <
- new->br_startoff + new->br_blockcount)
- delta->xed_blockcount = new->br_startoff +
- new->br_blockcount;
- }
}
/*
* Any kind of new delayed allocation goes here.
@@ -542,7 +523,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
- &logflags, delta, rsvd)))
+ &logflags, rsvd)))
goto done;
}
/*
@@ -553,7 +534,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
- &logflags, delta, whichfork)))
+ &logflags, whichfork)))
goto done;
} else {
xfs_bmbt_irec_t prev; /* old extent at offset idx */
@@ -578,17 +559,17 @@ xfs_bmap_add_extent(
XFS_BTCUR_BPRV_WASDEL);
if ((error = xfs_bmap_add_extent_delay_real(ip,
idx, &cur, new, &da_new, first, flist,
- &logflags, delta, rsvd)))
+ &logflags, rsvd)))
goto done;
} else if (new->br_state == XFS_EXT_NORM) {
ASSERT(new->br_state == XFS_EXT_NORM);
if ((error = xfs_bmap_add_extent_unwritten_real(
- ip, idx, &cur, new, &logflags, delta)))
+ ip, idx, &cur, new, &logflags)))
goto done;
} else {
ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
if ((error = xfs_bmap_add_extent_unwritten_real(
- ip, idx, &cur, new, &logflags, delta)))
+ ip, idx, &cur, new, &logflags)))
goto done;
}
ASSERT(*curp == cur || *curp == NULL);
@@ -601,7 +582,7 @@ xfs_bmap_add_extent(
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
- new, &logflags, delta, whichfork)))
+ new, &logflags, whichfork)))
goto done;
}
}
@@ -666,7 +647,6 @@ xfs_bmap_add_extent_delay_real(
xfs_fsblock_t *first, /* pointer to firstblock variable */
xfs_bmap_free_t *flist, /* list of extents to be freed */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd) /* OK to use reserved data block allocation */
{
xfs_btree_cur_t *cur; /* btree cursor */
@@ -797,11 +777,6 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
- /* DELTA: Three in-core extents are replaced by one. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@@ -832,10 +807,6 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
- /* DELTA: Two in-core extents are replaced by one. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -867,10 +838,6 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
*dnew = 0;
- /* DELTA: Two in-core extents are replaced by one. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@@ -900,9 +867,6 @@ xfs_bmap_add_extent_delay_real(
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
*dnew = 0;
- /* DELTA: The in-core extent described by new changed type. */
- temp = new->br_startoff;
- temp2 = new->br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@@ -942,10 +906,6 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
- /* DELTA: The boundary between two in-core extents moved. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING:
@@ -990,9 +950,6 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
*dnew = temp;
- /* DELTA: One in-core extent is split in two. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1031,10 +988,6 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
- /* DELTA: The boundary between two in-core extents moved. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
case BMAP_RIGHT_FILLING:
@@ -1078,9 +1031,6 @@ xfs_bmap_add_extent_delay_real(
xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
*dnew = temp;
- /* DELTA: One in-core extent is split in two. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
case 0:
@@ -1161,9 +1111,6 @@ xfs_bmap_add_extent_delay_real(
nullstartblock((int)temp2));
trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
*dnew = temp + temp2;
- /* DELTA: One in-core extent is split in three. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@@ -1179,13 +1126,6 @@ xfs_bmap_add_extent_delay_real(
ASSERT(0);
}
*curp = cur;
- if (delta) {
- temp2 += temp;
- if (delta->xed_startoff > temp)
- delta->xed_startoff = temp;
- if (delta->xed_blockcount < temp2)
- delta->xed_blockcount = temp2;
- }
done:
*logflagsp = rval;
return error;
@@ -1204,8 +1144,7 @@ xfs_bmap_add_extent_unwritten_real(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
- int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta) /* Change made to incore extents */
+ int *logflagsp) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
@@ -1219,8 +1158,6 @@ xfs_bmap_add_extent_unwritten_real(
/* left is 0, right is 1, prev is 2 */
int rval=0; /* return value (logging flags) */
int state = 0;/* state bits, accessed thru macros */
- xfs_filblks_t temp=0;
- xfs_filblks_t temp2=0;
#define LEFT r[0]
#define RIGHT r[1]
@@ -1341,11 +1278,6 @@ xfs_bmap_add_extent_unwritten_real(
RIGHT.br_blockcount, LEFT.br_state)))
goto done;
}
- /* DELTA: Three in-core extents are replaced by one. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@@ -1382,10 +1314,6 @@ xfs_bmap_add_extent_unwritten_real(
LEFT.br_state)))
goto done;
}
- /* DELTA: Two in-core extents are replaced by one. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1422,10 +1350,6 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
- /* DELTA: Two in-core extents are replaced by one. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@@ -1453,9 +1377,6 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
- /* DELTA: The in-core extent described by new changed type. */
- temp = new->br_startoff;
- temp2 = new->br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@@ -1501,10 +1422,6 @@ xfs_bmap_add_extent_unwritten_real(
LEFT.br_state))
goto done;
}
- /* DELTA: The boundary between two in-core extents moved. */
- temp = LEFT.br_startoff;
- temp2 = LEFT.br_blockcount +
- PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING:
@@ -1544,9 +1461,6 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- /* DELTA: One in-core extent is split in two. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1587,10 +1501,6 @@ xfs_bmap_add_extent_unwritten_real(
newext)))
goto done;
}
- /* DELTA: The boundary between two in-core extents moved. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount +
- RIGHT.br_blockcount;
break;
case BMAP_RIGHT_FILLING:
@@ -1630,9 +1540,6 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- /* DELTA: One in-core extent is split in two. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
case 0:
@@ -1692,9 +1599,6 @@ xfs_bmap_add_extent_unwritten_real(
goto done;
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- /* DELTA: One in-core extent is split in three. */
- temp = PREV.br_startoff;
- temp2 = PREV.br_blockcount;
break;
case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
@@ -1710,13 +1614,6 @@ xfs_bmap_add_extent_unwritten_real(
ASSERT(0);
}
*curp = cur;
- if (delta) {
- temp2 += temp;
- if (delta->xed_startoff > temp)
- delta->xed_startoff = temp;
- if (delta->xed_blockcount < temp2)
- delta->xed_blockcount = temp2;
- }
done:
*logflagsp = rval;
return error;
@@ -1736,7 +1633,6 @@ xfs_bmap_add_extent_hole_delay(
xfs_extnum_t idx, /* extent number to update/insert */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int rsvd) /* OK to allocate reserved blocks */
{
xfs_bmbt_rec_host_t *ep; /* extent record for idx */
@@ -1747,7 +1643,6 @@ xfs_bmap_add_extent_hole_delay(
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int state; /* state bits, accessed thru macros */
xfs_filblks_t temp=0; /* temp for indirect calculations */
- xfs_filblks_t temp2=0;
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
ep = xfs_iext_get_ext(ifp, idx);
@@ -1819,9 +1714,6 @@ xfs_bmap_add_extent_hole_delay(
xfs_iext_remove(ip, idx, 1, state);
ip->i_df.if_lastex = idx - 1;
- /* DELTA: Two in-core extents were replaced by one. */
- temp2 = temp;
- temp = left.br_startoff;
break;
case BMAP_LEFT_CONTIG:
@@ -1841,9 +1733,6 @@ xfs_bmap_add_extent_hole_delay(
trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
ip->i_df.if_lastex = idx - 1;
- /* DELTA: One in-core extent grew into a hole. */
- temp2 = temp;
- temp = left.br_startoff;
break;
case BMAP_RIGHT_CONTIG:
@@ -1862,9 +1751,6 @@ xfs_bmap_add_extent_hole_delay(
trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
ip->i_df.if_lastex = idx;
- /* DELTA: One in-core extent grew into a hole. */
- temp2 = temp;
- temp = new->br_startoff;
break;
case 0:
@@ -1876,9 +1762,6 @@ xfs_bmap_add_extent_hole_delay(
oldlen = newlen = 0;
xfs_iext_insert(ip, idx, 1, new, state);
ip->i_df.if_lastex = idx;
- /* DELTA: A new in-core extent was added in a hole. */
- temp2 = new->br_blockcount;
- temp = new->br_startoff;
break;
}
if (oldlen != newlen) {
@@ -1889,13 +1772,6 @@ xfs_bmap_add_extent_hole_delay(
* Nothing to do for disk quota accounting here.
*/
}
- if (delta) {
- temp2 += temp;
- if (delta->xed_startoff > temp)
- delta->xed_startoff = temp;
- if (delta->xed_blockcount < temp2)
- delta->xed_blockcount = temp2;
- }
*logflagsp = 0;
return 0;
}
@@ -1911,7 +1787,6 @@ xfs_bmap_add_extent_hole_real(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork) /* data or attr fork */
{
xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */
@@ -1922,8 +1797,6 @@ xfs_bmap_add_extent_hole_real(
xfs_bmbt_irec_t right; /* right neighbor extent entry */
int rval=0; /* return value (logging flags) */
int state; /* state bits, accessed thru macros */
- xfs_filblks_t temp=0;
- xfs_filblks_t temp2=0;
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
@@ -2020,11 +1893,6 @@ xfs_bmap_add_extent_hole_real(
left.br_state)))
goto done;
}
- /* DELTA: Two in-core extents were replaced by one. */
- temp = left.br_startoff;
- temp2 = left.br_blockcount +
- new->br_blockcount +
- right.br_blockcount;
break;
case BMAP_LEFT_CONTIG:
@@ -2056,10 +1924,6 @@ xfs_bmap_add_extent_hole_real(
left.br_state)))
goto done;
}
- /* DELTA: One in-core extent grew. */
- temp = left.br_startoff;
- temp2 = left.br_blockcount +
- new->br_blockcount;
break;
case BMAP_RIGHT_CONTIG:
@@ -2092,10 +1956,6 @@ xfs_bmap_add_extent_hole_real(
right.br_state)))
goto done;
}
- /* DELTA: One in-core extent grew. */
- temp = new->br_startoff;
- temp2 = new->br_blockcount +
- right.br_blockcount;
break;
case 0:
@@ -2123,18 +1983,8 @@ xfs_bmap_add_extent_hole_real(
goto done;
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
}
- /* DELTA: A new extent was added in a hole. */
- temp = new->br_startoff;
- temp2 = new->br_blockcount;
break;
}
- if (delta) {
- temp2 += temp;
- if (delta->xed_startoff > temp)
- delta->xed_startoff = temp;
- if (delta->xed_blockcount < temp2)
- delta->xed_blockcount = temp2;
- }
done:
*logflagsp = rval;
return error;
@@ -2959,7 +2809,6 @@ xfs_bmap_del_extent(
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
int *logflagsp, /* inode logging flags */
- xfs_extdelta_t *delta, /* Change made to incore extents */
int whichfork, /* data or attr fork */
int rsvd) /* OK to allocate reserved blocks */
{
@@ -3265,14 +3114,6 @@ xfs_bmap_del_extent(
if (da_old > da_new)
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
rsvd);
- if (delta) {
- /* DELTA: report the original extent. */
- if (delta->xed_startoff > got.br_startoff)
- delta->xed_startoff = got.br_startoff;
- if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
- delta->xed_blockcount = got.br_startoff +
- got.br_blockcount;
- }
done:
*logflagsp = flags;
return error;
@@ -3754,9 +3595,10 @@ xfs_bmap_add_attrfork(
ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
}
ASSERT(ip->i_d.di_anextents == 0);
- IHOLD(ip);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_DEV:
ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
@@ -4483,8 +4325,7 @@ xfs_bmapi(
xfs_extlen_t total, /* total blocks needed */
xfs_bmbt_irec_t *mval, /* output: map values */
int *nmap, /* i/o: mval size/count */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_extdelta_t *delta) /* o: change made to incore extents */
+ xfs_bmap_free_t *flist) /* i/o: list extents to free */
{
xfs_fsblock_t abno; /* allocated block number */
xfs_extlen_t alen; /* allocated extent length */
@@ -4596,10 +4437,7 @@ xfs_bmapi(
end = bno + len;
obno = bno;
bma.ip = NULL;
- if (delta) {
- delta->xed_startoff = NULLFILEOFF;
- delta->xed_blockcount = 0;
- }
+
while (bno < end && n < *nmap) {
/*
* Reading past eof, act as though there's a hole
@@ -4620,19 +4458,13 @@ xfs_bmapi(
* allocate the stuff asked for in this bmap call
* but that wouldn't be as good.
*/
- if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
+ if (wasdelay) {
alen = (xfs_extlen_t)got.br_blockcount;
aoff = got.br_startoff;
if (lastx != NULLEXTNUM && lastx) {
ep = xfs_iext_get_ext(ifp, lastx - 1);
xfs_bmbt_get_all(ep, &prev);
}
- } else if (wasdelay) {
- alen = (xfs_extlen_t)
- XFS_FILBLKS_MIN(len,
- (got.br_startoff +
- got.br_blockcount) - bno);
- aoff = bno;
} else {
alen = (xfs_extlen_t)
XFS_FILBLKS_MIN(len, MAXEXTLEN);
@@ -4831,7 +4663,7 @@ xfs_bmapi(
got.br_state = XFS_EXT_UNWRITTEN;
}
error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
- firstblock, flist, &tmp_logflags, delta,
+ firstblock, flist, &tmp_logflags,
whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
logflags |= tmp_logflags;
if (error)
@@ -4927,7 +4759,7 @@ xfs_bmapi(
}
mval->br_state = XFS_EXT_NORM;
error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
- firstblock, flist, &tmp_logflags, delta,
+ firstblock, flist, &tmp_logflags,
whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
logflags |= tmp_logflags;
if (error)
@@ -5017,14 +4849,6 @@ xfs_bmapi(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
error = 0;
- if (delta && delta->xed_startoff != NULLFILEOFF) {
- /* A change was actually made.
- * Note that delta->xed_blockount is an offset at this
- * point and needs to be converted to a block count.
- */
- ASSERT(delta->xed_blockcount > delta->xed_startoff);
- delta->xed_blockcount -= delta->xed_startoff;
- }
error0:
/*
* Log everything. Do this after conversion, there's no point in
@@ -5136,8 +4960,6 @@ xfs_bunmapi(
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_extdelta_t *delta, /* o: change made to incore
- extents */
int *done) /* set if not done yet */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -5196,10 +5018,7 @@ xfs_bunmapi(
bno = start + len - 1;
ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
&prev);
- if (delta) {
- delta->xed_startoff = NULLFILEOFF;
- delta->xed_blockcount = 0;
- }
+
/*
* Check to see if the given block number is past the end of the
* file, back up to the last block if so...
@@ -5297,7 +5116,7 @@ xfs_bunmapi(
}
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
- firstblock, flist, &logflags, delta,
+ firstblock, flist, &logflags,
XFS_DATA_FORK, 0);
if (error)
goto error0;
@@ -5352,7 +5171,7 @@ xfs_bunmapi(
prev.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
&prev, firstblock, flist, &logflags,
- delta, XFS_DATA_FORK, 0);
+ XFS_DATA_FORK, 0);
if (error)
goto error0;
goto nodelete;
@@ -5361,7 +5180,7 @@ xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent(ip, lastx, &cur,
&del, firstblock, flist, &logflags,
- delta, XFS_DATA_FORK, 0);
+ XFS_DATA_FORK, 0);
if (error)
goto error0;
goto nodelete;
@@ -5414,7 +5233,7 @@ xfs_bunmapi(
goto error0;
}
error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
- &tmp_logflags, delta, whichfork, rsvd);
+ &tmp_logflags, whichfork, rsvd);
logflags |= tmp_logflags;
if (error)
goto error0;
@@ -5471,14 +5290,6 @@ nodelete:
ASSERT(ifp->if_ext_max ==
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
error = 0;
- if (delta && delta->xed_startoff != NULLFILEOFF) {
- /* A change was actually made.
- * Note that delta->xed_blockount is an offset at this
- * point and needs to be converted to a block count.
- */
- ASSERT(delta->xed_blockcount > delta->xed_startoff);
- delta->xed_blockcount -= delta->xed_startoff;
- }
error0:
/*
* Log everything. Do this after conversion, there's no point in
@@ -5605,28 +5416,6 @@ xfs_getbmap(
prealloced = 0;
fixlen = 1LL << 32;
} else {
- /*
- * If the BMV_IF_NO_DMAPI_READ interface bit specified, do
- * not generate a DMAPI read event. Otherwise, if the
- * DM_EVENT_READ bit is set for the file, generate a read
- * event in order that the DMAPI application may do its thing
- * before we return the extents. Usually this means restoring
- * user file data to regions of the file that look like holes.
- *
- * The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
- * BMV_IF_NO_DMAPI_READ so that read events are generated.
- * If this were not true, callers of ioctl(XFS_IOC_GETBMAP)
- * could misinterpret holes in a DMAPI file as true holes,
- * when in fact they may represent offline user data.
- */
- if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) &&
- !(iflags & BMV_IF_NO_DMAPI_READ)) {
- error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip,
- 0, 0, 0, NULL);
- if (error)
- return XFS_ERROR(error);
- }
-
if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
@@ -5713,7 +5502,7 @@ xfs_getbmap(
error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
XFS_BB_TO_FSB(mp, bmv->bmv_length),
bmapi_flags, NULL, 0, map, &nmap,
- NULL, NULL);
+ NULL);
if (error)
goto out_free_map;
ASSERT(nmap <= subnex);
@@ -5859,66 +5648,34 @@ xfs_bmap_eof(
}
#ifdef DEBUG
-STATIC
-xfs_buf_t *
+STATIC struct xfs_buf *
xfs_bmap_get_bp(
- xfs_btree_cur_t *cur,
+ struct xfs_btree_cur *cur,
xfs_fsblock_t bno)
{
- int i;
- xfs_buf_t *bp;
+ struct xfs_log_item_desc *lidp;
+ int i;
if (!cur)
- return(NULL);
-
- bp = NULL;
- for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
- bp = cur->bc_bufs[i];
- if (!bp) break;
- if (XFS_BUF_ADDR(bp) == bno)
- break; /* Found it */
- }
- if (i == XFS_BTREE_MAXLEVELS)
- bp = NULL;
-
- if (!bp) { /* Chase down all the log items to see if the bp is there */
- xfs_log_item_chunk_t *licp;
- xfs_trans_t *tp;
-
- tp = cur->bc_tp;
- licp = &tp->t_items;
- while (!bp && licp != NULL) {
- if (xfs_lic_are_all_free(licp)) {
- licp = licp->lic_next;
- continue;
- }
- for (i = 0; i < licp->lic_unused; i++) {
- xfs_log_item_desc_t *lidp;
- xfs_log_item_t *lip;
- xfs_buf_log_item_t *bip;
- xfs_buf_t *lbp;
-
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- lidp = xfs_lic_slot(licp, i);
- lip = lidp->lid_item;
- if (lip->li_type != XFS_LI_BUF)
- continue;
+ return NULL;
- bip = (xfs_buf_log_item_t *)lip;
- lbp = bip->bli_buf;
+ for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
+ if (!cur->bc_bufs[i])
+ break;
+ if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
+ return cur->bc_bufs[i];
+ }
- if (XFS_BUF_ADDR(lbp) == bno) {
- bp = lbp;
- break; /* Found it */
- }
- }
- licp = licp->lic_next;
- }
+ /* Chase down all the log items to see if the bp is there */
+ list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
+ struct xfs_buf_log_item *bip;
+ bip = (struct xfs_buf_log_item *)lidp->lid_item;
+ if (bip->bli_item.li_type == XFS_LI_BUF &&
+ XFS_BUF_ADDR(bip->bli_buf) == bno)
+ return bip->bli_buf;
}
- return(bp);
+
+ return NULL;
}
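The rewritten lookup above drops the open-coded walk over log item chunks in favour of the kernel's typed list iterator over the transaction's t_items list. A self-contained sketch of the pattern (the _example names are hypothetical, not XFS symbols):

#include <linux/list.h>

struct item_example {
	struct list_head	link;	/* embedded list node */
	unsigned long		blkno;
};

static struct item_example *
find_item_example(struct list_head *items, unsigned long bno)
{
	struct item_example *it;

	/* typed iteration: no chunk/slot bookkeeping, no casts */
	list_for_each_entry(it, items, link) {
		if (it->blkno == bno)
			return it;
	}
	return NULL;
}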
STATIC void
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 419dafb9d87..b13569a6179 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -28,20 +28,6 @@ struct xfs_trans;
extern kmem_zone_t *xfs_bmap_free_item_zone;
/*
- * DELTA: describe a change to the in-core extent list.
- *
- * Internally the use of xed_blockcount is somewhat funky.
- * xed_blockcount contains an offset much of the time because this
- * makes merging changes easier. (xfs_fileoff_t and xfs_filblks_t are
- * the same underlying type).
- */
-typedef struct xfs_extdelta
-{
- xfs_fileoff_t xed_startoff; /* offset of range */
- xfs_filblks_t xed_blockcount; /* blocks in range */
-} xfs_extdelta_t;
-
-/*
* List of extents to be free "later".
* The list is kept sorted on xbf_startblock.
*/
@@ -82,16 +68,13 @@ typedef struct xfs_bmap_free
#define XFS_BMAPI_DELAY 0x002 /* delayed write operation */
#define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */
#define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */
-#define XFS_BMAPI_EXACT 0x010 /* allocate only to spec'd bounds */
-#define XFS_BMAPI_ATTRFORK 0x020 /* use attribute fork not data */
-#define XFS_BMAPI_ASYNC 0x040 /* bunmapi xactions can be async */
-#define XFS_BMAPI_RSVBLOCKS 0x080 /* OK to alloc. reserved data blocks */
-#define XFS_BMAPI_PREALLOC 0x100 /* preallocation op: unwritten space */
-#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */
+#define XFS_BMAPI_ATTRFORK 0x010 /* use attribute fork not data */
+#define XFS_BMAPI_RSVBLOCKS 0x020 /* OK to alloc. reserved data blocks */
+#define XFS_BMAPI_PREALLOC 0x040 /* preallocation op: unwritten space */
+#define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */
/* combine contig. space */
-#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */
-/* XFS_BMAPI_DIRECT_IO 0x800 */
-#define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */
+#define XFS_BMAPI_CONTIG 0x100 /* must allocate only one extent */
+#define XFS_BMAPI_CONVERT 0x200 /* unwritten extent conversion - */
/* need write cache flushing and no */
/* additional allocation alignments */
@@ -100,9 +83,7 @@ typedef struct xfs_bmap_free
{ XFS_BMAPI_DELAY, "DELAY" }, \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
{ XFS_BMAPI_METADATA, "METADATA" }, \
- { XFS_BMAPI_EXACT, "EXACT" }, \
{ XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \
- { XFS_BMAPI_ASYNC, "ASYNC" }, \
{ XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \
{ XFS_BMAPI_PREALLOC, "PREALLOC" }, \
{ XFS_BMAPI_IGSTATE, "IGSTATE" }, \
@@ -310,9 +291,7 @@ xfs_bmapi(
xfs_extlen_t total, /* total blocks needed */
struct xfs_bmbt_irec *mval, /* output: map values */
int *nmap, /* i/o: mval size/count */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_extdelta_t *delta); /* o: change made to incore
- extents */
+ xfs_bmap_free_t *flist); /* i/o: list extents to free */
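For callers the conversion is purely mechanical: the trailing xfs_extdelta_t argument disappears. A hedged sketch of a post-change call site, with placeholder variables, mirroring the call sites converted later in this patch:

	error = xfs_bmapi(tp, ip, bno, len,
			  XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
			  &firstblock, total, &map, &nmap, &flist);
	if (error)
		return error;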
/*
* Map file blocks to filesystem blocks, simple version.
@@ -346,8 +325,6 @@ xfs_bunmapi(
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
xfs_bmap_free_t *flist, /* i/o: list extents to free */
- xfs_extdelta_t *delta, /* o: change made to incore
- extents */
int *done); /* set if not done yet */
/*
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 416e47e54b8..87d3c10b695 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -24,21 +24,16 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
-#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 96be4b0f249..829af92f0fb 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -24,20 +24,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
-#include "xfs_ialloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 02a80984aa0..1b09d7a280d 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
@@ -34,6 +33,12 @@
kmem_zone_t *xfs_buf_item_zone;
+static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_buf_log_item, bli_item);
+}
+
+
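This helper is the idiom the whole conversion rests on: container_of() recovers the enclosing object from a pointer to an embedded member, so each iop_ handler can take a plain struct xfs_log_item pointer instead of being called through a mismatched function-pointer cast. A generic sketch (outer_example is hypothetical):

#include <linux/kernel.h>

struct outer_example {
	int			payload;
	struct xfs_log_item	item;	/* embedded log item */
};

static inline struct outer_example *
OUTER_EXAMPLE(struct xfs_log_item *lip)
{
	return container_of(lip, struct outer_example, item);
}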
#ifdef XFS_TRANS_DEBUG
/*
* This function uses an alternate strategy for tracking the bytes
@@ -151,12 +156,13 @@ STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
*/
STATIC uint
xfs_buf_item_size(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip)
{
- uint nvecs;
- int next_bit;
- int last_bit;
- xfs_buf_t *bp;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
+ uint nvecs;
+ int next_bit;
+ int last_bit;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
if (bip->bli_flags & XFS_BLI_STALE) {
@@ -170,7 +176,6 @@ xfs_buf_item_size(
return 1;
}
- bp = bip->bli_buf;
ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
nvecs = 1;
last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
@@ -219,13 +224,13 @@ xfs_buf_item_size(
*/
STATIC void
xfs_buf_item_format(
- xfs_buf_log_item_t *bip,
- xfs_log_iovec_t *log_vector)
+ struct xfs_log_item *lip,
+ struct xfs_log_iovec *vecp)
{
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
uint base_size;
uint nvecs;
- xfs_log_iovec_t *vecp;
- xfs_buf_t *bp;
int first_bit;
int last_bit;
int next_bit;
@@ -235,8 +240,6 @@ xfs_buf_item_format(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
- bp = bip->bli_buf;
- vecp = log_vector;
/*
* The size of the base structure is the size of the
@@ -248,7 +251,7 @@ xfs_buf_item_format(
base_size =
(uint)(sizeof(xfs_buf_log_format_t) +
((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
- vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
+ vecp->i_addr = &bip->bli_format;
vecp->i_len = base_size;
vecp->i_type = XLOG_REG_TYPE_BFORMAT;
vecp++;
@@ -263,7 +266,7 @@ xfs_buf_item_format(
*/
if (bip->bli_flags & XFS_BLI_INODE_BUF) {
if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
- xfs_log_item_in_current_chkpt(&bip->bli_item)))
+ xfs_log_item_in_current_chkpt(lip)))
bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
bip->bli_flags &= ~XFS_BLI_INODE_BUF;
}
@@ -356,66 +359,90 @@ xfs_buf_item_format(
/*
* This is called to pin the buffer associated with the buf log item in memory
- * so it cannot be written out. Simply call bpin() on the buffer to do this.
+ * so it cannot be written out.
*
* We also always take a reference to the buffer log item here so that the bli
* is held while the item is pinned in memory. This means that we can
* unconditionally drop the reference count a transaction holds when the
* transaction is completed.
*/
-
STATIC void
xfs_buf_item_pin(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip)
{
- xfs_buf_t *bp;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- bp = bip->bli_buf;
- ASSERT(XFS_BUF_ISBUSY(bp));
+ ASSERT(XFS_BUF_ISBUSY(bip->bli_buf));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
- atomic_inc(&bip->bli_refcount);
+
trace_xfs_buf_item_pin(bip);
- xfs_bpin(bp);
-}
+ atomic_inc(&bip->bli_refcount);
+ atomic_inc(&bip->bli_buf->b_pin_count);
+}
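With xfs_bpin() gone, pinning reduces to an atomic counter on the buffer itself. A sketch of the handshake, using the b_pin_count and b_waiters fields this patch manipulates directly:

static void pin_buf_example(struct xfs_buf *bp)
{
	atomic_inc(&bp->b_pin_count);	/* forbid writeback while pinned */
}

static void unpin_buf_example(struct xfs_buf *bp)
{
	/* the last unpin wakes any thread waiting on the buffer */
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
}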
/*
* This is called to unpin the buffer associated with the buf log
* item which was previously pinned with a call to xfs_buf_item_pin().
- * Just call bunpin() on the buffer to do this.
*
* Also drop the reference to the buf item for the current transaction.
* If the XFS_BLI_STALE flag is set and we are the last reference,
* then free up the buf log item and unlock the buffer.
+ *
+ * If the remove flag is set we are called from uncommit in the
+ * forced-shutdown path. If that is true and the reference count on
+ * the log item is going to drop to zero we need to free the item's
+ * descriptor in the transaction.
*/
STATIC void
xfs_buf_item_unpin(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip,
+ int remove)
{
- struct xfs_ail *ailp;
- xfs_buf_t *bp;
- int freed;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ xfs_buf_t *bp = bip->bli_buf;
+ struct xfs_ail *ailp = lip->li_ailp;
int stale = bip->bli_flags & XFS_BLI_STALE;
+ int freed;
- bp = bip->bli_buf;
- ASSERT(bp != NULL);
ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
trace_xfs_buf_item_unpin(bip);
freed = atomic_dec_and_test(&bip->bli_refcount);
- ailp = bip->bli_item.li_ailp;
- xfs_bunpin(bp);
+
+ if (atomic_dec_and_test(&bp->b_pin_count))
+ wake_up_all(&bp->b_waiters);
+
if (freed && stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE);
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+
trace_xfs_buf_item_unpin_stale(bip);
+ if (remove) {
+ /*
+ * We have to remove the log item from the transaction
+ * as we are about to release our reference to the
+ * buffer. If we don't, the unlock that occurs later
+			 * in xfs_trans_uncommit() will try to reference the
+ * buffer which we no longer have a hold on.
+ */
+ xfs_trans_del_item(lip);
+
+ /*
+ * Since the transaction no longer refers to the buffer,
+ * the buffer should no longer refer to the transaction.
+ */
+ XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+ }
+
/*
* If we get called here because of an IO error, we may
* or may not have the item on the AIL. xfs_trans_ail_delete()
@@ -437,48 +464,6 @@ xfs_buf_item_unpin(
}
/*
- * this is called from uncommit in the forced-shutdown path.
- * we need to check to see if the reference count on the log item
- * is going to drop to zero. If so, unpin will free the log item
- * so we need to free the item's descriptor (that points to the item)
- * in the transaction.
- */
-STATIC void
-xfs_buf_item_unpin_remove(
- xfs_buf_log_item_t *bip,
- xfs_trans_t *tp)
-{
- /* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */
- if ((atomic_read(&bip->bli_refcount) == 1) &&
- (bip->bli_flags & XFS_BLI_STALE)) {
- /*
- * yes -- We can safely do some work here and then call
-		 * buf_item_unpin to do the rest because we
- * are holding the buffer locked so no one else will be
- * able to bump up the refcount. We have to remove the
- * log item from the transaction as we are about to release
- * our reference to the buffer. If we don't, the unlock that
- * occurs later in the xfs_trans_uncommit() will try to
- * reference the buffer which we no longer have a hold on.
- */
- struct xfs_log_item_desc *lidp;
-
- ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
- trace_xfs_buf_item_unpin_stale(bip);
-
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
- xfs_trans_free_item(tp, lidp);
-
- /*
- * Since the transaction no longer refers to the buffer, the
- * buffer should no longer refer to the transaction.
- */
- XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL);
- }
- xfs_buf_item_unpin(bip);
-}
-
-/*
* This is called to attempt to lock the buffer associated with this
* buf log item. Don't sleep on the buffer lock. If we can't get
* the lock right away, return 0. If we can get the lock, take a
@@ -488,11 +473,11 @@ xfs_buf_item_unpin_remove(
*/
STATIC uint
xfs_buf_item_trylock(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip)
{
- xfs_buf_t *bp;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
- bp = bip->bli_buf;
if (XFS_BUF_ISPINNED(bp))
return XFS_ITEM_PINNED;
if (!XFS_BUF_CPSEMA(bp))
@@ -529,13 +514,12 @@ xfs_buf_item_trylock(
*/
STATIC void
xfs_buf_item_unlock(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip)
{
- int aborted;
- xfs_buf_t *bp;
- uint hold;
-
- bp = bip->bli_buf;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
+ int aborted;
+ uint hold;
/* Clear the buffer's association with this transaction. */
XFS_BUF_SET_FSPRIVATE2(bp, NULL);
@@ -546,7 +530,7 @@ xfs_buf_item_unlock(
* (cancelled) buffers at unpin time, but we'll never go through the
* pin/unpin cycle if we abort inside commit.
*/
- aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0;
+ aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;
/*
* Before possibly freeing the buf item, determine if we should
@@ -607,16 +591,16 @@ xfs_buf_item_unlock(
*/
STATIC xfs_lsn_t
xfs_buf_item_committed(
- xfs_buf_log_item_t *bip,
+ struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+
trace_xfs_buf_item_committed(bip);
- if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
- (bip->bli_item.li_lsn != 0)) {
- return bip->bli_item.li_lsn;
- }
- return (lsn);
+ if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
+ return lip->li_lsn;
+ return lsn;
}
/*
@@ -626,15 +610,16 @@ xfs_buf_item_committed(
*/
STATIC void
xfs_buf_item_push(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip)
{
- xfs_buf_t *bp;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+ ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
+
trace_xfs_buf_item_push(bip);
- bp = bip->bli_buf;
- ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
xfs_buf_relse(bp);
}
@@ -646,22 +631,24 @@ xfs_buf_item_push(
*/
STATIC void
xfs_buf_item_pushbuf(
- xfs_buf_log_item_t *bip)
+ struct xfs_log_item *lip)
{
- xfs_buf_t *bp;
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+ ASSERT(XFS_BUF_ISDELAYWRITE(bp));
+
trace_xfs_buf_item_pushbuf(bip);
- bp = bip->bli_buf;
- ASSERT(XFS_BUF_ISDELAYWRITE(bp));
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
}
-/* ARGSUSED */
STATIC void
-xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
+xfs_buf_item_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t commit_lsn)
{
}
@@ -669,21 +656,16 @@ xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
* This is the ops vector shared by all buf log items.
*/
static struct xfs_item_ops xfs_buf_item_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_buf_item_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
- xfs_buf_item_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_buf_item_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
- .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_buf_item_committing
+ .iop_size = xfs_buf_item_size,
+ .iop_format = xfs_buf_item_format,
+ .iop_pin = xfs_buf_item_pin,
+ .iop_unpin = xfs_buf_item_unpin,
+ .iop_trylock = xfs_buf_item_trylock,
+ .iop_unlock = xfs_buf_item_unlock,
+ .iop_committed = xfs_buf_item_committed,
+ .iop_push = xfs_buf_item_push,
+ .iop_pushbuf = xfs_buf_item_pushbuf,
+ .iop_committing = xfs_buf_item_committing
};
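The hunk above is the payoff of the interface change: calling a function through a pointer of an incompatible type is undefined behaviour in C, and once every handler takes struct xfs_log_item *, the ops vector reduces to plain designated initializers. A trimmed, hypothetical stand-in:

struct item_ops_example {
	uint	(*iop_size)(struct xfs_log_item *);
	void	(*iop_pin)(struct xfs_log_item *);
};

static uint size_example(struct xfs_log_item *lip)
{
	return 1;	/* one iovec, as for EFI/EFD items */
}

static void pin_stub_example(struct xfs_log_item *lip)
{
}

static struct item_ops_example example_ops = {
	.iop_size	= size_example,
	.iop_pin	= pin_stub_example,
};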
@@ -712,7 +694,6 @@ xfs_buf_item_init(
*/
if (bp->b_mount != mp)
bp->b_mount = mp;
- XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
if (lip->li_type == XFS_LI_BUF) {
@@ -1098,15 +1079,14 @@ xfs_buf_error_relse(
* It is called by xfs_buf_iodone_callbacks() above which will take
* care of cleaning up the buffer itself.
*/
-/* ARGSUSED */
void
xfs_buf_iodone(
- xfs_buf_t *bp,
- xfs_buf_log_item_t *bip)
+ struct xfs_buf *bp,
+ struct xfs_log_item *lip)
{
- struct xfs_ail *ailp = bip->bli_item.li_ailp;
+ struct xfs_ail *ailp = lip->li_ailp;
- ASSERT(bip->bli_buf == bp);
+ ASSERT(BUF_ITEM(lip)->bli_buf == bp);
xfs_buf_rele(bp);
@@ -1120,6 +1100,6 @@ xfs_buf_iodone(
* Either way, AIL is useless if we're forcing a shutdown.
*/
spin_lock(&ailp->xa_lock);
- xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
- xfs_buf_item_free(bip);
+ xfs_trans_ail_delete(ailp, lip);
+ xfs_buf_item_free(BUF_ITEM(lip));
}
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index f20bb472d58..0e2ed43f16c 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -124,7 +124,7 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
void(*)(struct xfs_buf *, xfs_log_item_t *),
xfs_log_item_t *);
void xfs_buf_iodone_callbacks(struct xfs_buf *);
-void xfs_buf_iodone(struct xfs_buf *, xfs_buf_log_item_t *);
+void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
#ifdef XFS_TRANS_DEBUG
void
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 0ca556b4bf3..30fa0e206fb 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -25,19 +25,14 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
-#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
@@ -581,16 +576,14 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
xfs_da_intnode_t *node;
xfs_da_node_entry_t *btree;
int tmp;
- xfs_mount_t *mp;
node = oldblk->bp->data;
- mp = state->mp;
ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
ASSERT(newblk->blkno != 0);
if (state->args->whichfork == XFS_DATA_FORK)
- ASSERT(newblk->blkno >= mp->m_dirleafblk &&
- newblk->blkno < mp->m_dirfreeblk);
+ ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
+ newblk->blkno < state->mp->m_dirfreeblk);
/*
* We may need to make some room before we insert the new node.
@@ -1601,7 +1594,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
- args->flist, NULL))) {
+ args->flist))) {
return error;
}
ASSERT(nmap <= 1);
@@ -1622,8 +1615,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
XFS_BMAPI_METADATA,
args->firstblock, args->total,
- &mapp[mapi], &nmap, args->flist,
- NULL))) {
+ &mapp[mapi], &nmap, args->flist))) {
kmem_free(mapp);
return error;
}
@@ -1884,7 +1876,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
*/
if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
- 0, args->firstblock, args->flist, NULL,
+ 0, args->firstblock, args->flist,
&done)) == ENOSPC) {
if (w != XFS_DATA_FORK)
break;
@@ -1989,7 +1981,7 @@ xfs_da_do_buf(
nfsb,
XFS_BMAPI_METADATA |
xfs_bmapi_aflag(whichfork),
- NULL, 0, mapp, &nmap, NULL, NULL)))
+ NULL, 0, mapp, &nmap, NULL)))
goto exit0;
}
} else {
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 5bba29a0781..3b9582c60a2 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -24,24 +24,15 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_dfrag.h"
#include "xfs_error.h"
-#include "xfs_rw.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
@@ -69,7 +60,9 @@ xfs_swapext(
goto out;
}
- if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) {
+ if (!(file->f_mode & FMODE_WRITE) ||
+ !(file->f_mode & FMODE_READ) ||
+ (file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
goto out_put_file;
}
@@ -81,6 +74,7 @@ xfs_swapext(
}
if (!(tmp_file->f_mode & FMODE_WRITE) ||
+ !(tmp_file->f_mode & FMODE_READ) ||
(tmp_file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
goto out_put_tmp_file;
@@ -422,11 +416,8 @@ xfs_swap_extents(
}
- IHOLD(ip);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-
- IHOLD(tip);
- xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
xfs_trans_log_inode(tp, ip, ilf_fields);
xfs_trans_log_inode(tp, tip, tilf_fields);
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 42520f04126..a1321bc7f19 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -25,13 +25,11 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@@ -382,7 +380,7 @@ xfs_readdir(
int rval; /* return value */
int v; /* type-checking value */
- xfs_itrace_entry(dp);
+ trace_xfs_readdir(dp);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return XFS_ERROR(EIO);
@@ -549,7 +547,7 @@ xfs_dir2_grow_inode(
if ((error = xfs_bmapi(tp, dp, bno, count,
XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
- args->flist, NULL)))
+ args->flist)))
return error;
ASSERT(nmap <= 1);
if (nmap == 1) {
@@ -581,8 +579,7 @@ xfs_dir2_grow_inode(
if ((error = xfs_bmapi(tp, dp, b, c,
XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
args->firstblock, args->total,
- &mapp[mapi], &nmap, args->flist,
- NULL))) {
+ &mapp[mapi], &nmap, args->flist))) {
kmem_free(mapp);
return error;
}
@@ -715,7 +712,7 @@ xfs_dir2_shrink_inode(
*/
if ((error = xfs_bunmapi(tp, dp, da, mp->m_dirblkfsbs,
XFS_BMAPI_METADATA, 0, args->firstblock, args->flist,
- NULL, &done))) {
+ &done))) {
/*
* ENOSPC actually can happen if we're in a removename with
* no space reservation, and the resulting block removal
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 779a267b0a8..580d99cef9e 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -24,12 +24,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@@ -1073,10 +1071,10 @@ xfs_dir2_sf_to_block(
*/
buf_len = dp->i_df.if_bytes;
- buf = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
+ buf = kmem_alloc(buf_len, KM_SLEEP);
- memcpy(buf, sfp, dp->i_df.if_bytes);
- xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
+ memcpy(buf, sfp, buf_len);
+ xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK);
dp->i_d.di_size = 0;
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
/*
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 498f8d69433..921595b84f5 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -24,12 +24,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_dir2_data.h"
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index e2d89854ec9..504be8640e9 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -25,11 +25,9 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@@ -875,7 +873,7 @@ xfs_dir2_leaf_getdents(
xfs_dir2_byte_to_da(mp,
XFS_DIR2_LEAF_OFFSET) - map_off,
XFS_BMAPI_METADATA, NULL, 0,
- &map[map_valid], &nmap, NULL, NULL);
+ &map[map_valid], &nmap, NULL);
/*
* Don't know if we should ignore this or
* try to return an error.
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 78fc4d9ae75..f9a0864b696 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -24,12 +24,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index c1a5945d463..b1bae6b1eed 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -24,12 +24,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
deleted file mode 100644
index 2813cdd7237..00000000000
--- a/fs/xfs/xfs_dmapi.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DMAPI_H__
-#define __XFS_DMAPI_H__
-
-/* Values used to define the on-disk version of dm_attrname_t. All
- * on-disk attribute names start with the 8-byte string "SGI_DMI_".
- *
- * In the on-disk inode, DMAPI attribute names consist of the user-provided
- * name with the DMATTR_PREFIXSTRING pre-pended. This string must NEVER be
- * changed.
- */
-
-#define DMATTR_PREFIXLEN 8
-#define DMATTR_PREFIXSTRING "SGI_DMI_"
-
-typedef enum {
- DM_EVENT_INVALID = -1,
- DM_EVENT_CANCEL = 0, /* not supported */
- DM_EVENT_MOUNT = 1,
- DM_EVENT_PREUNMOUNT = 2,
- DM_EVENT_UNMOUNT = 3,
- DM_EVENT_DEBUT = 4, /* not supported */
- DM_EVENT_CREATE = 5,
- DM_EVENT_CLOSE = 6, /* not supported */
- DM_EVENT_POSTCREATE = 7,
- DM_EVENT_REMOVE = 8,
- DM_EVENT_POSTREMOVE = 9,
- DM_EVENT_RENAME = 10,
- DM_EVENT_POSTRENAME = 11,
- DM_EVENT_LINK = 12,
- DM_EVENT_POSTLINK = 13,
- DM_EVENT_SYMLINK = 14,
- DM_EVENT_POSTSYMLINK = 15,
- DM_EVENT_READ = 16,
- DM_EVENT_WRITE = 17,
- DM_EVENT_TRUNCATE = 18,
- DM_EVENT_ATTRIBUTE = 19,
- DM_EVENT_DESTROY = 20,
- DM_EVENT_NOSPACE = 21,
- DM_EVENT_USER = 22,
- DM_EVENT_MAX = 23
-} dm_eventtype_t;
-#define HAVE_DM_EVENTTYPE_T
-
-typedef enum {
- DM_RIGHT_NULL,
- DM_RIGHT_SHARED,
- DM_RIGHT_EXCL
-} dm_right_t;
-#define HAVE_DM_RIGHT_T
-
-/* Defines for determining if an event message should be sent. */
-#ifdef HAVE_DMAPI
-#define DM_EVENT_ENABLED(ip, event) ( \
- unlikely ((ip)->i_mount->m_flags & XFS_MOUNT_DMAPI) && \
- ( ((ip)->i_d.di_dmevmask & (1 << event)) || \
- ((ip)->i_mount->m_dmevmask & (1 << event)) ) \
- )
-#else
-#define DM_EVENT_ENABLED(ip, event) (0)
-#endif
-
-#define DM_XFS_VALID_FS_EVENTS ( \
- (1 << DM_EVENT_PREUNMOUNT) | \
- (1 << DM_EVENT_UNMOUNT) | \
- (1 << DM_EVENT_NOSPACE) | \
- (1 << DM_EVENT_DEBUT) | \
- (1 << DM_EVENT_CREATE) | \
- (1 << DM_EVENT_POSTCREATE) | \
- (1 << DM_EVENT_REMOVE) | \
- (1 << DM_EVENT_POSTREMOVE) | \
- (1 << DM_EVENT_RENAME) | \
- (1 << DM_EVENT_POSTRENAME) | \
- (1 << DM_EVENT_LINK) | \
- (1 << DM_EVENT_POSTLINK) | \
- (1 << DM_EVENT_SYMLINK) | \
- (1 << DM_EVENT_POSTSYMLINK) | \
- (1 << DM_EVENT_ATTRIBUTE) | \
- (1 << DM_EVENT_DESTROY) )
-
-/* Events valid in dm_set_eventlist() when called with a file handle for
- a regular file or a symlink. These events are persistent.
-*/
-
-#define DM_XFS_VALID_FILE_EVENTS ( \
- (1 << DM_EVENT_ATTRIBUTE) | \
- (1 << DM_EVENT_DESTROY) )
-
-/* Events valid in dm_set_eventlist() when called with a file handle for
- a directory. These events are persistent.
-*/
-
-#define DM_XFS_VALID_DIRECTORY_EVENTS ( \
- (1 << DM_EVENT_CREATE) | \
- (1 << DM_EVENT_POSTCREATE) | \
- (1 << DM_EVENT_REMOVE) | \
- (1 << DM_EVENT_POSTREMOVE) | \
- (1 << DM_EVENT_RENAME) | \
- (1 << DM_EVENT_POSTRENAME) | \
- (1 << DM_EVENT_LINK) | \
- (1 << DM_EVENT_POSTLINK) | \
- (1 << DM_EVENT_SYMLINK) | \
- (1 << DM_EVENT_POSTSYMLINK) | \
- (1 << DM_EVENT_ATTRIBUTE) | \
- (1 << DM_EVENT_DESTROY) )
-
-/* Events supported by the XFS filesystem. */
-#define DM_XFS_SUPPORTED_EVENTS ( \
- (1 << DM_EVENT_MOUNT) | \
- (1 << DM_EVENT_PREUNMOUNT) | \
- (1 << DM_EVENT_UNMOUNT) | \
- (1 << DM_EVENT_NOSPACE) | \
- (1 << DM_EVENT_CREATE) | \
- (1 << DM_EVENT_POSTCREATE) | \
- (1 << DM_EVENT_REMOVE) | \
- (1 << DM_EVENT_POSTREMOVE) | \
- (1 << DM_EVENT_RENAME) | \
- (1 << DM_EVENT_POSTRENAME) | \
- (1 << DM_EVENT_LINK) | \
- (1 << DM_EVENT_POSTLINK) | \
- (1 << DM_EVENT_SYMLINK) | \
- (1 << DM_EVENT_POSTSYMLINK) | \
- (1 << DM_EVENT_READ) | \
- (1 << DM_EVENT_WRITE) | \
- (1 << DM_EVENT_TRUNCATE) | \
- (1 << DM_EVENT_ATTRIBUTE) | \
- (1 << DM_EVENT_DESTROY) )
-
-
-/*
- * Definitions used for the flags field on dm_send_*_event().
- */
-
-#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
-#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
-#define DM_FLAGS_IMUX 0x004 /* thread holds i_mutex */
-#define DM_FLAGS_IALLOCSEM_RD 0x010 /* thread holds i_alloc_sem rd */
-#define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */
-
-/*
- * Pull in platform specific event flags defines
- */
-#include "xfs_dmapi_priv.h"
-
-/*
- * Macros to turn caller specified delay/block flags into
- * dm_send_xxxx_event flag DM_FLAGS_NDELAY.
- */
-
-#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
- DM_FLAGS_NDELAY : 0)
-#define AT_DELAY_FLAG(f) ((f & XFS_ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
-
-#endif /* __XFS_DMAPI_H__ */
diff --git a/fs/xfs/xfs_dmops.c b/fs/xfs/xfs_dmops.c
deleted file mode 100644
index e71e2581c0c..00000000000
--- a/fs/xfs/xfs_dmops.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_sb.h"
-#include "xfs_dmapi.h"
-#include "xfs_inum.h"
-#include "xfs_ag.h"
-#include "xfs_mount.h"
-
-
-static struct xfs_dmops xfs_dmcore_stub = {
- .xfs_send_data = (xfs_send_data_t)fs_nosys,
- .xfs_send_mmap = (xfs_send_mmap_t)fs_noerr,
- .xfs_send_destroy = (xfs_send_destroy_t)fs_nosys,
- .xfs_send_namesp = (xfs_send_namesp_t)fs_nosys,
- .xfs_send_mount = (xfs_send_mount_t)fs_nosys,
- .xfs_send_unmount = (xfs_send_unmount_t)fs_noerr,
-};
-
-int
-xfs_dmops_get(struct xfs_mount *mp)
-{
- if (mp->m_flags & XFS_MOUNT_DMAPI) {
- cmn_err(CE_WARN,
- "XFS: dmapi support not available in this kernel.");
- return EINVAL;
- }
-
- mp->m_dm_ops = &xfs_dmcore_stub;
- return 0;
-}
-
-void
-xfs_dmops_put(struct xfs_mount *mp)
-{
-}
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 047b8a8e5c2..ed999026766 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -23,12 +23,8 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_utils.h"
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 409fe81585f..a55e687bf56 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -24,7 +24,6 @@
#include "xfs_buf_item.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_extfree_item.h"
@@ -33,18 +32,19 @@
kmem_zone_t *xfs_efi_zone;
kmem_zone_t *xfs_efd_zone;
-STATIC void xfs_efi_item_unlock(xfs_efi_log_item_t *);
+static inline struct xfs_efi_log_item *EFI_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_efi_log_item, efi_item);
+}
void
-xfs_efi_item_free(xfs_efi_log_item_t *efip)
+xfs_efi_item_free(
+ struct xfs_efi_log_item *efip)
{
- int nexts = efip->efi_format.efi_nextents;
-
- if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
+ if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS)
kmem_free(efip);
- } else {
+ else
kmem_zone_free(xfs_efi_zone, efip);
- }
}
/*
@@ -52,9 +52,9 @@ xfs_efi_item_free(xfs_efi_log_item_t *efip)
* We only need 1 iovec for an efi item. It just logs the efi_log_format
* structure.
*/
-/*ARGSUSED*/
STATIC uint
-xfs_efi_item_size(xfs_efi_log_item_t *efip)
+xfs_efi_item_size(
+ struct xfs_log_item *lip)
{
return 1;
}
@@ -67,10 +67,12 @@ xfs_efi_item_size(xfs_efi_log_item_t *efip)
* slots in the efi item have been filled.
*/
STATIC void
-xfs_efi_item_format(xfs_efi_log_item_t *efip,
- xfs_log_iovec_t *log_vector)
+xfs_efi_item_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_iovec *log_vector)
{
- uint size;
+ struct xfs_efi_log_item *efip = EFI_ITEM(lip);
+ uint size;
ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents);
@@ -80,7 +82,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
size += (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t);
efip->efi_format.efi_size = 1;
- log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format);
+ log_vector->i_addr = &efip->efi_format;
log_vector->i_len = size;
log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT;
ASSERT(size >= sizeof(xfs_efi_log_format_t));
@@ -90,60 +92,33 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
/*
* Pinning has no meaning for an efi item, so just return.
*/
-/*ARGSUSED*/
STATIC void
-xfs_efi_item_pin(xfs_efi_log_item_t *efip)
+xfs_efi_item_pin(
+ struct xfs_log_item *lip)
{
- return;
}
-
/*
* While EFIs cannot really be pinned, the unpin operation is the
* last place at which the EFI is manipulated during a transaction.
* Here we coordinate with xfs_efi_cancel() to determine who gets to
* free the EFI.
*/
-/*ARGSUSED*/
-STATIC void
-xfs_efi_item_unpin(xfs_efi_log_item_t *efip)
-{
- struct xfs_ail *ailp = efip->efi_item.li_ailp;
-
- spin_lock(&ailp->xa_lock);
- if (efip->efi_flags & XFS_EFI_CANCELED) {
- /* xfs_trans_ail_delete() drops the AIL lock. */
- xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
- xfs_efi_item_free(efip);
- } else {
- efip->efi_flags |= XFS_EFI_COMMITTED;
- spin_unlock(&ailp->xa_lock);
- }
-}
-
-/*
- * like unpin only we have to also clear the xaction descriptor
- * pointing to the log item if we free the item. This routine duplicates
- * unpin because efi_flags is protected by the AIL lock. Freeing
- * the descriptor and then calling unpin would force us to drop the AIL
- * lock which would open up a race condition.
- */
STATIC void
-xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
+xfs_efi_item_unpin(
+ struct xfs_log_item *lip,
+ int remove)
{
- struct xfs_ail *ailp = efip->efi_item.li_ailp;
- xfs_log_item_desc_t *lidp;
+ struct xfs_efi_log_item *efip = EFI_ITEM(lip);
+ struct xfs_ail *ailp = lip->li_ailp;
spin_lock(&ailp->xa_lock);
if (efip->efi_flags & XFS_EFI_CANCELED) {
- /*
- * free the xaction descriptor pointing to this item
- */
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) efip);
- xfs_trans_free_item(tp, lidp);
+ if (remove)
+ xfs_trans_del_item(lip);
/* xfs_trans_ail_delete() drops the AIL lock. */
- xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
+ xfs_trans_ail_delete(ailp, lip);
xfs_efi_item_free(efip);
} else {
efip->efi_flags |= XFS_EFI_COMMITTED;
@@ -158,9 +133,9 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp)
* XFS_ITEM_PINNED so that the caller will eventually flush the log.
* This should help in getting the EFI out of the AIL.
*/
-/*ARGSUSED*/
STATIC uint
-xfs_efi_item_trylock(xfs_efi_log_item_t *efip)
+xfs_efi_item_trylock(
+ struct xfs_log_item *lip)
{
return XFS_ITEM_PINNED;
}
@@ -168,13 +143,12 @@ xfs_efi_item_trylock(xfs_efi_log_item_t *efip)
/*
* Efi items have no locking, so just return.
*/
-/*ARGSUSED*/
STATIC void
-xfs_efi_item_unlock(xfs_efi_log_item_t *efip)
+xfs_efi_item_unlock(
+ struct xfs_log_item *lip)
{
- if (efip->efi_item.li_flags & XFS_LI_ABORTED)
- xfs_efi_item_free(efip);
- return;
+ if (lip->li_flags & XFS_LI_ABORTED)
+ xfs_efi_item_free(EFI_ITEM(lip));
}
/*
@@ -183,9 +157,10 @@ xfs_efi_item_unlock(xfs_efi_log_item_t *efip)
* flag is not paid any attention here. Checking for that is delayed
* until the EFI is unpinned.
*/
-/*ARGSUSED*/
STATIC xfs_lsn_t
-xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
+xfs_efi_item_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
{
return lsn;
}
@@ -195,11 +170,10 @@ xfs_efi_item_committed(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
* stuck waiting for all of its corresponding efd items to be
* committed to disk.
*/
-/*ARGSUSED*/
STATIC void
-xfs_efi_item_push(xfs_efi_log_item_t *efip)
+xfs_efi_item_push(
+ struct xfs_log_item *lip)
{
- return;
}
/*
@@ -209,61 +183,55 @@ xfs_efi_item_push(xfs_efi_log_item_t *efip)
* example, for inodes, the inode is locked throughout the extent freeing
* so the dependency should be recorded there.
*/
-/*ARGSUSED*/
STATIC void
-xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
+xfs_efi_item_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
{
- return;
}
/*
* This is the ops vector shared by all efi log items.
*/
static struct xfs_item_ops xfs_efi_item_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_efi_item_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_efi_item_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_efi_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efi_item_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
- xfs_efi_item_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efi_item_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efi_item_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_efi_item_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_efi_item_push,
- .iop_pushbuf = NULL,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_efi_item_committing
+ .iop_size = xfs_efi_item_size,
+ .iop_format = xfs_efi_item_format,
+ .iop_pin = xfs_efi_item_pin,
+ .iop_unpin = xfs_efi_item_unpin,
+ .iop_trylock = xfs_efi_item_trylock,
+ .iop_unlock = xfs_efi_item_unlock,
+ .iop_committed = xfs_efi_item_committed,
+ .iop_push = xfs_efi_item_push,
+ .iop_committing = xfs_efi_item_committing
};
/*
* Allocate and initialize an efi item with the given number of extents.
*/
-xfs_efi_log_item_t *
-xfs_efi_init(xfs_mount_t *mp,
- uint nextents)
+struct xfs_efi_log_item *
+xfs_efi_init(
+ struct xfs_mount *mp,
+ uint nextents)
{
- xfs_efi_log_item_t *efip;
+ struct xfs_efi_log_item *efip;
uint size;
ASSERT(nextents > 0);
if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
size = (uint)(sizeof(xfs_efi_log_item_t) +
((nextents - 1) * sizeof(xfs_extent_t)));
- efip = (xfs_efi_log_item_t*)kmem_zalloc(size, KM_SLEEP);
+ efip = kmem_zalloc(size, KM_SLEEP);
} else {
- efip = (xfs_efi_log_item_t*)kmem_zone_zalloc(xfs_efi_zone,
- KM_SLEEP);
+ efip = kmem_zone_zalloc(xfs_efi_zone, KM_SLEEP);
}
xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
efip->efi_format.efi_nextents = nextents;
efip->efi_format.efi_id = (__psint_t)(void*)efip;
- return (efip);
+ return efip;
}
/*
@@ -276,7 +244,7 @@ xfs_efi_init(xfs_mount_t *mp,
int
xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
{
- xfs_efi_log_format_t *src_efi_fmt = (xfs_efi_log_format_t *)buf->i_addr;
+ xfs_efi_log_format_t *src_efi_fmt = buf->i_addr;
uint i;
uint len = sizeof(xfs_efi_log_format_t) +
(src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t);
@@ -289,8 +257,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len);
return 0;
} else if (buf->i_len == len32) {
- xfs_efi_log_format_32_t *src_efi_fmt_32 =
- (xfs_efi_log_format_32_t *)buf->i_addr;
+ xfs_efi_log_format_32_t *src_efi_fmt_32 = buf->i_addr;
dst_efi_fmt->efi_type = src_efi_fmt_32->efi_type;
dst_efi_fmt->efi_size = src_efi_fmt_32->efi_size;
@@ -304,8 +271,7 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
}
return 0;
} else if (buf->i_len == len64) {
- xfs_efi_log_format_64_t *src_efi_fmt_64 =
- (xfs_efi_log_format_64_t *)buf->i_addr;
+ xfs_efi_log_format_64_t *src_efi_fmt_64 = buf->i_addr;
dst_efi_fmt->efi_type = src_efi_fmt_64->efi_type;
dst_efi_fmt->efi_size = src_efi_fmt_64->efi_size;
@@ -356,16 +322,18 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
}
}
-STATIC void
-xfs_efd_item_free(xfs_efd_log_item_t *efdp)
+static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
{
- int nexts = efdp->efd_format.efd_nextents;
+ return container_of(lip, struct xfs_efd_log_item, efd_item);
+}
- if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
+STATIC void
+xfs_efd_item_free(struct xfs_efd_log_item *efdp)
+{
+ if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS)
kmem_free(efdp);
- } else {
+ else
kmem_zone_free(xfs_efd_zone, efdp);
- }
}
/*
@@ -373,9 +341,9 @@ xfs_efd_item_free(xfs_efd_log_item_t *efdp)
* We only need 1 iovec for an efd item. It just logs the efd_log_format
* structure.
*/
-/*ARGSUSED*/
STATIC uint
-xfs_efd_item_size(xfs_efd_log_item_t *efdp)
+xfs_efd_item_size(
+ struct xfs_log_item *lip)
{
return 1;
}
@@ -388,10 +356,12 @@ xfs_efd_item_size(xfs_efd_log_item_t *efdp)
* slots in the efd item have been filled.
*/
STATIC void
-xfs_efd_item_format(xfs_efd_log_item_t *efdp,
- xfs_log_iovec_t *log_vector)
+xfs_efd_item_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_iovec *log_vector)
{
- uint size;
+ struct xfs_efd_log_item *efdp = EFD_ITEM(lip);
+ uint size;
ASSERT(efdp->efd_next_extent == efdp->efd_format.efd_nextents);
@@ -401,48 +371,38 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp,
size += (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t);
efdp->efd_format.efd_size = 1;
- log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format);
+ log_vector->i_addr = &efdp->efd_format;
log_vector->i_len = size;
log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT;
ASSERT(size >= sizeof(xfs_efd_log_format_t));
}
-
/*
* Pinning has no meaning for an efd item, so just return.
*/
-/*ARGSUSED*/
STATIC void
-xfs_efd_item_pin(xfs_efd_log_item_t *efdp)
+xfs_efd_item_pin(
+ struct xfs_log_item *lip)
{
- return;
}
-
/*
* Since pinning has no meaning for an efd item, unpinning does
* not either.
*/
-/*ARGSUSED*/
-STATIC void
-xfs_efd_item_unpin(xfs_efd_log_item_t *efdp)
-{
- return;
-}
-
-/*ARGSUSED*/
STATIC void
-xfs_efd_item_unpin_remove(xfs_efd_log_item_t *efdp, xfs_trans_t *tp)
+xfs_efd_item_unpin(
+ struct xfs_log_item *lip,
+ int remove)
{
- return;
}
/*
* Efd items have no locking, so just return success.
*/
-/*ARGSUSED*/
STATIC uint
-xfs_efd_item_trylock(xfs_efd_log_item_t *efdp)
+xfs_efd_item_trylock(
+ struct xfs_log_item *lip)
{
return XFS_ITEM_LOCKED;
}
@@ -451,13 +411,12 @@ xfs_efd_item_trylock(xfs_efd_log_item_t *efdp)
* Efd items have no locking or pushing, so return failure
* so that the caller doesn't bother with us.
*/
-/*ARGSUSED*/
STATIC void
-xfs_efd_item_unlock(xfs_efd_log_item_t *efdp)
+xfs_efd_item_unlock(
+ struct xfs_log_item *lip)
{
- if (efdp->efd_item.li_flags & XFS_LI_ABORTED)
- xfs_efd_item_free(efdp);
- return;
+ if (lip->li_flags & XFS_LI_ABORTED)
+ xfs_efd_item_free(EFD_ITEM(lip));
}
/*
@@ -467,15 +426,18 @@ xfs_efd_item_unlock(xfs_efd_log_item_t *efdp)
* return -1 to keep the transaction code from further referencing
* this item.
*/
-/*ARGSUSED*/
STATIC xfs_lsn_t
-xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
+xfs_efd_item_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
{
+ struct xfs_efd_log_item *efdp = EFD_ITEM(lip);
+
/*
* If we got a log I/O error, it's always the case that the LR with the
* EFI got unpinned and freed before the EFD got aborted.
*/
- if ((efdp->efd_item.li_flags & XFS_LI_ABORTED) == 0)
+ if (!(lip->li_flags & XFS_LI_ABORTED))
xfs_efi_release(efdp->efd_efip, efdp->efd_format.efd_nextents);
xfs_efd_item_free(efdp);
@@ -486,11 +448,10 @@ xfs_efd_item_committed(xfs_efd_log_item_t *efdp, xfs_lsn_t lsn)
* There isn't much you can do to push on an efd item. It is simply
* stuck waiting for the log to be flushed to disk.
*/
-/*ARGSUSED*/
STATIC void
-xfs_efd_item_push(xfs_efd_log_item_t *efdp)
+xfs_efd_item_push(
+ struct xfs_log_item *lip)
{
- return;
}
/*
@@ -500,55 +461,48 @@ xfs_efd_item_push(xfs_efd_log_item_t *efdp)
* example, for inodes, the inode is locked throughout the extent freeing
* so the dependency should be recorded there.
*/
-/*ARGSUSED*/
STATIC void
-xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn)
+xfs_efd_item_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
{
- return;
}
/*
* This is the ops vector shared by all efd log items.
*/
static struct xfs_item_ops xfs_efd_item_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_efd_item_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_efd_item_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_efd_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*))xfs_efd_item_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
- xfs_efd_item_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_efd_item_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_efd_item_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_efd_item_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_efd_item_push,
- .iop_pushbuf = NULL,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_efd_item_committing
+ .iop_size = xfs_efd_item_size,
+ .iop_format = xfs_efd_item_format,
+ .iop_pin = xfs_efd_item_pin,
+ .iop_unpin = xfs_efd_item_unpin,
+ .iop_trylock = xfs_efd_item_trylock,
+ .iop_unlock = xfs_efd_item_unlock,
+ .iop_committed = xfs_efd_item_committed,
+ .iop_push = xfs_efd_item_push,
+ .iop_committing = xfs_efd_item_committing
};
-
/*
* Allocate and initialize an efd item with the given number of extents.
*/
-xfs_efd_log_item_t *
-xfs_efd_init(xfs_mount_t *mp,
- xfs_efi_log_item_t *efip,
- uint nextents)
+struct xfs_efd_log_item *
+xfs_efd_init(
+ struct xfs_mount *mp,
+ struct xfs_efi_log_item *efip,
+ uint nextents)
{
- xfs_efd_log_item_t *efdp;
+ struct xfs_efd_log_item *efdp;
uint size;
ASSERT(nextents > 0);
if (nextents > XFS_EFD_MAX_FAST_EXTENTS) {
size = (uint)(sizeof(xfs_efd_log_item_t) +
((nextents - 1) * sizeof(xfs_extent_t)));
- efdp = (xfs_efd_log_item_t*)kmem_zalloc(size, KM_SLEEP);
+ efdp = kmem_zalloc(size, KM_SLEEP);
} else {
- efdp = (xfs_efd_log_item_t*)kmem_zone_zalloc(xfs_efd_zone,
- KM_SLEEP);
+ efdp = kmem_zone_zalloc(xfs_efd_zone, KM_SLEEP);
}
xfs_log_item_init(mp, &efdp->efd_item, XFS_LI_EFD, &xfs_efd_item_ops);
@@ -556,5 +510,5 @@ xfs_efd_init(xfs_mount_t *mp,
efdp->efd_format.efd_nextents = nextents;
efdp->efd_format.efd_efi_id = efip->efi_format.efi_id;
- return (efdp);
+ return efdp;
}
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 390850ee660..9b715dce569 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -18,13 +18,9 @@
#include "xfs.h"
#include "xfs_bmap_btree.h"
#include "xfs_inum.h"
-#include "xfs_dir2.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ag.h"
-#include "xfs_dmapi.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
@@ -127,6 +123,82 @@ typedef struct fstrm_item
xfs_inode_t *pip; /* Parent directory inode pointer. */
} fstrm_item_t;
+/*
+ * Allocation group filestream associations are tracked with per-ag atomic
+ * counters. These counters allow _xfs_filestream_pick_ag() to tell whether a
+ * particular AG already has active filestreams associated with it. The mount
+ * point's m_peraglock is used to protect these counters from per-ag array
+ * re-allocation during a growfs operation. When xfs_growfs_data_private() is
+ * about to reallocate the array, it calls xfs_filestream_flush() with the
+ * m_peraglock held in write mode.
+ *
+ * Since xfs_mru_cache_flush() guarantees that all the free functions for all
+ * the cache elements have finished executing before it returns, it's safe for
+ * the free functions to use the atomic counters without m_peraglock protection.
+ * This allows the implementation of xfs_fstrm_free_func() to be agnostic about
+ * whether it was called with the m_peraglock held in read mode, write mode or
+ * not held at all. The race condition this addresses is the following:
+ *
+ * - The work queue scheduler fires and pulls a filestream directory cache
+ * element off the LRU end of the cache for deletion, then gets pre-empted.
+ * - A growfs operation grabs the m_peraglock in write mode, flushes all the
+ * remaining items from the cache and reallocates the mount point's per-ag
+ * array, resetting all the counters to zero.
+ * - The work queue thread resumes and calls the free function for the element
+ * it started cleaning up earlier. In the process it decrements the
+ * filestreams counter for an AG that now has no references.
+ *
+ * With a shrinkfs feature, the above scenario could panic the system.
+ *
+ * All other uses of the following macros should be protected by either the
+ * m_peraglock held in read mode, or the cache's internal locking exposed by the
+ * interval between a call to xfs_mru_cache_lookup() and a call to
+ * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode
+ * when new elements are added to the cache.
+ *
+ * Combined, these locking rules ensure that no associations will ever exist in
+ * the cache that reference per-ag array elements that have since been
+ * reallocated.
+ */
+static int
+xfs_filestream_peek_ag(
+ xfs_mount_t *mp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_perag *pag;
+ int ret;
+
+ pag = xfs_perag_get(mp, agno);
+ ret = atomic_read(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+ return ret;
+}
+
+static int
+xfs_filestream_get_ag(
+ xfs_mount_t *mp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_perag *pag;
+ int ret;
+
+ pag = xfs_perag_get(mp, agno);
+ ret = atomic_inc_return(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+ return ret;
+}
+
+static void
+xfs_filestream_put_ag(
+ xfs_mount_t *mp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, agno);
+ atomic_dec(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+}
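Moving these helpers out of the header also makes the reference discipline explicit: each call pins the per-ag structure with xfs_perag_get() for exactly the duration of one counter operation. A hypothetical usage sketch:

	/* associate a new filestream with agno */
	xfs_filestream_get_ag(mp, agno);	/* counter: 0 -> 1 */

	/* tracing and diagnostics may peek without changing the count */
	ASSERT(xfs_filestream_peek_ag(mp, agno) > 0);

	/* tear the association down again */
	xfs_filestream_put_ag(mp, agno);	/* counter: 1 -> 0 */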
/*
* Scan the AGs starting at startag looking for an AG that isn't in use and has
@@ -355,16 +427,14 @@ xfs_fstrm_free_func(
{
fstrm_item_t *item = (fstrm_item_t *)data;
xfs_inode_t *ip = item->ip;
- int ref;
ASSERT(ip->i_ino == ino);
xfs_iflags_clear(ip, XFS_IFILESTREAM);
/* Drop the reference taken on the AG when the item was added. */
- ref = xfs_filestream_put_ag(ip->i_mount, item->ag);
+ xfs_filestream_put_ag(ip->i_mount, item->ag);
- ASSERT(ref >= 0);
TRACE_FREE(ip->i_mount, ip, item->pip, item->ag,
xfs_filestream_peek_ag(ip->i_mount, item->ag));
diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h
index 260f757bbc5..09dd9af4543 100644
--- a/fs/xfs/xfs_filestream.h
+++ b/fs/xfs/xfs_filestream.h
@@ -42,88 +42,6 @@ extern ktrace_t *xfs_filestreams_trace_buf;
#endif
-/*
- * Allocation group filestream associations are tracked with per-ag atomic
- * counters. These counters allow _xfs_filestream_pick_ag() to tell whether a
- * particular AG already has active filestreams associated with it. The mount
- * point's m_peraglock is used to protect these counters from per-ag array
- * re-allocation during a growfs operation. When xfs_growfs_data_private() is
- * about to reallocate the array, it calls xfs_filestream_flush() with the
- * m_peraglock held in write mode.
- *
- * Since xfs_mru_cache_flush() guarantees that all the free functions for all
- * the cache elements have finished executing before it returns, it's safe for
- * the free functions to use the atomic counters without m_peraglock protection.
- * This allows the implementation of xfs_fstrm_free_func() to be agnostic about
- * whether it was called with the m_peraglock held in read mode, write mode or
- * not held at all. The race condition this addresses is the following:
- *
- * - The work queue scheduler fires and pulls a filestream directory cache
- * element off the LRU end of the cache for deletion, then gets pre-empted.
- * - A growfs operation grabs the m_peraglock in write mode, flushes all the
- * remaining items from the cache and reallocates the mount point's per-ag
- * array, resetting all the counters to zero.
- * - The work queue thread resumes and calls the free function for the element
- * it started cleaning up earlier. In the process it decrements the
- * filestreams counter for an AG that now has no references.
- *
- * With a shrinkfs feature, the above scenario could panic the system.
- *
- * All other uses of the following macros should be protected by either the
- * m_peraglock held in read mode, or the cache's internal locking exposed by the
- * interval between a call to xfs_mru_cache_lookup() and a call to
- * xfs_mru_cache_done(). In addition, the m_peraglock must be held in read mode
- * when new elements are added to the cache.
- *
- * Combined, these locking rules ensure that no associations will ever exist in
- * the cache that reference per-ag array elements that have since been
- * reallocated.
- */
-/*
- * xfs_filestream_peek_ag is only used in tracing code
- */
-static inline int
-xfs_filestream_peek_ag(
- xfs_mount_t *mp,
- xfs_agnumber_t agno)
-{
- struct xfs_perag *pag;
- int ret;
-
- pag = xfs_perag_get(mp, agno);
- ret = atomic_read(&pag->pagf_fstrms);
- xfs_perag_put(pag);
- return ret;
-}
-
-static inline int
-xfs_filestream_get_ag(
- xfs_mount_t *mp,
- xfs_agnumber_t agno)
-{
- struct xfs_perag *pag;
- int ret;
-
- pag = xfs_perag_get(mp, agno);
- ret = atomic_inc_return(&pag->pagf_fstrms);
- xfs_perag_put(pag);
- return ret;
-}
-
-static inline int
-xfs_filestream_put_ag(
- xfs_mount_t *mp,
- xfs_agnumber_t agno)
-{
- struct xfs_perag *pag;
- int ret;
-
- pag = xfs_perag_get(mp, agno);
- ret = atomic_dec_return(&pag->pagf_fstrms);
- xfs_perag_put(pag);
- return ret;
-}
-
/* allocation selection flags */
typedef enum xfs_fstrm_alloc {
XFS_PICK_USERDATA = 1,
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 37a6f62c57b..dbca5f5c37b 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -24,14 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@@ -626,8 +622,7 @@ xfs_fs_log_dummy(
ip = mp->m_rootip;
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
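
This hunk shows the transaction/inode joining API consolidated by this merge: the old xfs_trans_ijoin(tp, ip, lock_flags) followed by xfs_trans_ihold(tp, ip) becomes a single xfs_trans_ijoin(tp, ip) that joins an already-locked inode and leaves it locked and referenced by the caller after commit. The resulting calling convention, sketched with error handling elided:

	xfs_ilock(ip, XFS_ILOCK_EXCL);	/* caller takes the lock */
	xfs_trans_ijoin(tp, ip);	/* join without transferring the
					 * lock to the transaction */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);	/* caller still owns it */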
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 9d884c127bb..abf80ae1e95 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -24,14 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -1203,6 +1199,63 @@ error0:
return error;
}
+STATIC int
+xfs_imap_lookup(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ xfs_agino_t agino,
+ xfs_agblock_t agbno,
+ xfs_agblock_t *chunk_agbno,
+ xfs_agblock_t *offset_agbno,
+ int flags)
+{
+ struct xfs_inobt_rec_incore rec;
+ struct xfs_btree_cur *cur;
+ struct xfs_buf *agbp;
+ xfs_agino_t startino;
+ int error;
+ int i;
+
+ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ if (error) {
+ xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
+ "xfs_ialloc_read_agi() returned "
+ "error %d, agno %d",
+ error, agno);
+ return error;
+ }
+
+ /*
+ * derive and lookup the exact inode record for the given agino. If the
+ * record cannot be found, then it's an invalid inode number and we
+ * should abort.
+ */
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
+ startino = agino & ~(XFS_IALLOC_INODES(mp) - 1);
+ error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i);
+ if (!error) {
+ if (i)
+ error = xfs_inobt_get_rec(cur, &rec, &i);
+ if (!error && i == 0)
+ error = EINVAL;
+ }
+
+ xfs_trans_brelse(tp, agbp);
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ if (error)
+ return error;
+
+ /* for untrusted inodes check it is allocated first */
+ if ((flags & XFS_IGET_UNTRUSTED) &&
+ (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
+ return EINVAL;
+
+ *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
+ *offset_agbno = agbno - *chunk_agbno;
+ return 0;
+}
+
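
The startino mask above rounds the AG-relative inode number down to the first inode of its allocation chunk: XFS_IALLOC_INODES(mp) is the power-of-two chunk size in inodes, so the expression behaves like an aligned truncation. A worked example, assuming 64 inodes per chunk:

	/* XFS_IALLOC_INODES(mp) == 64, so the mask is ~0x3f */
	xfs_agino_t	agino    = 200;			/* 0xc8 */
	xfs_agino_t	startino = agino & ~(64 - 1);	/* 192: this chunk
							 * covers 192..255 */

The XFS_LOOKUP_EQ lookup then only succeeds if a chunk record really starts at that inode, which is what makes a fabricated inode number fail with EINVAL.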
/*
* Return the location of the inode in imap, for mapping it into a buffer.
*/
@@ -1235,8 +1288,11 @@ xfs_imap(
if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
- /* no diagnostics for bulkstat, ino comes from userspace */
- if (flags & XFS_IGET_BULKSTAT)
+ /*
+ * Don't output diagnostic information for untrusted inodes
+ * as they can be invalid without implying corruption.
+ */
+ if (flags & XFS_IGET_UNTRUSTED)
return XFS_ERROR(EINVAL);
if (agno >= mp->m_sb.sb_agcount) {
xfs_fs_cmn_err(CE_ALERT, mp,
@@ -1263,6 +1319,23 @@ xfs_imap(
return XFS_ERROR(EINVAL);
}
+ blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+
+ /*
+ * For bulkstat and handle lookups, we have an untrusted inode number
+ * that we have to verify is valid. We cannot do this just by reading
+ * the inode buffer as it may have been unlinked and removed leaving
+ * inodes in stale state on disk. Hence we have to do a btree lookup
+ * in all cases where an untrusted inode number is passed.
+ */
+ if (flags & XFS_IGET_UNTRUSTED) {
+ error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
+ &chunk_agbno, &offset_agbno, flags);
+ if (error)
+ return error;
+ goto out_map;
+ }
+
/*
* If the inode cluster size is the same as the blocksize or
* smaller we get to the buffer by simple arithmetics.
@@ -1277,24 +1350,6 @@ xfs_imap(
return 0;
}
- blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
-
- /*
- * If we get a block number passed from bulkstat we can use it to
- * find the buffer easily.
- */
- if (imap->im_blkno) {
- offset = XFS_INO_TO_OFFSET(mp, ino);
- ASSERT(offset < mp->m_sb.sb_inopblock);
-
- cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno);
- offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock;
-
- imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
- imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
- return 0;
- }
-
/*
* If the inode chunks are aligned then use simple maths to
* find the location. Otherwise we have to do a btree
@@ -1304,50 +1359,13 @@ xfs_imap(
offset_agbno = agbno & mp->m_inoalign_mask;
chunk_agbno = agbno - offset_agbno;
} else {
- xfs_btree_cur_t *cur; /* inode btree cursor */
- xfs_inobt_rec_incore_t chunk_rec;
- xfs_buf_t *agbp; /* agi buffer */
- int i; /* temp state */
-
- error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
- if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_ialloc_read_agi() returned "
- "error %d, agno %d",
- error, agno);
- return error;
- }
-
- cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
- error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
- if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_inobt_lookup() failed");
- goto error0;
- }
-
- error = xfs_inobt_get_rec(cur, &chunk_rec, &i);
- if (error) {
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_inobt_get_rec() failed");
- goto error0;
- }
- if (i == 0) {
-#ifdef DEBUG
- xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
- "xfs_inobt_get_rec() failed");
-#endif /* DEBUG */
- error = XFS_ERROR(EINVAL);
- }
- error0:
- xfs_trans_brelse(tp, agbp);
- xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
+ &chunk_agbno, &offset_agbno, flags);
if (error)
return error;
- chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino);
- offset_agbno = agbno - chunk_agbno;
}
+out_map:
ASSERT(agbno >= chunk_agbno);
cluster_agbno = chunk_agbno +
((offset_agbno / blks_per_cluster) * blks_per_cluster);
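
Seen together with the xfs_imap_lookup() addition above, the effect is that the inode btree now vets every untrusted inode number before any of the mapping shortcuts run. A sketch of a caller on the untrusted path (this mirrors the bulkstat conversion later in this series):

	/*
	 * Inode numbers arriving from userspace (bulkstat, open by
	 * handle) are verified against the inobt rather than trusted.
	 */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;	/* bogus inode numbers yield EINVAL */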
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index c282a9af539..d352862cefa 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -24,14 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 6845db90818..b1ecc6f97ad 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -25,14 +25,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -95,7 +91,7 @@ xfs_inode_alloc(
return ip;
}
-STATIC void
+void
xfs_inode_free(
struct xfs_inode *ip)
{
@@ -212,7 +208,7 @@ xfs_iget_cache_hit(
ip->i_flags &= ~XFS_INEW;
ip->i_flags |= XFS_IRECLAIMABLE;
__xfs_inode_set_reclaim_tag(pag, ip);
- trace_xfs_iget_reclaim(ip);
+ trace_xfs_iget_reclaim_fail(ip);
goto out_error;
}
@@ -227,6 +223,7 @@ xfs_iget_cache_hit(
} else {
/* If the VFS inode is being torn down, pause and try again. */
if (!igrab(inode)) {
+ trace_xfs_iget_skip(ip);
error = EAGAIN;
goto out_error;
}
@@ -234,6 +231,7 @@ xfs_iget_cache_hit(
/* We've got a live one. */
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
+ trace_xfs_iget_hit(ip);
}
if (lock_flags != 0)
@@ -242,7 +240,6 @@ xfs_iget_cache_hit(
xfs_iflags_clear(ip, XFS_ISTALE);
XFS_STATS_INC(xs_ig_found);
- trace_xfs_iget_found(ip);
return 0;
out_error:
@@ -259,24 +256,22 @@ xfs_iget_cache_miss(
xfs_trans_t *tp,
xfs_ino_t ino,
struct xfs_inode **ipp,
- xfs_daddr_t bno,
int flags,
int lock_flags)
{
struct xfs_inode *ip;
int error;
- unsigned long first_index, mask;
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
ip = xfs_inode_alloc(mp, ino);
if (!ip)
return ENOMEM;
- error = xfs_iread(mp, tp, ip, bno, flags);
+ error = xfs_iread(mp, tp, ip, flags);
if (error)
goto out_destroy;
- xfs_itrace_entry(ip);
+ trace_xfs_iget_miss(ip);
if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
error = ENOENT;
@@ -302,8 +297,6 @@ xfs_iget_cache_miss(
BUG();
}
- mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
- first_index = agino & mask;
write_lock(&pag->pag_ici_lock);
/* insert the new inode */
@@ -322,7 +315,6 @@ xfs_iget_cache_miss(
write_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
- trace_xfs_iget_alloc(ip);
*ipp = ip;
return 0;
@@ -358,8 +350,6 @@ out_destroy:
* within the file system for the inode being requested.
* lock_flags -- flags indicating how to lock the inode. See the comment
* for xfs_ilock() for a list of valid values.
- * bno -- the block number starting the buffer containing the inode,
- * if known (as by bulkstat), else 0.
*/
int
xfs_iget(
@@ -368,8 +358,7 @@ xfs_iget(
xfs_ino_t ino,
uint flags,
uint lock_flags,
- xfs_inode_t **ipp,
- xfs_daddr_t bno)
+ xfs_inode_t **ipp)
{
xfs_inode_t *ip;
int error;
@@ -382,9 +371,6 @@ xfs_iget(
/* get the perag structure and ensure that it's inode capable */
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
- if (!pag->pagi_inodeok)
- return EINVAL;
- ASSERT(pag->pag_ici_init);
agino = XFS_INO_TO_AGINO(mp, ino);
again:
@@ -400,7 +386,7 @@ again:
read_unlock(&pag->pag_ici_lock);
XFS_STATS_INC(xs_ig_missed);
- error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
+ error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
flags, lock_flags);
if (error)
goto out_error_or_again;
@@ -429,97 +415,6 @@ out_error_or_again:
}
/*
- * Decrement reference count of an inode structure and unlock it.
- *
- * ip -- the inode being released
- * lock_flags -- this parameter indicates the inode's locks to be
- * to be released. See the comment on xfs_iunlock() for a list
- * of valid values.
- */
-void
-xfs_iput(xfs_inode_t *ip,
- uint lock_flags)
-{
- xfs_itrace_entry(ip);
- xfs_iunlock(ip, lock_flags);
- IRELE(ip);
-}
-
-/*
- * Special iput for brand-new inodes that are still locked
- */
-void
-xfs_iput_new(
- xfs_inode_t *ip,
- uint lock_flags)
-{
- struct inode *inode = VFS_I(ip);
-
- xfs_itrace_entry(ip);
-
- if ((ip->i_d.di_mode == 0)) {
- ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
- make_bad_inode(inode);
- }
- if (inode->i_state & I_NEW)
- unlock_new_inode(inode);
- if (lock_flags)
- xfs_iunlock(ip, lock_flags);
- IRELE(ip);
-}
-
-/*
- * This is called free all the memory associated with an inode.
- * It must free the inode itself and any buffers allocated for
- * if_extents/if_data and if_broot. It must also free the lock
- * associated with the inode.
- *
- * Note: because we don't initialise everything on reallocation out
- * of the zone, we must ensure we nullify everything correctly before
- * freeing the structure.
- */
-void
-xfs_ireclaim(
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_perag *pag;
- xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
-
- XFS_STATS_INC(xs_ig_reclaims);
-
- /*
- * Remove the inode from the per-AG radix tree.
- *
- * Because radix_tree_delete won't complain even if the item was never
- * added to the tree assert that it's been there before to catch
- * problems with the inode life time early on.
- */
- pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
- write_lock(&pag->pag_ici_lock);
- if (!radix_tree_delete(&pag->pag_ici_root, agino))
- ASSERT(0);
- write_unlock(&pag->pag_ici_lock);
- xfs_perag_put(pag);
-
- /*
- * Here we do an (almost) spurious inode lock in order to coordinate
- * with inode cache radix tree lookups. This is because the lookup
- * can reference the inodes in the cache without taking references.
- *
- * We make that OK here by ensuring that we wait until the inode is
- * unlocked after the lookup before we go ahead and free it. We get
- * both the ilock and the iolock because the code may need to drop the
- * ilock one but will still hold the iolock.
- */
- xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- xfs_qm_dqdetach(ip);
- xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-
- xfs_inode_free(ip);
-}
-
-/*
* This is a wrapper routine around the xfs_ilock() routine
* used to centralize some grungy code. It is used in places
* that wish to lock the inode solely for reading the extents.
@@ -744,30 +639,24 @@ xfs_ilock_demote(
}
#ifdef DEBUG
-/*
- * Debug-only routine, without additional rw_semaphore APIs, we can
- * now only answer requests regarding whether we hold the lock for write
- * (reader state is outside our visibility, we only track writer state).
- *
- * Note: this means !xfs_isilocked would give false positives, so don't do that.
- */
int
xfs_isilocked(
xfs_inode_t *ip,
uint lock_flags)
{
- if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
- XFS_ILOCK_EXCL) {
- if (!ip->i_lock.mr_writer)
- return 0;
+ if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
+ if (!(lock_flags & XFS_ILOCK_SHARED))
+ return !!ip->i_lock.mr_writer;
+ return rwsem_is_locked(&ip->i_lock.mr_lock);
}
- if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
- XFS_IOLOCK_EXCL) {
- if (!ip->i_iolock.mr_writer)
- return 0;
+ if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
+ if (!(lock_flags & XFS_IOLOCK_SHARED))
+ return !!ip->i_iolock.mr_writer;
+ return rwsem_is_locked(&ip->i_iolock.mr_lock);
}
- return 1;
+ ASSERT(0);
+ return 0;
}
#endif
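
The rewritten xfs_isilocked() can finally answer shared-lock queries: when the SHARED bit is present it falls back to rwsem_is_locked(), which is true for a hold in either mode, while an exclusive-only query still tests the mrlock writer flag. What debug code can now assert, roughly:

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));	/* writer-held only */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));	/* held shared or
							 * exclusive */

Note that passing flags naming neither lock class now trips the ASSERT(0) instead of silently returning success the way the old `return 1` fallthrough did.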
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 8cd6e8d8fe9..68415cb4f23 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -27,13 +27,10 @@
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
@@ -44,7 +41,6 @@
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
-#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
@@ -177,7 +173,7 @@ xfs_imap_to_bp(
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP,
XFS_RANDOM_ITOBP_INOTOBP))) {
- if (iget_flags & XFS_IGET_BULKSTAT) {
+ if (iget_flags & XFS_IGET_UNTRUSTED) {
xfs_trans_brelse(tp, bp);
return XFS_ERROR(EINVAL);
}
@@ -426,7 +422,7 @@ xfs_iformat(
if (!XFS_DFORK_Q(dip))
return 0;
ASSERT(ip->i_afp == NULL);
- ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
+ ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
ip->i_afp->if_ext_max =
XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
switch (dip->di_aformat) {
@@ -509,7 +505,7 @@ xfs_iformat_local(
ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
else {
real_size = roundup(size, 4);
- ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+ ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
}
ifp->if_bytes = size;
ifp->if_real_bytes = real_size;
@@ -636,7 +632,7 @@ xfs_iformat_btree(
}
ifp->if_broot_bytes = size;
- ifp->if_broot = kmem_alloc(size, KM_SLEEP);
+ ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
ASSERT(ifp->if_broot != NULL);
/*
* Copy and convert from the on-disk structure
@@ -787,7 +783,6 @@ xfs_iread(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_inode_t *ip,
- xfs_daddr_t bno,
uint iget_flags)
{
xfs_buf_t *bp;
@@ -797,11 +792,9 @@ xfs_iread(
/*
* Fill in the location information in the in-core inode.
*/
- ip->i_imap.im_blkno = bno;
error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
if (error)
return error;
- ASSERT(bno == 0 || bno == ip->i_imap.im_blkno);
/*
* Get pointers to the on-disk inode and the buffer containing it.
@@ -925,7 +918,6 @@ xfs_iread_extents(
int error;
xfs_ifork_t *ifp;
xfs_extnum_t nextents;
- size_t size;
if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
@@ -933,7 +925,6 @@ xfs_iread_extents(
return XFS_ERROR(EFSCORRUPTED);
}
nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
- size = nextents * sizeof(xfs_bmbt_rec_t);
ifp = XFS_IFORK_PTR(ip, whichfork);
/*
@@ -1229,7 +1220,7 @@ xfs_isize_check(
(xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
map_first),
XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
- NULL, NULL))
+ NULL))
return;
ASSERT(nimaps == 1);
ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
@@ -1463,7 +1454,7 @@ xfs_itruncate_finish(
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(ip->i_transp == *tp);
ASSERT(ip->i_itemp != NULL);
- ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
+ ASSERT(ip->i_itemp->ili_lock_flags == 0);
ntp = *tp;
@@ -1592,11 +1583,10 @@ xfs_itruncate_finish(
xfs_bmap_init(&free_list, &first_block);
error = xfs_bunmapi(ntp, ip,
first_unmap_block, unmap_len,
- xfs_bmapi_aflag(fork) |
- (sync ? 0 : XFS_BMAPI_ASYNC),
+ xfs_bmapi_aflag(fork),
XFS_ITRUNC_MAX_EXTENTS,
&first_block, &free_list,
- NULL, &done);
+ &done);
if (error) {
/*
* If the bunmapi call encounters an error,
@@ -1615,12 +1605,8 @@ xfs_itruncate_finish(
*/
error = xfs_bmap_finish(tp, &free_list, &committed);
ntp = *tp;
- if (committed) {
- /* link the inode into the next xact in the chain */
- xfs_trans_ijoin(ntp, ip,
- XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- xfs_trans_ihold(ntp, ip);
- }
+ if (committed)
+ xfs_trans_ijoin(ntp, ip);
if (error) {
/*
@@ -1649,9 +1635,7 @@ xfs_itruncate_finish(
error = xfs_trans_commit(*tp, 0);
*tp = ntp;
- /* link the inode into the next transaction in the chain */
- xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- xfs_trans_ihold(ntp, ip);
+ xfs_trans_ijoin(ntp, ip);
if (error)
return error;
@@ -1940,10 +1924,10 @@ xfs_ifree_cluster(
int blks_per_cluster;
int nbufs;
int ninodes;
- int i, j, found, pre_flushed;
+ int i, j;
xfs_daddr_t blkno;
xfs_buf_t *bp;
- xfs_inode_t *ip, **ip_found;
+ xfs_inode_t *ip;
xfs_inode_log_item_t *iip;
xfs_log_item_t *lip;
struct xfs_perag *pag;
@@ -1960,114 +1944,97 @@ xfs_ifree_cluster(
nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
}
- ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
-
for (j = 0; j < nbufs; j++, inum += ninodes) {
+ int found = 0;
+
blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
XFS_INO_TO_AGBNO(mp, inum));
+ /*
+ * We obtain and lock the backing buffer first in the process
+ * here, as we have to ensure that any dirty inode that we
+ * can't get the flush lock on is attached to the buffer.
+ * If we scan the in-memory inodes first, then buffer IO can
+ * complete before we get a lock on it, and hence we may fail
+ * to mark all the active inodes on the buffer stale.
+ */
+ bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
+ mp->m_bsize * blks_per_cluster,
+ XBF_LOCK);
/*
- * Look for each inode in memory and attempt to lock it,
- * we can be racing with flush and tail pushing here.
- * any inode we get the locks on, add to an array of
- * inode items to process later.
+ * Walk the inodes already attached to the buffer and mark them
+ * stale. These will all have the flush locks held, so an
+ * in-memory inode walk can't lock them.
+ */
+ lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+ while (lip) {
+ if (lip->li_type == XFS_LI_INODE) {
+ iip = (xfs_inode_log_item_t *)lip;
+ ASSERT(iip->ili_logged == 1);
+ lip->li_cb = xfs_istale_done;
+ xfs_trans_ail_copy_lsn(mp->m_ail,
+ &iip->ili_flush_lsn,
+ &iip->ili_item.li_lsn);
+ xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
+ found++;
+ }
+ lip = lip->li_bio_list;
+ }
+
+ /*
+ * For each inode in memory attempt to add it to the inode
+ * buffer and set it up for being staled on buffer IO
+ * completion. This is safe as we've locked out tail pushing
+ * and flushing by locking the buffer.
*
- * The get the buffer lock, we could beat a flush
- * or tail pushing thread to the lock here, in which
- * case they will go looking for the inode buffer
- * and fail, we need some other form of interlock
- * here.
+ * We have already marked every inode that was part of a
+ * transaction stale above, which means there is no point in
+ * even trying to lock them.
*/
- found = 0;
for (i = 0; i < ninodes; i++) {
read_lock(&pag->pag_ici_lock);
ip = radix_tree_lookup(&pag->pag_ici_root,
XFS_INO_TO_AGINO(mp, (inum + i)));
- /* Inode not in memory or we found it already,
- * nothing to do
- */
+ /* Inode not in memory or stale, nothing to do */
if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
read_unlock(&pag->pag_ici_lock);
continue;
}
- if (xfs_inode_clean(ip)) {
- read_unlock(&pag->pag_ici_lock);
- continue;
- }
-
- /* If we can get the locks then add it to the
- * list, otherwise by the time we get the bp lock
- * below it will already be attached to the
- * inode buffer.
- */
-
- /* This inode will already be locked - by us, lets
- * keep it that way.
- */
-
- if (ip == free_ip) {
- if (xfs_iflock_nowait(ip)) {
- xfs_iflags_set(ip, XFS_ISTALE);
- if (xfs_inode_clean(ip)) {
- xfs_ifunlock(ip);
- } else {
- ip_found[found++] = ip;
- }
- }
+ /* don't try to lock/unlock the current inode */
+ if (ip != free_ip &&
+ !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
read_unlock(&pag->pag_ici_lock);
continue;
}
+ read_unlock(&pag->pag_ici_lock);
- if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
- if (xfs_iflock_nowait(ip)) {
- xfs_iflags_set(ip, XFS_ISTALE);
-
- if (xfs_inode_clean(ip)) {
- xfs_ifunlock(ip);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- } else {
- ip_found[found++] = ip;
- }
- } else {
+ if (!xfs_iflock_nowait(ip)) {
+ if (ip != free_ip)
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
+ continue;
}
- read_unlock(&pag->pag_ici_lock);
- }
-
- bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
- mp->m_bsize * blks_per_cluster,
- XBF_LOCK);
- pre_flushed = 0;
- lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
- while (lip) {
- if (lip->li_type == XFS_LI_INODE) {
- iip = (xfs_inode_log_item_t *)lip;
- ASSERT(iip->ili_logged == 1);
- lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
- xfs_trans_ail_copy_lsn(mp->m_ail,
- &iip->ili_flush_lsn,
- &iip->ili_item.li_lsn);
- xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
- pre_flushed++;
+ xfs_iflags_set(ip, XFS_ISTALE);
+ if (xfs_inode_clean(ip)) {
+ ASSERT(ip != free_ip);
+ xfs_ifunlock(ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ continue;
}
- lip = lip->li_bio_list;
- }
- for (i = 0; i < found; i++) {
- ip = ip_found[i];
iip = ip->i_itemp;
-
if (!iip) {
+ /* inode with unlogged changes only */
+ ASSERT(ip != free_ip);
ip->i_update_core = 0;
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
continue;
}
+ found++;
iip->ili_last_fields = iip->ili_format.ilf_fields;
iip->ili_format.ilf_fields = 0;
@@ -2075,20 +2042,18 @@ xfs_ifree_cluster(
xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
&iip->ili_item.li_lsn);
- xfs_buf_attach_iodone(bp,
- (void(*)(xfs_buf_t*,xfs_log_item_t*))
- xfs_istale_done, (xfs_log_item_t *)iip);
- if (ip != free_ip) {
+ xfs_buf_attach_iodone(bp, xfs_istale_done,
+ &iip->ili_item);
+
+ if (ip != free_ip)
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- }
}
- if (found || pre_flushed)
+ if (found)
xfs_trans_stale_inode_buf(tp, bp);
xfs_trans_binval(tp, bp);
}
- kmem_free(ip_found);
xfs_perag_put(pag);
}
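
The ordering is the whole fix here: by taking the cluster buffer lock before walking the in-core inodes, any inode whose flush completed in the window is already attached to the buffer and gets staled in step one, so the per-inode radix-tree walk can no longer miss it. The loop shape, reduced to its skeleton:

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		/* 1. lock the cluster buffer first */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
				       mp->m_bsize * blks_per_cluster,
				       XBF_LOCK);
		/* 2. stale every inode item already attached to bp */
		/* 3. walk the radix tree: ilock + iflock each remaining
		 *    in-core inode, mark XFS_ISTALE, attach to bp */
		if (found)
			xfs_trans_stale_inode_buf(tp, bp);
		/* 4. invalidate the buffer in this transaction */
		xfs_trans_binval(tp, bp);
	}

This also removes the ip_found[] allocation, since inodes are processed as the walk finds them instead of being batched.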
@@ -2224,7 +2189,7 @@ xfs_iroot_realloc(
*/
if (ifp->if_broot_bytes == 0) {
new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
- ifp->if_broot = kmem_alloc(new_size, KM_SLEEP);
+ ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
ifp->if_broot_bytes = (int)new_size;
return;
}
@@ -2240,7 +2205,7 @@ xfs_iroot_realloc(
new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
- KM_SLEEP);
+ KM_SLEEP | KM_NOFS);
op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
ifp->if_broot_bytes);
np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
@@ -2266,7 +2231,7 @@ xfs_iroot_realloc(
else
new_size = 0;
if (new_size > 0) {
- new_broot = kmem_alloc(new_size, KM_SLEEP);
+ new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
/*
* First copy over the btree block header.
*/
@@ -2370,7 +2335,8 @@ xfs_idata_realloc(
real_size = roundup(new_size, 4);
if (ifp->if_u1.if_data == NULL) {
ASSERT(ifp->if_real_bytes == 0);
- ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+ ifp->if_u1.if_data = kmem_alloc(real_size,
+ KM_SLEEP | KM_NOFS);
} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
/*
* Only do the realloc if the underlying size
@@ -2381,11 +2347,12 @@ xfs_idata_realloc(
kmem_realloc(ifp->if_u1.if_data,
real_size,
ifp->if_real_bytes,
- KM_SLEEP);
+ KM_SLEEP | KM_NOFS);
}
} else {
ASSERT(ifp->if_real_bytes == 0);
- ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
+ ifp->if_u1.if_data = kmem_alloc(real_size,
+ KM_SLEEP | KM_NOFS);
memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
ifp->if_bytes);
}
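
All the fork-data allocators in this file pick up KM_NOFS, which XFS's kmem wrapper translates by clearing __GFP_FS (GFP_NOFS behaviour). These allocations run with inode locks held, and without the flag a direct-reclaim pass triggered by the allocation could re-enter the filesystem and deadlock on those same locks. The idiom, as used above:

	/*
	 * Allocating while the inode is locked: KM_SLEEP may block, and
	 * KM_NOFS keeps memory reclaim from recursing back into the
	 * filesystem under locks this task already holds.
	 */
	ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);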
@@ -2649,8 +2616,6 @@ xfs_iflush_cluster(
int i;
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
- ASSERT(pag->pagi_inodeok);
- ASSERT(pag->pag_ici_init);
inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
@@ -2754,7 +2719,6 @@ cluster_corrupt_out:
* mark it as stale and brelse.
*/
if (XFS_BUF_IODONE_FUNC(bp)) {
- XFS_BUF_CLR_BDSTRAT_FUNC(bp);
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_ERROR(bp,EIO);
@@ -3092,8 +3056,7 @@ xfs_iflush_int(
* and unlock the inode's flush lock when the inode is
* completely written to disk.
*/
- xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
- xfs_iflush_done, (xfs_log_item_t *)iip);
+ xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
@@ -3537,13 +3500,11 @@ xfs_iext_remove_indirect(
xfs_extnum_t ext_diff; /* extents to remove in current list */
xfs_extnum_t nex1; /* number of extents before idx */
xfs_extnum_t nex2; /* extents after idx + count */
- int nlists; /* entries in indirection array */
int page_idx = idx; /* index in target extent list */
ASSERT(ifp->if_flags & XFS_IFEXTIREC);
erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
ASSERT(erp != NULL);
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
nex1 = page_idx;
ext_cnt = count;
while (ext_cnt) {
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 9965e40a461..0898c5417d1 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -442,9 +442,7 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
* xfs_iget.c prototypes.
*/
int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t,
- uint, uint, xfs_inode_t **, xfs_daddr_t);
-void xfs_iput(xfs_inode_t *, uint);
-void xfs_iput_new(xfs_inode_t *, uint);
+ uint, uint, xfs_inode_t **);
void xfs_ilock(xfs_inode_t *, uint);
int xfs_ilock_nowait(xfs_inode_t *, uint);
void xfs_iunlock(xfs_inode_t *, uint);
@@ -452,7 +450,7 @@ void xfs_ilock_demote(xfs_inode_t *, uint);
int xfs_isilocked(xfs_inode_t *, uint);
uint xfs_ilock_map_shared(xfs_inode_t *);
void xfs_iunlock_map_shared(xfs_inode_t *, uint);
-void xfs_ireclaim(xfs_inode_t *);
+void xfs_inode_free(struct xfs_inode *ip);
/*
* xfs_inode.c prototypes.
@@ -500,7 +498,7 @@ do { \
* Flags for xfs_iget()
*/
#define XFS_IGET_CREATE 0x1
-#define XFS_IGET_BULKSTAT 0x2
+#define XFS_IGET_UNTRUSTED 0x2
int xfs_inotobp(struct xfs_mount *, struct xfs_trans *,
xfs_ino_t, struct xfs_dinode **,
@@ -509,7 +507,7 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
struct xfs_inode *, struct xfs_dinode **,
struct xfs_buf **, uint);
int xfs_iread(struct xfs_mount *, struct xfs_trans *,
- struct xfs_inode *, xfs_daddr_t, uint);
+ struct xfs_inode *, uint);
void xfs_dinode_to_disk(struct xfs_dinode *,
struct xfs_icdinode *);
void xfs_idestroy_fork(struct xfs_inode *, int);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index cf8249a6000..fe00777e279 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -22,30 +22,26 @@
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
-#include "xfs_buf_item.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_trace.h"
kmem_zone_t *xfs_ili_zone; /* inode log item zone */
+static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_inode_log_item, ili_item);
+}
+
+
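
INODE_ITEM() is the container_of idiom: each xfs_inode_log_item embeds the generic xfs_log_item as its ili_item member, so every handler can take struct xfs_log_item * and recover its concrete type without the function-pointer casts the old ops table needed. The idiom in miniature (names here are illustrative, not from this tree):

	#include <linux/kernel.h>	/* container_of() */

	struct generic_item { int li_flags; };

	struct concrete_item {
		struct generic_item	item;	/* embedded, like ili_item */
		int			private;
	};

	static inline struct concrete_item *
	CONCRETE(struct generic_item *gip)
	{
		/* recover the containing structure from the member */
		return container_of(gip, struct concrete_item, item);
	}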
/*
* This returns the number of iovecs needed to log the given inode item.
*
@@ -55,13 +51,11 @@ kmem_zone_t *xfs_ili_zone; /* inode log item zone */
*/
STATIC uint
xfs_inode_item_size(
- xfs_inode_log_item_t *iip)
+ struct xfs_log_item *lip)
{
- uint nvecs;
- xfs_inode_t *ip;
-
- ip = iip->ili_inode;
- nvecs = 2;
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ struct xfs_inode *ip = iip->ili_inode;
+ uint nvecs = 2;
/*
* Only log the data/extents/b-tree root if there is something
@@ -212,21 +206,17 @@ xfs_inode_item_size(
*/
STATIC void
xfs_inode_item_format(
- xfs_inode_log_item_t *iip,
- xfs_log_iovec_t *log_vector)
+ struct xfs_log_item *lip,
+ struct xfs_log_iovec *vecp)
{
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ struct xfs_inode *ip = iip->ili_inode;
uint nvecs;
- xfs_log_iovec_t *vecp;
- xfs_inode_t *ip;
size_t data_bytes;
xfs_bmbt_rec_t *ext_buffer;
- int nrecs;
xfs_mount_t *mp;
- ip = iip->ili_inode;
- vecp = log_vector;
-
- vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
+ vecp->i_addr = &iip->ili_format;
vecp->i_len = sizeof(xfs_inode_log_format_t);
vecp->i_type = XLOG_REG_TYPE_IFORMAT;
vecp++;
@@ -277,7 +267,7 @@ xfs_inode_item_format(
*/
xfs_synchronize_times(ip);
- vecp->i_addr = (xfs_caddr_t)&ip->i_d;
+ vecp->i_addr = &ip->i_d;
vecp->i_len = sizeof(struct xfs_icdinode);
vecp->i_type = XLOG_REG_TYPE_ICORE;
vecp++;
@@ -323,18 +313,17 @@ xfs_inode_item_format(
ASSERT(ip->i_df.if_u1.if_extents != NULL);
ASSERT(ip->i_d.di_nextents > 0);
ASSERT(iip->ili_extents_buf == NULL);
- nrecs = ip->i_df.if_bytes /
- (uint)sizeof(xfs_bmbt_rec_t);
- ASSERT(nrecs > 0);
+ ASSERT((ip->i_df.if_bytes /
+ (uint)sizeof(xfs_bmbt_rec_t)) > 0);
#ifdef XFS_NATIVE_HOST
- if (nrecs == ip->i_d.di_nextents) {
+ if (ip->i_d.di_nextents == ip->i_df.if_bytes /
+ (uint)sizeof(xfs_bmbt_rec_t)) {
/*
* There are no delayed allocation
* extents, so just point to the
* real extents array.
*/
- vecp->i_addr =
- (char *)(ip->i_df.if_u1.if_extents);
+ vecp->i_addr = ip->i_df.if_u1.if_extents;
vecp->i_len = ip->i_df.if_bytes;
vecp->i_type = XLOG_REG_TYPE_IEXT;
} else
@@ -352,7 +341,7 @@ xfs_inode_item_format(
ext_buffer = kmem_alloc(ip->i_df.if_bytes,
KM_SLEEP);
iip->ili_extents_buf = ext_buffer;
- vecp->i_addr = (xfs_caddr_t)ext_buffer;
+ vecp->i_addr = ext_buffer;
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
XFS_DATA_FORK);
vecp->i_type = XLOG_REG_TYPE_IEXT;
@@ -371,7 +360,7 @@ xfs_inode_item_format(
if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) {
ASSERT(ip->i_df.if_broot_bytes > 0);
ASSERT(ip->i_df.if_broot != NULL);
- vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
+ vecp->i_addr = ip->i_df.if_broot;
vecp->i_len = ip->i_df.if_broot_bytes;
vecp->i_type = XLOG_REG_TYPE_IBROOT;
vecp++;
@@ -389,7 +378,7 @@ xfs_inode_item_format(
ASSERT(ip->i_df.if_u1.if_data != NULL);
ASSERT(ip->i_d.di_size > 0);
- vecp->i_addr = (xfs_caddr_t)ip->i_df.if_u1.if_data;
+ vecp->i_addr = ip->i_df.if_u1.if_data;
/*
* Round i_bytes up to a word boundary.
* The underlying memory is guaranteed to
@@ -437,7 +426,7 @@ xfs_inode_item_format(
* Assert that no attribute-related log flags are set.
*/
if (!XFS_IFORK_Q(ip)) {
- ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
+ ASSERT(nvecs == lip->li_desc->lid_size);
iip->ili_format.ilf_size = nvecs;
ASSERT(!(iip->ili_format.ilf_fields &
(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
@@ -449,21 +438,21 @@ xfs_inode_item_format(
ASSERT(!(iip->ili_format.ilf_fields &
(XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
- ASSERT(ip->i_afp->if_bytes > 0);
- ASSERT(ip->i_afp->if_u1.if_extents != NULL);
- ASSERT(ip->i_d.di_anextents > 0);
#ifdef DEBUG
- nrecs = ip->i_afp->if_bytes /
+ int nrecs = ip->i_afp->if_bytes /
(uint)sizeof(xfs_bmbt_rec_t);
-#endif
ASSERT(nrecs > 0);
ASSERT(nrecs == ip->i_d.di_anextents);
+ ASSERT(ip->i_afp->if_bytes > 0);
+ ASSERT(ip->i_afp->if_u1.if_extents != NULL);
+ ASSERT(ip->i_d.di_anextents > 0);
+#endif
#ifdef XFS_NATIVE_HOST
/*
* There are not delayed allocation extents
* for attributes, so just point at the array.
*/
- vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents);
+ vecp->i_addr = ip->i_afp->if_u1.if_extents;
vecp->i_len = ip->i_afp->if_bytes;
#else
ASSERT(iip->ili_aextents_buf == NULL);
@@ -473,7 +462,7 @@ xfs_inode_item_format(
ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
KM_SLEEP);
iip->ili_aextents_buf = ext_buffer;
- vecp->i_addr = (xfs_caddr_t)ext_buffer;
+ vecp->i_addr = ext_buffer;
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
XFS_ATTR_FORK);
#endif
@@ -490,7 +479,7 @@ xfs_inode_item_format(
if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) {
ASSERT(ip->i_afp->if_broot_bytes > 0);
ASSERT(ip->i_afp->if_broot != NULL);
- vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
+ vecp->i_addr = ip->i_afp->if_broot;
vecp->i_len = ip->i_afp->if_broot_bytes;
vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
vecp++;
@@ -506,7 +495,7 @@ xfs_inode_item_format(
ASSERT(ip->i_afp->if_bytes > 0);
ASSERT(ip->i_afp->if_u1.if_data != NULL);
- vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_u1.if_data;
+ vecp->i_addr = ip->i_afp->if_u1.if_data;
/*
* Round i_bytes up to a word boundary.
* The underlying memory is guaranteed to
@@ -528,7 +517,7 @@ xfs_inode_item_format(
break;
}
- ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
+ ASSERT(nvecs == lip->li_desc->lid_size);
iip->ili_format.ilf_size = nvecs;
}
@@ -539,12 +528,14 @@ xfs_inode_item_format(
*/
STATIC void
xfs_inode_item_pin(
- xfs_inode_log_item_t *iip)
+ struct xfs_log_item *lip)
{
- ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
+ struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- trace_xfs_inode_pin(iip->ili_inode, _RET_IP_);
- atomic_inc(&iip->ili_inode->i_pincount);
+ trace_xfs_inode_pin(ip, _RET_IP_);
+ atomic_inc(&ip->i_pincount);
}
@@ -554,12 +545,12 @@ xfs_inode_item_pin(
*
* Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
*/
-/* ARGSUSED */
STATIC void
xfs_inode_item_unpin(
- xfs_inode_log_item_t *iip)
+ struct xfs_log_item *lip,
+ int remove)
{
- struct xfs_inode *ip = iip->ili_inode;
+ struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode;
trace_xfs_inode_unpin(ip, _RET_IP_);
ASSERT(atomic_read(&ip->i_pincount) > 0);
@@ -567,15 +558,6 @@ xfs_inode_item_unpin(
wake_up(&ip->i_ipin_wait);
}
-/* ARGSUSED */
-STATIC void
-xfs_inode_item_unpin_remove(
- xfs_inode_log_item_t *iip,
- xfs_trans_t *tp)
-{
- xfs_inode_item_unpin(iip);
-}
-
/*
* This is called to attempt to lock the inode associated with this
* inode log item, in preparation for the push routine which does the actual
@@ -591,19 +573,16 @@ xfs_inode_item_unpin_remove(
*/
STATIC uint
xfs_inode_item_trylock(
- xfs_inode_log_item_t *iip)
+ struct xfs_log_item *lip)
{
- register xfs_inode_t *ip;
-
- ip = iip->ili_inode;
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ struct xfs_inode *ip = iip->ili_inode;
- if (xfs_ipincount(ip) > 0) {
+ if (xfs_ipincount(ip) > 0)
return XFS_ITEM_PINNED;
- }
- if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+ if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
return XFS_ITEM_LOCKED;
- }
if (!xfs_iflock_nowait(ip)) {
/*
@@ -629,7 +608,7 @@ xfs_inode_item_trylock(
if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
ASSERT(iip->ili_format.ilf_fields != 0);
ASSERT(iip->ili_logged == 0);
- ASSERT(iip->ili_item.li_flags & XFS_LI_IN_AIL);
+ ASSERT(lip->li_flags & XFS_LI_IN_AIL);
}
#endif
return XFS_ITEM_SUCCESS;
@@ -643,26 +622,18 @@ xfs_inode_item_trylock(
*/
STATIC void
xfs_inode_item_unlock(
- xfs_inode_log_item_t *iip)
+ struct xfs_log_item *lip)
{
- uint hold;
- uint iolocked;
- uint lock_flags;
- xfs_inode_t *ip;
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ struct xfs_inode *ip = iip->ili_inode;
+ unsigned short lock_flags;
- ASSERT(iip != NULL);
ASSERT(iip->ili_inode->i_itemp != NULL);
ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
- ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
- XFS_ILI_IOLOCKED_EXCL)) ||
- xfs_isilocked(iip->ili_inode, XFS_IOLOCK_EXCL));
- ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
- XFS_ILI_IOLOCKED_SHARED)) ||
- xfs_isilocked(iip->ili_inode, XFS_IOLOCK_SHARED));
+
/*
* Clear the transaction pointer in the inode.
*/
- ip = iip->ili_inode;
ip->i_transp = NULL;
/*
@@ -686,34 +657,11 @@ xfs_inode_item_unlock(
iip->ili_aextents_buf = NULL;
}
- /*
- * Figure out if we should unlock the inode or not.
- */
- hold = iip->ili_flags & XFS_ILI_HOLD;
-
- /*
- * Before clearing out the flags, remember whether we
- * are holding the inode's IO lock.
- */
- iolocked = iip->ili_flags & XFS_ILI_IOLOCKED_ANY;
-
- /*
- * Clear out the fields of the inode log item particular
- * to the current transaction.
- */
- iip->ili_flags = 0;
-
- /*
- * Unlock the inode if XFS_ILI_HOLD was not set.
- */
- if (!hold) {
- lock_flags = XFS_ILOCK_EXCL;
- if (iolocked & XFS_ILI_IOLOCKED_EXCL) {
- lock_flags |= XFS_IOLOCK_EXCL;
- } else if (iolocked & XFS_ILI_IOLOCKED_SHARED) {
- lock_flags |= XFS_IOLOCK_SHARED;
- }
- xfs_iput(iip->ili_inode, lock_flags);
+ lock_flags = iip->ili_lock_flags;
+ iip->ili_lock_flags = 0;
+ if (lock_flags) {
+ xfs_iunlock(iip->ili_inode, lock_flags);
+ IRELE(iip->ili_inode);
}
}
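
With XFS_ILI_HOLD gone the default inverts: a zero ili_lock_flags means the transaction never owned the inode's locks and unlocking is the caller's job, while a nonzero value makes the unlock path above drop both the locks and the reference. Sketching the two join variants this implies (xfs_trans_ijoin_ref() recording ili_lock_flags is an assumption based on the calling convention, not shown in this hunk):

	/* join and transfer: commit/unlock will xfs_iunlock + IRELE */
	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);

	/* join only: ili_lock_flags stays 0, caller unlocks after commit */
	xfs_trans_ijoin(tp, ip);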
@@ -725,13 +673,12 @@ xfs_inode_item_unlock(
* is the only one that matters. Therefore, simply return the
* given lsn.
*/
-/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_inode_item_committed(
- xfs_inode_log_item_t *iip,
+ struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
- return (lsn);
+ return lsn;
}
/*
@@ -743,13 +690,12 @@ xfs_inode_item_committed(
*/
STATIC void
xfs_inode_item_pushbuf(
- xfs_inode_log_item_t *iip)
+ struct xfs_log_item *lip)
{
- xfs_inode_t *ip;
- xfs_mount_t *mp;
- xfs_buf_t *bp;
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ struct xfs_inode *ip = iip->ili_inode;
+ struct xfs_buf *bp;
- ip = iip->ili_inode;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
/*
@@ -757,14 +703,13 @@ xfs_inode_item_pushbuf(
* inode was taken off the AIL. So, just get out.
*/
if (completion_done(&ip->i_flush) ||
- ((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
+ !(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return;
}
- mp = ip->i_mount;
- bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
- iip->ili_format.ilf_len, XBF_TRYLOCK);
+ bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
+ iip->ili_format.ilf_len, XBF_TRYLOCK);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!bp)
@@ -772,10 +717,8 @@ xfs_inode_item_pushbuf(
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
- return;
}
-
/*
* This is called to asynchronously write the inode associated with this
* inode log item out to disk. The inode will already have been locked by
@@ -783,14 +726,14 @@ xfs_inode_item_pushbuf(
*/
STATIC void
xfs_inode_item_push(
- xfs_inode_log_item_t *iip)
+ struct xfs_log_item *lip)
{
- xfs_inode_t *ip;
-
- ip = iip->ili_inode;
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+ struct xfs_inode *ip = iip->ili_inode;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
ASSERT(!completion_done(&ip->i_flush));
+
/*
* Since we were able to lock the inode's flush lock and
* we found it on the AIL, the inode must be dirty. This
@@ -813,43 +756,34 @@ xfs_inode_item_push(
*/
(void) xfs_iflush(ip, 0);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- return;
}
/*
* XXX rcc - this one really has to do something. Probably needs
* to stamp in a new field in the incore inode.
*/
-/* ARGSUSED */
STATIC void
xfs_inode_item_committing(
- xfs_inode_log_item_t *iip,
+ struct xfs_log_item *lip,
xfs_lsn_t lsn)
{
- iip->ili_last_lsn = lsn;
- return;
+ INODE_ITEM(lip)->ili_last_lsn = lsn;
}
/*
* This is the ops vector shared by all buf log items.
*/
static struct xfs_item_ops xfs_inode_item_ops = {
- .iop_size = (uint(*)(xfs_log_item_t*))xfs_inode_item_size,
- .iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
- xfs_inode_item_format,
- .iop_pin = (void(*)(xfs_log_item_t*))xfs_inode_item_pin,
- .iop_unpin = (void(*)(xfs_log_item_t*))xfs_inode_item_unpin,
- .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
- xfs_inode_item_unpin_remove,
- .iop_trylock = (uint(*)(xfs_log_item_t*))xfs_inode_item_trylock,
- .iop_unlock = (void(*)(xfs_log_item_t*))xfs_inode_item_unlock,
- .iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_inode_item_committed,
- .iop_push = (void(*)(xfs_log_item_t*))xfs_inode_item_push,
- .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_inode_item_pushbuf,
- .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
- xfs_inode_item_committing
+ .iop_size = xfs_inode_item_size,
+ .iop_format = xfs_inode_item_format,
+ .iop_pin = xfs_inode_item_pin,
+ .iop_unpin = xfs_inode_item_unpin,
+ .iop_trylock = xfs_inode_item_trylock,
+ .iop_unlock = xfs_inode_item_unlock,
+ .iop_committed = xfs_inode_item_committed,
+ .iop_push = xfs_inode_item_push,
+ .iop_pushbuf = xfs_inode_item_pushbuf,
+ .iop_committing = xfs_inode_item_committing
};
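
The old initializer cast every handler through a mismatched function-pointer type before calling it, which is undefined behaviour in C even where it happens to work; now the handlers take struct xfs_log_item * themselves and the table needs no casts at all. The same pattern in miniature:

	struct item;				/* opaque forward decl */

	struct item_ops {
		unsigned int	(*iop_size)(struct item *);
		void		(*iop_pin)(struct item *);
	};

	static unsigned int my_size(struct item *ip) { return 2; }
	static void	    my_pin(struct item *ip)  { /* ... */ }

	/* signatures match exactly, so no casts are required */
	static const struct item_ops my_ops = {
		.iop_size	= my_size,
		.iop_pin	= my_pin,
	};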
@@ -858,10 +792,10 @@ static struct xfs_item_ops xfs_inode_item_ops = {
*/
void
xfs_inode_item_init(
- xfs_inode_t *ip,
- xfs_mount_t *mp)
+ struct xfs_inode *ip,
+ struct xfs_mount *mp)
{
- xfs_inode_log_item_t *iip;
+ struct xfs_inode_log_item *iip;
ASSERT(ip->i_itemp == NULL);
iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);
@@ -899,14 +833,14 @@ xfs_inode_item_destroy(
* from the AIL if it has not been re-logged, and unlocking the inode's
* flush lock.
*/
-/*ARGSUSED*/
void
xfs_iflush_done(
- xfs_buf_t *bp,
- xfs_inode_log_item_t *iip)
+ struct xfs_buf *bp,
+ struct xfs_log_item *lip)
{
+ struct xfs_inode_log_item *iip = INODE_ITEM(lip);
xfs_inode_t *ip = iip->ili_inode;
- struct xfs_ail *ailp = iip->ili_item.li_ailp;
+ struct xfs_ail *ailp = lip->li_ailp;
/*
* We only want to pull the item from the AIL if it is
@@ -917,12 +851,11 @@ xfs_iflush_done(
* the lock since it's cheaper, and then we recheck while
* holding the lock before removing the inode from the AIL.
*/
- if (iip->ili_logged &&
- (iip->ili_item.li_lsn == iip->ili_flush_lsn)) {
+ if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) {
spin_lock(&ailp->xa_lock);
- if (iip->ili_item.li_lsn == iip->ili_flush_lsn) {
+ if (lip->li_lsn == iip->ili_flush_lsn) {
/* xfs_trans_ail_delete() drops the AIL lock. */
- xfs_trans_ail_delete(ailp, (xfs_log_item_t*)iip);
+ xfs_trans_ail_delete(ailp, lip);
} else {
spin_unlock(&ailp->xa_lock);
}
@@ -940,8 +873,6 @@ xfs_iflush_done(
* Release the inode's flush lock since we're done with it.
*/
xfs_ifunlock(ip);
-
- return;
}
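
The unlocked LSN test followed by a retest under xa_lock is a deliberate double-checked pattern: the cheap unlocked comparison filters out re-logged inodes without touching the AIL lock, and the locked recheck makes the actual removal race-free. In outline, with the semantics spelled out:

	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) {
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == iip->ili_flush_lsn) {
			/* not re-logged: delete drops xa_lock itself */
			xfs_trans_ail_delete(ailp, lip);
		} else {
			/* re-logged while we raced: leave it in the AIL */
			spin_unlock(&ailp->xa_lock);
		}
	}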
/*
@@ -957,10 +888,8 @@ xfs_iflush_abort(
xfs_inode_t *ip)
{
xfs_inode_log_item_t *iip = ip->i_itemp;
- xfs_mount_t *mp;
iip = ip->i_itemp;
- mp = ip->i_mount;
if (iip) {
struct xfs_ail *ailp = iip->ili_item.li_ailp;
if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
@@ -991,10 +920,10 @@ xfs_iflush_abort(
void
xfs_istale_done(
- xfs_buf_t *bp,
- xfs_inode_log_item_t *iip)
+ struct xfs_buf *bp,
+ struct xfs_log_item *lip)
{
- xfs_iflush_abort(iip->ili_inode);
+ xfs_iflush_abort(INODE_ITEM(lip)->ili_inode);
}
/*
@@ -1007,9 +936,8 @@ xfs_inode_item_format_convert(
xfs_inode_log_format_t *in_f)
{
if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
- xfs_inode_log_format_32_t *in_f32;
+ xfs_inode_log_format_32_t *in_f32 = buf->i_addr;
- in_f32 = (xfs_inode_log_format_32_t *)buf->i_addr;
in_f->ilf_type = in_f32->ilf_type;
in_f->ilf_size = in_f32->ilf_size;
in_f->ilf_fields = in_f32->ilf_fields;
@@ -1025,9 +953,8 @@ xfs_inode_item_format_convert(
in_f->ilf_boffset = in_f32->ilf_boffset;
return 0;
} else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){
- xfs_inode_log_format_64_t *in_f64;
+ xfs_inode_log_format_64_t *in_f64 = buf->i_addr;
- in_f64 = (xfs_inode_log_format_64_t *)buf->i_addr;
in_f->ilf_type = in_f64->ilf_type;
in_f->ilf_size = in_f64->ilf_size;
in_f->ilf_fields = in_f64->ilf_fields;
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 9a467958ecd..d3dee61e6d9 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -103,12 +103,6 @@ typedef struct xfs_inode_log_format_64 {
XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
XFS_ILOG_ABROOT)
-#define XFS_ILI_HOLD 0x1
-#define XFS_ILI_IOLOCKED_EXCL 0x2
-#define XFS_ILI_IOLOCKED_SHARED 0x4
-
-#define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED)
-
static inline int xfs_ilog_fbroot(int w)
{
return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT);
@@ -137,7 +131,7 @@ typedef struct xfs_inode_log_item {
struct xfs_inode *ili_inode; /* inode ptr */
xfs_lsn_t ili_flush_lsn; /* lsn at last flush */
xfs_lsn_t ili_last_lsn; /* lsn at last transaction */
- unsigned short ili_flags; /* misc flags */
+ unsigned short ili_lock_flags; /* lock flags */
unsigned short ili_logged; /* flushed logged data */
unsigned int ili_last_fields; /* fields when flushed */
struct xfs_bmbt_rec *ili_extents_buf; /* array of logged
@@ -161,8 +155,8 @@ static inline int xfs_inode_clean(xfs_inode_t *ip)
extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *);
-extern void xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *);
-extern void xfs_istale_done(struct xfs_buf *, xfs_inode_log_item_t *);
+extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *);
+extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_iflush_abort(struct xfs_inode *);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
xfs_inode_log_format_t *);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index ef14943829d..20576146369 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -23,19 +23,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_alloc.h"
-#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_ialloc.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
@@ -123,7 +118,7 @@ xfs_iomap(
error = xfs_bmapi(NULL, ip, offset_fsb,
(xfs_filblks_t)(end_fsb - offset_fsb),
bmapi_flags, NULL, 0, imap,
- nimaps, NULL, NULL);
+ nimaps, NULL);
if (error)
goto out;
@@ -138,7 +133,7 @@ xfs_iomap(
break;
}
- if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
+ if (flags & BMAPI_DIRECT) {
error = xfs_iomap_write_direct(ip, offset, count, flags,
imap, nimaps);
} else {
@@ -247,7 +242,7 @@ xfs_iomap_write_direct(
xfs_off_t offset,
size_t count,
int flags,
- xfs_bmbt_irec_t *ret_imap,
+ xfs_bmbt_irec_t *imap,
int *nmaps)
{
xfs_mount_t *mp = ip->i_mount;
@@ -261,7 +256,6 @@ xfs_iomap_write_direct(
int quota_flag;
int rt;
xfs_trans_t *tp;
- xfs_bmbt_irec_t imap;
xfs_bmap_free_t free_list;
uint qblocks, resblks, resrtextents;
int committed;
@@ -285,10 +279,10 @@ xfs_iomap_write_direct(
if (error)
goto error_out;
} else {
- if (*nmaps && (ret_imap->br_startblock == HOLESTARTBLOCK))
+ if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK))
last_fsb = MIN(last_fsb, (xfs_fileoff_t)
- ret_imap->br_blockcount +
- ret_imap->br_startoff);
+ imap->br_blockcount +
+ imap->br_startoff);
}
count_fsb = last_fsb - offset_fsb;
ASSERT(count_fsb > 0);
@@ -334,20 +328,22 @@ xfs_iomap_write_direct(
if (error)
goto error1;
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
bmapi_flag = XFS_BMAPI_WRITE;
if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
bmapi_flag |= XFS_BMAPI_PREALLOC;
/*
- * Issue the xfs_bmapi() call to allocate the blocks
+ * Issue the xfs_bmapi() call to allocate the blocks.
+ *
+ * From this point onwards we overwrite the imap pointer that the
+ * caller gave to us.
*/
xfs_bmap_init(&free_list, &firstfsb);
nimaps = 1;
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag,
- &firstfsb, 0, &imap, &nimaps, &free_list, NULL);
+ &firstfsb, 0, imap, &nimaps, &free_list);
if (error)
goto error0;
@@ -369,12 +365,11 @@ xfs_iomap_write_direct(
goto error_out;
}
- if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) {
- error = xfs_cmn_err_fsblock_zero(ip, &imap);
+ if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) {
+ error = xfs_cmn_err_fsblock_zero(ip, imap);
goto error_out;
}
- *ret_imap = imap;
*nmaps = 1;
return 0;
@@ -425,7 +420,7 @@ xfs_iomap_eof_want_preallocate(
imaps = nimaps;
firstblock = NULLFSBLOCK;
error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0,
- &firstblock, 0, imap, &imaps, NULL, NULL);
+ &firstblock, 0, imap, &imaps, NULL);
if (error)
return error;
for (n = 0; n < imaps; n++) {
@@ -500,7 +495,7 @@ retry:
(xfs_filblks_t)(last_fsb - offset_fsb),
XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
- &nimaps, NULL, NULL);
+ &nimaps, NULL);
if (error && (error != ENOSPC))
return XFS_ERROR(error);
@@ -548,7 +543,7 @@ xfs_iomap_write_allocate(
xfs_inode_t *ip,
xfs_off_t offset,
size_t count,
- xfs_bmbt_irec_t *map,
+ xfs_bmbt_irec_t *imap,
int *retmap)
{
xfs_mount_t *mp = ip->i_mount;
@@ -557,7 +552,6 @@ xfs_iomap_write_allocate(
xfs_fsblock_t first_block;
xfs_bmap_free_t free_list;
xfs_filblks_t count_fsb;
- xfs_bmbt_irec_t imap;
xfs_trans_t *tp;
int nimaps, committed;
int error = 0;
@@ -573,8 +567,8 @@ xfs_iomap_write_allocate(
return XFS_ERROR(error);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
- count_fsb = map->br_blockcount;
- map_start_fsb = map->br_startoff;
+ count_fsb = imap->br_blockcount;
+ map_start_fsb = imap->br_startoff;
XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
@@ -602,8 +596,7 @@ xfs_iomap_write_allocate(
return XFS_ERROR(error);
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
xfs_bmap_init(&free_list, &first_block);
@@ -654,10 +647,15 @@ xfs_iomap_write_allocate(
}
}
- /* Go get the actual blocks */
+ /*
+ * Go get the actual blocks.
+ *
+ * From this point onwards we overwrite the imap
+ * pointer that the caller gave to us.
+ */
error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
XFS_BMAPI_WRITE, &first_block, 1,
- &imap, &nimaps, &free_list, NULL);
+ imap, &nimaps, &free_list);
if (error)
goto trans_cancel;
@@ -676,13 +674,12 @@ xfs_iomap_write_allocate(
* See if we were able to allocate an extent that
* covers at least part of the callers request
*/
- if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
- return xfs_cmn_err_fsblock_zero(ip, &imap);
+ if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
+ return xfs_cmn_err_fsblock_zero(ip, imap);
- if ((offset_fsb >= imap.br_startoff) &&
- (offset_fsb < (imap.br_startoff +
- imap.br_blockcount))) {
- *map = imap;
+ if ((offset_fsb >= imap->br_startoff) &&
+ (offset_fsb < (imap->br_startoff +
+ imap->br_blockcount))) {
*retmap = 1;
XFS_STATS_INC(xs_xstrat_quick);
return 0;
@@ -692,8 +689,8 @@ xfs_iomap_write_allocate(
* So far we have not mapped the requested part of the
* file, just surrounding data, try again.
*/
- count_fsb -= imap.br_blockcount;
- map_start_fsb = imap.br_startoff + imap.br_blockcount;
+ count_fsb -= imap->br_blockcount;
+ map_start_fsb = imap->br_startoff + imap->br_blockcount;
}
trans_cancel:
@@ -766,8 +763,7 @@ xfs_iomap_write_unwritten(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
/*
* Modify the unwritten extent state of the buffer.
@@ -776,7 +772,7 @@ xfs_iomap_write_unwritten(
nimaps = 1;
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
- 1, &imap, &nimaps, &free_list, NULL);
+ 1, &imap, &nimaps, &free_list);
if (error)
goto error_on_bmapi_transaction;
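
Both write paths now fill the caller's xfs_bmbt_irec in place rather than copying a stack-local map out at the end, which is why the new comments warn that the caller's data is overwritten from the xfs_bmapi() call onwards. The call shape from the caller's side, sketched (use_mapping() is a hypothetical consumer, not a kernel function):

	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;

	/* imap is input (hint from a previous mapping) and output */
	error = xfs_iomap_write_direct(ip, offset, count, flags,
				       &imap, &nimaps);
	if (!error)
		use_mapping(imap.br_startblock, imap.br_blockcount);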
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 81ac4afd45b..7748a430f50 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,17 +18,16 @@
#ifndef __XFS_IOMAP_H__
#define __XFS_IOMAP_H__
-typedef enum {
- /* base extent manipulation calls */
- BMAPI_READ = (1 << 0), /* read extents */
- BMAPI_WRITE = (1 << 1), /* create extents */
- BMAPI_ALLOCATE = (1 << 2), /* delayed allocate to real extents */
- /* modifiers */
- BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */
- BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */
- BMAPI_MMAP = (1 << 6), /* allocate for mmap write */
- BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */
-} bmapi_flags_t;
+/* base extent manipulation calls */
+#define BMAPI_READ (1 << 0) /* read extents */
+#define BMAPI_WRITE (1 << 1) /* create extents */
+#define BMAPI_ALLOCATE (1 << 2) /* delayed allocate to real extents */
+
+/* modifiers */
+#define BMAPI_IGNSTATE (1 << 4) /* ignore unwritten state on read */
+#define BMAPI_DIRECT (1 << 5) /* direct instead of buffered write */
+#define BMAPI_MMAP (1 << 6) /* allocate for mmap write */
+#define BMAPI_TRYLOCK (1 << 7) /* non-blocking request */
#define BMAPI_FLAGS \
{ BMAPI_READ, "READ" }, \
@@ -36,7 +35,6 @@ typedef enum {
{ BMAPI_ALLOCATE, "ALLOCATE" }, \
{ BMAPI_IGNSTATE, "IGNSTATE" }, \
{ BMAPI_DIRECT, "DIRECT" }, \
- { BMAPI_MMAP, "MMAP" }, \
{ BMAPI_TRYLOCK, "TRYLOCK" }
struct xfs_inode;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index b1b801e4a28..7e3626e5925 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -24,20 +24,17 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_btree.h"
+#include "xfs_trace.h"
STATIC int
xfs_internal_inum(
@@ -49,24 +46,40 @@ xfs_internal_inum(
(ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
}
-STATIC int
-xfs_bulkstat_one_iget(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- xfs_daddr_t bno, /* starting bno of inode cluster */
- xfs_bstat_t *buf, /* return buffer */
- int *stat) /* BULKSTAT_RV_... */
+/*
+ * Return stat information for one inode.
+ * Return 0 if ok, else errno.
+ */
+int
+xfs_bulkstat_one_int(
+ struct xfs_mount *mp, /* mount point for filesystem */
+ xfs_ino_t ino, /* inode to get data for */
+ void __user *buffer, /* buffer to place output in */
+ int ubsize, /* size of buffer */
+ bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
+ int *ubused, /* bytes used by me */
+ int *stat) /* BULKSTAT_RV_... */
{
- xfs_icdinode_t *dic; /* dinode core info pointer */
- xfs_inode_t *ip; /* incore inode pointer */
- struct inode *inode;
- int error;
+ struct xfs_icdinode *dic; /* dinode core info pointer */
+ struct xfs_inode *ip; /* incore inode pointer */
+ struct inode *inode;
+ struct xfs_bstat *buf; /* return buffer */
+ int error = 0; /* error value */
+
+ *stat = BULKSTAT_RV_NOTHING;
+
+ if (!buffer || xfs_internal_inum(mp, ino))
+ return XFS_ERROR(EINVAL);
+
+ buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
+ if (!buf)
+ return XFS_ERROR(ENOMEM);
error = xfs_iget(mp, NULL, ino,
- XFS_IGET_BULKSTAT, XFS_ILOCK_SHARED, &ip, bno);
+ XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip);
if (error) {
*stat = BULKSTAT_RV_NOTHING;
- return error;
+ goto out_free;
}
ASSERT(ip != NULL);
@@ -127,77 +140,17 @@ xfs_bulkstat_one_iget(
buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
break;
}
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ IRELE(ip);
- xfs_iput(ip, XFS_ILOCK_SHARED);
- return error;
-}
-
-STATIC void
-xfs_bulkstat_one_dinode(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- xfs_dinode_t *dic, /* dinode inode pointer */
- xfs_bstat_t *buf) /* return buffer */
-{
- /*
- * The inode format changed when we moved the link count and
- * made it 32 bits long. If this is an old format inode,
- * convert it in memory to look like a new one. If it gets
- * flushed to disk we will convert back before flushing or
- * logging it. We zero out the new projid field and the old link
- * count field. We'll handle clearing the pad field (the remains
- * of the old uuid field) when we actually convert the inode to
- * the new format. We don't change the version number so that we
- * can distinguish this from a real new format inode.
- */
- if (dic->di_version == 1) {
- buf->bs_nlink = be16_to_cpu(dic->di_onlink);
- buf->bs_projid = 0;
- } else {
- buf->bs_nlink = be32_to_cpu(dic->di_nlink);
- buf->bs_projid = be16_to_cpu(dic->di_projid);
- }
+ error = formatter(buffer, ubsize, ubused, buf);
- buf->bs_ino = ino;
- buf->bs_mode = be16_to_cpu(dic->di_mode);
- buf->bs_uid = be32_to_cpu(dic->di_uid);
- buf->bs_gid = be32_to_cpu(dic->di_gid);
- buf->bs_size = be64_to_cpu(dic->di_size);
- buf->bs_atime.tv_sec = be32_to_cpu(dic->di_atime.t_sec);
- buf->bs_atime.tv_nsec = be32_to_cpu(dic->di_atime.t_nsec);
- buf->bs_mtime.tv_sec = be32_to_cpu(dic->di_mtime.t_sec);
- buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec);
- buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec);
- buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec);
- buf->bs_xflags = xfs_dic2xflags(dic);
- buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog;
- buf->bs_extents = be32_to_cpu(dic->di_nextents);
- buf->bs_gen = be32_to_cpu(dic->di_gen);
- memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
- buf->bs_dmevmask = be32_to_cpu(dic->di_dmevmask);
- buf->bs_dmstate = be16_to_cpu(dic->di_dmstate);
- buf->bs_aextents = be16_to_cpu(dic->di_anextents);
- buf->bs_forkoff = XFS_DFORK_BOFF(dic);
+ if (!error)
+ *stat = BULKSTAT_RV_DIDONE;
- switch (dic->di_format) {
- case XFS_DINODE_FMT_DEV:
- buf->bs_rdev = xfs_dinode_get_rdev(dic);
- buf->bs_blksize = BLKDEV_IOSIZE;
- buf->bs_blocks = 0;
- break;
- case XFS_DINODE_FMT_LOCAL:
- case XFS_DINODE_FMT_UUID:
- buf->bs_rdev = 0;
- buf->bs_blksize = mp->m_sb.sb_blocksize;
- buf->bs_blocks = 0;
- break;
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- buf->bs_rdev = 0;
- buf->bs_blksize = mp->m_sb.sb_blocksize;
- buf->bs_blocks = be64_to_cpu(dic->di_nblocks);
- break;
- }
+ out_free:
+ kmem_free(buf);
+ return error;
}
/* Return 0 on success or positive error */
@@ -217,118 +170,17 @@ xfs_bulkstat_one_fmt(
return 0;
}
-/*
- * Return stat information for one inode.
- * Return 0 if ok, else errno.
- */
-int /* error status */
-xfs_bulkstat_one_int(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- void __user *buffer, /* buffer to place output in */
- int ubsize, /* size of buffer */
- bulkstat_one_fmt_pf formatter, /* formatter, copy to user */
- xfs_daddr_t bno, /* starting bno of inode cluster */
- int *ubused, /* bytes used by me */
- void *dibuff, /* on-disk inode buffer */
- int *stat) /* BULKSTAT_RV_... */
-{
- xfs_bstat_t *buf; /* return buffer */
- int error = 0; /* error value */
- xfs_dinode_t *dip; /* dinode inode pointer */
-
- dip = (xfs_dinode_t *)dibuff;
- *stat = BULKSTAT_RV_NOTHING;
-
- if (!buffer || xfs_internal_inum(mp, ino))
- return XFS_ERROR(EINVAL);
-
- buf = kmem_alloc(sizeof(*buf), KM_SLEEP);
-
- if (dip == NULL) {
- /* We're not being passed a pointer to a dinode. This happens
- * if BULKSTAT_FG_IGET is selected. Do the iget.
- */
- error = xfs_bulkstat_one_iget(mp, ino, bno, buf, stat);
- if (error)
- goto out_free;
- } else {
- xfs_bulkstat_one_dinode(mp, ino, dip, buf);
- }
-
- error = formatter(buffer, ubsize, ubused, buf);
- if (error)
- goto out_free;
-
- *stat = BULKSTAT_RV_DIDONE;
-
- out_free:
- kmem_free(buf);
- return error;
-}
-
int
xfs_bulkstat_one(
xfs_mount_t *mp, /* mount point for filesystem */
xfs_ino_t ino, /* inode number to get data for */
void __user *buffer, /* buffer to place output in */
int ubsize, /* size of buffer */
- void *private_data, /* my private data */
- xfs_daddr_t bno, /* starting bno of inode cluster */
int *ubused, /* bytes used by me */
- void *dibuff, /* on-disk inode buffer */
int *stat) /* BULKSTAT_RV_... */
{
return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
- xfs_bulkstat_one_fmt, bno,
- ubused, dibuff, stat);
-}
-
-/*
- * Test to see whether we can use the ondisk inode directly, based
- * on the given bulkstat flags, filling in dipp accordingly.
- * Returns zero if the inode is dodgey.
- */
-STATIC int
-xfs_bulkstat_use_dinode(
- xfs_mount_t *mp,
- int flags,
- xfs_buf_t *bp,
- int clustidx,
- xfs_dinode_t **dipp)
-{
- xfs_dinode_t *dip;
- unsigned int aformat;
-
- *dipp = NULL;
- if (!bp || (flags & BULKSTAT_FG_IGET))
- return 1;
- dip = (xfs_dinode_t *)
- xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog);
- /*
- * Check the buffer containing the on-disk inode for di_mode == 0.
- * This is to prevent xfs_bulkstat from picking up just reclaimed
- * inodes that have their in-core state initialized but not flushed
- * to disk yet. This is a temporary hack that would require a proper
- * fix in the future.
- */
- if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
- !XFS_DINODE_GOOD_VERSION(dip->di_version) ||
- !dip->di_mode)
- return 0;
- if (flags & BULKSTAT_FG_QUICK) {
- *dipp = dip;
- return 1;
- }
- /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */
- aformat = dip->di_aformat;
- if ((XFS_DFORK_Q(dip) == 0) ||
- (aformat == XFS_DINODE_FMT_LOCAL) ||
- (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) {
- *dipp = dip;
- return 1;
- }
- return 1;
+ xfs_bulkstat_one_fmt, ubused, stat);
}
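With the flag-driven variants gone, every bulkstat caller funnels per-inode output through a bulkstat_one_fmt_pf formatter. A hypothetical formatter conforming to that callback — its signature inferred from the formatter(buffer, ubsize, ubused, buf) call site above, its body mirroring what xfs_bulkstat_one_fmt does — a sketch of the pattern only:

	/* Hypothetical formatter: copy the whole xfs_bstat to userspace. */
	STATIC int
	xfs_bulkstat_example_fmt(
		void			__user *ubuffer,	/* user buffer */
		int			ubsize,			/* room remaining */
		int			*ubused,		/* bytes we consumed */
		const xfs_bstat_t	*buffer)		/* kernel-side stat */
	{
		if (ubsize < sizeof(*buffer))
			return XFS_ERROR(ENOMEM);
		if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
			return XFS_ERROR(EFAULT);
		if (ubused)
			*ubused = sizeof(*buffer);
		return 0;
	}

Such a formatter would be handed to xfs_bulkstat_one_int() exactly as xfs_bulkstat_one_fmt is above.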
#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
@@ -342,10 +194,8 @@ xfs_bulkstat(
xfs_ino_t *lastinop, /* last inode returned */
int *ubcountp, /* size of buffer/count returned */
bulkstat_one_pf formatter, /* func that'd fill a single buf */
- void *private_data,/* private data for formatter */
size_t statstruct_size, /* sizeof struct filling */
char __user *ubuffer, /* buffer with inode stats */
- int flags, /* defined in xfs_itable.h */
int *done) /* 1 if there are more stats to get */
{
xfs_agblock_t agbno=0;/* allocation group block number */
@@ -380,14 +230,12 @@ xfs_bulkstat(
int ubelem; /* spaces used in user's buffer */
int ubused; /* bytes used by formatter */
xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */
- xfs_dinode_t *dip; /* ptr into bp for specific inode */
/*
* Get the last inode value, see if there's nothing to do.
*/
ino = (xfs_ino_t)*lastinop;
lastino = ino;
- dip = NULL;
agno = XFS_INO_TO_AGNO(mp, ino);
agino = XFS_INO_TO_AGINO(mp, ino);
if (agno >= mp->m_sb.sb_agcount ||
@@ -612,37 +460,6 @@ xfs_bulkstat(
irbp->ir_startino) +
((chunkidx & nimask) >>
mp->m_sb.sb_inopblog);
-
- if (flags & (BULKSTAT_FG_QUICK |
- BULKSTAT_FG_INLINE)) {
- int offset;
-
- ino = XFS_AGINO_TO_INO(mp, agno,
- agino);
- bno = XFS_AGB_TO_DADDR(mp, agno,
- agbno);
-
- /*
- * Get the inode cluster buffer
- */
- if (bp)
- xfs_buf_relse(bp);
-
- error = xfs_inotobp(mp, NULL, ino, &dip,
- &bp, &offset,
- XFS_IGET_BULKSTAT);
-
- if (!error)
- clustidx = offset / mp->m_sb.sb_inodesize;
- if (XFS_TEST_ERROR(error != 0,
- mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK,
- XFS_RANDOM_BULKSTAT_READ_CHUNK)) {
- bp = NULL;
- ubleft = 0;
- rval = error;
- break;
- }
- }
}
ino = XFS_AGINO_TO_INO(mp, agno, agino);
bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
@@ -658,35 +475,13 @@ xfs_bulkstat(
* when the chunk is used up.
*/
irbp->ir_freecount++;
- if (!xfs_bulkstat_use_dinode(mp, flags, bp,
- clustidx, &dip)) {
- lastino = ino;
- continue;
- }
- /*
- * If we need to do an iget, cannot hold bp.
- * Drop it, until starting the next cluster.
- */
- if ((flags & BULKSTAT_FG_INLINE) && !dip) {
- if (bp)
- xfs_buf_relse(bp);
- bp = NULL;
- }
/*
* Get the inode and fill in a single buffer.
- * BULKSTAT_FG_QUICK uses dip to fill it in.
- * BULKSTAT_FG_IGET uses igets.
- * BULKSTAT_FG_INLINE uses dip if we have an
- * inline attr fork, else igets.
- * See: xfs_bulkstat_one & xfs_dm_bulkstat_one.
- * This is also used to count inodes/blks, etc
- * in xfs_qm_quotacheck.
*/
ubused = statstruct_size;
- error = formatter(mp, ino, ubufp,
- ubleft, private_data,
- bno, &ubused, dip, &fmterror);
+ error = formatter(mp, ino, ubufp, ubleft,
+ &ubused, &fmterror);
if (fmterror == BULKSTAT_RV_NOTHING) {
if (error && error != ENOENT &&
error != EINVAL) {
@@ -778,8 +573,7 @@ xfs_bulkstat_single(
*/
ino = (xfs_ino_t)*lastinop;
- error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t),
- NULL, 0, NULL, NULL, &res);
+ error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
if (error) {
/*
* Special case way failed, do it the "long" way
@@ -788,8 +582,7 @@ xfs_bulkstat_single(
(*lastinop)--;
count = 1;
if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one,
- NULL, sizeof(xfs_bstat_t), buffer,
- BULKSTAT_FG_IGET, done))
+ sizeof(xfs_bstat_t), buffer, done))
return error;
if (count == 0 || (xfs_ino_t)*lastinop != ino)
return error == EFSCORRUPTED ?
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 20792bf4594..97295d91d17 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -27,10 +27,7 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
xfs_ino_t ino,
void __user *buffer,
int ubsize,
- void *private_data,
- xfs_daddr_t bno,
int *ubused,
- void *dip,
int *stat);
/*
@@ -41,13 +38,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
#define BULKSTAT_RV_GIVEUP 2
/*
- * Values for bulkstat flag argument.
- */
-#define BULKSTAT_FG_IGET 0x1 /* Go through the buffer cache */
-#define BULKSTAT_FG_QUICK 0x2 /* No iget, walk the dinode cluster */
-#define BULKSTAT_FG_INLINE 0x4 /* No iget if inline attrs */
-
-/*
* Return stat information in bulk (by-inode) for the filesystem.
*/
int /* error status */
@@ -56,10 +46,8 @@ xfs_bulkstat(
xfs_ino_t *lastino, /* last inode returned */
int *count, /* size of buffer/count returned */
bulkstat_one_pf formatter, /* func that'd fill a single buf */
- void *private_data, /* private data for formatter */
size_t statstruct_size,/* sizeof struct that we're filling */
char __user *ubuffer,/* buffer with inode stats */
- int flags, /* flag to control access method */
int *done); /* 1 if there are more stats to get */
int
@@ -82,9 +70,7 @@ xfs_bulkstat_one_int(
void __user *buffer,
int ubsize,
bulkstat_one_fmt_pf formatter,
- xfs_daddr_t bno,
int *ubused,
- void *dibuff,
int *stat);
int
@@ -93,10 +79,7 @@ xfs_bulkstat_one(
xfs_ino_t ino,
void __user *buffer,
int ubsize,
- void *private_data,
- xfs_daddr_t bno,
int *ubused,
- void *dibuff,
int *stat);
typedef int (*inumbers_fmt_pf)(
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 5215abc8023..925d572bf0f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -24,8 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
@@ -35,8 +33,6 @@
#include "xfs_ialloc_btree.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_rw.h"
@@ -337,7 +333,6 @@ xfs_log_reserve(
int retval = 0;
ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
- ASSERT((flags & XFS_LOG_NOSLEEP) == 0);
if (XLOG_FORCED_SHUTDOWN(log))
return XFS_ERROR(EIO);
@@ -552,7 +547,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
.magic = XLOG_UNMOUNT_TYPE,
};
struct xfs_log_iovec reg = {
- .i_addr = (void *)&magic,
+ .i_addr = &magic,
.i_len = sizeof(magic),
.i_type = XLOG_REG_TYPE_UNMOUNT,
};
@@ -1047,7 +1042,6 @@ xlog_alloc_log(xfs_mount_t *mp,
xlog_in_core_t *iclog, *prev_iclog=NULL;
xfs_buf_t *bp;
int i;
- int iclogsize;
int error = ENOMEM;
uint log2_size = 0;
@@ -1127,7 +1121,6 @@ xlog_alloc_log(xfs_mount_t *mp,
* with different amounts of memory. See the definition of
* xlog_in_core_t in xfs_log_priv.h for details.
*/
- iclogsize = log->l_iclog_size;
ASSERT(log->l_iclog_size >= 4096);
for (i=0; i < log->l_iclog_bufs; i++) {
*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
@@ -1428,11 +1421,8 @@ xlog_sync(xlog_t *log,
XFS_BUF_BUSY(bp);
XFS_BUF_ASYNC(bp);
bp->b_flags |= XBF_LOG_BUFFER;
- /*
- * Do an ordered write for the log block.
- * Its unnecessary to flush the first split block in the log wrap case.
- */
- if (!split && (log->l_mp->m_flags & XFS_MOUNT_BARRIER))
+
+ if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
XFS_BUF_ORDERED(bp);
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 04c78e642cc..916eb7db14d 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -55,14 +55,10 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
/*
* Flags to xfs_log_reserve()
*
- * XFS_LOG_SLEEP: If space is not available, sleep (default)
- * XFS_LOG_NOSLEEP: If space is not available, return error
* XFS_LOG_PERM_RESERV: Permanent reservation. When writes are
* performed against this type of reservation, the reservation
* is not decreased. Long running transactions should use this.
*/
-#define XFS_LOG_SLEEP 0x0
-#define XFS_LOG_NOSLEEP 0x1
#define XFS_LOG_PERM_RESERV 0x2
/*
@@ -104,7 +100,7 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
#define XLOG_REG_TYPE_MAX 19
typedef struct xfs_log_iovec {
- xfs_caddr_t i_addr; /* beginning address of region */
+ void *i_addr; /* beginning address of region */
int i_len; /* length in bytes of region */
uint i_type; /* type of region */
} xfs_log_iovec_t;
@@ -201,9 +197,4 @@ int xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
#endif
-
-
-extern int xlog_debug; /* set to 1 to enable real log */
-
-
#endif /* __XFS_LOG_H__ */
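Turning i_addr into a void pointer is what lets the (xfs_caddr_t) and similar casts disappear throughout the surrounding hunks. A region can now be described directly; a sketch modelled on the unmount record above and the checkpoint header in the xlog_cil_push() hunk just below (field values illustrative):

	struct xfs_trans_header	thdr = {
		.th_magic	= XFS_TRANS_HEADER_MAGIC,
		.th_type	= XFS_TRANS_CHECKPOINT,
	};
	struct xfs_log_iovec	lhdr = {
		.i_addr		= &thdr,	/* no cast needed any more */
		.i_len		= sizeof(struct xfs_trans_header),
		.i_type		= XLOG_REG_TYPE_TRANSHDR,
	};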
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index bb17cc044bf..31e4ea2d19a 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -26,8 +26,6 @@
#include "xfs_log_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
@@ -554,7 +552,7 @@ xlog_cil_push(
thdr.th_type = XFS_TRANS_CHECKPOINT;
thdr.th_tid = tic->t_tid;
thdr.th_num_items = num_iovecs;
- lhdr.i_addr = (xfs_caddr_t)&thdr;
+ lhdr.i_addr = &thdr;
lhdr.i_len = sizeof(xfs_trans_header_t);
lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 14a69aec2c0..6f3f5fa37ac 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -24,15 +24,11 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@@ -132,15 +128,10 @@ xlog_align(
int nbblks,
xfs_buf_t *bp)
{
- xfs_daddr_t offset;
- xfs_caddr_t ptr;
+ xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
- offset = blk_no & ((xfs_daddr_t) log->l_sectBBsize - 1);
- ptr = XFS_BUF_PTR(bp) + BBTOB(offset);
-
- ASSERT(ptr + BBTOB(nbblks) <= XFS_BUF_PTR(bp) + XFS_BUF_SIZE(bp));
-
- return ptr;
+ ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
+ return XFS_BUF_PTR(bp) + BBTOB(offset);
}
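For concreteness, a worked example of the rewritten xlog_align() under an assumed geometry:

	/*
	 * Assume 4 KB log sectors, i.e. l_sectBBsize = 8 basic blocks.
	 * For blk_no = 21:
	 *
	 *	offset = 21 & (8 - 1)               = 5 basic blocks
	 *	return = XFS_BUF_PTR(bp) + BBTOB(5) = buffer data + 2560 bytes
	 *
	 * and the ASSERT verifies BBTOB(5 + nbblks) still fits in the buffer.
	 */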
@@ -1570,9 +1561,7 @@ xlog_recover_reorder_trans(
list_splice_init(&trans->r_itemq, &sort_list);
list_for_each_entry_safe(item, n, &sort_list, ri_list) {
- xfs_buf_log_format_t *buf_f;
-
- buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
+ xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
@@ -1897,9 +1886,8 @@ xlog_recover_do_inode_buffer(
* current di_next_unlinked field. Extract its value
* and copy it to the buffer copy.
*/
- logged_nextp = (xfs_agino_t *)
- ((char *)(item->ri_buf[item_index].i_addr) +
- (next_unlinked_offset - reg_buf_offset));
+ logged_nextp = item->ri_buf[item_index].i_addr +
+ next_unlinked_offset - reg_buf_offset;
if (unlikely(*logged_nextp == 0)) {
xfs_fs_cmn_err(CE_ALERT, mp,
"bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field",
@@ -1978,8 +1966,7 @@ xlog_recover_do_reg_buffer(
item->ri_buf[i].i_len, __func__);
goto next;
}
- error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
- item->ri_buf[i].i_addr,
+ error = xfs_qm_dqcheck(item->ri_buf[i].i_addr,
-1, 0, XFS_QMOPT_DOWARN,
"dquot_buf_recover");
if (error)
@@ -2192,7 +2179,7 @@ xlog_recover_do_buffer_trans(
xlog_recover_item_t *item,
int pass)
{
- xfs_buf_log_format_t *buf_f;
+ xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
xfs_mount_t *mp;
xfs_buf_t *bp;
int error;
@@ -2202,8 +2189,6 @@ xlog_recover_do_buffer_trans(
ushort flags;
uint buf_flags;
- buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
-
if (pass == XLOG_RECOVER_PASS1) {
/*
* In this pass we're only looking for buf items
@@ -2324,10 +2309,9 @@ xlog_recover_do_inode_trans(
}
if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
- in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
+ in_f = item->ri_buf[0].i_addr;
} else {
- in_f = (xfs_inode_log_format_t *)kmem_alloc(
- sizeof(xfs_inode_log_format_t), KM_SLEEP);
+ in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
need_free = 1;
error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
if (error)
@@ -2375,7 +2359,7 @@ xlog_recover_do_inode_trans(
error = EFSCORRUPTED;
goto error;
}
- dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
+ dicp = item->ri_buf[1].i_addr;
if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
xfs_buf_relse(bp);
xfs_fs_cmn_err(CE_ALERT, mp,
@@ -2466,7 +2450,7 @@ xlog_recover_do_inode_trans(
}
/* The core is in in-core format */
- xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr);
+ xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
/* the rest is in on-disk format */
if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
@@ -2583,7 +2567,7 @@ xlog_recover_do_quotaoff_trans(
return (0);
}
- qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
+ qoff_f = item->ri_buf[0].i_addr;
ASSERT(qoff_f);
/*
@@ -2627,9 +2611,8 @@ xlog_recover_do_dquot_trans(
if (mp->m_qflags == 0)
return (0);
- recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
-
- if (item->ri_buf[1].i_addr == NULL) {
+ recddq = item->ri_buf[1].i_addr;
+ if (recddq == NULL) {
cmn_err(CE_ALERT,
"XFS: NULL dquot in %s.", __func__);
return XFS_ERROR(EIO);
@@ -2659,7 +2642,7 @@ xlog_recover_do_dquot_trans(
* The other possibility, of course, is that the quota subsystem was
* removed since the last mount - ENOSYS.
*/
- dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
+ dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
if ((error = xfs_qm_dqcheck(recddq,
dq_f->qlf_id,
@@ -2726,7 +2709,7 @@ xlog_recover_do_efi_trans(
return 0;
}
- efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
+ efi_formatp = item->ri_buf[0].i_addr;
mp = log->l_mp;
efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
@@ -2772,7 +2755,7 @@ xlog_recover_do_efd_trans(
return;
}
- efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
+ efd_formatp = item->ri_buf[0].i_addr;
ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
(item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
@@ -3203,7 +3186,7 @@ xlog_recover_process_one_iunlink(
int error;
ino = XFS_AGINO_TO_INO(mp, agno, agino);
- error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
+ error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
if (error)
goto fail;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d7bf38c8cd1..aeb9d72ebf6 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -25,13 +25,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -268,10 +265,10 @@ xfs_sb_validate_fsb_count(
#if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */
if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
- return E2BIG;
+ return EFBIG;
#else /* Limited by UINT_MAX of sectors */
if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
- return E2BIG;
+ return EFBIG;
#endif
return 0;
}
@@ -393,7 +390,7 @@ xfs_mount_validate_sb(
xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
xfs_fs_mount_cmn_err(flags,
"file system too large to be mounted on this system.");
- return XFS_ERROR(E2BIG);
+ return XFS_ERROR(EFBIG);
}
if (unlikely(sbp->sb_inprogress)) {
@@ -413,17 +410,6 @@ xfs_mount_validate_sb(
return 0;
}
-STATIC void
-xfs_initialize_perag_icache(
- xfs_perag_t *pag)
-{
- if (!pag->pag_ici_init) {
- rwlock_init(&pag->pag_ici_lock);
- INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
- pag->pag_ici_init = 1;
- }
-}
-
int
xfs_initialize_perag(
xfs_mount_t *mp,
@@ -436,13 +422,8 @@ xfs_initialize_perag(
xfs_agino_t agino;
xfs_ino_t ino;
xfs_sb_t *sbp = &mp->m_sb;
- xfs_ino_t max_inum = XFS_MAXINUMBER_32;
int error = -ENOMEM;
- /* Check to see if the filesystem can overflow 32 bit inodes */
- agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
- ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
-
/*
* Walk the current per-ag tree so we don't try to initialise AGs
* that already exist (growfs case). Allocate and insert all the
@@ -456,11 +437,18 @@ xfs_initialize_perag(
}
if (!first_initialised)
first_initialised = index;
+
pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
if (!pag)
goto out_unwind;
+ pag->pag_agno = index;
+ pag->pag_mount = mp;
+ rwlock_init(&pag->pag_ici_lock);
+ INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
+
if (radix_tree_preload(GFP_NOFS))
goto out_unwind;
+
spin_lock(&mp->m_perag_lock);
if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
BUG();
@@ -469,25 +457,26 @@ xfs_initialize_perag(
error = -EEXIST;
goto out_unwind;
}
- pag->pag_agno = index;
- pag->pag_mount = mp;
spin_unlock(&mp->m_perag_lock);
radix_tree_preload_end();
}
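The loop above follows the standard radix tree preload idiom: preallocate tree nodes while sleeping is still allowed, so the insert itself can run under m_perag_lock. The idiom in isolation, with hypothetical tree/lock/item names:

	if (radix_tree_preload(GFP_NOFS))	/* may sleep; preallocates nodes */
		return -ENOMEM;			/* disables preemption on success */
	spin_lock(&lock);
	error = radix_tree_insert(&tree, index, item);	/* safe in atomic context */
	spin_unlock(&lock);
	radix_tree_preload_end();		/* re-enables preemption */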
- /* Clear the mount flag if no inode can overflow 32 bits
- * on this filesystem, or if specifically requested..
+ /*
+ * If we mount with the inode64 option, or if no inode overflows
+ * the legacy 32-bit address space, clear the inode32 option.
*/
- if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > max_inum) {
+ agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
+ ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
+
+ if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
mp->m_flags |= XFS_MOUNT_32BITINODES;
- } else {
+ else
mp->m_flags &= ~XFS_MOUNT_32BITINODES;
- }
- /* If we can overflow then setup the ag headers accordingly */
if (mp->m_flags & XFS_MOUNT_32BITINODES) {
- /* Calculate how much should be reserved for inodes to
- * meet the max inode percentage.
+ /*
+ * Calculate how much should be reserved for inodes to meet
+ * the max inode percentage.
*/
if (mp->m_maxicount) {
__uint64_t icount;
@@ -500,30 +489,28 @@ xfs_initialize_perag(
} else {
max_metadata = agcount;
}
+
for (index = 0; index < agcount; index++) {
ino = XFS_AGINO_TO_INO(mp, index, agino);
- if (ino > max_inum) {
+ if (ino > XFS_MAXINUMBER_32) {
index++;
break;
}
- /* This ag is preferred for inodes */
pag = xfs_perag_get(mp, index);
pag->pagi_inodeok = 1;
if (index < max_metadata)
pag->pagf_metadata = 1;
- xfs_initialize_perag_icache(pag);
xfs_perag_put(pag);
}
} else {
- /* Setup default behavior for smaller filesystems */
for (index = 0; index < agcount; index++) {
pag = xfs_perag_get(mp, index);
pag->pagi_inodeok = 1;
- xfs_initialize_perag_icache(pag);
xfs_perag_put(pag);
}
}
+
if (maxagi)
*maxagi = index;
return 0;
@@ -1009,7 +996,7 @@ xfs_check_sizes(xfs_mount_t *mp)
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
cmn_err(CE_WARN, "XFS: size check 1 failed");
- return XFS_ERROR(E2BIG);
+ return XFS_ERROR(EFBIG);
}
error = xfs_read_buf(mp, mp->m_ddev_targp,
d - XFS_FSS_TO_BB(mp, 1),
@@ -1019,7 +1006,7 @@ xfs_check_sizes(xfs_mount_t *mp)
} else {
cmn_err(CE_WARN, "XFS: size check 2 failed");
if (error == ENOSPC)
- error = XFS_ERROR(E2BIG);
+ error = XFS_ERROR(EFBIG);
return error;
}
@@ -1027,7 +1014,7 @@ xfs_check_sizes(xfs_mount_t *mp)
d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
cmn_err(CE_WARN, "XFS: size check 3 failed");
- return XFS_ERROR(E2BIG);
+ return XFS_ERROR(EFBIG);
}
error = xfs_read_buf(mp, mp->m_logdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
@@ -1037,7 +1024,7 @@ xfs_check_sizes(xfs_mount_t *mp)
} else {
cmn_err(CE_WARN, "XFS: size check 3 failed");
if (error == ENOSPC)
- error = XFS_ERROR(E2BIG);
+ error = XFS_ERROR(EFBIG);
return error;
}
}
@@ -1254,7 +1241,7 @@ xfs_mountfs(
* Allocate and initialize the per-ag data.
*/
spin_lock_init(&mp->m_perag_lock);
- INIT_RADIX_TREE(&mp->m_perag_tree, GFP_NOFS);
+ INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
if (error) {
cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
@@ -1310,7 +1297,7 @@ xfs_mountfs(
* Get and sanity-check the root inode.
* Save the pointer to it in the mount structure.
*/
- error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
+ error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
if (error) {
cmn_err(CE_WARN, "XFS: failed to read root inode");
goto out_log_dealloc;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1d2c7eed4ed..622da2179a5 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -66,65 +66,6 @@ struct xfs_nameops;
struct xfs_ail;
struct xfs_quotainfo;
-
-/*
- * Prototypes and functions for the Data Migration subsystem.
- */
-
-typedef int (*xfs_send_data_t)(int, struct xfs_inode *,
- xfs_off_t, size_t, int, int *);
-typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint);
-typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t);
-typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *,
- struct xfs_inode *, dm_right_t,
- struct xfs_inode *, dm_right_t,
- const unsigned char *, const unsigned char *,
- mode_t, int, int);
-typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t,
- char *, char *);
-typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *,
- dm_right_t, mode_t, int, int);
-
-typedef struct xfs_dmops {
- xfs_send_data_t xfs_send_data;
- xfs_send_mmap_t xfs_send_mmap;
- xfs_send_destroy_t xfs_send_destroy;
- xfs_send_namesp_t xfs_send_namesp;
- xfs_send_mount_t xfs_send_mount;
- xfs_send_unmount_t xfs_send_unmount;
-} xfs_dmops_t;
-
-#define XFS_DMAPI_UNMOUNT_FLAGS(mp) \
- (((mp)->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ? 0 : DM_FLAGS_UNWANTED)
-
-#define XFS_SEND_DATA(mp, ev,ip,off,len,fl,lock) \
- (*(mp)->m_dm_ops->xfs_send_data)(ev,ip,off,len,fl,lock)
-#define XFS_SEND_MMAP(mp, vma,fl) \
- (*(mp)->m_dm_ops->xfs_send_mmap)(vma,fl)
-#define XFS_SEND_DESTROY(mp, ip,right) \
- (*(mp)->m_dm_ops->xfs_send_destroy)(ip,right)
-#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
- (*(mp)->m_dm_ops->xfs_send_namesp)(ev,NULL,b1,r1,b2,r2,n1,n2,mode,rval,fl)
-#define XFS_SEND_MOUNT(mp,right,path,name) \
- (*(mp)->m_dm_ops->xfs_send_mount)(mp,right,path,name)
-#define XFS_SEND_PREUNMOUNT(mp) \
-do { \
- if (mp->m_flags & XFS_MOUNT_DMAPI) { \
- (*(mp)->m_dm_ops->xfs_send_namesp)(DM_EVENT_PREUNMOUNT, mp, \
- (mp)->m_rootip, DM_RIGHT_NULL, \
- (mp)->m_rootip, DM_RIGHT_NULL, \
- NULL, NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
- } \
-} while (0)
-#define XFS_SEND_UNMOUNT(mp) \
-do { \
- if (mp->m_flags & XFS_MOUNT_DMAPI) { \
- (*(mp)->m_dm_ops->xfs_send_unmount)(mp, (mp)->m_rootip, \
- DM_RIGHT_NULL, 0, 0, XFS_DMAPI_UNMOUNT_FLAGS(mp)); \
- } \
-} while (0)
-
-
#ifdef HAVE_PERCPU_SB
/*
@@ -241,8 +182,6 @@ typedef struct xfs_mount {
uint m_chsize; /* size of next field */
struct xfs_chash *m_chash; /* fs private inode per-cluster
* hash table */
- struct xfs_dmops *m_dm_ops; /* vector of DMI ops */
- struct xfs_qmops *m_qm_ops; /* vector of XQM ops */
atomic_t m_active_trans; /* number trans frozen */
#ifdef HAVE_PERCPU_SB
xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
@@ -259,7 +198,7 @@ typedef struct xfs_mount {
wait_queue_head_t m_wait_single_sync_task;
__int64_t m_update_flags; /* sb flags we need to update
on the next remount,rw */
- struct list_head m_mplist; /* inode shrinker mount list */
+ struct shrinker m_inode_shrink; /* inode reclaim shrinker */
} xfs_mount_t;
/*
@@ -269,7 +208,6 @@ typedef struct xfs_mount {
must be synchronous except
for space allocations */
#define XFS_MOUNT_DELAYLOG (1ULL << 1) /* delayed logging is enabled */
-#define XFS_MOUNT_DMAPI (1ULL << 2) /* dmapi is enabled */
#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
operations, typically for
@@ -282,8 +220,6 @@ typedef struct xfs_mount {
#define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */
#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */
#define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */
-#define XFS_MOUNT_OSYNCISOSYNC (1ULL << 13) /* o_sync is REALLY o_sync */
- /* osyncisdsync is now default*/
#define XFS_MOUNT_32BITINODES (1ULL << 14) /* do not create inodes above
* 32 bits in size */
#define XFS_MOUNT_SMALL_INUMS (1ULL << 15) /* users wants 32bit inodes */
@@ -440,11 +376,6 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
-extern int xfs_dmops_get(struct xfs_mount *);
-extern void xfs_dmops_put(struct xfs_mount *);
-
-extern struct xfs_dmops xfs_dmcore_xfs;
-
#endif /* __KERNEL__ */
extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index fc1cda23b81..8fca957200d 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -24,12 +24,9 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@@ -116,20 +113,7 @@ xfs_rename(
int spaceres;
int num_inodes;
- xfs_itrace_entry(src_dp);
- xfs_itrace_entry(target_dp);
-
- if (DM_EVENT_ENABLED(src_dp, DM_EVENT_RENAME) ||
- DM_EVENT_ENABLED(target_dp, DM_EVENT_RENAME)) {
- error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,
- src_dp, DM_RIGHT_NULL,
- target_dp, DM_RIGHT_NULL,
- src_name->name, target_name->name,
- 0, 0, 0);
- if (error)
- return error;
- }
- /* Return through std_return after this point. */
+ trace_xfs_rename(src_dp, target_dp, src_name, target_name);
new_parent = (src_dp != target_dp);
src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);
@@ -184,26 +168,14 @@ xfs_rename(
/*
* Join all the inodes to the transaction. From this point on,
* we can rely on either trans_commit or trans_cancel to unlock
- * them. Note that we need to add a vnode reference to the
- * directories since trans_commit & trans_cancel will decrement
- * them when they unlock the inodes. Also, we need to be careful
- * not to add an inode to the transaction more than once.
+ * them.
*/
- IHOLD(src_dp);
- xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
-
- if (new_parent) {
- IHOLD(target_dp);
- xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
- }
-
- IHOLD(src_ip);
- xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
-
- if (target_ip) {
- IHOLD(target_ip);
- xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
- }
+ xfs_trans_ijoin_ref(tp, src_dp, XFS_ILOCK_EXCL);
+ if (new_parent)
+ xfs_trans_ijoin_ref(tp, target_dp, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, src_ip, XFS_ILOCK_EXCL);
+ if (target_ip)
+ xfs_trans_ijoin_ref(tp, target_ip, XFS_ILOCK_EXCL);
/*
* If we are using project inheritance, we only allow renames
@@ -369,26 +341,13 @@ xfs_rename(
* trans_commit will unlock src_ip, target_ip & decrement
* the vnode references.
*/
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-
- /* Fall through to std_return with error = 0 or errno from
- * xfs_trans_commit */
-std_return:
- if (DM_EVENT_ENABLED(src_dp, DM_EVENT_POSTRENAME) ||
- DM_EVENT_ENABLED(target_dp, DM_EVENT_POSTRENAME)) {
- (void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME,
- src_dp, DM_RIGHT_NULL,
- target_dp, DM_RIGHT_NULL,
- src_name->name, target_name->name,
- 0, error, 0);
- }
- return error;
+ return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
abort_return:
cancel_flags |= XFS_TRANS_ABORT;
- /* FALLTHROUGH */
error_return:
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp, cancel_flags);
- goto std_return;
+ std_return:
+ return error;
}
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 6be05f756d5..891260fea11 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -25,17 +25,10 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
@@ -129,7 +122,7 @@ xfs_growfs_rt_alloc(
cancelflags |= XFS_TRANS_ABORT;
error = xfs_bmapi(tp, ip, oblocks, nblocks - oblocks,
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, &firstblock,
- resblks, &map, &nmap, &flist, NULL);
+ resblks, &map, &nmap, &flist);
if (!error && nmap < 1)
error = XFS_ERROR(ENOSPC);
if (error)
@@ -2247,7 +2240,7 @@ xfs_rtmount_init(
cmn_err(CE_WARN, "XFS: realtime mount -- %llu != %llu",
(unsigned long long) XFS_BB_TO_FSB(mp, d),
(unsigned long long) mp->m_sb.sb_rblocks);
- return XFS_ERROR(E2BIG);
+ return XFS_ERROR(EFBIG);
}
error = xfs_read_buf(mp, mp->m_rtdev_targp,
d - XFS_FSB_TO_BB(mp, 1),
@@ -2256,7 +2249,7 @@ xfs_rtmount_init(
cmn_err(CE_WARN,
"XFS: realtime mount -- xfs_read_buf failed, returned %d", error);
if (error == ENOSPC)
- return XFS_ERROR(E2BIG);
+ return XFS_ERROR(EFBIG);
return error;
}
xfs_buf_relse(bp);
@@ -2277,12 +2270,12 @@ xfs_rtmount_inodes(
sbp = &mp->m_sb;
if (sbp->sb_rbmino == NULLFSINO)
return 0;
- error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip, 0);
+ error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip);
if (error)
return error;
ASSERT(mp->m_rbmip != NULL);
ASSERT(sbp->sb_rsumino != NULLFSINO);
- error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip, 0);
+ error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
if (error) {
IRELE(mp->m_rbmip);
return error;
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index b2d67adb6a0..ff614c29b44 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -147,7 +147,16 @@ xfs_growfs_rt(
# define xfs_rtfree_extent(t,b,l) (ENOSYS)
# define xfs_rtpick_extent(m,t,l,rb) (ENOSYS)
# define xfs_growfs_rt(mp,in) (ENOSYS)
-# define xfs_rtmount_init(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
+static inline int /* error */
+xfs_rtmount_init(
+ xfs_mount_t *mp) /* file system mount structure */
+{
+ if (mp->m_sb.sb_rblocks == 0)
+ return 0;
+
+ cmn_err(CE_WARN, "XFS: Not built with CONFIG_XFS_RT");
+ return ENOSYS;
+}
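Note that the macro this replaces expanded mp even though its parameter was named m, so it only compiled because every caller happened to have an mp in scope (the surviving xfs_rtmount_inodes macro below still carries that quirk). The static inline also gains real parameter type checking:

	error = xfs_rtmount_init(mp);	/* unchanged call site, now type checked */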
# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
# define xfs_rtunmount_inodes(m)
#endif /* CONFIG_XFS_RT */
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index e336742a58a..56861d5daae 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -24,27 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_itable.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
-#include "xfs_attr.h"
-#include "xfs_bmap.h"
#include "xfs_error.h"
-#include "xfs_buf_item.h"
#include "xfs_rw.h"
-#include "xfs_trace.h"
/*
* Force a shutdown of the filesystem instantly while keeping
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index ce558efa2ea..fdca7416c75 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * Copyright (C) 2010 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -24,16 +25,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
@@ -47,135 +44,491 @@
#include "xfs_trace.h"
kmem_zone_t *xfs_trans_zone;
+kmem_zone_t *xfs_log_item_desc_zone;
+
+
+/*
+ * Various log reservation values.
+ *
+ * These are based on the size of the file system block because that is what
+ * most transactions manipulate. Each adds in an additional 128 bytes per
+ * item logged to try to account for the overhead of the transaction mechanism.
+ *
+ * Note: Most of the reservations underestimate the number of allocation
+ * groups into which they could free extents in the xfs_bmap_finish() call.
+ * This is because the number in the worst case is quite high and quite
+ * unusual. In order to fix this we need to change xfs_bmap_finish() to free
+ * extents in only a single AG at a time. This will require changes to the
+ * EFI code as well, however, so that the EFI for the extents not freed is
+ * logged again in each transaction. See SGI PV #261917.
+ *
+ * Reservation functions here avoid a huge stack in xfs_trans_init due to
+ * register overflow from temporaries in the calculations.
+ */
+
/*
- * Reservation functions here avoid a huge stack in xfs_trans_init
- * due to register overflow from temporaries in the calculations.
+ * In a write transaction we can allocate a maximum of 2
+ * extents. This gives:
+ * the inode getting the new extents: inode size
+ * the inode's bmap btree: max depth * block size
+ * the agfs of the ags from which the extents are allocated: 2 * sector
+ * the superblock free block counter: sector size
+ * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ * And the bmap_finish transaction can free bmap blocks in a join:
+ * the agfs of the ags containing the blocks: 2 * sector size
+ * the agfls of the ags containing the blocks: 2 * sector size
+ * the super block free block counter: sector size
+ * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
*/
STATIC uint
-xfs_calc_write_reservation(xfs_mount_t *mp)
+xfs_calc_write_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ MAX((mp->m_sb.sb_inodesize +
+ XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) +
+ 2 * mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_ALLOCFREE_LOG_RES(mp, 2) +
+ 128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) +
+ XFS_ALLOCFREE_LOG_COUNT(mp, 2))),
+ (2 * mp->m_sb.sb_sectsize +
+ 2 * mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_ALLOCFREE_LOG_RES(mp, 2) +
+ 128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
}
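The pattern above repeats through every helper that follows: sum the metadata bytes that may be dirtied, add 128 bytes of transaction overhead per logged item, and take a MAX() where two disjoint worst cases compete (the transaction itself versus its bmap_finish follow-up). Schematically — illustrative only, not kernel code:

	static inline uint
	example_calc_reservation(uint metadata_bytes, uint nitems)
	{
		/* 128 bytes of per-item overhead, per the comment above */
		return metadata_bytes + 128 * nitems;
	}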
+/*
+ * In truncating a file we free up to two extents at once. We can modify:
+ * the inode being truncated: inode size
+ * the inode's bmap btree: (max depth + 1) * block size
+ * And the bmap_finish transaction can free the blocks and bmap blocks:
+ * the agf for each of the ags: 4 * sector size
+ * the agfl for each of the ags: 4 * sector size
+ * the super block to reflect the freed blocks: sector size
+ * worst case split in allocation btrees per extent assuming 4 extents:
+ * 4 exts * 2 trees * (2 * max depth - 1) * block size
+ * the inode btree: max depth * blocksize
+ * the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_itruncate_reservation(xfs_mount_t *mp)
+xfs_calc_itruncate_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ MAX((mp->m_sb.sb_inodesize +
+ XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) +
+ 128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK))),
+ (4 * mp->m_sb.sb_sectsize +
+ 4 * mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_ALLOCFREE_LOG_RES(mp, 4) +
+ 128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4)) +
+ 128 * 5 +
+ XFS_ALLOCFREE_LOG_RES(mp, 1) +
+ 128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
+ XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
}
+/*
+ * In renaming a file we can modify:
+ * the four inodes involved: 4 * inode size
+ * the two directory btrees: 2 * (max depth + v2) * dir block size
+ * the two directory bmap btrees: 2 * max depth * block size
+ * And the bmap_finish transaction can free dir and bmap blocks (two sets
+ * of bmap blocks) giving:
+ * the agf for the ags in which the blocks live: 3 * sector size
+ * the agfl for the ags in which the blocks live: 3 * sector size
+ * the superblock for the free block count: sector size
+ * the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_rename_reservation(xfs_mount_t *mp)
+xfs_calc_rename_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ MAX((4 * mp->m_sb.sb_inodesize +
+ 2 * XFS_DIROP_LOG_RES(mp) +
+ 128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp))),
+ (3 * mp->m_sb.sb_sectsize +
+ 3 * mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_ALLOCFREE_LOG_RES(mp, 3) +
+ 128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3))));
}
+/*
+ * For creating a link to an inode:
+ * the parent directory inode: inode size
+ * the linked inode: inode size
+ * the directory btree could split: (max depth + v2) * dir block size
+ * the directory bmap btree could join or split: (max depth + v2) * blocksize
+ * And the bmap_finish transaction can free some bmap blocks giving:
+ * the agf for the ag in which the blocks live: sector size
+ * the agfl for the ag in which the blocks live: sector size
+ * the superblock for the free block count: sector size
+ * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_link_reservation(xfs_mount_t *mp)
+xfs_calc_link_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ MAX((mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_inodesize +
+ XFS_DIROP_LOG_RES(mp) +
+ 128 * (2 + XFS_DIROP_LOG_COUNT(mp))),
+ (mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_ALLOCFREE_LOG_RES(mp, 1) +
+ 128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
}
+/*
+ * For removing a directory entry we can modify:
+ * the parent directory inode: inode size
+ * the removed inode: inode size
+ * the directory btree could join: (max depth + v2) * dir block size
+ * the directory bmap btree could join or split: (max depth + v2) * blocksize
+ * And the bmap_finish transaction can free the dir and bmap blocks giving:
+ * the agf for the ag in which the blocks live: 2 * sector size
+ * the agfl for the ag in which the blocks live: 2 * sector size
+ * the superblock for the free block count: sector size
+ * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_remove_reservation(xfs_mount_t *mp)
+xfs_calc_remove_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ MAX((mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_inodesize +
+ XFS_DIROP_LOG_RES(mp) +
+ 128 * (2 + XFS_DIROP_LOG_COUNT(mp))),
+ (2 * mp->m_sb.sb_sectsize +
+ 2 * mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_ALLOCFREE_LOG_RES(mp, 2) +
+ 128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
}
+/*
+ * For symlink we can modify:
+ * the parent directory inode: inode size
+ * the new inode: inode size
+ * the inode btree entry: 1 block
+ * the directory btree: (max depth + v2) * dir block size
+ * the directory inode's bmap btree: (max depth + v2) * block size
+ * the blocks for the symlink: 1 kB
+ * Or in the first xact we allocate some inodes giving:
+ * the agi and agf of the ag getting the new inodes: 2 * sectorsize
+ * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
+ * the inode btree: max depth * blocksize
+ * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_symlink_reservation(xfs_mount_t *mp)
+xfs_calc_symlink_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ MAX((mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_inodesize +
+ XFS_FSB_TO_B(mp, 1) +
+ XFS_DIROP_LOG_RES(mp) +
+ 1024 +
+ 128 * (4 + XFS_DIROP_LOG_COUNT(mp))),
+ (2 * mp->m_sb.sb_sectsize +
+ XFS_FSB_TO_B(mp, XFS_IALLOC_BLOCKS(mp)) +
+ XFS_FSB_TO_B(mp, mp->m_in_maxlevels) +
+ XFS_ALLOCFREE_LOG_RES(mp, 1) +
+ 128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
+ XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
}
+/*
+ * For create we can modify:
+ * the parent directory inode: inode size
+ * the new inode: inode size
+ * the inode btree entry: block size
+ * the superblock for the nlink flag: sector size
+ * the directory btree: (max depth + v2) * dir block size
+ * the directory inode's bmap btree: (max depth + v2) * block size
+ * Or in the first xact we allocate some inodes giving:
+ * the agi and agf of the ag getting the new inodes: 2 * sectorsize
+ * the superblock for the nlink flag: sector size
+ * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
+ * the inode btree: max depth * blocksize
+ * the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_create_reservation(xfs_mount_t *mp)
+xfs_calc_create_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ MAX((mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_sectsize +
+ XFS_FSB_TO_B(mp, 1) +
+ XFS_DIROP_LOG_RES(mp) +
+ 128 * (3 + XFS_DIROP_LOG_COUNT(mp))),
+ (3 * mp->m_sb.sb_sectsize +
+ XFS_FSB_TO_B(mp, XFS_IALLOC_BLOCKS(mp)) +
+ XFS_FSB_TO_B(mp, mp->m_in_maxlevels) +
+ XFS_ALLOCFREE_LOG_RES(mp, 1) +
+ 128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
+ XFS_ALLOCFREE_LOG_COUNT(mp, 1))));
}
+/*
+ * Making a new directory is the same as creating a new file.
+ */
STATIC uint
-xfs_calc_mkdir_reservation(xfs_mount_t *mp)
+xfs_calc_mkdir_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return xfs_calc_create_reservation(mp);
}
+/*
+ * In freeing an inode we can modify:
+ * the inode being freed: inode size
+ * the super block free inode counter: sector size
+ * the agi hash list and counters: sector size
+ * the inode btree entry: block size
+ * the on disk inode before ours in the agi hash list: inode cluster size
+ * the inode btree: max depth * blocksize
+ * the allocation btrees: 2 trees * (max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_ifree_reservation(xfs_mount_t *mp)
+xfs_calc_ifree_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_FSB_TO_B(mp, 1) +
+ MAX((__uint16_t)XFS_FSB_TO_B(mp, 1),
+ XFS_INODE_CLUSTER_SIZE(mp)) +
+ 128 * 5 +
+ XFS_ALLOCFREE_LOG_RES(mp, 1) +
+ 128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels +
+ XFS_ALLOCFREE_LOG_COUNT(mp, 1));
}
+/*
+ * When only changing the inode, we log the inode and possibly the superblock.
+ * We also add a bit of slop for the transaction overhead.
+ */
STATIC uint
-xfs_calc_ichange_reservation(xfs_mount_t *mp)
+xfs_calc_ichange_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_sectsize +
+ 512;
}
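Plugging an assumed geometry into the formula above makes the figure concrete (256-byte inodes, 512-byte sectors, and ignoring the XFS_DQUOT_LOGRES term):

	tr_ichange = 256 (inode) + 512 (sector) + 512 (slop) = 1280 bytes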
+/*
+ * Growing the data section of the filesystem.
+ * superblock
+ * agi and agf
+ * allocation btrees
+ */
STATIC uint
-xfs_calc_growdata_reservation(xfs_mount_t *mp)
+xfs_calc_growdata_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_GROWDATA_LOG_RES(mp);
+ return mp->m_sb.sb_sectsize * 3 +
+ XFS_ALLOCFREE_LOG_RES(mp, 1) +
+ 128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1));
}
+/*
+ * Growing the rt section of the filesystem.
+ * In the first set of transactions (ALLOC) we allocate space to the
+ * bitmap or summary files.
+ * superblock: sector size
+ * agf of the ag from which the extent is allocated: sector size
+ * bmap btree for bitmap/summary inode: max depth * blocksize
+ * bitmap/summary inode: inode size
+ * allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
+ */
STATIC uint
-xfs_calc_growrtalloc_reservation(xfs_mount_t *mp)
+xfs_calc_growrtalloc_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_GROWRTALLOC_LOG_RES(mp);
+ return 2 * mp->m_sb.sb_sectsize +
+ XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) +
+ mp->m_sb.sb_inodesize +
+ XFS_ALLOCFREE_LOG_RES(mp, 1) +
+ 128 * (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) +
+ XFS_ALLOCFREE_LOG_COUNT(mp, 1));
}
+/*
+ * Growing the rt section of the filesystem.
+ * In the second set of transactions (ZERO) we zero the new metadata blocks.
+ * one bitmap/summary block: blocksize
+ */
STATIC uint
-xfs_calc_growrtzero_reservation(xfs_mount_t *mp)
+xfs_calc_growrtzero_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_GROWRTZERO_LOG_RES(mp);
+ return mp->m_sb.sb_blocksize + 128;
}
+/*
+ * Growing the rt section of the filesystem.
+ * In the third set of transactions (FREE) we update metadata without
+ * allocating any new blocks.
+ * superblock: sector size
+ * bitmap inode: inode size
+ * summary inode: inode size
+ * one bitmap block: blocksize
+ * summary blocks: new summary size
+ */
STATIC uint
-xfs_calc_growrtfree_reservation(xfs_mount_t *mp)
+xfs_calc_growrtfree_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_GROWRTFREE_LOG_RES(mp);
+ return mp->m_sb.sb_sectsize +
+ 2 * mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_blocksize +
+ mp->m_rsumsize +
+ 128 * 5;
}
+/*
+ * Logging the inode modification timestamp on a synchronous write.
+ * inode
+ */
STATIC uint
-xfs_calc_swrite_reservation(xfs_mount_t *mp)
+xfs_calc_swrite_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_SWRITE_LOG_RES(mp);
+ return mp->m_sb.sb_inodesize + 128;
}
+/*
+ * Logging the inode mode bits when writing a setuid/setgid file
+ * inode
+ */
STATIC uint
xfs_calc_writeid_reservation(xfs_mount_t *mp)
{
- return XFS_CALC_WRITEID_LOG_RES(mp);
+ return mp->m_sb.sb_inodesize + 128;
}
+/*
+ * Converting the inode from non-attributed to attributed.
+ * the inode being converted: inode size
+ * agf block and superblock (for block allocation)
+ * the new block (directory sized)
+ * bmap blocks for the new directory block
+ * allocation btrees
+ */
STATIC uint
-xfs_calc_addafork_reservation(xfs_mount_t *mp)
+xfs_calc_addafork_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_sectsize * 2 +
+ mp->m_dirblksize +
+ XFS_FSB_TO_B(mp, XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) +
+ XFS_ALLOCFREE_LOG_RES(mp, 1) +
+ 128 * (4 + XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1 +
+ XFS_ALLOCFREE_LOG_COUNT(mp, 1));
}
+/*
+ * Removing the attribute fork of a file
+ * the inode being truncated: inode size
+ * the inode's bmap btree: max depth * block size
+ * And the bmap_finish transaction can free the blocks and bmap blocks:
+ * the agf for each of the ags: 4 * sector size
+ * the agfl for each of the ags: 4 * sector size
+ * the super block to reflect the freed blocks: sector size
+ * worst case split in allocation btrees per extent assuming 4 extents:
+ * 4 exts * 2 trees * (2 * max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_attrinval_reservation(xfs_mount_t *mp)
+xfs_calc_attrinval_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_ATTRINVAL_LOG_RES(mp);
+ return MAX((mp->m_sb.sb_inodesize +
+ XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
+ 128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))),
+ (4 * mp->m_sb.sb_sectsize +
+ 4 * mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_ALLOCFREE_LOG_RES(mp, 4) +
+ 128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))));
}
+/*
+ * Setting an attribute.
+ * the inode getting the attribute
+ * the superblock for allocations
+ * the agfs extents are allocated from
+ * the attribute btree * max depth
+ * the inode allocation btree
+ * Since attribute transaction space is dependent on the size of the attribute,
+ * the calculation is done partially at mount time and partially at runtime.
+ */
STATIC uint
-xfs_calc_attrset_reservation(xfs_mount_t *mp)
+xfs_calc_attrset_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ mp->m_sb.sb_inodesize +
+ mp->m_sb.sb_sectsize +
+ XFS_FSB_TO_B(mp, XFS_DA_NODE_MAXDEPTH) +
+ 128 * (2 + XFS_DA_NODE_MAXDEPTH);
}
+/*
+ * Removing an attribute.
+ * the inode: inode size
+ * the attribute btree could join: max depth * block size
+ * the inode bmap btree could join or split: max depth * block size
+ * And the bmap_finish transaction can free the attr blocks freed giving:
+ * the agf for the ag in which the blocks live: 2 * sector size
+ * the agfl for the ag in which the blocks live: 2 * sector size
+ * the superblock for the free block count: sector size
+ * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
STATIC uint
-xfs_calc_attrrm_reservation(xfs_mount_t *mp)
+xfs_calc_attrrm_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
+ return XFS_DQUOT_LOGRES(mp) +
+ MAX((mp->m_sb.sb_inodesize +
+ XFS_FSB_TO_B(mp, XFS_DA_NODE_MAXDEPTH) +
+ XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
+ 128 * (1 + XFS_DA_NODE_MAXDEPTH +
+ XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK))),
+ (2 * mp->m_sb.sb_sectsize +
+ 2 * mp->m_sb.sb_sectsize +
+ mp->m_sb.sb_sectsize +
+ XFS_ALLOCFREE_LOG_RES(mp, 2) +
+ 128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))));
}
+/*
+ * Clearing a bad agino number in an agi hash bucket.
+ */
STATIC uint
-xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp)
+xfs_calc_clear_agi_bucket_reservation(
+ struct xfs_mount *mp)
{
- return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
+ return mp->m_sb.sb_sectsize + 128;
}
/*
@@ -184,11 +537,10 @@ xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp)
*/
void
xfs_trans_init(
- xfs_mount_t *mp)
+ struct xfs_mount *mp)
{
-	xfs_trans_reservations_t *resp;
-	resp = &(mp->m_reservations);
+	struct xfs_trans_reservations *resp = &mp->m_reservations;
resp->tr_write = xfs_calc_write_reservation(mp);
resp->tr_itruncate = xfs_calc_itruncate_reservation(mp);
resp->tr_rename = xfs_calc_rename_reservation(mp);
@@ -243,8 +595,7 @@ _xfs_trans_alloc(
tp->t_magic = XFS_TRANS_MAGIC;
tp->t_type = type;
tp->t_mountp = mp;
- tp->t_items_free = XFS_LIC_NUM_SLOTS;
- xfs_lic_init(&(tp->t_items));
+ INIT_LIST_HEAD(&tp->t_items);
INIT_LIST_HEAD(&tp->t_busy);
return tp;
}
@@ -289,8 +640,7 @@ xfs_trans_dup(
ntp->t_magic = XFS_TRANS_MAGIC;
ntp->t_type = tp->t_type;
ntp->t_mountp = tp->t_mountp;
- ntp->t_items_free = XFS_LIC_NUM_SLOTS;
- xfs_lic_init(&(ntp->t_items));
+ INIT_LIST_HEAD(&ntp->t_items);
INIT_LIST_HEAD(&ntp->t_busy);
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -770,6 +1120,108 @@ xfs_trans_unreserve_and_mod_sb(
}
/*
+ * Add the given log item to the transaction's list of log items.
+ *
+ * The log item will now point to its new descriptor with its li_desc field.
+ */
+void
+xfs_trans_add_item(
+ struct xfs_trans *tp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_log_item_desc *lidp;
+
+	ASSERT(lip->li_mountp == tp->t_mountp);
+	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
+
+ lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);
+
+ lidp->lid_item = lip;
+ lidp->lid_flags = 0;
+ lidp->lid_size = 0;
+ list_add_tail(&lidp->lid_trans, &tp->t_items);
+
+ lip->li_desc = lidp;
+}
+
+STATIC void
+xfs_trans_free_item_desc(
+ struct xfs_log_item_desc *lidp)
+{
+ list_del_init(&lidp->lid_trans);
+ kmem_zone_free(xfs_log_item_desc_zone, lidp);
+}
+
+/*
+ * Unlink and free the given descriptor.
+ */
+void
+xfs_trans_del_item(
+ struct xfs_log_item *lip)
+{
+ xfs_trans_free_item_desc(lip->li_desc);
+ lip->li_desc = NULL;
+}
+
+/*
+ * Unlock all of the items of a transaction and free all the descriptors
+ * of that transaction.
+ */
+STATIC void
+xfs_trans_free_items(
+ struct xfs_trans *tp,
+ xfs_lsn_t commit_lsn,
+ int flags)
+{
+ struct xfs_log_item_desc *lidp, *next;
+
+ list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
+ struct xfs_log_item *lip = lidp->lid_item;
+
+ lip->li_desc = NULL;
+
+ if (commit_lsn != NULLCOMMITLSN)
+ IOP_COMMITTING(lip, commit_lsn);
+ if (flags & XFS_TRANS_ABORT)
+ lip->li_flags |= XFS_LI_ABORTED;
+ IOP_UNLOCK(lip);
+
+ xfs_trans_free_item_desc(lidp);
+ }
+}
+
+/*
+ * Unlock the items associated with a transaction.
+ *
+ * Items which were not logged should be freed. Those which were logged must
+ * still be tracked so they can be unpinned when the transaction commits.
+ */
+STATIC void
+xfs_trans_unlock_items(
+ struct xfs_trans *tp,
+ xfs_lsn_t commit_lsn)
+{
+ struct xfs_log_item_desc *lidp, *next;
+
+ list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
+ struct xfs_log_item *lip = lidp->lid_item;
+
+ lip->li_desc = NULL;
+
+ if (commit_lsn != NULLCOMMITLSN)
+ IOP_COMMITTING(lip, commit_lsn);
+ IOP_UNLOCK(lip);
+
+ /*
+ * Free the descriptor if the item is not dirty
+ * within this transaction.
+ */
+ if (!(lidp->lid_flags & XFS_LID_DIRTY))
+ xfs_trans_free_item_desc(lidp);
+ }
+}
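
Both new walks may free the descriptor the iterator is standing on, hence list_for_each_entry_safe(), which caches the next pointer before the body runs. A self-contained userspace miniature of the same idiom, with a toy list standing in for the kernel's <linux/list.h>; all names here are illustrative:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	/* toy doubly linked list, same shape as struct list_head */
	struct list_head { struct list_head *next, *prev; };

	static void list_init(struct list_head *h) { h->next = h->prev = h; }

	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev; n->next = h;
		h->prev->next = n; h->prev = n;
	}

	static void list_del(struct list_head *n)
	{
		n->prev->next = n->next; n->next->prev = n->prev;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* stand-in for struct xfs_log_item_desc */
	struct item_desc {
		int			id;
		struct list_head	lid_trans;
	};

	int
	main(void)
	{
		struct list_head t_items;	/* stand-in for tp->t_items */
		struct list_head *pos, *n;
		int i;

		list_init(&t_items);
		for (i = 0; i < 3; i++) {
			struct item_desc *d = calloc(1, sizeof(*d));
			d->id = i;
			list_add_tail(&d->lid_trans, &t_items);
		}

		/* the "safe" walk: save the next pointer before freeing pos */
		for (pos = t_items.next, n = pos->next; pos != &t_items;
		     pos = n, n = pos->next) {
			struct item_desc *d =
				container_of(pos, struct item_desc, lid_trans);
			printf("freeing descriptor %d\n", d->id);
			list_del(&d->lid_trans);
			free(d);
		}
		return 0;
	}
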
+
+/*
* Total up the number of log iovecs needed to commit this
* transaction. The transaction itself needs one for the
* transaction header. Ask each dirty item in turn how many
@@ -780,30 +1232,27 @@ xfs_trans_count_vecs(
struct xfs_trans *tp)
{
int nvecs;
- xfs_log_item_desc_t *lidp;
+ struct xfs_log_item_desc *lidp;
nvecs = 1;
- lidp = xfs_trans_first_item(tp);
- ASSERT(lidp != NULL);
/* In the non-debug case we need to start bailing out if we
* didn't find a log_item here, return zero and let trans_commit
* deal with it.
*/
- if (lidp == NULL)
+ if (list_empty(&tp->t_items)) {
+ ASSERT(0);
return 0;
+ }
- while (lidp != NULL) {
+ list_for_each_entry(lidp, &tp->t_items, lid_trans) {
/*
* Skip items which aren't dirty in this transaction.
*/
- if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
- lidp = xfs_trans_next_item(tp, lidp);
+ if (!(lidp->lid_flags & XFS_LID_DIRTY))
continue;
- }
lidp->lid_size = IOP_SIZE(lidp->lid_item);
nvecs += lidp->lid_size;
- lidp = xfs_trans_next_item(tp, lidp);
}
return nvecs;
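
The accounting is unchanged by the list conversion: one iovec for the transaction header plus IOP_SIZE() iovecs per dirty item. A tiny worked sketch with assumed per-item counts:

	#include <stdio.h>

	int
	main(void)
	{
		/* assumed IOP_SIZE() results for three dirty items */
		unsigned int sizes[] = { 2, 1, 4 };
		unsigned int nvecs = 1;	/* transaction header iovec */
		unsigned int i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			nvecs += sizes[i];

		printf("iovecs needed to commit: %u\n", nvecs); /* prints 8 */
		return 0;
	}
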
@@ -823,7 +1272,7 @@ xfs_trans_fill_vecs(
struct xfs_trans *tp,
struct xfs_log_iovec *log_vector)
{
- xfs_log_item_desc_t *lidp;
+ struct xfs_log_item_desc *lidp;
struct xfs_log_iovec *vecp;
uint nitems;
@@ -834,14 +1283,11 @@ xfs_trans_fill_vecs(
vecp = log_vector + 1;
nitems = 0;
- lidp = xfs_trans_first_item(tp);
- ASSERT(lidp);
- while (lidp) {
+ ASSERT(!list_empty(&tp->t_items));
+ list_for_each_entry(lidp, &tp->t_items, lid_trans) {
/* Skip items which aren't dirty in this transaction. */
- if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
- lidp = xfs_trans_next_item(tp, lidp);
+ if (!(lidp->lid_flags & XFS_LID_DIRTY))
continue;
- }
/*
* The item may be marked dirty but not log anything. This can
@@ -852,7 +1298,6 @@ xfs_trans_fill_vecs(
IOP_FORMAT(lidp->lid_item, vecp);
vecp += lidp->lid_size;
IOP_PIN(lidp->lid_item);
- lidp = xfs_trans_next_item(tp, lidp);
}
/*
@@ -930,7 +1375,7 @@ xfs_trans_item_committed(
* log item flags, if anyone else stales the buffer we do not want to
* pay any attention to it.
*/
- IOP_UNPIN(lip);
+ IOP_UNPIN(lip, 0);
}
/*
@@ -947,24 +1392,15 @@ xfs_trans_committed(
struct xfs_trans *tp,
int abortflag)
{
- xfs_log_item_desc_t *lidp;
- xfs_log_item_chunk_t *licp;
- xfs_log_item_chunk_t *next_licp;
+ struct xfs_log_item_desc *lidp, *next;
/* Call the transaction's completion callback if there is one. */
if (tp->t_callback != NULL)
tp->t_callback(tp, tp->t_callarg);
- for (lidp = xfs_trans_first_item(tp);
- lidp != NULL;
- lidp = xfs_trans_next_item(tp, lidp)) {
+ list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
xfs_trans_item_committed(lidp->lid_item, tp->t_lsn, abortflag);
- }
-
- /* free the item chunks, ignoring the embedded chunk */
- for (licp = tp->t_items.lic_next; licp != NULL; licp = next_licp) {
- next_licp = licp->lic_next;
- kmem_free(licp);
+ xfs_trans_free_item_desc(lidp);
}
xfs_trans_free(tp);
@@ -979,16 +1415,14 @@ xfs_trans_uncommit(
struct xfs_trans *tp,
uint flags)
{
- xfs_log_item_desc_t *lidp;
+ struct xfs_log_item_desc *lidp;
- for (lidp = xfs_trans_first_item(tp);
- lidp != NULL;
- lidp = xfs_trans_next_item(tp, lidp)) {
+ list_for_each_entry(lidp, &tp->t_items, lid_trans) {
/*
* Unpin all but those that aren't dirty.
*/
if (lidp->lid_flags & XFS_LID_DIRTY)
- IOP_UNPIN_REMOVE(lidp->lid_item, tp);
+ IOP_UNPIN(lidp->lid_item, 1);
}
xfs_trans_unreserve_and_mod_sb(tp);
@@ -1154,33 +1588,28 @@ STATIC struct xfs_log_vec *
xfs_trans_alloc_log_vecs(
xfs_trans_t *tp)
{
- xfs_log_item_desc_t *lidp;
+ struct xfs_log_item_desc *lidp;
struct xfs_log_vec *lv = NULL;
struct xfs_log_vec *ret_lv = NULL;
- lidp = xfs_trans_first_item(tp);
/* Bail out if we didn't find a log item. */
- if (!lidp) {
+ if (list_empty(&tp->t_items)) {
ASSERT(0);
return NULL;
}
- while (lidp != NULL) {
+ list_for_each_entry(lidp, &tp->t_items, lid_trans) {
struct xfs_log_vec *new_lv;
/* Skip items which aren't dirty in this transaction. */
- if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
- lidp = xfs_trans_next_item(tp, lidp);
+ if (!(lidp->lid_flags & XFS_LID_DIRTY))
continue;
- }
/* Skip items that do not have any vectors for writing */
lidp->lid_size = IOP_SIZE(lidp->lid_item);
- if (!lidp->lid_size) {
- lidp = xfs_trans_next_item(tp, lidp);
+ if (!lidp->lid_size)
continue;
- }
new_lv = kmem_zalloc(sizeof(*new_lv) +
lidp->lid_size * sizeof(struct xfs_log_iovec),
@@ -1195,7 +1624,6 @@ xfs_trans_alloc_log_vecs(
else
lv->lv_next = new_lv;
lv = new_lv;
- lidp = xfs_trans_next_item(tp, lidp);
}
return ret_lv;
@@ -1354,12 +1782,6 @@ xfs_trans_cancel(
int flags)
{
int log_flags;
-#ifdef DEBUG
- xfs_log_item_chunk_t *licp;
- xfs_log_item_desc_t *lidp;
- xfs_log_item_t *lip;
- int i;
-#endif
xfs_mount_t *mp = tp->t_mountp;
/*
@@ -1378,21 +1800,11 @@ xfs_trans_cancel(
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
#ifdef DEBUG
- if (!(flags & XFS_TRANS_ABORT)) {
- licp = &(tp->t_items);
- while (licp != NULL) {
- lidp = licp->lic_descs;
- for (i = 0; i < licp->lic_unused; i++, lidp++) {
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- lip = lidp->lid_item;
- if (!XFS_FORCED_SHUTDOWN(mp))
- ASSERT(!(lip->li_type == XFS_LI_EFD));
- }
- licp = licp->lic_next;
- }
+ if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
+ struct xfs_log_item_desc *lidp;
+
+ list_for_each_entry(lidp, &tp->t_items, lid_trans)
+ ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
}
#endif
xfs_trans_unreserve_and_mod_sb(tp);
@@ -1480,7 +1892,6 @@ xfs_trans_roll(
if (error)
return error;
- xfs_trans_ijoin(trans, dp, XFS_ILOCK_EXCL);
- xfs_trans_ihold(trans, dp);
+ xfs_trans_ijoin(trans, dp);
return 0;
}
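
The xfs_trans_roll() hunk above shows the caller-side conversion applied throughout this patch: the join-then-hold pair becomes a single call. A sketch of the pattern (fragment only, not buildable standalone):

	/* before: join, then hold so commit leaves the inode locked */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);

	/* after: one call; holding across commit is now the default */
	xfs_trans_ijoin(tp, ip);
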
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 8c69e7824f6..c13c0f97b49 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -161,105 +161,14 @@ typedef struct xfs_trans_header {
* the amount of space needed to log the item it describes
* once we get to commit processing (see xfs_trans_commit()).
*/
-typedef struct xfs_log_item_desc {
+struct xfs_log_item_desc {
struct xfs_log_item *lid_item;
- ushort lid_size;
- unsigned char lid_flags;
- unsigned char lid_index;
-} xfs_log_item_desc_t;
+ ushort lid_size;
+ unsigned char lid_flags;
+ struct list_head lid_trans;
+};
#define XFS_LID_DIRTY 0x1
-#define XFS_LID_PINNED 0x2
-
-/*
- * This structure is used to maintain a chunk list of log_item_desc
- * structures. The free field is a bitmask indicating which descriptors
- * in this chunk's array are free. The unused field is the first value
- * not used since this chunk was allocated.
- */
-#define XFS_LIC_NUM_SLOTS 15
-typedef struct xfs_log_item_chunk {
- struct xfs_log_item_chunk *lic_next;
- ushort lic_free;
- ushort lic_unused;
- xfs_log_item_desc_t lic_descs[XFS_LIC_NUM_SLOTS];
-} xfs_log_item_chunk_t;
-
-#define XFS_LIC_MAX_SLOT (XFS_LIC_NUM_SLOTS - 1)
-#define XFS_LIC_FREEMASK ((1 << XFS_LIC_NUM_SLOTS) - 1)
-
-
-/*
- * Initialize the given chunk. Set the chunk's free descriptor mask
- * to indicate that all descriptors are free. The caller gets to set
- * lic_unused to the right value (0 matches all free). The
- * lic_descs.lid_index values are set up as each desc is allocated.
- */
-static inline void xfs_lic_init(xfs_log_item_chunk_t *cp)
-{
- cp->lic_free = XFS_LIC_FREEMASK;
-}
-
-static inline void xfs_lic_init_slot(xfs_log_item_chunk_t *cp, int slot)
-{
- cp->lic_descs[slot].lid_index = (unsigned char)(slot);
-}
-
-static inline int xfs_lic_vacancy(xfs_log_item_chunk_t *cp)
-{
- return cp->lic_free & XFS_LIC_FREEMASK;
-}
-
-static inline void xfs_lic_all_free(xfs_log_item_chunk_t *cp)
-{
- cp->lic_free = XFS_LIC_FREEMASK;
-}
-
-static inline int xfs_lic_are_all_free(xfs_log_item_chunk_t *cp)
-{
- return ((cp->lic_free & XFS_LIC_FREEMASK) == XFS_LIC_FREEMASK);
-}
-
-static inline int xfs_lic_isfree(xfs_log_item_chunk_t *cp, int slot)
-{
- return (cp->lic_free & (1 << slot));
-}
-
-static inline void xfs_lic_claim(xfs_log_item_chunk_t *cp, int slot)
-{
- cp->lic_free &= ~(1 << slot);
-}
-
-static inline void xfs_lic_relse(xfs_log_item_chunk_t *cp, int slot)
-{
- cp->lic_free |= 1 << slot;
-}
-
-static inline xfs_log_item_desc_t *
-xfs_lic_slot(xfs_log_item_chunk_t *cp, int slot)
-{
- return &(cp->lic_descs[slot]);
-}
-
-static inline int xfs_lic_desc_to_slot(xfs_log_item_desc_t *dp)
-{
- return (uint)dp->lid_index;
-}
-
-/*
- * Calculate the address of a chunk given a descriptor pointer:
- * dp - dp->lid_index give the address of the start of the lic_descs array.
- * From this we subtract the offset of the lic_descs field in a chunk.
- * All of this yields the address of the chunk, which is
- * cast to a chunk pointer.
- */
-static inline xfs_log_item_chunk_t *
-xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
-{
- return (xfs_log_item_chunk_t*) \
- (((xfs_caddr_t)((dp) - (dp)->lid_index)) - \
- (xfs_caddr_t)(((xfs_log_item_chunk_t*)0)->lic_descs));
-}
#define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */
/*
@@ -275,8 +184,6 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
/*
* Values for call flags parameter.
*/
-#define XFS_TRANS_NOSLEEP 0x1
-#define XFS_TRANS_WAIT 0x2
#define XFS_TRANS_RELEASE_LOG_RES 0x4
#define XFS_TRANS_ABORT 0x8
@@ -300,24 +207,6 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
/*
- * Various log reservation values.
- * These are based on the size of the file system block
- * because that is what most transactions manipulate.
- * Each adds in an additional 128 bytes per item logged to
- * try to account for the overhead of the transaction mechanism.
- *
- * Note:
- * Most of the reservations underestimate the number of allocation
- * groups into which they could free extents in the xfs_bmap_finish()
- * call. This is because the number in the worst case is quite high
- * and quite unusual. In order to fix this we need to change
- * xfs_bmap_finish() to free extents in only a single AG at a time.
- * This will require changes to the EFI code as well, however, so that
- * the EFI for the extents not freed is logged again in each transaction.
- * See bug 261917.
- */
-
-/*
* Per-extent log reservation for the allocation btree changes
* involved in freeing or allocating an extent.
* 2 trees * (2 blocks/level * max depth - 1) * block size
@@ -341,429 +230,36 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp)
(XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \
XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1)
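
XFS_ALLOCFREE_LOG_RES() above is the per-extent building block the new reservation functions lean on: 2 trees * (2 blocks/level * max depth - 1) * block size for each extent worked on. A quick standalone check with assumed geometry:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int blocksize = 4096;	/* assumed sb_blocksize */
		unsigned int maxdepth = 5;	/* assumed alloc btree height */
		unsigned int nextents = 2;

		/* 2 trees * (2 blocks/level * depth - 1) * blocksize, per ext */
		unsigned int res = nextents * 2 * (2 * maxdepth - 1) * blocksize;

		printf("alloc/free log reservation for %u extents: %u bytes\n",
		       nextents, res);	/* 2 * 2 * 9 * 4096 = 147456 */
		return 0;
	}
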
-/*
- * In a write transaction we can allocate a maximum of 2
- * extents. This gives:
- * the inode getting the new extents: inode size
- * the inode's bmap btree: max depth * block size
- * the agfs of the ags from which the extents are allocated: 2 * sector
- * the superblock free block counter: sector size
- * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- * And the bmap_finish transaction can free bmap blocks in a join:
- * the agfs of the ags containing the blocks: 2 * sector size
- * the agfls of the ags containing the blocks: 2 * sector size
- * the super block free block counter: sector size
- * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- */
-#define XFS_CALC_WRITE_LOG_RES(mp) \
- (MAX( \
- ((mp)->m_sb.sb_inodesize + \
- XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \
- (2 * (mp)->m_sb.sb_sectsize) + \
- (mp)->m_sb.sb_sectsize + \
- XFS_ALLOCFREE_LOG_RES(mp, 2) + \
- (128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))),\
- ((2 * (mp)->m_sb.sb_sectsize) + \
- (2 * (mp)->m_sb.sb_sectsize) + \
- (mp)->m_sb.sb_sectsize + \
- XFS_ALLOCFREE_LOG_RES(mp, 2) + \
- (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))))))
#define XFS_WRITE_LOG_RES(mp) ((mp)->m_reservations.tr_write)
-
-/*
- * In truncating a file we free up to two extents at once. We can modify:
- * the inode being truncated: inode size
- * the inode's bmap btree: (max depth + 1) * block size
- * And the bmap_finish transaction can free the blocks and bmap blocks:
- * the agf for each of the ags: 4 * sector size
- * the agfl for each of the ags: 4 * sector size
- * the super block to reflect the freed blocks: sector size
- * worst case split in allocation btrees per extent assuming 4 extents:
- * 4 exts * 2 trees * (2 * max depth - 1) * block size
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-#define XFS_CALC_ITRUNCATE_LOG_RES(mp) \
- (MAX( \
- ((mp)->m_sb.sb_inodesize + \
- XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) + \
- (128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \
- ((4 * (mp)->m_sb.sb_sectsize) + \
- (4 * (mp)->m_sb.sb_sectsize) + \
- (mp)->m_sb.sb_sectsize + \
- XFS_ALLOCFREE_LOG_RES(mp, 4) + \
- (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))) + \
- (128 * 5) + \
- XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
- XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
-
#define XFS_ITRUNCATE_LOG_RES(mp) ((mp)->m_reservations.tr_itruncate)
-
-/*
- * In renaming a files we can modify:
- * the four inodes involved: 4 * inode size
- * the two directory btrees: 2 * (max depth + v2) * dir block size
- * the two directory bmap btrees: 2 * max depth * block size
- * And the bmap_finish transaction can free dir and bmap blocks (two sets
- * of bmap blocks) giving:
- * the agf for the ags in which the blocks live: 3 * sector size
- * the agfl for the ags in which the blocks live: 3 * sector size
- * the superblock for the free block count: sector size
- * the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
- */
-#define XFS_CALC_RENAME_LOG_RES(mp) \
- (MAX( \
- ((4 * (mp)->m_sb.sb_inodesize) + \
- (2 * XFS_DIROP_LOG_RES(mp)) + \
- (128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp)))), \
- ((3 * (mp)->m_sb.sb_sectsize) + \
- (3 * (mp)->m_sb.sb_sectsize) + \
- (mp)->m_sb.sb_sectsize + \
- XFS_ALLOCFREE_LOG_RES(mp, 3) + \
- (128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3))))))
-
#define XFS_RENAME_LOG_RES(mp) ((mp)->m_reservations.tr_rename)
-
-/*
- * For creating a link to an inode:
- * the parent directory inode: inode size
- * the linked inode: inode size
- * the directory btree could split: (max depth + v2) * dir block size
- * the directory bmap btree could join or split: (max depth + v2) * blocksize
- * And the bmap_finish transaction can free some bmap blocks giving:
- * the agf for the ag in which the blocks live: sector size
- * the agfl for the ag in which the blocks live: sector size
- * the superblock for the free block count: sector size
- * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
- */
-#define XFS_CALC_LINK_LOG_RES(mp) \
- (MAX( \
- ((mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_inodesize + \
- XFS_DIROP_LOG_RES(mp) + \
- (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \
- ((mp)->m_sb.sb_sectsize + \
- (mp)->m_sb.sb_sectsize + \
- (mp)->m_sb.sb_sectsize + \
- XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
-
#define XFS_LINK_LOG_RES(mp) ((mp)->m_reservations.tr_link)
-
-/*
- * For removing a directory entry we can modify:
- * the parent directory inode: inode size
- * the removed inode: inode size
- * the directory btree could join: (max depth + v2) * dir block size
- * the directory bmap btree could join or split: (max depth + v2) * blocksize
- * And the bmap_finish transaction can free the dir and bmap blocks giving:
- * the agf for the ag in which the blocks live: 2 * sector size
- * the agfl for the ag in which the blocks live: 2 * sector size
- * the superblock for the free block count: sector size
- * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- */
-#define XFS_CALC_REMOVE_LOG_RES(mp) \
- (MAX( \
- ((mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_inodesize + \
- XFS_DIROP_LOG_RES(mp) + \
- (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \
- ((2 * (mp)->m_sb.sb_sectsize) + \
- (2 * (mp)->m_sb.sb_sectsize) + \
- (mp)->m_sb.sb_sectsize + \
- XFS_ALLOCFREE_LOG_RES(mp, 2) + \
- (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))))))
-
#define XFS_REMOVE_LOG_RES(mp) ((mp)->m_reservations.tr_remove)
-
-/*
- * For symlink we can modify:
- * the parent directory inode: inode size
- * the new inode: inode size
- * the inode btree entry: 1 block
- * the directory btree: (max depth + v2) * dir block size
- * the directory inode's bmap btree: (max depth + v2) * block size
- * the blocks for the symlink: 1 kB
- * Or in the first xact we allocate some inodes giving:
- * the agi and agf of the ag getting the new inodes: 2 * sectorsize
- * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
- */
-#define XFS_CALC_SYMLINK_LOG_RES(mp) \
- (MAX( \
- ((mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_inodesize + \
- XFS_FSB_TO_B(mp, 1) + \
- XFS_DIROP_LOG_RES(mp) + \
- 1024 + \
- (128 * (4 + XFS_DIROP_LOG_COUNT(mp)))), \
- (2 * (mp)->m_sb.sb_sectsize + \
- XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \
- XFS_FSB_TO_B((mp), (mp)->m_in_maxlevels) + \
- XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
- XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
-
#define XFS_SYMLINK_LOG_RES(mp) ((mp)->m_reservations.tr_symlink)
-
-/*
- * For create we can modify:
- * the parent directory inode: inode size
- * the new inode: inode size
- * the inode btree entry: block size
- * the superblock for the nlink flag: sector size
- * the directory btree: (max depth + v2) * dir block size
- * the directory inode's bmap btree: (max depth + v2) * block size
- * Or in the first xact we allocate some inodes giving:
- * the agi and agf of the ag getting the new inodes: 2 * sectorsize
- * the superblock for the nlink flag: sector size
- * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-#define XFS_CALC_CREATE_LOG_RES(mp) \
- (MAX( \
- ((mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_sectsize + \
- XFS_FSB_TO_B(mp, 1) + \
- XFS_DIROP_LOG_RES(mp) + \
- (128 * (3 + XFS_DIROP_LOG_COUNT(mp)))), \
- (3 * (mp)->m_sb.sb_sectsize + \
- XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \
- XFS_FSB_TO_B((mp), (mp)->m_in_maxlevels) + \
- XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
- XFS_ALLOCFREE_LOG_COUNT(mp, 1))))))
-
#define XFS_CREATE_LOG_RES(mp) ((mp)->m_reservations.tr_create)
-
-/*
- * Making a new directory is the same as creating a new file.
- */
-#define XFS_CALC_MKDIR_LOG_RES(mp) XFS_CALC_CREATE_LOG_RES(mp)
-
#define XFS_MKDIR_LOG_RES(mp) ((mp)->m_reservations.tr_mkdir)
-
-/*
- * In freeing an inode we can modify:
- * the inode being freed: inode size
- * the super block free inode counter: sector size
- * the agi hash list and counters: sector size
- * the inode btree entry: block size
- * the on disk inode before ours in the agi hash list: inode cluster size
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-#define XFS_CALC_IFREE_LOG_RES(mp) \
- ((mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_sectsize + \
- (mp)->m_sb.sb_sectsize + \
- XFS_FSB_TO_B((mp), 1) + \
- MAX((__uint16_t)XFS_FSB_TO_B((mp), 1), XFS_INODE_CLUSTER_SIZE(mp)) + \
- (128 * 5) + \
- XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \
- XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
-
-
#define XFS_IFREE_LOG_RES(mp) ((mp)->m_reservations.tr_ifree)
-
-/*
- * When only changing the inode we log the inode and possibly the superblock
- * We also add a bit of slop for the transaction stuff.
- */
-#define XFS_CALC_ICHANGE_LOG_RES(mp) ((mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_sectsize + 512)
-
#define XFS_ICHANGE_LOG_RES(mp) ((mp)->m_reservations.tr_ichange)
-
-/*
- * Growing the data section of the filesystem.
- * superblock
- * agi and agf
- * allocation btrees
- */
-#define XFS_CALC_GROWDATA_LOG_RES(mp) \
- ((mp)->m_sb.sb_sectsize * 3 + \
- XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
-
#define XFS_GROWDATA_LOG_RES(mp) ((mp)->m_reservations.tr_growdata)
-
-/*
- * Growing the rt section of the filesystem.
- * In the first set of transactions (ALLOC) we allocate space to the
- * bitmap or summary files.
- * superblock: sector size
- * agf of the ag from which the extent is allocated: sector size
- * bmap btree for bitmap/summary inode: max depth * blocksize
- * bitmap/summary inode: inode size
- * allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
- */
-#define XFS_CALC_GROWRTALLOC_LOG_RES(mp) \
- (2 * (mp)->m_sb.sb_sectsize + \
- XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \
- (mp)->m_sb.sb_inodesize + \
- XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * \
- (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + \
- XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
-
#define XFS_GROWRTALLOC_LOG_RES(mp) ((mp)->m_reservations.tr_growrtalloc)
-
-/*
- * Growing the rt section of the filesystem.
- * In the second set of transactions (ZERO) we zero the new metadata blocks.
- * one bitmap/summary block: blocksize
- */
-#define XFS_CALC_GROWRTZERO_LOG_RES(mp) \
- ((mp)->m_sb.sb_blocksize + 128)
-
#define XFS_GROWRTZERO_LOG_RES(mp) ((mp)->m_reservations.tr_growrtzero)
-
-/*
- * Growing the rt section of the filesystem.
- * In the third set of transactions (FREE) we update metadata without
- * allocating any new blocks.
- * superblock: sector size
- * bitmap inode: inode size
- * summary inode: inode size
- * one bitmap block: blocksize
- * summary blocks: new summary size
- */
-#define XFS_CALC_GROWRTFREE_LOG_RES(mp) \
- ((mp)->m_sb.sb_sectsize + \
- 2 * (mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_blocksize + \
- (mp)->m_rsumsize + \
- (128 * 5))
-
#define XFS_GROWRTFREE_LOG_RES(mp) ((mp)->m_reservations.tr_growrtfree)
-
-/*
- * Logging the inode modification timestamp on a synchronous write.
- * inode
- */
-#define XFS_CALC_SWRITE_LOG_RES(mp) \
- ((mp)->m_sb.sb_inodesize + 128)
-
#define XFS_SWRITE_LOG_RES(mp) ((mp)->m_reservations.tr_swrite)
-
/*
* Logging the inode timestamps on an fsync -- same as SWRITE
* as long as SWRITE logs the entire inode core
*/
#define XFS_FSYNC_TS_LOG_RES(mp) ((mp)->m_reservations.tr_swrite)
-
-/*
- * Logging the inode mode bits when writing a setuid/setgid file
- * inode
- */
-#define XFS_CALC_WRITEID_LOG_RES(mp) \
- ((mp)->m_sb.sb_inodesize + 128)
-
#define XFS_WRITEID_LOG_RES(mp) ((mp)->m_reservations.tr_swrite)
-
-/*
- * Converting the inode from non-attributed to attributed.
- * the inode being converted: inode size
- * agf block and superblock (for block allocation)
- * the new block (directory sized)
- * bmap blocks for the new directory block
- * allocation btrees
- */
-#define XFS_CALC_ADDAFORK_LOG_RES(mp) \
- ((mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_sectsize * 2 + \
- (mp)->m_dirblksize + \
- XFS_FSB_TO_B(mp, (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1)) + \
- XFS_ALLOCFREE_LOG_RES(mp, 1) + \
- (128 * (4 + (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) + \
- XFS_ALLOCFREE_LOG_COUNT(mp, 1))))
-
#define XFS_ADDAFORK_LOG_RES(mp) ((mp)->m_reservations.tr_addafork)
-
-/*
- * Removing the attribute fork of a file
- * the inode being truncated: inode size
- * the inode's bmap btree: max depth * block size
- * And the bmap_finish transaction can free the blocks and bmap blocks:
- * the agf for each of the ags: 4 * sector size
- * the agfl for each of the ags: 4 * sector size
- * the super block to reflect the freed blocks: sector size
- * worst case split in allocation btrees per extent assuming 4 extents:
- * 4 exts * 2 trees * (2 * max depth - 1) * block size
- */
-#define XFS_CALC_ATTRINVAL_LOG_RES(mp) \
- (MAX( \
- ((mp)->m_sb.sb_inodesize + \
- XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \
- (128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))), \
- ((4 * (mp)->m_sb.sb_sectsize) + \
- (4 * (mp)->m_sb.sb_sectsize) + \
- (mp)->m_sb.sb_sectsize + \
- XFS_ALLOCFREE_LOG_RES(mp, 4) + \
- (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))))))
-
#define XFS_ATTRINVAL_LOG_RES(mp) ((mp)->m_reservations.tr_attrinval)
-
-/*
- * Setting an attribute.
- * the inode getting the attribute
- * the superblock for allocations
- * the agfs extents are allocated from
- * the attribute btree * max depth
- * the inode allocation btree
- * Since attribute transaction space is dependent on the size of the attribute,
- * the calculation is done partially at mount time and partially at runtime.
- */
-#define XFS_CALC_ATTRSET_LOG_RES(mp) \
- ((mp)->m_sb.sb_inodesize + \
- (mp)->m_sb.sb_sectsize + \
- XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \
- (128 * (2 + XFS_DA_NODE_MAXDEPTH)))
-
#define XFS_ATTRSET_LOG_RES(mp, ext) \
((mp)->m_reservations.tr_attrset + \
(ext * (mp)->m_sb.sb_sectsize) + \
(ext * XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))) + \
(128 * (ext + (ext * XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))))
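
XFS_ATTRSET_LOG_RES() is the one reservation left partly dynamic: the mount-time tr_attrset base grows at runtime with the number of extents the attribute value may need. A standalone sketch of the arithmetic; every geometry number below is an assumption for the example:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int tr_attrset = 65536; /* assumed mount-time base */
		unsigned int sectsize = 512;	 /* assumed sb_sectsize */
		unsigned int blocksize = 4096;	 /* assumed sb_blocksize */
		unsigned int bm_maxlevels = 4;	 /* assumed attr bmap height */
		unsigned int ext = 2;		 /* extents for the attr value */

		unsigned int res = tr_attrset +
				   ext * sectsize +
				   ext * bm_maxlevels * blocksize +
				   128 * (ext + ext * bm_maxlevels);

		printf("attrset reservation for %u extents: %u bytes\n",
		       ext, res);
		return 0;
	}
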
-
-/*
- * Removing an attribute.
- * the inode: inode size
- * the attribute btree could join: max depth * block size
- * the inode bmap btree could join or split: max depth * block size
- * And the bmap_finish transaction can free the attr blocks freed giving:
- * the agf for the ag in which the blocks live: 2 * sector size
- * the agfl for the ag in which the blocks live: 2 * sector size
- * the superblock for the free block count: sector size
- * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
- */
-#define XFS_CALC_ATTRRM_LOG_RES(mp) \
- (MAX( \
- ((mp)->m_sb.sb_inodesize + \
- XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \
- XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \
- (128 * (1 + XFS_DA_NODE_MAXDEPTH + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \
- ((2 * (mp)->m_sb.sb_sectsize) + \
- (2 * (mp)->m_sb.sb_sectsize) + \
- (mp)->m_sb.sb_sectsize + \
- XFS_ALLOCFREE_LOG_RES(mp, 2) + \
- (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))))))
-
#define XFS_ATTRRM_LOG_RES(mp) ((mp)->m_reservations.tr_attrrm)
-
-/*
- * Clearing a bad agino number in an agi hash bucket.
- */
-#define XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp) \
- ((mp)->m_sb.sb_sectsize + 128)
-
#define XFS_CLEAR_AGI_BUCKET_LOG_RES(mp) ((mp)->m_reservations.tr_clearagi)
@@ -849,8 +345,7 @@ typedef struct xfs_item_ops {
uint (*iop_size)(xfs_log_item_t *);
void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *);
void (*iop_pin)(xfs_log_item_t *);
- void (*iop_unpin)(xfs_log_item_t *);
- void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *);
+ void (*iop_unpin)(xfs_log_item_t *, int remove);
uint (*iop_trylock)(xfs_log_item_t *);
void (*iop_unlock)(xfs_log_item_t *);
xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
@@ -862,8 +357,7 @@ typedef struct xfs_item_ops {
#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip)
#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp)
#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip)
-#define IOP_UNPIN(ip) (*(ip)->li_ops->iop_unpin)(ip)
-#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp)
+#define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove)
#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip)
#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip)
#define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn)
@@ -927,8 +421,7 @@ typedef struct xfs_trans {
int64_t t_rblocks_delta;/* superblock rblocks change */
int64_t t_rextents_delta;/* superblocks rextents chg */
int64_t t_rextslog_delta;/* superblocks rextslog chg */
- unsigned int t_items_free; /* log item descs free */
- xfs_log_item_chunk_t t_items; /* first log item desc chunk */
+ struct list_head t_items; /* log item descriptors */
xfs_trans_header_t t_header; /* header for in-log trans */
struct list_head t_busy; /* list of busy extents */
unsigned long t_pflags; /* saved process flags state */
@@ -980,8 +473,8 @@ void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *,
xfs_ino_t , uint, uint, struct xfs_inode **);
-void xfs_trans_ijoin(xfs_trans_t *, struct xfs_inode *, uint);
-void xfs_trans_ihold(xfs_trans_t *, struct xfs_inode *);
+void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint);
+void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint);
@@ -1006,6 +499,7 @@ int xfs_trans_ail_init(struct xfs_mount *);
void xfs_trans_ail_destroy(struct xfs_mount *);
extern kmem_zone_t *xfs_trans_zone;
+extern kmem_zone_t *xfs_log_item_desc_zone;
#endif /* __KERNEL__ */
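
The new extern implies a matching slab-zone setup at module init that sits outside the hunks shown here; presumably something along these lines (assumed, not part of this diff):

	/* assumed companion hunk in the zone-init path, not shown above */
	xfs_log_item_desc_zone = kmem_zone_init(
				sizeof(struct xfs_log_item_desc),
				"xfs_log_item_desc");
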
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index e799824f724..dc9069568ff 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -24,7 +24,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 63d81a22f4f..90af025e683 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -24,14 +24,10 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
@@ -51,36 +47,17 @@ xfs_trans_buf_item_match(
xfs_daddr_t blkno,
int len)
{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_desc_t *lidp;
- xfs_buf_log_item_t *blip;
- int i;
+ struct xfs_log_item_desc *lidp;
+ struct xfs_buf_log_item *blip;
len = BBTOB(len);
- for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
- if (xfs_lic_are_all_free(licp)) {
- ASSERT(licp == &tp->t_items);
- ASSERT(licp->lic_next == NULL);
- return NULL;
- }
-
- for (i = 0; i < licp->lic_unused; i++) {
- /*
- * Skip unoccupied slots.
- */
- if (xfs_lic_isfree(licp, i))
- continue;
-
- lidp = xfs_lic_slot(licp, i);
- blip = (xfs_buf_log_item_t *)lidp->lid_item;
- if (blip->bli_item.li_type != XFS_LI_BUF)
- continue;
-
- if (XFS_BUF_TARGET(blip->bli_buf) == target &&
- XFS_BUF_ADDR(blip->bli_buf) == blkno &&
- XFS_BUF_COUNT(blip->bli_buf) == len)
- return blip->bli_buf;
- }
+ list_for_each_entry(lidp, &tp->t_items, lid_trans) {
+ blip = (struct xfs_buf_log_item *)lidp->lid_item;
+ if (blip->bli_item.li_type == XFS_LI_BUF &&
+ XFS_BUF_TARGET(blip->bli_buf) == target &&
+ XFS_BUF_ADDR(blip->bli_buf) == blkno &&
+ XFS_BUF_COUNT(blip->bli_buf) == len)
+ return blip->bli_buf;
}
return NULL;
@@ -127,7 +104,7 @@ _xfs_trans_bjoin(
/*
* Get a log_item_desc to point at the new item.
*/
- (void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);
+ xfs_trans_add_item(tp, &bip->bli_item);
/*
* Initialize b_fsprivate2 so we can find it with incore_match()
@@ -483,7 +460,6 @@ xfs_trans_brelse(xfs_trans_t *tp,
{
xfs_buf_log_item_t *bip;
xfs_log_item_t *lip;
- xfs_log_item_desc_t *lidp;
/*
* Default to a normal brelse() call if the tp is NULL.
@@ -514,13 +490,6 @@ xfs_trans_brelse(xfs_trans_t *tp,
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- /*
- * Find the item descriptor pointing to this buffer's
- * log item. It must be there.
- */
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
- ASSERT(lidp != NULL);
-
trace_xfs_trans_brelse(bip);
/*
@@ -536,7 +505,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
* If the buffer is dirty within this transaction, we can't
* release it until we commit.
*/
- if (lidp->lid_flags & XFS_LID_DIRTY)
+ if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
return;
/*
@@ -553,7 +522,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
/*
* Free up the log item descriptor tracking the released item.
*/
- xfs_trans_free_item(tp, lidp);
+ xfs_trans_del_item(&bip->bli_item);
/*
* Clear the hold flag in the buf log item if it is set.
@@ -665,7 +634,6 @@ xfs_trans_log_buf(xfs_trans_t *tp,
uint last)
{
xfs_buf_log_item_t *bip;
- xfs_log_item_desc_t *lidp;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
@@ -690,7 +658,7 @@ xfs_trans_log_buf(xfs_trans_t *tp,
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
- bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))xfs_buf_iodone;
+ bip->bli_item.li_cb = xfs_buf_iodone;
trace_xfs_trans_log_buf(bip);
@@ -707,11 +675,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
}
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
- ASSERT(lidp != NULL);
-
tp->t_flags |= XFS_TRANS_DIRTY;
- lidp->lid_flags |= XFS_LID_DIRTY;
+ bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
bip->bli_flags |= XFS_BLI_LOGGED;
xfs_buf_item_log(bip, first, last);
}
@@ -740,7 +705,6 @@ xfs_trans_binval(
xfs_trans_t *tp,
xfs_buf_t *bp)
{
- xfs_log_item_desc_t *lidp;
xfs_buf_log_item_t *bip;
ASSERT(XFS_BUF_ISBUSY(bp));
@@ -748,8 +712,6 @@ xfs_trans_binval(
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)bip);
- ASSERT(lidp != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_trans_binval(bip);
@@ -764,7 +726,7 @@ xfs_trans_binval(
ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
- ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
+ ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
return;
}
@@ -797,7 +759,7 @@ xfs_trans_binval(
bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
memset((char *)(bip->bli_format.blf_data_map), 0,
(bip->bli_format.blf_map_size * sizeof(uint)));
- lidp->lid_flags |= XFS_LID_DIRTY;
+ bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
tp->t_flags |= XFS_TRANS_DIRTY;
}
@@ -853,12 +815,9 @@ xfs_trans_stale_inode_buf(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_STALE_INODE;
- bip->bli_item.li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*))
- xfs_buf_iodone;
+ bip->bli_item.li_cb = xfs_buf_iodone;
}
-
-
/*
* Mark the buffer as being one which contains newly allocated
* inodes. We need to make sure that even if this buffer is
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 27cce2a9c7e..f783d5e9fa7 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -23,7 +23,6 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_extfree_item.h"
@@ -49,9 +48,8 @@ xfs_trans_get_efi(xfs_trans_t *tp,
/*
* Get a log_item_desc to point at the new item.
*/
- (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efip);
-
- return (efip);
+ xfs_trans_add_item(tp, &efip->efi_item);
+ return efip;
}
/*
@@ -65,15 +63,11 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp,
xfs_fsblock_t start_block,
xfs_extlen_t ext_len)
{
- xfs_log_item_desc_t *lidp;
uint next_extent;
xfs_extent_t *extp;
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efip);
- ASSERT(lidp != NULL);
-
tp->t_flags |= XFS_TRANS_DIRTY;
- lidp->lid_flags |= XFS_LID_DIRTY;
+ efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
next_extent = efip->efi_next_extent;
ASSERT(next_extent < efip->efi_format.efi_nextents);
@@ -106,9 +100,8 @@ xfs_trans_get_efd(xfs_trans_t *tp,
/*
* Get a log_item_desc to point at the new item.
*/
- (void) xfs_trans_add_item(tp, (xfs_log_item_t*)efdp);
-
- return (efdp);
+ xfs_trans_add_item(tp, &efdp->efd_item);
+ return efdp;
}
/*
@@ -122,15 +115,11 @@ xfs_trans_log_efd_extent(xfs_trans_t *tp,
xfs_fsblock_t start_block,
xfs_extlen_t ext_len)
{
- xfs_log_item_desc_t *lidp;
uint next_extent;
xfs_extent_t *extp;
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)efdp);
- ASSERT(lidp != NULL);
-
tp->t_flags |= XFS_TRANS_DIRTY;
- lidp->lid_flags |= XFS_LID_DIRTY;
+ efdp->efd_item.li_desc->lid_flags |= XFS_LID_DIRTY;
next_extent = efdp->efd_next_extent;
ASSERT(next_extent < efdp->efd_format.efd_nextents);
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 785ff101da0..cdc53a1050c 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -24,20 +24,16 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_ialloc.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
+#include "xfs_trace.h"
#ifdef XFS_TRANS_DEBUG
STATIC void
@@ -47,7 +43,6 @@ xfs_trans_inode_broot_debug(
#define xfs_trans_inode_broot_debug(ip)
#endif
-
/*
* Get an inode and join it to the transaction.
*/
@@ -62,78 +57,66 @@ xfs_trans_iget(
{
int error;
- error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0);
- if (!error && tp)
- xfs_trans_ijoin(tp, *ipp, lock_flags);
+ error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp);
+ if (!error && tp) {
+ xfs_trans_ijoin(tp, *ipp);
+ (*ipp)->i_itemp->ili_lock_flags = lock_flags;
+ }
return error;
}
/*
- * Add the locked inode to the transaction.
- * The inode must be locked, and it cannot be associated with any
- * transaction. The caller must specify the locks already held
- * on the inode.
+ * Add a locked inode to the transaction.
+ *
+ * The inode must be locked, and it cannot be associated with any transaction.
*/
void
xfs_trans_ijoin(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- uint lock_flags)
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
{
xfs_inode_log_item_t *iip;
ASSERT(ip->i_transp == NULL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(lock_flags & XFS_ILOCK_EXCL);
if (ip->i_itemp == NULL)
xfs_inode_item_init(ip, ip->i_mount);
iip = ip->i_itemp;
- ASSERT(iip->ili_flags == 0);
+ ASSERT(iip->ili_lock_flags == 0);
/*
* Get a log_item_desc to point at the new item.
*/
- (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(iip));
+ xfs_trans_add_item(tp, &iip->ili_item);
xfs_trans_inode_broot_debug(ip);
/*
- * If the IO lock is already held, mark that in the inode log item.
- */
- if (lock_flags & XFS_IOLOCK_EXCL) {
- iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
- } else if (lock_flags & XFS_IOLOCK_SHARED) {
- iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
- }
-
- /*
* Initialize i_transp so we can find it with xfs_inode_incore()
* in xfs_trans_iget() above.
*/
ip->i_transp = tp;
}
-
-
/*
- * Mark the inode as not needing to be unlocked when the inode item's
- * IOP_UNLOCK() routine is called. The inode must already be locked
- * and associated with the given transaction.
+ * Add a locked inode to the transaction.
+ *
+ * Grabs a reference to the inode which will be dropped when the transaction
+ * is committed. The inode will also be unlocked at that point. The inode
+ * must be locked, and it cannot be associated with any transaction.
*/
-/*ARGSUSED*/
void
-xfs_trans_ihold(
- xfs_trans_t *tp,
- xfs_inode_t *ip)
+xfs_trans_ijoin_ref(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ uint lock_flags)
{
- ASSERT(ip->i_transp == tp);
- ASSERT(ip->i_itemp != NULL);
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
- ip->i_itemp->ili_flags |= XFS_ILI_HOLD;
+ xfs_trans_ijoin(tp, ip);
+ IHOLD(ip);
+ ip->i_itemp->ili_lock_flags = lock_flags;
}
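
ili_lock_flags now encodes the unlock policy: plain xfs_trans_ijoin() leaves it zero, so commit keeps the inode locked and referenced for the caller, while xfs_trans_ijoin_ref() records the flags and takes the reference the transaction will drop. A fragment sketch of choosing between the two (not buildable standalone):

	/* caller keeps lock and reference; unlock by hand after commit */
	xfs_trans_ijoin(tp, ip);
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* transaction owns both; nothing left to do after commit */
	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
	error = xfs_trans_commit(tp, 0);
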
-
/*
* This is called to mark the fields indicated in fieldmask as needing
* to be logged when the transaction is committed. The inode must
@@ -149,17 +132,12 @@ xfs_trans_log_inode(
xfs_inode_t *ip,
uint flags)
{
- xfs_log_item_desc_t *lidp;
-
ASSERT(ip->i_transp == tp);
ASSERT(ip->i_itemp != NULL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp));
- ASSERT(lidp != NULL);
-
tp->t_flags |= XFS_TRANS_DIRTY;
- lidp->lid_flags |= XFS_LID_DIRTY;
+ ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY;
/*
* Always OR in the bits from the ili_last_fields field.
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
deleted file mode 100644
index f11d37d06dc..00000000000
--- a/fs/xfs/xfs_trans_item.c
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
-/* XXX: from here down needed until struct xfs_trans has its own ailp */
-#include "xfs_bit.h"
-#include "xfs_buf_item.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
-#include "xfs_mount.h"
-
-STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *,
- int, int, xfs_lsn_t);
-
-/*
- * This is called to add the given log item to the transaction's
- * list of log items. It must find a free log item descriptor
- * or allocate a new one and add the item to that descriptor.
- * The function returns a pointer to item descriptor used to point
- * to the new item. The log item will now point to its new descriptor
- * with its li_desc field.
- */
-xfs_log_item_desc_t *
-xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
-{
- xfs_log_item_desc_t *lidp;
- xfs_log_item_chunk_t *licp;
- int i=0;
-
- /*
- * If there are no free descriptors, allocate a new chunk
- * of them and put it at the front of the chunk list.
- */
- if (tp->t_items_free == 0) {
- licp = (xfs_log_item_chunk_t*)
- kmem_alloc(sizeof(xfs_log_item_chunk_t), KM_SLEEP);
- ASSERT(licp != NULL);
- /*
- * Initialize the chunk, and then
- * claim the first slot in the newly allocated chunk.
- */
- xfs_lic_init(licp);
- xfs_lic_claim(licp, 0);
- licp->lic_unused = 1;
- xfs_lic_init_slot(licp, 0);
- lidp = xfs_lic_slot(licp, 0);
-
- /*
- * Link in the new chunk and update the free count.
- */
- licp->lic_next = tp->t_items.lic_next;
- tp->t_items.lic_next = licp;
- tp->t_items_free = XFS_LIC_NUM_SLOTS - 1;
-
- /*
- * Initialize the descriptor and the generic portion
- * of the log item.
- *
- * Point the new slot at this item and return it.
- * Also point the log item at its currently active
- * descriptor and set the item's mount pointer.
- */
- lidp->lid_item = lip;
- lidp->lid_flags = 0;
- lidp->lid_size = 0;
- lip->li_desc = lidp;
- lip->li_mountp = tp->t_mountp;
- lip->li_ailp = tp->t_mountp->m_ail;
- return lidp;
- }
-
- /*
- * Find the free descriptor. It is somewhere in the chunklist
- * of descriptors.
- */
- licp = &tp->t_items;
- while (licp != NULL) {
- if (xfs_lic_vacancy(licp)) {
- if (licp->lic_unused <= XFS_LIC_MAX_SLOT) {
- i = licp->lic_unused;
- ASSERT(xfs_lic_isfree(licp, i));
- break;
- }
- for (i = 0; i <= XFS_LIC_MAX_SLOT; i++) {
- if (xfs_lic_isfree(licp, i))
- break;
- }
- ASSERT(i <= XFS_LIC_MAX_SLOT);
- break;
- }
- licp = licp->lic_next;
- }
- ASSERT(licp != NULL);
- /*
- * If we find a free descriptor, claim it,
- * initialize it, and return it.
- */
- xfs_lic_claim(licp, i);
- if (licp->lic_unused <= i) {
- licp->lic_unused = i + 1;
- xfs_lic_init_slot(licp, i);
- }
- lidp = xfs_lic_slot(licp, i);
- tp->t_items_free--;
- lidp->lid_item = lip;
- lidp->lid_flags = 0;
- lidp->lid_size = 0;
- lip->li_desc = lidp;
- lip->li_mountp = tp->t_mountp;
- lip->li_ailp = tp->t_mountp->m_ail;
- return lidp;
-}
-
-/*
- * Free the given descriptor.
- *
- * This requires setting the bit in the chunk's free mask corresponding
- * to the given slot.
- */
-void
-xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
-{
- uint slot;
- xfs_log_item_chunk_t *licp;
- xfs_log_item_chunk_t **licpp;
-
- slot = xfs_lic_desc_to_slot(lidp);
- licp = xfs_lic_desc_to_chunk(lidp);
- xfs_lic_relse(licp, slot);
- lidp->lid_item->li_desc = NULL;
- tp->t_items_free++;
-
- /*
- * If there are no more used items in the chunk and this is not
- * the chunk embedded in the transaction structure, then free
- * the chunk. First pull it from the chunk list and then
- * free it back to the heap. We didn't bother with a doubly
- * linked list here because the lists should be very short
- * and this is not a performance path. It's better to save
- * the memory of the extra pointer.
- *
- * Also decrement the transaction structure's count of free items
- * by the number in a chunk since we are freeing an empty chunk.
- */
- if (xfs_lic_are_all_free(licp) && (licp != &(tp->t_items))) {
- licpp = &(tp->t_items.lic_next);
- while (*licpp != licp) {
- ASSERT(*licpp != NULL);
- licpp = &((*licpp)->lic_next);
- }
- *licpp = licp->lic_next;
- kmem_free(licp);
- tp->t_items_free -= XFS_LIC_NUM_SLOTS;
- }
-}
-
-/*
- * This is called to find the descriptor corresponding to the given
- * log item. It returns a pointer to the descriptor.
- * The log item MUST have a corresponding descriptor in the given
- * transaction. This routine does not return NULL, it panics.
- *
- * The descriptor pointer is kept in the log item's li_desc field.
- * Just return it.
- */
-/*ARGSUSED*/
-xfs_log_item_desc_t *
-xfs_trans_find_item(xfs_trans_t *tp, xfs_log_item_t *lip)
-{
- ASSERT(lip->li_desc != NULL);
-
- return lip->li_desc;
-}
-
-
-/*
- * Return a pointer to the first descriptor in the chunk list.
- * This does not return NULL if there are none, it panics.
- *
- * The first descriptor must be in either the first or second chunk.
- * This is because the only chunk allowed to be empty is the first.
- * All others are freed when they become empty.
- *
- * At some point this and xfs_trans_next_item() should be optimized
- * to quickly look at the mask to determine if there is anything to
- * look at.
- */
-xfs_log_item_desc_t *
-xfs_trans_first_item(xfs_trans_t *tp)
-{
- xfs_log_item_chunk_t *licp;
- int i;
-
- licp = &tp->t_items;
- /*
- * If it's not in the first chunk, skip to the second.
- */
- if (xfs_lic_are_all_free(licp)) {
- licp = licp->lic_next;
- }
-
- /*
- * Return the first non-free descriptor in the chunk.
- */
- ASSERT(!xfs_lic_are_all_free(licp));
- for (i = 0; i < licp->lic_unused; i++) {
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- return xfs_lic_slot(licp, i);
- }
- cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item");
- return NULL;
-}
-
-
-/*
- * Given a descriptor, return the next descriptor in the chunk list.
- * This returns NULL if there are no more used descriptors in the list.
- *
- * We do this by first locating the chunk in which the descriptor resides,
- * and then scanning forward in the chunk and the list for the next
- * used descriptor.
- */
-/*ARGSUSED*/
-xfs_log_item_desc_t *
-xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
-{
- xfs_log_item_chunk_t *licp;
- int i;
-
- licp = xfs_lic_desc_to_chunk(lidp);
-
- /*
- * First search the rest of the chunk. The for loop keeps us
- * from referencing things beyond the end of the chunk.
- */
- for (i = (int)xfs_lic_desc_to_slot(lidp) + 1; i < licp->lic_unused; i++) {
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- return xfs_lic_slot(licp, i);
- }
-
- /*
- * Now search the next chunk. It must be there, because the
- * next chunk would have been freed if it were empty.
- * If there is no next chunk, return NULL.
- */
- if (licp->lic_next == NULL) {
- return NULL;
- }
-
- licp = licp->lic_next;
- ASSERT(!xfs_lic_are_all_free(licp));
- for (i = 0; i < licp->lic_unused; i++) {
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
-
- return xfs_lic_slot(licp, i);
- }
- ASSERT(0);
- /* NOTREACHED */
- return NULL; /* keep gcc quite */
-}
-
-/*
- * This is called to unlock all of the items of a transaction and to free
- * all the descriptors of that transaction.
- *
- * It walks the list of descriptors and unlocks each item. It frees
- * each chunk except that embedded in the transaction as it goes along.
- */
-void
-xfs_trans_free_items(
- xfs_trans_t *tp,
- xfs_lsn_t commit_lsn,
- int flags)
-{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_chunk_t *next_licp;
- int abort;
-
- abort = flags & XFS_TRANS_ABORT;
- licp = &tp->t_items;
- /*
- * Special case the embedded chunk so we don't free it below.
- */
- if (!xfs_lic_are_all_free(licp)) {
- (void) xfs_trans_unlock_chunk(licp, 1, abort, commit_lsn);
- xfs_lic_all_free(licp);
- licp->lic_unused = 0;
- }
- licp = licp->lic_next;
-
- /*
- * Unlock each item in each chunk and free the chunks.
- */
- while (licp != NULL) {
- ASSERT(!xfs_lic_are_all_free(licp));
- (void) xfs_trans_unlock_chunk(licp, 1, abort, commit_lsn);
- next_licp = licp->lic_next;
- kmem_free(licp);
- licp = next_licp;
- }
-
- /*
- * Reset the transaction structure's free item count.
- */
- tp->t_items_free = XFS_LIC_NUM_SLOTS;
- tp->t_items.lic_next = NULL;
-}
-
-
-
-/*
- * This is called to unlock the items associated with a transaction.
- * Items which were not logged should be freed.
- * Those which were logged must still be tracked so they can be unpinned
- * when the transaction commits.
- */
-void
-xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
-{
- xfs_log_item_chunk_t *licp;
- xfs_log_item_chunk_t *next_licp;
- xfs_log_item_chunk_t **licpp;
- int freed;
-
- freed = 0;
- licp = &tp->t_items;
-
- /*
- * Special case the embedded chunk so we don't free.
- */
- if (!xfs_lic_are_all_free(licp)) {
- freed = xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
- }
- licpp = &(tp->t_items.lic_next);
- licp = licp->lic_next;
-
- /*
- * Unlock each item in each chunk, free non-dirty descriptors,
- * and free empty chunks.
- */
- while (licp != NULL) {
- ASSERT(!xfs_lic_are_all_free(licp));
- freed += xfs_trans_unlock_chunk(licp, 0, 0, commit_lsn);
- next_licp = licp->lic_next;
- if (xfs_lic_are_all_free(licp)) {
- *licpp = next_licp;
- kmem_free(licp);
- freed -= XFS_LIC_NUM_SLOTS;
- } else {
- licpp = &(licp->lic_next);
- }
- ASSERT(*licpp == next_licp);
- licp = next_licp;
- }
-
- /*
- * Fix the free descriptor count in the transaction.
- */
- tp->t_items_free += freed;
-}
-
-/*
- * Unlock each item pointed to by a descriptor in the given chunk.
- * Stamp the commit lsn into each item if necessary.
- * Free descriptors pointing to items which are not dirty if freeing_chunk
- * is zero. If freeing_chunk is non-zero, then we need to unlock all
- * items in the chunk.
- *
- * Return the number of descriptors freed.
- */
-STATIC int
-xfs_trans_unlock_chunk(
- xfs_log_item_chunk_t *licp,
- int freeing_chunk,
- int abort,
- xfs_lsn_t commit_lsn)
-{
- xfs_log_item_desc_t *lidp;
- xfs_log_item_t *lip;
- int i;
- int freed;
-
- freed = 0;
- lidp = licp->lic_descs;
- for (i = 0; i < licp->lic_unused; i++, lidp++) {
- if (xfs_lic_isfree(licp, i)) {
- continue;
- }
- lip = lidp->lid_item;
- lip->li_desc = NULL;
-
- if (commit_lsn != NULLCOMMITLSN)
- IOP_COMMITTING(lip, commit_lsn);
- if (abort)
- lip->li_flags |= XFS_LI_ABORTED;
- IOP_UNLOCK(lip);
-
- /*
- * Free the descriptor if the item is not dirty
- * within this transaction and the caller is not
- * going to just free the entire thing regardless.
- */
- if (!(freeing_chunk) &&
- (!(lidp->lid_flags & XFS_LID_DIRTY) || abort)) {
- xfs_lic_relse(licp, i);
- freed++;
- }
- }
-
- return freed;
-}
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index c6e4f2c8de6..e2d93d8ead7 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -23,22 +23,8 @@ struct xfs_log_item_desc;
struct xfs_mount;
struct xfs_trans;
-/*
- * From xfs_trans_item.c
- */
-struct xfs_log_item_desc *xfs_trans_add_item(struct xfs_trans *,
- struct xfs_log_item *);
-void xfs_trans_free_item(struct xfs_trans *,
- struct xfs_log_item_desc *);
-struct xfs_log_item_desc *xfs_trans_find_item(struct xfs_trans *,
- struct xfs_log_item *);
-struct xfs_log_item_desc *xfs_trans_first_item(struct xfs_trans *);
-struct xfs_log_item_desc *xfs_trans_next_item(struct xfs_trans *,
- struct xfs_log_item_desc *);
-
-void xfs_trans_unlock_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn);
-void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
- int flags);
+void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
+void xfs_trans_del_item(struct xfs_log_item *);
void xfs_trans_item_committed(struct xfs_log_item *lip,
xfs_lsn_t commit_lsn, int aborted);
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 4d88616bde9..b7d5769d2df 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -25,18 +25,14 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"
-#include "xfs_rw.h"
#include "xfs_itable.h"
#include "xfs_utils.h"
@@ -324,86 +320,3 @@ xfs_bumplink(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
return 0;
}
-
-/*
- * Try to truncate the given file to 0 length. Currently called
- * only out of xfs_remove when it has to truncate a file to free
- * up space for the remove to proceed.
- */
-int
-xfs_truncate_file(
- xfs_mount_t *mp,
- xfs_inode_t *ip)
-{
- xfs_trans_t *tp;
- int error;
-
-#ifdef QUOTADEBUG
- /*
- * This is called to truncate the quotainodes too.
- */
- if (XFS_IS_UQUOTA_ON(mp)) {
- if (ip->i_ino != mp->m_sb.sb_uquotino)
- ASSERT(ip->i_udquot);
- }
- if (XFS_IS_OQUOTA_ON(mp)) {
- if (ip->i_ino != mp->m_sb.sb_gquotino)
- ASSERT(ip->i_gdquot);
- }
-#endif
- /*
- * Make the call to xfs_itruncate_start before starting the
- * transaction, because we cannot make the call while we're
- * in a transaction.
- */
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
- error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, (xfs_fsize_t)0);
- if (error) {
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return error;
- }
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
- if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_ITRUNCATE_LOG_COUNT))) {
- xfs_trans_cancel(tp, 0);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return error;
- }
-
- /*
- * Follow the normal truncate locking protocol. Since we
- * hold the inode in the transaction, we know that its number
- * of references will stay constant.
- */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- xfs_trans_ihold(tp, ip);
- /*
- * Signal a sync xaction. The only case where that isn't
- * the case is if we're truncating an already unlinked file
- * on a wsync fs. In that case, we know the blocks can't
- * reappear in the file because the links to file are
- * permanently toast. Currently, we're always going to
- * want a sync transaction because this code is being
- * called from places where nlink is guaranteed to be 1
- * but I'm leaving the tests in to protect against future
- * changes -- rcc.
- */
- error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0,
- XFS_DATA_FORK,
- ((ip->i_d.di_nlink != 0 ||
- !(mp->m_flags & XFS_MOUNT_WSYNC))
- ? 1 : 0));
- if (error) {
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
- XFS_TRANS_ABORT);
- } else {
- xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-
- return error;
-}
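
With xfs_truncate_file() gone, truncate-to-zero work moves into its callers, built from generic VFS helpers around the transaction. The xfs_setattr() hunks further down show the new shape; as a minimal sketch of that ordering (error handling elided, every identifier as it appears in those hunks):

	/* flush and wait for I/O on the range being exposed, before any locking */
	xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size, XBF_ASYNC, FI_NONE);
	xfs_ioend_wait(ip);

	/* zero the partial tail page through the generic buffer-head helper */
	code = -block_truncate_page(inode->i_mapping, iattr->ia_size,
				    xfs_get_blocks);

	/* only then reserve log space and publish the new size */
	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
	code = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				 XFS_TRANS_PERM_LOG_RES,
				 XFS_ITRUNCATE_LOG_COUNT);
	truncate_setsize(inode, iattr->ia_size);
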
diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h
index ef321225d26..f55b9678264 100644
--- a/fs/xfs/xfs_utils.h
+++ b/fs/xfs/xfs_utils.h
@@ -18,7 +18,6 @@
#ifndef __XFS_UTILS_H__
#define __XFS_UTILS_H__
-extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
xfs_dev_t, cred_t *, prid_t, int,
xfs_inode_t **, int *);
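
The xfs_vnodeops.c diff below repeatedly applies one mechanical rewrite to transaction/inode joins. Side by side, the two recurring conversions (the hunks read as if xfs_trans_ijoin_ref() now takes the extra inode reference that IHOLD() used to take; that reading is an inference, not stated in the patch):

	/* old: join with lock flags, then hold the inode across commit */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	/* new: a single flag-less join */
	xfs_trans_ijoin(tp, ip);

	/* old: take a reference by hand before joining */
	IHOLD(dp);
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	/* new: one helper that presumably joins and references in one step */
	xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
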
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 9d376be0ea3..66d585c6917 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -26,19 +26,14 @@
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
-#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
-#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
-#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
@@ -73,7 +68,7 @@ xfs_setattr(
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
int need_iolock = 1;
- xfs_itrace_entry(ip);
+ trace_xfs_setattr(ip);
if (mp->m_flags & XFS_MOUNT_RDONLY)
return XFS_ERROR(EROFS);
@@ -143,16 +138,6 @@ xfs_setattr(
goto error_return;
}
} else {
- if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
- !(flags & XFS_ATTR_DMI)) {
- int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR;
- code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip,
- iattr->ia_size, 0, dmflags, NULL);
- if (code) {
- lock_flags = 0;
- goto error_return;
- }
- }
if (need_iolock)
lock_flags |= XFS_IOLOCK_EXCL;
}
@@ -236,8 +221,11 @@ xfs_setattr(
* transaction to modify the i_size.
*/
code = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
+ if (code)
+ goto error_return;
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ lock_flags &= ~XFS_ILOCK_EXCL;
/*
* We are going to log the inode size change in this
@@ -251,40 +239,38 @@ xfs_setattr(
* really care about here and prevents waiting for other data
* not within the range we care about here.
*/
- if (!code &&
- ip->i_size != ip->i_d.di_size &&
+ if (ip->i_size != ip->i_d.di_size &&
iattr->ia_size > ip->i_d.di_size) {
code = xfs_flush_pages(ip,
ip->i_d.di_size, iattr->ia_size,
XBF_ASYNC, FI_NONE);
+ if (code)
+ goto error_return;
}
/* wait for all I/O to complete */
xfs_ioend_wait(ip);
- if (!code)
- code = xfs_itruncate_data(ip, iattr->ia_size);
- if (code) {
- ASSERT(tp == NULL);
- lock_flags &= ~XFS_ILOCK_EXCL;
- ASSERT(lock_flags == XFS_IOLOCK_EXCL);
+ code = -block_truncate_page(inode->i_mapping, iattr->ia_size,
+ xfs_get_blocks);
+ if (code)
goto error_return;
- }
+
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
- if ((code = xfs_trans_reserve(tp, 0,
- XFS_ITRUNCATE_LOG_RES(mp), 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_ITRUNCATE_LOG_COUNT))) {
- xfs_trans_cancel(tp, 0);
- if (need_iolock)
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return code;
- }
+ code = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_ITRUNCATE_LOG_COUNT);
+ if (code)
+ goto error_return;
+
+ truncate_setsize(inode, iattr->ia_size);
+
commit_flags = XFS_TRANS_RELEASE_LOG_RES;
+ lock_flags |= XFS_ILOCK_EXCL;
+
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, lock_flags);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
/*
* Only change the c/mtime if we are changing the size
@@ -334,8 +320,7 @@ xfs_setattr(
xfs_iflags_set(ip, XFS_ITRUNCATED);
}
} else if (tp) {
- xfs_trans_ijoin(tp, ip, lock_flags);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
}
/*
@@ -470,17 +455,10 @@ xfs_setattr(
return XFS_ERROR(code);
}
- if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) &&
- !(flags & XFS_ATTR_DMI)) {
- (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
- NULL, DM_RIGHT_NULL, NULL, NULL,
- 0, 0, AT_DELAY_FLAG(flags));
- }
return 0;
abort_return:
commit_flags |= XFS_TRANS_ABORT;
- /* FALLTHROUGH */
error_return:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
@@ -516,7 +494,7 @@ xfs_readlink_bmap(
int error = 0;
error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0,
- mval, &nmaps, NULL, NULL);
+ mval, &nmaps, NULL);
if (error)
goto out;
@@ -557,7 +535,7 @@ xfs_readlink(
int pathlen;
int error = 0;
- xfs_itrace_entry(ip);
+ trace_xfs_readlink(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@@ -613,14 +591,14 @@ xfs_free_eofblocks(
*/
end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size));
last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
- map_len = last_fsb - end_fsb;
- if (map_len <= 0)
+ if (last_fsb <= end_fsb)
return 0;
+ map_len = last_fsb - end_fsb;
nimaps = 1;
xfs_ilock(ip, XFS_ILOCK_SHARED);
error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0,
- NULL, 0, &imap, &nimaps, NULL, NULL);
+ NULL, 0, &imap, &nimaps, NULL);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!error && (nimaps != 0) &&
@@ -675,10 +653,7 @@ xfs_free_eofblocks(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip,
- XFS_IOLOCK_EXCL |
- XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
error = xfs_itruncate_finish(&tp, ip,
ip->i_size,
@@ -750,8 +725,7 @@ xfs_inactive_symlink_rmt(
xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
size = (int)ip->i_d.di_size;
ip->i_d.di_size = 0;
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/*
* Find the block(s) so we can inval and unmap them.
@@ -761,7 +735,7 @@ xfs_inactive_symlink_rmt(
nmaps = ARRAY_SIZE(mval);
if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
- &free_list, NULL)))
+ &free_list)))
goto error0;
/*
* Invalidate the block(s).
@@ -776,7 +750,7 @@ xfs_inactive_symlink_rmt(
* Unmap the dead block(s) to the free_list.
*/
if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
- &first_block, &free_list, NULL, &done)))
+ &first_block, &free_list, &done)))
goto error1;
ASSERT(done);
/*
@@ -795,8 +769,7 @@ xfs_inactive_symlink_rmt(
* Mark it dirty so it will be logged and moved forward in the log as
* part of every commit.
*/
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/*
* Get a new, empty transaction to return to our caller.
@@ -929,8 +902,7 @@ xfs_inactive_attrs(
goto error_cancel;
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
ASSERT(ip->i_d.di_anextents == 0);
@@ -1035,8 +1007,6 @@ xfs_inactive(
int error;
int truncate;
- xfs_itrace_entry(ip);
-
/*
* If the inode is already free, then there can be nothing
* to clean up here.
@@ -1060,9 +1030,6 @@ xfs_inactive(
mp = ip->i_mount;
- if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY))
- XFS_SEND_DESTROY(mp, ip, DM_RIGHT_NULL);
-
error = 0;
/* If this is a read-only mount, don't do this (would generate I/O) */
@@ -1120,8 +1087,7 @@ xfs_inactive(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
/*
* normally, we have to run xfs_itruncate_finish sync.
@@ -1154,8 +1120,7 @@ xfs_inactive(
return VN_INACTIVE_CACHE;
}
- xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
} else {
error = xfs_trans_reserve(tp, 0,
XFS_IFREE_LOG_RES(mp),
@@ -1168,8 +1133,7 @@ xfs_inactive(
}
xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
}
/*
@@ -1257,7 +1221,7 @@ xfs_lookup(
int error;
uint lock_mode;
- xfs_itrace_entry(dp);
+ trace_xfs_lookup(dp, name);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return XFS_ERROR(EIO);
@@ -1269,7 +1233,7 @@ xfs_lookup(
if (error)
goto out;
- error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0);
+ error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
if (error)
goto out_free_name;
@@ -1309,21 +1273,11 @@ xfs_create(
uint log_res;
uint log_count;
- xfs_itrace_entry(dp);
+ trace_xfs_create(dp, name);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
- error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
- dp, DM_RIGHT_NULL, NULL,
- DM_RIGHT_NULL, name->name, NULL,
- mode, 0, 0);
-
- if (error)
- return error;
- }
-
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
else
@@ -1427,8 +1381,7 @@ xfs_create(
* the transaction cancel unlocking dp so don't do it explicitly in the
* error path.
*/
- IHOLD(dp);
- xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
unlock_dp_on_error = B_FALSE;
error = xfs_dir_createname(tp, dp, name, ip->i_ino,
@@ -1487,16 +1440,7 @@ xfs_create(
xfs_qm_dqrele(gdqp);
*ipp = ip;
-
- /* Fallthrough to std_return with error = 0 */
- std_return:
- if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
- XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE, dp, DM_RIGHT_NULL,
- ip, DM_RIGHT_NULL, name->name, NULL, mode,
- error, 0);
- }
-
- return error;
+ return 0;
out_bmap_cancel:
xfs_bmap_cancel(&free_list);
@@ -1510,8 +1454,8 @@ xfs_create(
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
- goto std_return;
+ std_return:
+ return error;
out_abort_rele:
/*
@@ -1726,20 +1670,11 @@ xfs_remove(
uint resblks;
uint log_count;
- xfs_itrace_entry(dp);
- xfs_itrace_entry(ip);
+ trace_xfs_remove(dp, name);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {
- error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dp, DM_RIGHT_NULL,
- NULL, DM_RIGHT_NULL, name->name, NULL,
- ip->i_d.di_mode, 0, 0);
- if (error)
- return error;
- }
-
error = xfs_qm_dqattach(dp, 0);
if (error)
goto std_return;
@@ -1782,15 +1717,8 @@ xfs_remove(
xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
- /*
- * At this point, we've gotten both the directory and the entry
- * inodes locked.
- */
- IHOLD(ip);
- xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
-
- IHOLD(dp);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
/*
* If we're removing a directory perform some additional validation.
@@ -1877,21 +1805,15 @@ xfs_remove(
if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
xfs_filestream_deassociate(ip);
- std_return:
- if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
- XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE, dp, DM_RIGHT_NULL,
- NULL, DM_RIGHT_NULL, name->name, NULL,
- ip->i_d.di_mode, error, 0);
- }
-
- return error;
+ return 0;
out_bmap_cancel:
xfs_bmap_cancel(&free_list);
cancel_flags |= XFS_TRANS_ABORT;
out_trans_cancel:
xfs_trans_cancel(tp, cancel_flags);
- goto std_return;
+ std_return:
+ return error;
}
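
xfs_create(), xfs_remove() and xfs_link() all receive the same control-flow cleanup: with the DMAPI post-events gone, success paths return 0 (or the xfs_trans_commit() value) directly, and std_return shrinks to a plain error exit at the bottom of the function. Schematically, the pattern the hunks converge on (a sketch, not lines from the patch):

	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto std_return;
	return 0;	/* old code fell through to a DMAPI post-event here */

 out_trans_cancel:
	xfs_trans_cancel(tp, cancel_flags);
 std_return:
	return error;
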
int
@@ -1909,25 +1831,13 @@ xfs_link(
int committed;
int resblks;
- xfs_itrace_entry(tdp);
- xfs_itrace_entry(sip);
+ trace_xfs_link(tdp, target_name);
ASSERT(!S_ISDIR(sip->i_d.di_mode));
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- if (DM_EVENT_ENABLED(tdp, DM_EVENT_LINK)) {
- error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK,
- tdp, DM_RIGHT_NULL,
- sip, DM_RIGHT_NULL,
- target_name->name, NULL, 0, 0, 0);
- if (error)
- return error;
- }
-
- /* Return through std_return after this point. */
-
error = xfs_qm_dqattach(sip, 0);
if (error)
goto std_return;
@@ -1953,15 +1863,8 @@ xfs_link(
xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
- /*
- * Increment vnode ref counts since xfs_trans_commit &
- * xfs_trans_cancel will both unlock the inodes and
- * decrement the associated ref counts.
- */
- IHOLD(sip);
- IHOLD(tdp);
- xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, sip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, tdp, XFS_ILOCK_EXCL);
/*
* If the source has too many links, we can't make any more to it.
@@ -2014,27 +1917,14 @@ xfs_link(
goto abort_return;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- if (error)
- goto std_return;
-
- /* Fall through to std_return with error = 0. */
-std_return:
- if (DM_EVENT_ENABLED(sip, DM_EVENT_POSTLINK)) {
- (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK,
- tdp, DM_RIGHT_NULL,
- sip, DM_RIGHT_NULL,
- target_name->name, NULL, 0, error, 0);
- }
- return error;
+ return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
abort_return:
cancel_flags |= XFS_TRANS_ABORT;
- /* FALLTHROUGH */
-
error_return:
xfs_trans_cancel(tp, cancel_flags);
- goto std_return;
+ std_return:
+ return error;
}
int
@@ -2074,7 +1964,7 @@ xfs_symlink(
ip = NULL;
tp = NULL;
- xfs_itrace_entry(dp);
+ trace_xfs_symlink(dp, link_name);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@@ -2086,17 +1976,6 @@ xfs_symlink(
if (pathlen >= MAXPATHLEN) /* total string too long */
return XFS_ERROR(ENAMETOOLONG);
- if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
- error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
- DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
- link_name->name,
- (unsigned char *)target_path, 0, 0, 0);
- if (error)
- return error;
- }
-
- /* Return through std_return after this point. */
-
udqp = gdqp = NULL;
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
@@ -2180,8 +2059,7 @@ xfs_symlink(
* transaction cancel unlocking dp so don't do it explicitly in the
* error path.
*/
- IHOLD(dp);
- xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, dp, XFS_ILOCK_EXCL);
unlock_dp_on_error = B_FALSE;
/*
@@ -2215,7 +2093,7 @@ xfs_symlink(
error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
&first_block, resblks, mval, &nmaps,
- &free_list, NULL);
+ &free_list);
if (error) {
goto error1;
}
@@ -2278,21 +2156,8 @@ xfs_symlink(
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
- /* Fall through to std_return with error = 0 or errno from
- * xfs_trans_commit */
-std_return:
- if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTSYMLINK)) {
- (void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK,
- dp, DM_RIGHT_NULL,
- error ? NULL : ip,
- DM_RIGHT_NULL, link_name->name,
- (unsigned char *)target_path,
- 0, error, 0);
- }
-
- if (!error)
- *ipp = ip;
- return error;
+ *ipp = ip;
+ return 0;
error2:
IRELE(ip);
@@ -2306,8 +2171,8 @@ std_return:
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
- goto std_return;
+ std_return:
+ return error;
}
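
Alongside the DMAPI removal, every xfs_bmapi()/xfs_bunmapi() call in this file loses one trailing argument, always a NULL at these call sites. Presumably the dropped parameter is the extent-delta tracking pointer that only the removed DMAPI paths consumed; the patch itself only shows the narrowing:

	/* old call shape, with the always-NULL trailing argument */
	error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
			  XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
			  &first_block, resblks, mval, &nmaps,
			  &free_list, NULL);

	/* new call shape */
	error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
			  XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
			  &first_block, resblks, mval, &nmaps,
			  &free_list);
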
int
@@ -2333,13 +2198,12 @@ xfs_set_dmattrs(
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
ip->i_d.di_dmevmask = evmask;
ip->i_d.di_dmstate = state;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- IHOLD(ip);
error = xfs_trans_commit(tp, 0);
return error;
@@ -2390,7 +2254,7 @@ xfs_alloc_file_space(
int committed;
int error;
- xfs_itrace_entry(ip);
+ trace_xfs_alloc_file_space(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
@@ -2412,25 +2276,9 @@ xfs_alloc_file_space(
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
allocatesize_fsb = XFS_B_TO_FSB(mp, count);
- /* Generate a DMAPI event if needed. */
- if (alloc_type != 0 && offset < ip->i_size &&
- (attr_flags & XFS_ATTR_DMI) == 0 &&
- DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
- xfs_off_t end_dmi_offset;
-
- end_dmi_offset = offset+len;
- if (end_dmi_offset > ip->i_size)
- end_dmi_offset = ip->i_size;
- error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, offset,
- end_dmi_offset - offset, 0, NULL);
- if (error)
- return error;
- }
-
/*
* Allocate file space until done or until there is an error
*/
-retry:
while (allocatesize_fsb && !error) {
xfs_fileoff_t s, e;
@@ -2488,8 +2336,7 @@ retry:
if (error)
goto error1;
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
/*
* Issue the xfs_bmapi() call to allocate the blocks
@@ -2498,7 +2345,7 @@ retry:
error = xfs_bmapi(tp, ip, startoffset_fsb,
allocatesize_fsb, bmapi_flag,
&firstfsb, 0, imapp, &nimaps,
- &free_list, NULL);
+ &free_list);
if (error) {
goto error0;
}
@@ -2527,17 +2374,6 @@ retry:
startoffset_fsb += allocated_fsb;
allocatesize_fsb -= allocated_fsb;
}
-dmapi_enospc_check:
- if (error == ENOSPC && (attr_flags & XFS_ATTR_DMI) == 0 &&
- DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) {
- error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE,
- ip, DM_RIGHT_NULL,
- ip, DM_RIGHT_NULL,
- NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */
- if (error == 0)
- goto retry; /* Maybe DMAPI app. has made space */
- /* else fall through with error from XFS_SEND_DATA */
- }
return error;
@@ -2548,7 +2384,7 @@ error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- goto dmapi_enospc_check;
+ return error;
}
/*
@@ -2598,7 +2434,7 @@ xfs_zero_remaining_bytes(
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimap = 1;
error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0,
- NULL, 0, &imap, &nimap, NULL, NULL);
+ NULL, 0, &imap, &nimap, NULL);
if (error || nimap < 1)
break;
ASSERT(imap.br_blockcount >= 1);
@@ -2661,7 +2497,6 @@ xfs_free_file_space(
{
int committed;
int done;
- xfs_off_t end_dmi_offset;
xfs_fileoff_t endoffset_fsb;
int error;
xfs_fsblock_t firstfsb;
@@ -2680,7 +2515,7 @@ xfs_free_file_space(
mp = ip->i_mount;
- xfs_itrace_entry(ip);
+ trace_xfs_free_file_space(ip);
error = xfs_qm_dqattach(ip, 0);
if (error)
@@ -2691,19 +2526,7 @@ xfs_free_file_space(
return error;
rt = XFS_IS_REALTIME_INODE(ip);
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
- end_dmi_offset = offset + len;
- endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset);
-
- if (offset < ip->i_size && (attr_flags & XFS_ATTR_DMI) == 0 &&
- DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
- if (end_dmi_offset > ip->i_size)
- end_dmi_offset = ip->i_size;
- error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip,
- offset, end_dmi_offset - offset,
- AT_DELAY_FLAG(attr_flags), NULL);
- if (error)
- return error;
- }
+ endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
if (attr_flags & XFS_ATTR_NOLOCK)
need_iolock = 0;
@@ -2731,7 +2554,7 @@ xfs_free_file_space(
if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
nimap = 1;
error = xfs_bmapi(NULL, ip, startoffset_fsb,
- 1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
+ 1, 0, NULL, 0, &imap, &nimap, NULL);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
@@ -2746,7 +2569,7 @@ xfs_free_file_space(
}
nimap = 1;
error = xfs_bmapi(NULL, ip, endoffset_fsb - 1,
- 1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
+ 1, 0, NULL, 0, &imap, &nimap, NULL);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
@@ -2814,8 +2637,7 @@ xfs_free_file_space(
if (error)
goto error1;
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
/*
* issue the bunmapi() call to free the blocks
@@ -2823,7 +2645,7 @@ xfs_free_file_space(
xfs_bmap_init(&free_list, &firstfsb);
error = xfs_bunmapi(tp, ip, startoffset_fsb,
endoffset_fsb - startoffset_fsb,
- 0, 2, &firstfsb, &free_list, NULL, &done);
+ 0, 2, &firstfsb, &free_list, &done);
if (error) {
goto error0;
}
@@ -2883,8 +2705,6 @@ xfs_change_file_space(
xfs_trans_t *tp;
struct iattr iattr;
- xfs_itrace_entry(ip);
-
if (!S_ISREG(ip->i_d.di_mode))
return XFS_ERROR(EINVAL);
@@ -2985,8 +2805,7 @@ xfs_change_file_space(
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
+ xfs_trans_ijoin(tp, ip);
if ((attr_flags & XFS_ATTR_DMI) == 0) {
ip->i_d.di_mode &= ~S_ISUID;