File: [Development] / xfs-linux / linux-2.4 / Attic / xfs_super.c
Revision 1.336, Tue May 22 15:50:48 2007 UTC by dgc.longdrop.melbourne.sgi.com
Branch: MAIN
Changes since 1.335: +3 -2 lines
Lazy Superblock Counters
When we have a couple of hundred transactions on the fly at once,
they all typically modify the on-disk superblock in some way.
create/unlink/mkdir/rmdir modify inode counts, allocation/freeing
modify free block counts.
When these counts are modified in a transaction, they must eventually
lock the superblock buffer and apply the mods. The buffer then
remains locked until the transaction is committed into the incore
log buffer. The result of this is that with enough transactions on
the fly the incore superblock buffer becomes a bottleneck.
The result of contention on the incore superblock buffer is that
transaction rates fall - the more pressure that is put on the
superblock buffer, the slower things go.
The key to removing the contention is to not require the superblock
fields in question to be locked. We do that by not marking the
superblock dirty in the transaction. IOWs, we modify the incore
superblock but do not modify the cached superblock buffer. In short,
we do not log superblock modifications to critical fields in the
superblock on every transaction. In fact we only do it just before
we write the superblock to disk every sync period or just before
unmount.
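A minimal user-space sketch of the idea (illustrative names only, not
the kernel code): the transaction path touches just the in-core
counters, and the superblock buffer is brought up to date once per
sync period or at unmount:

    /* Illustrative model only, not the XFS code itself. */
    #include <stdio.h>

    struct counters { long long icount, ifree, fdblocks; };

    static struct counters incore = { 100, 10, 1000 }; /* in-core superblock */
    static struct counters ondisk = { 100, 10, 1000 }; /* superblock buffer  */

    /* Transaction path: modify the in-core counters only; the superblock
     * buffer is neither locked nor logged here. */
    static void trans_mod_icount(long long delta)   { incore.icount += delta; }
    static void trans_mod_fdblocks(long long delta) { incore.fdblocks += delta; }

    /* Sync/unmount path: fold the in-core values into the superblock
     * buffer and log/write it once. */
    static void sync_superblock(void) { ondisk = incore; }

    int main(void)
    {
            trans_mod_icount(2);            /* e.g. two creates      */
            trans_mod_fdblocks(-16);        /* blocks they allocated */
            sync_superblock();              /* once per sync period  */
            printf("on-disk icount %lld, fdblocks %lld\n",
                   ondisk.icount, ondisk.fdblocks);
            return 0;
    }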
This creates an interesting problem - if we don't log or write out
the fields in every transaction, then how do the values get
recovered after a crash? The answer is simple - we keep enough
duplicate, logged information in other structures that we can
reconstruct the correct count after log recovery has been
performed.
It is the AGF and AGI structures that contain the duplicate
information; after recovery, we walk every AGI and AGF and sum their
individual counters to get the correct value, and we do a
transaction into the log to correct them. An optimisation of this is
that if we have a clean unmount record, we know the value in the
superblock is correct, so we can avoid the summation walk under
normal conditions and so mount/recovery times do not change under
normal operation.
One wrinkle that was discovered during development was that the
blocks used in the freespace btrees are never accounted for in the
AGF counters. This was once a valid optimisation to make; when the
filesystem is full, the free space btrees are empty and consume no
space. Hence when it matters, the "accounting" is correct. But that
means that when we do the AGF summations, we would not have a correct
count and xfs_check would complain. Hence a new counter was added
to track the number of blocks used by the free space btrees. This is
an *on-disk format change*.
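A rough user-space sketch of that summation (the struct layouts are
simplified stand-ins; the field names follow the on-disk AGI/AGF
headers, with agf_btreeblks being the new counter just described):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the on-disk AGI and AGF headers */
    struct agi { uint32_t agi_count, agi_freecount; };    /* inode counts */
    struct agf { uint32_t agf_freeblks, agf_btreeblks; }; /* free + btree blocks */

    int main(void)
    {
            struct agi agis[2] = { { 64, 3 }, { 32, 1 } };
            struct agf agfs[2] = { { 1000, 4 }, { 2000, 6 } };
            uint64_t icount = 0, ifree = 0, fdblocks = 0;
            int ag;

            for (ag = 0; ag < 2; ag++) {
                    icount   += agis[ag].agi_count;
                    ifree    += agis[ag].agi_freecount;
                    /* blocks held by the freespace btrees are free space
                     * as far as the superblock counter is concerned */
                    fdblocks += agfs[ag].agf_freeblks + agfs[ag].agf_btreeblks;
            }
            printf("icount=%llu ifree=%llu fdblocks=%llu\n",
                   (unsigned long long)icount, (unsigned long long)ifree,
                   (unsigned long long)fdblocks);
            return 0;
    }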
As a result of this format change, lazy superblock counters are a mkfs option
and at the moment on linux there is no way to convert an old
filesystem. This is possible - xfs_db can be used to twiddle the
right bits and then xfs_repair will do the format conversion
for you. Similarly, you can convert backwards as well. At some point
we'll add functionality to xfs_admin to do the bit twiddling
easily....
Merge of xfs-linux-melb:xfs-kern:28652a by kenmcd.
Changes to support lazy superblock counters.
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_export.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_version.h"
#include "xfs_ioctl32.h"
#include <linux/init.h>
static struct quotactl_ops xfs_quotactl_operations;
static struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_vnode_zone;
STATIC struct xfs_mount_args *
xfs_args_allocate(
struct super_block *sb,
int silent)
{
struct xfs_mount_args *args;
args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
args->logbufs = args->logbufsize = -1;
strncpy(args->fsname, bdevname(sb->s_dev), MAXNAMELEN);
/* Copy the already-parsed mount(2) flags we're interested in */
if (sb->s_flags & MS_SYNCHRONOUS)
args->flags |= XFSMNT_WSYNC;
if (silent)
args->flags |= XFSMNT_QUIET;
args->flags |= XFSMNT_32BITINODES;
return args;
}
__uint64_t
xfs_max_file_offset(
unsigned int blockshift)
{
unsigned int pagefactor = 1;
unsigned int bitshift = BITS_PER_LONG - 1;
/* Figure out maximum filesize, on Linux this can depend on
* the filesystem blocksize (on 32 bit platforms).
* __block_prepare_write does this in an [unsigned] long...
* page->index << (PAGE_CACHE_SHIFT - bbits)
* So, for page sized blocks (4K on 32 bit platforms),
* this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
* (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
* but for smaller blocksizes it is less (bbits = log2 bsize).
* Note1: get_block_t takes a long (implicit cast from above)
* Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
* can optionally convert the [unsigned] long from above into
* an [unsigned] long long.
*/
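/*
 * Worked examples, assuming 4k pages: 64 bit gives 2^63 - 1;
 * 32 bit with CONFIG_LBD gives (4k << 32) - 1 = 16TiB - 1;
 * 32 bit without CONFIG_LBD and 4k blocks gives
 * (4k << 31) - 1 = 8TiB - 1 (less again for smaller blocks).
 */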
#if BITS_PER_LONG == 32
# if defined(CONFIG_LBD)
ASSERT(sizeof(sector_t) == 8);
pagefactor = PAGE_CACHE_SIZE;
bitshift = BITS_PER_LONG;
# else
pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif
return (((__uint64_t)pagefactor) << bitshift) - 1;
}
STATIC_INLINE void
xfs_set_inodeops(
struct inode *inode)
{
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_op = &xfs_inode_operations;
inode->i_fop = &xfs_file_operations;
inode->i_mapping->a_ops = &xfs_address_space_operations;
break;
case S_IFDIR:
inode->i_op = &xfs_dir_inode_operations;
inode->i_fop = &xfs_dir_file_operations;
break;
case S_IFLNK:
inode->i_op = &xfs_symlink_inode_operations;
if (inode->i_blocks)
inode->i_mapping->a_ops = &xfs_address_space_operations;
break;
default:
inode->i_op = &xfs_inode_operations;
init_special_inode(inode, inode->i_mode,
kdev_t_to_nr(inode->i_rdev));
break;
}
}
STATIC_INLINE void
xfs_revalidate_inode(
xfs_mount_t *mp,
bhv_vnode_t *vp,
xfs_inode_t *ip)
{
struct inode *inode = vn_to_inode(vp);
inode->i_mode = ip->i_d.di_mode;
inode->i_nlink = ip->i_d.di_nlink;
inode->i_uid = ip->i_d.di_uid;
inode->i_gid = ip->i_d.di_gid;
switch (inode->i_mode & S_IFMT) {
case S_IFBLK:
case S_IFCHR:
inode->i_rdev = XFS_DEV_TO_KDEVT(ip->i_df.if_u2.if_rdev);
break;
default:
inode->i_rdev = NODEV;
break;
}
inode->i_blksize = xfs_preferred_iosize(mp);
inode->i_generation = ip->i_d.di_gen;
i_size_write(inode, ip->i_d.di_size);
inode->i_blocks =
XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
inode->i_atime = ip->i_d.di_atime.t_sec;
inode->i_mtime = ip->i_d.di_mtime.t_sec;
inode->i_ctime = ip->i_d.di_ctime.t_sec;
if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
inode->i_flags |= S_IMMUTABLE;
else
inode->i_flags &= ~S_IMMUTABLE;
if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
inode->i_flags |= S_APPEND;
else
inode->i_flags &= ~S_APPEND;
if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
inode->i_flags |= S_SYNC;
else
inode->i_flags &= ~S_SYNC;
if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
inode->i_flags |= S_NOATIME;
else
inode->i_flags &= ~S_NOATIME;
vp->v_flag &= ~VMODIFIED;
}
void
xfs_initialize_vnode(
bhv_desc_t *bdp,
bhv_vnode_t *vp,
bhv_desc_t *inode_bhv,
int unlock)
{
xfs_inode_t *ip = XFS_BHVTOI(inode_bhv);
struct inode *inode = vn_to_inode(vp);
if (!inode_bhv->bd_vobj) {
vp->v_vfsp = bhvtovfs(bdp);
bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
}
/*
* We need to set the ops vectors, and unlock the inode, but if
* we have been called during the new inode create process, it is
* too early to fill in the Linux inode. We will get called a
* second time once the inode is properly set up, and then we can
* finish our work.
*/
if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
xfs_set_inodeops(inode);
ip->i_flags &= ~XFS_INEW;
barrier();
unlock_new_inode(inode);
}
}
struct inode *
xfs_get_inode(
bhv_desc_t *bdp,
xfs_ino_t ino,
int flags)
{
struct bhv_vfs *vfsp = bhvtovfs(bdp);
return iget_locked(vfsp->vfs_super, ino);
}
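/*
 * Local definition of d_alloc_anon() (not provided by the 2.4 VFS):
 * reuse an existing connected dentry alias for the inode if there is
 * one, otherwise allocate a new DCACHE_NFSD_DISCONNECTED root dentry.
 * Used by the NFS export code in xfs_fs_fh_to_dentry() below.
 */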
struct dentry *
d_alloc_anon(struct inode *inode)
{
struct dentry *dentry;
spin_lock(&dcache_lock);
list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
if (!(dentry->d_flags & DCACHE_NFSD_DISCONNECTED))
goto found;
}
spin_unlock(&dcache_lock);
dentry = d_alloc_root(inode);
if (likely(dentry != NULL))
dentry->d_flags |= DCACHE_NFSD_DISCONNECTED;
return dentry;
found:
dget_locked(dentry);
dentry->d_vfs_flags |= DCACHE_REFERENCED;
spin_unlock(&dcache_lock);
iput(inode);
return dentry;
}
/*ARGSUSED*/
int
xfs_blkdev_get(
xfs_mount_t *mp,
const char *name,
struct block_device **bdevp)
{
struct nameidata nd;
int error;
error = path_lookup(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd);
if (error) {
printk("XFS: Invalid device [%s], error=%d\n", name, error);
return -error;
}
/* I think we actually want bd_acquire here.. --hch */
*bdevp = bdget(kdev_t_to_nr(nd.dentry->d_inode->i_rdev));
if (*bdevp)
error = blkdev_get(*bdevp, FMODE_READ|FMODE_WRITE, 0, BDEV_FS);
else
error = -ENOMEM;
path_release(&nd);
return -error;
}
void
xfs_blkdev_put(
struct block_device *bdev)
{
if (bdev)
blkdev_put(bdev, BDEV_FS);
}
void
xfs_mountfs_check_barriers(xfs_mount_t *mp)
{
xfs_fs_cmn_err(CE_NOTE, mp,
"Write barriers not supported on Linux 2.4");
mp->m_flags &= ~XFS_MOUNT_BARRIER;
}
void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
}
STATIC struct inode *
xfs_fs_alloc_inode(
struct super_block *sb)
{
bhv_vnode_t *vp;
vp = (bhv_vnode_t *)kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP);
if (unlikely(!vp))
return NULL;
return vn_to_inode(vp);
}
STATIC void
xfs_fs_destroy_inode(
struct inode *inode)
{
kmem_zone_free(xfs_vnode_zone, vn_from_inode(inode));
}
#define VNODE_SIZE \
(sizeof(bhv_vnode_t) - sizeof(struct inode) + offsetof(struct inode, u))
STATIC void
xfs_fs_inode_init_once(
void *vnode,
kmem_zone_t *zonep,
unsigned long flags)
{
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
memset(vnode, 0, VNODE_SIZE);
__inode_init_once(vn_to_inode((bhv_vnode_t *)vnode));
}
}
STATIC int
xfs_init_inodecache( void )
{
xfs_vnode_zone = kmem_zone_init_flags(VNODE_SIZE, "xfs_vnode",
KM_ZONE_HWALIGN | KM_ZONE_RECLAIM,
xfs_fs_inode_init_once);
if (!xfs_vnode_zone)
return -ENOMEM;
return 0;
}
STATIC void
xfs_destroy_inodecache( void )
{
kmem_zone_destroy(xfs_vnode_zone);
}
/*
* Attempt to flush the inode, this will actually fail
* if the inode is pinned, but we dirty the inode again
* at the point when it is unpinned after a log write,
* since this is when the inode itself becomes flushable.
*/
STATIC void
xfs_fs_write_inode(
struct inode *inode,
int sync)
{
bhv_vnode_t *vp = vn_from_inode(inode);
int error, flags = FLUSH_INODE;
if (vp) {
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
if (sync)
flags |= FLUSH_SYNC;
error = bhv_vop_iflush(vp, flags);
if (error == EAGAIN)
error = sync? bhv_vop_iflush(vp, flags | FLUSH_LOG) : 0;
}
}
STATIC void
xfs_fs_clear_inode(
struct inode *inode)
{
bhv_vnode_t *vp = vn_from_inode(inode);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
XFS_STATS_INC(vn_rele);
XFS_STATS_INC(vn_remove);
XFS_STATS_INC(vn_reclaim);
XFS_STATS_DEC(vn_active);
/*
* This can happen because xfs_iget_core calls xfs_idestroy if we
* find an inode with di_mode == 0 but without IGET_CREATE set.
*/
if (VNHEAD(vp))
bhv_vop_inactive(vp, NULL);
VN_LOCK(vp);
vp->v_flag &= ~VMODIFIED;
VN_UNLOCK(vp, 0);
if (VNHEAD(vp))
if (bhv_vop_reclaim(vp))
panic("%s: cannot reclaim 0x%p\n", __FUNCTION__, vp);
ASSERT(VNHEAD(vp) == NULL);
#ifdef XFS_VNODE_TRACE
ktrace_free(vp->v_trace);
#endif
}
/*
* Enqueue a work item to be picked up by the vfs xfssyncd thread.
* Doing this has two advantages:
* - It saves on stack space, which is tight in certain situations
* - It can be used (with care) as a mechanism to avoid deadlocks.
* Flushing while allocating in a full filesystem requires both.
*/
STATIC void
xfs_syncd_queue_work(
struct bhv_vfs *vfs,
void *data,
void (*syncer)(bhv_vfs_t *, void *))
{
bhv_vfs_sync_work_t *work;
work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
INIT_LIST_HEAD(&work->w_list);
work->w_syncer = syncer;
work->w_data = data;
work->w_vfs = vfs;
spin_lock(&vfs->vfs_sync_lock);
list_add_tail(&work->w_list, &vfs->vfs_sync_list);
spin_unlock(&vfs->vfs_sync_lock);
wake_up_process(vfs->vfs_sync_task);
}
/*
* Flush delayed allocate data, attempting to free up reserved space
* from existing allocations. At this point a new allocation attempt
* has failed with ENOSPC and we are in the process of scratching our
* heads, looking about for more room...
*/
STATIC void
xfs_flush_inode_work(
bhv_vfs_t *vfs,
void *inode)
{
filemap_fdatawrite(((struct inode *)inode)->i_mapping);
iput((struct inode *)inode);
}
void
xfs_flush_inode(
xfs_inode_t *ip)
{
struct inode *inode = vn_to_inode(XFS_ITOV(ip));
struct bhv_vfs *vfs = XFS_MTOVFS(ip->i_mount);
igrab(inode);
xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
delay(HZ/2);
}
/*
* This is the "bigger hammer" version of xfs_flush_inode_work...
* (IOW, "If at first you don't succeed, use a Bigger Hammer").
*/
STATIC void
xfs_flush_device_work(
bhv_vfs_t *vfs,
void *inode)
{
fsync_no_super(((struct inode *)inode)->i_dev);
iput((struct inode *)inode);
}
void
xfs_flush_device(
xfs_inode_t *ip)
{
struct inode *inode = vn_to_inode(XFS_ITOV(ip));
struct bhv_vfs *vfs = XFS_MTOVFS(ip->i_mount);
igrab(inode);
xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
delay(HZ/2);
xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
STATIC void
vfs_sync_worker(
bhv_vfs_t *vfsp,
void *unused)
{
if (!(vfsp->vfs_flag & VFS_RDONLY))
bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
SYNC_ATTR | SYNC_REFCACHE | SYNC_SUPER,
NULL);
}
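/*
 * The xfssyncd thread: wake up every xfs_syncd_centisecs, queue the
 * periodic vfs_sync_worker() item, and run any work queued through
 * xfs_syncd_queue_work() until the filesystem is unmounted.
 */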
STATIC int
xfssyncd(
void *arg)
{
long timeleft;
bhv_vfs_t *vfsp = (bhv_vfs_t *) arg;
struct list_head tmp;
bhv_vfs_sync_work_t *work, *n;
daemonize();
reparent_to_init();
sigmask_lock();
sigfillset(&current->blocked);
__recalc_sigpending(current);
sigmask_unlock();
sprintf(current->comm, "xfssyncd");
vfsp->vfs_sync_work.w_vfs = vfsp;
vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
vfsp->vfs_sync_task = current;
wmb();
wake_up(&vfsp->vfs_wait_sync_task);
INIT_LIST_HEAD(&tmp);
timeleft = (xfs_syncd_centisecs * HZ) / 100;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
timeleft = schedule_timeout(timeleft);
if ((vfsp->vfs_flag & VFS_UMOUNT) &&
list_empty(&vfsp->vfs_sync_list))
break;
spin_lock(&vfsp->vfs_sync_lock);
if (!timeleft) {
timeleft = (xfs_syncd_centisecs * HZ) / 100;
INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
list_add_tail(&vfsp->vfs_sync_work.w_list,
&vfsp->vfs_sync_list);
}
list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
list_move(&work->w_list, &tmp);
spin_unlock(&vfsp->vfs_sync_lock);
list_for_each_entry_safe(work, n, &tmp, w_list) {
(*work->w_syncer)(vfsp, work->w_data);
list_del(&work->w_list);
if (work == &vfsp->vfs_sync_work)
continue;
kmem_free(work, sizeof(struct bhv_vfs_sync_work));
}
}
vfsp->vfs_sync_task = NULL;
wmb();
wake_up(&vfsp->vfs_wait_sync_task);
return 0;
}
STATIC int
xfs_fs_start_syncd(
bhv_vfs_t *vfsp)
{
int pid;
pid = kernel_thread(xfssyncd, (void *) vfsp,
CLONE_VM | CLONE_FS | CLONE_FILES);
if (pid < 0)
return pid;
wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
return 0;
}
STATIC void
xfs_fs_stop_syncd(
bhv_vfs_t *vfsp)
{
vfsp->vfs_flag |= VFS_UMOUNT;
wmb();
wake_up_process(vfsp->vfs_sync_task);
wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
}
STATIC void
xfs_fs_put_super(
struct super_block *sb)
{
bhv_vfs_t *vfsp = vfs_from_sb(sb);
int error;
xfs_fs_stop_syncd(vfsp);
bhv_vfs_sync(vfsp, SYNC_ATTR | SYNC_DELWRI, NULL);
error = bhv_vfs_unmount(vfsp, 0, NULL);
if (error) {
printk("XFS: unmount got error=%d\n", error);
printk("%s: vfs=0x%p left dangling!\n", __FUNCTION__, vfsp);
} else {
vfs_deallocate(vfsp);
}
}
STATIC void
xfs_fs_write_super(
struct super_block *sb)
{
if (!(sb->s_flags & MS_RDONLY))
bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA, NULL);
sb->s_dirt = 0;
}
STATIC int
xfs_fs_sync_super(
struct super_block *sb)
{
int error;
error = bhv_vfs_sync(vfs_from_sb(sb), SYNC_FSDATA | SYNC_WAIT, NULL);
sb->s_dirt = 0;
return -error;
}
STATIC int
xfs_fs_statfs(
struct super_block *sb,
struct statfs *statp)
{
return -bhv_vfs_statvfs(vfs_from_sb(sb), statp, NULL);
}
STATIC int
xfs_fs_remount(
struct super_block *sb,
int *flags,
char *options)
{
bhv_vfs_t *vfsp = vfs_from_sb(sb);
struct xfs_mount_args *args = xfs_args_allocate(sb, 0);
int error;
error = bhv_vfs_parseargs(vfsp, options, args, 1);
if (!error)
error = bhv_vfs_mntupdate(vfsp, flags, args);
kmem_free(args, sizeof(*args));
return -error;
}
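/*
 * Local freeze/thaw for 2.4: stop new writers, flush delalloc/delwri
 * data, quiesce the transaction subsystem, push remaining buffers and
 * write an unmount record so the frozen image is consistent on disk.
 */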
struct super_block *freeze_bdev(struct block_device *bdev)
{
struct super_block *sb;
struct bhv_vfs *vfsp;
sb = get_super(to_kdev_t(bdev->bd_dev));
if (sb && !(sb->s_flags & MS_RDONLY)) {
vfsp = vfs_from_sb(sb);
/* Stop new writers */
vfsp->vfs_frozen = SB_FREEZE_WRITE;
wmb();
/* Flush the refcache */
bhv_vfs_sync(vfsp, SYNC_REFCACHE | SYNC_WAIT, NULL);
/* Flush delalloc and delwri data */
bhv_vfs_sync(vfsp,
SYNC_FSDATA|SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT, NULL);
/* Pause transaction subsystem */
vfsp->vfs_frozen = SB_FREEZE_TRANS;
wmb();
/* Flush any remaining inodes into buffers */
bhv_vfs_sync(vfsp, SYNC_SUPER | SYNC_ATTR | SYNC_WAIT, NULL);
/* Push all buffers out to disk */
sync_buffers(sb->s_dev, 1);
/* Push the superblock and write an unmount record */
bhv_vfs_freeze(vfsp);
}
sync_buffers(to_kdev_t(bdev->bd_dev), 1);
return sb; /* thaw_bdev releases sb->s_umount */
}
void thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
if (sb) {
struct bhv_vfs *vfsp = vfs_from_sb(sb);
BUG_ON(sb->s_bdev != bdev);
vfsp->vfs_frozen = SB_UNFROZEN;
wmb();
wake_up(&vfsp->vfs_wait_unfrozen);
drop_super(sb);
}
}
STATIC void
xfs_fs_lockfs(
struct super_block *sb)
{
if (sb->s_flags & MS_RDONLY)
return;
freeze_bdev(sb->s_bdev);
}
STATIC void
xfs_fs_unlockfs(
struct super_block *sb)
{
thaw_bdev(sb->s_bdev, sb);
}
/*
* XFS encodes and decodes the fileid portion of NFS filehandles
* itself instead of letting the generic NFS code do it. This
* was previously the case but now we use new fileid formats which
* allow filesystems with 64 bit inode numbers to be exported.
*
* Note a side effect of the new formats is that xfs_vget() won't
* be passed a zero inode/generation pair under normal circumstances.
* As however a malicious client could send us such data, the check
* remains in that code.
*/
STATIC int
xfs_fs_dentry_to_fh(
struct dentry *dentry,
__u32 *data,
int *lenp,
int need_parent)
{
struct inode *inode = dentry->d_inode;
int type = 2;
__u32 *p = data;
int len;
int is64 = 0;
#if XFS_BIG_INUMS
bhv_vfs_t *vfs = vfs_from_sb(inode->i_sb);
if (!(vfs->vfs_flag & VFS_32BITINOOPT)) {
/* filesystem may contain 64bit inode numbers */
is64 = XFS_FILEID_TYPE_64FLAG;
}
#endif
/*
* Only encode if there is enough space given. In practice
* this means we can't export a filesystem with 64bit inodes
* over NFSv2 with the subtree_check export option; the other
* seven combinations work. The real answer is "don't use v2".
*/
len = xfs_fileid_length(need_parent, is64);
if (*lenp < len)
return 255;
*lenp = len;
p = xfs_fileid_encode_inode(p, inode, is64);
if (need_parent) {
#ifdef HAVE_DPARENT_LOCK
read_lock(&dparent_lock);
#endif
p = xfs_fileid_encode_inode(p, dentry->d_parent->d_inode, is64);
#ifdef HAVE_DPARENT_LOCK
read_unlock(&dparent_lock);
#endif
type = 4;
}
if ((p - data) != len)
BUG();
return type | is64;
}
STATIC struct dentry *
xfs_fs_fh_to_dentry(
struct super_block *sb,
__u32 *data,
int len,
int fhtype,
int parent)
{
bhv_vnode_t *vp;
struct inode *inode = NULL;
struct dentry *result;
xfs_fid2_t xfid;
bhv_vfs_t *vfsp = vfs_from_sb(sb);
int error;
int is64 = 0;
#if XFS_BIG_INUMS
is64 = (fhtype & XFS_FILEID_TYPE_64FLAG);
fhtype &= ~XFS_FILEID_TYPE_64FLAG;
#endif
/*
* Note that we only accept fileids which are long enough
* rather than allow the parent generation number to default
* to zero. XFS considers zero a valid generation number not
* an invalid/wildcard value. There's little point printk'ing
* a warning here as we don't have the client information
* which would make such a warning useful.
*/
if (fhtype > 2 ||
len < xfs_fileid_length((fhtype == 2), is64) ||
(parent && fhtype != 2))
return ERR_PTR(-ESTALE);
data = xfs_fileid_decode_fid2(data, &xfid, is64);
if (parent)
data = xfs_fileid_decode_fid2(data, &xfid, is64);
error = bhv_vfs_vget(vfsp, &vp, (fid_t *)&xfid);
if (error || vp == NULL)
return ERR_PTR(-ESTALE);
inode = vn_to_inode(vp);
result = d_alloc_anon(inode);
if (unlikely(result == NULL)) {
iput(inode);
return ERR_PTR(-ENOMEM);
}
return result;
}
STATIC int
xfs_fs_show_options(
struct seq_file *m,
struct vfsmount *mnt)
{
return bhv_vfs_showargs(vfs_from_sb(mnt->mnt_sb), m);
}
STATIC int
xfs_fs_quotasync(
struct super_block *sb,
int type)
{
return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XQUOTASYNC, 0, NULL);
}
STATIC int
xfs_fs_getxstate(
struct super_block *sb,
struct fs_quota_stat *fqs)
{
return -bhv_vfs_quotactl(vfs_from_sb(sb), Q_XGETQSTAT, 0, (caddr_t)fqs);
}
STATIC int
xfs_fs_setxstate(
struct super_block *sb,
unsigned int flags,
int op)
{
return -bhv_vfs_quotactl(vfs_from_sb(sb), op, 0, (caddr_t)&flags);
}
STATIC int
xfs_fs_getxquota(
struct super_block *sb,
int type,
qid_t id,
struct fs_disk_quota *fdq)
{
return -bhv_vfs_quotactl(vfs_from_sb(sb),
(type == USRQUOTA) ? Q_XGETQUOTA :
((type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETPQUOTA),
id, (caddr_t)fdq);
}
STATIC int
xfs_fs_setxquota(
struct super_block *sb,
int type,
qid_t id,
struct fs_disk_quota *fdq)
{
return -bhv_vfs_quotactl(vfs_from_sb(sb),
(type == USRQUOTA) ? Q_XSETQLIM :
((type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETPQLIM),
id, (caddr_t)fdq);
}
STATIC struct super_block *
xfs_fs_read_super(
struct super_block *sb,
void *data,
int silent)
{
struct bhv_vnode *rootvp;
struct bhv_vfs *vfsp = vfs_allocate(sb);
struct xfs_mount_args *args = xfs_args_allocate(sb, silent);
struct statfs statvfs;
int error;
bhv_insert_all_vfsops(vfsp);
error = bhv_vfs_parseargs(vfsp, (char *)data, args, 0);
if (error) {
bhv_remove_all_vfsops(vfsp, 1);
goto fail_vfsop;
}
sb_min_blocksize(sb, BBSIZE);
sb->s_qcop = &xfs_quotactl_operations;
sb->s_op = &xfs_super_operations;
error = bhv_vfs_mount(vfsp, args, NULL);
if (error) {
bhv_remove_all_vfsops(vfsp, 1);
goto fail_vfsop;
}
error = bhv_vfs_statvfs(vfsp, &statvfs, NULL);
if (error)
goto fail_unmount;
sb->s_dirt = 1;
sb->s_magic = statvfs.f_type;
sb->s_blocksize = statvfs.f_bsize;
sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;
sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
set_posix_acl_flag(sb);
error = bhv_vfs_root(vfsp, &rootvp);
if (error)
goto fail_unmount;
sb->s_root = d_alloc_root(vn_to_inode(rootvp));
if (!sb->s_root)
goto fail_vnrele;
if (is_bad_inode(sb->s_root->d_inode))
goto fail_vnrele;
if (xfs_fs_start_syncd(vfsp))
goto fail_vnrele;
vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);
kmem_free(args, sizeof(*args));
return sb;
fail_vnrele:
if (sb->s_root) {
dput(sb->s_root);
sb->s_root = NULL;
} else {
VN_RELE(rootvp);
}
fail_unmount:
bhv_vfs_unmount(vfsp, 0, NULL);
fail_vfsop:
vfs_deallocate(vfsp);
kmem_free(args, sizeof(*args));
return NULL;
}
static struct super_operations xfs_super_operations = {
.alloc_inode = xfs_fs_alloc_inode,
.destroy_inode = xfs_fs_destroy_inode,
.write_inode = xfs_fs_write_inode,
.clear_inode = xfs_fs_clear_inode,
.put_super = xfs_fs_put_super,
.write_super = xfs_fs_write_super,
.sync_fs = xfs_fs_sync_super,
.write_super_lockfs = xfs_fs_lockfs,
.unlockfs = xfs_fs_unlockfs,
.statfs = xfs_fs_statfs,
.remount_fs = xfs_fs_remount,
.fh_to_dentry = xfs_fs_fh_to_dentry,
.dentry_to_fh = xfs_fs_dentry_to_fh,
.show_options = xfs_fs_show_options,
};
static struct quotactl_ops xfs_quotactl_operations = {
.quota_sync = xfs_fs_quotasync,
.get_xstate = xfs_fs_getxstate,
.set_xstate = xfs_fs_setxstate,
.get_xquota = xfs_fs_getxquota,
.set_xquota = xfs_fs_setxquota,
};
struct file_system_type xfs_fs_type = {
.owner = THIS_MODULE,
.name = "xfs",
.read_super = xfs_fs_read_super,
.fs_flags = FS_REQUIRES_DEV,
};
EXPORT_SYMBOL(xfs_fs_type);
STATIC int __init
init_xfs_fs( void )
{
int error;
struct sysinfo si;
static char message[] __initdata = KERN_INFO \
XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";
printk(message);
si_meminfo(&si);
xfs_physmem = si.totalram;
ktrace_init(64);
error = xfs_init_inodecache();
if (error < 0)
goto undo_inodecache;
error = xfs_buf_init();
if (error < 0)
goto undo_buffers;
vn_init();
xfs_init();
uuid_init();
error = xfs_register_ioctl_converters();
if (error)
goto undo_register;
error = register_filesystem(&xfs_fs_type);
if (error)
goto undo_register;
return 0;
undo_register:
xfs_unregister_ioctl_converters();
xfs_buf_terminate();
undo_buffers:
xfs_destroy_inodecache();
undo_inodecache:
return error;
}
STATIC void __exit
exit_xfs_fs( void )
{
unregister_filesystem(&xfs_fs_type);
xfs_unregister_ioctl_converters();
xfs_cleanup();
xfs_buf_terminate();
xfs_destroy_inodecache();
ktrace_uninit();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");