diff -Nurp linux-2.6.7-rc1/fs/jfs/acl.c linux-jfs-dmapi/fs/jfs/acl.c
--- linux-2.6.7-rc1/fs/jfs/acl.c 2004-05-27 12:54:55.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/acl.c 2004-05-27 18:30:11.000000000 -0500
@@ -247,7 +247,7 @@ cleanup:
return rc;
}
-static int jfs_acl_chmod(struct inode *inode)
+int jfs_acl_chmod(struct inode *inode)
{
struct posix_acl *acl, *clone;
int rc;
@@ -271,20 +271,3 @@ static int jfs_acl_chmod(struct inode *i
posix_acl_release(clone);
return rc;
}
-
-int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
-{
- struct inode *inode = dentry->d_inode;
- int rc;
-
- rc = inode_change_ok(inode, iattr);
- if (rc)
- return rc;
-
- inode_setattr(inode, iattr);
-
- if (iattr->ia_valid & ATTR_MODE)
- rc = jfs_acl_chmod(inode);
-
- return rc;
-}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_attr.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_attr.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_attr.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_attr.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+
+/* Retrieve attributes for a single file, directory or symlink. */
+
+int
+dm_get_fileattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_stat_t *statp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_fileattr(tdp->td_ip, tdp->td_right,
+ mask, statp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+/* Set one or more file attributes of a file, directory, or symlink. */
+
+int
+dm_set_fileattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_fileattr_t *attrp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_fileattr(tdp->td_ip, tdp->td_right,
+ mask, attrp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_bulkattr.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_bulkattr.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_bulkattr.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_bulkattr.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+
+int
+dm_init_attrloc(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrloc_t *locp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS|DM_TDT_DIR,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->init_attrloc(tdp->td_ip, tdp->td_right, locp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+/*
+ * Retrieves both standard and DM-specific file attributes for the
+ * filesystem indicated by the handle. (The filesystem must be mounted.)
+ * The syscall returns 1 to indicate success with more information
+ * available, -1 on error with errno set appropriately, and 0 upon
+ * final successful completion.
+ */
+
+int
+dm_get_bulkattr_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_bulkattr_rvp(tdp->td_ip, tdp->td_right,
+ mask, locp, buflen, bufp, rlenp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
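+/* A typical application drives these bulk calls in a loop, using the
+   syscall's return value to detect "more data available": 1 means success
+   with more data to fetch, 0 means done, -1 means error. An illustrative
+   application-side sketch, assuming the usual libdm entry points, an
+   existing session sid, and fs_hanp/fs_hlen holding a filesystem handle
+   obtained from dm_path_to_fshandle():
+
+	char buf[65536];
+	size_t rlen;
+	dm_attrloc_t loc;
+	int more;
+
+	if (dm_init_attrloc(sid, fs_hanp, fs_hlen, DM_NO_TOKEN, &loc))
+		return -1;
+	do {
+		more = dm_get_bulkattr(sid, fs_hanp, fs_hlen, DM_NO_TOKEN,
+				DM_AT_STAT, &loc, sizeof(buf), buf, &rlen);
+		if (more < 0)
+			return -1;
+		... walk the dm_stat_t records packed into buf ...
+	} while (more == 1);
+*/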
+
+/*
+ * Iteratively retrieves attributes of the entries in the directory
+ * identified by the handle. The syscall returns 1 to indicate success
+ * with more information available, -1 on error with errno set
+ * appropriately, and 0 upon final successful completion.
+ */
+
+int
+dm_get_dirattrs_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_DIR,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_dirattrs_rvp(tdp->td_ip, tdp->td_right,
+ mask, locp, buflen, bufp, rlenp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_get_bulkall_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrname_t *attrnamep,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_bulkall_rvp(tdp->td_ip, tdp->td_right,
+ mask, attrnamep, locp, buflen, bufp, rlenp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_config.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_config.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_config.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_config.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+int
+dm_get_config(
+ void *hanp,
+ size_t hlen,
+ dm_config_t flagname,
+ dm_size_t *retvalp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ dm_size_t retval;
+ int system = 1;
+ int error;
+
+ /* Trap and process configuration parameters which are system-wide. */
+
+ switch (flagname) {
+ case DM_CONFIG_LEGACY:
+ case DM_CONFIG_PENDING:
+ case DM_CONFIG_OBJ_REF:
+ retval = DM_TRUE;
+ break;
+ case DM_CONFIG_MAX_MESSAGE_DATA:
+ retval = DM_MAX_MSG_DATA;
+ break;
+ default:
+ system = 0;
+ break;
+ }
+ if (system) {
+ if (copy_to_user(retvalp, &retval, sizeof(retval)))
+ return(-EFAULT);
+ return(0);
+ }
+
+	/* Must be filesystem-specific. Convert the handle into an inode. */
+
+ if ((error = dm_get_config_tdp(hanp, hlen, &tdp)) != 0)
+ return(error);
+
+ /* Now call the filesystem-specific routine to determine the
+ value of the configuration option for that filesystem.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_config(tdp->td_ip, tdp->td_right,
+ flagname, retvalp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_get_config_events(
+ void *hanp,
+ size_t hlen,
+ u_int nelem,
+ dm_eventset_t *eventsetp,
+ u_int *nelemp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+	/* Convert the handle into an inode. */
+
+ if ((error = dm_get_config_tdp(hanp, hlen, &tdp)) != 0)
+ return(error);
+
+ /* Now call the filesystem-specific routine to determine the
+ events supported by that filesystem.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_config_events(tdp->td_ip, tdp->td_right,
+ nelem, eventsetp, nelemp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_dmattr.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_dmattr.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_dmattr.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_dmattr.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+
+int
+dm_clear_inherit(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->clear_inherit(tdp->td_ip, tdp->td_right,
+ attrnamep);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_get_dmattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_dmattr(tdp->td_ip, tdp->td_right,
+ attrnamep, buflen, bufp, rlenp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_getall_dmattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->getall_dmattr(tdp->td_ip, tdp->td_right,
+ buflen, bufp, rlenp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_getall_inherit(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_inherit_t *inheritbufp,
+ u_int *nelemp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->getall_inherit(tdp->td_ip, tdp->td_right,
+ nelem, inheritbufp, nelemp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_remove_dmattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int setdtime,
+ dm_attrname_t *attrnamep)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->remove_dmattr(tdp->td_ip, tdp->td_right,
+ setdtime, attrnamep);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_set_dmattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ int setdtime,
+ size_t buflen,
+ void *bufp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_dmattr(tdp->td_ip, tdp->td_right,
+ attrnamep, setdtime, buflen, bufp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_set_inherit(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ mode_t mode)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_inherit(tdp->td_ip, tdp->td_right,
+ attrnamep, mode);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_event.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_event.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_event.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_event.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,851 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+#include "jfs_incore.h"
+
+/* The "rights" portion of the DMAPI spec is not currently implemented. A
+ framework for rights is provided in the code, but turns out to be a noop
+ in practice. The following comments are a brain dump to serve as input to
+ the poor soul that eventually has to get DMAPI rights working in IRIX.
+
+ A DMAPI right is similar but not identical to the mrlock_t mechanism
+ already used within the kernel. The similarities are that it is a
+ sleeping lock, and that a multiple-reader, single-writer protocol is used.
+ How locks are obtained and dropped are different however. With a mrlock_t,
+ a thread grabs the lock, does some stuff, then drops the lock, and all other
+ threads block in the meantime (assuming a write lock). There is a one-to-
+ one relationship between the lock and the thread which obtained the lock.
+ Not so with DMAPI right locks. A DMAPI lock is associated with a particular
+ session/token/hanp/hlen quad; since there is a dm_tokdata_t structure for
+ each such quad, you can think of it as a one-to-one relationship between the
+ lock and a dm_tokdata_t. Any application thread which presents the correct
+ quad is entitled to grab or release the lock, or to use the rights
+ associated with that lock. The thread that grabs the lock does not have to
+ be the one to use the lock, nor does it have to be the thread which drops
+ the lock. The lock can be held for very long periods of time, even across
+   multiple system calls by multiple application threads. The idea is that a
+ coordinated group of DMAPI application threads can grab the lock, issue a
+ series of inode accesses and/or updates, then drop the lock, and be assured
+ that no other thread in the system could be modifying the inode at the same
+ time. The kernel is expected to blindly trust that the application will
+ not forget to unlock inodes it has locked, and will not deadlock itself
+ against the kernel.
+
+ There are two types of DMAPI rights, file object (inode) and filesystem
+ object (superblock?). An inode right is the equivalent of the combination
+ of both the XFS ilock and iolock; if held exclusively, no data or metadata
+ within the file can be changed by non-lock-holding threads. The filesystem
+ object lock is a little fuzzier; I think that if it is held, things like
+ unmounts can be blocked, plus there is an event mask associated with the
+ filesystem which can't be updated without the lock. (By the way, that
+ event mask is supposed to be persistent in the superblock; add that to
+ your worklist :-)
+
+ All events generated by XFS currently arrive with no rights, i.e.
+ DM_RIGHT_NULL, and return to the filesystem with no rights. It would be
+ smart to leave it this way if possible, because it otherwise becomes more
+ likely that an application thread will deadlock against the kernel if the
+ one responsible for calling dm_get_events() happens to touch a file which
+ was locked at the time the event was queued. Since the thread is blocked,
+ it can't read the event in order to find and drop the lock. Catch-22. If
+ you do have events that arrive with non-null rights, then dm_enqueue() needs
+ to have code added for synchronous events which atomically switches the
+ right from being a thread-based right to a dm_tokdata_t-based right without
+ allowing the lock to drop in between. You will probably have to add a new
+ dm_fsys_vector entry point to do this. The lock can't be lost during the
+ switch, or other threads might change the inode or superblock in between.
+ Likewise, if you need to return to the filesystem holding a right, then
+ you need a DMAPI-to-thread atomic switch to occur, most likely in
+ dm_change_right(). Again, the lock must not be lost during the switch; the
+ DMAPI spec spends a couple of pages stressing this. Another dm_fsys_vector
+ entry point is probably the answer.
+
+ There are several assumptions implied in the current layout of the code.
+ First of all, if an event returns to the filesystem with a return value of
+ zero, then the filesystem can assume that any locks (rights) held at the
+ start of the event are still in effect at the end of the event. (Note that
+   the application could have temporarily dropped and reacquired the right
+ while the event was outstanding, however). If the event returns to the
+ filesystem with an errno, then the filesystem must assume that it has lost
+ any and all rights associated with any of the objects in the event. This
+ was done for a couple of reasons. First of all, since an errno is being
+ returned, most likely the filesystem is going to immediately drop all the
+ locks anyway. If the DMAPI code was required to unconditionally reobtain
+ all locks before returning to the filesystem, then dm_pending() wouldn't
+   work for NFS server threads: the process would block indefinitely
+   trying to get its thread-based rights back, since the DMAPI rights
+   associated with the dm_tokdata_t in the outstanding event would prevent
+   them from being obtained. That would be a bad thing. We wouldn't
+   be able to let users Ctrl-C out of read/write/truncate events either.
+
+ If a case should ever surface where the thread has lost its rights even
+ though it has a zero return status, or where the thread has rights even
+ though it is returning with an errno, then this logic will have to be
+ reworked. This could be done by changing the 'right' parameters on all
+ the event calls to (dm_right_t *), so that they could serve both as IN
+ and OUT parameters.
+
+ Some events such as DM_EVENT_DESTROY arrive without holding a vnode
+ reference; if you don't have a vnode reference, you can't have a right
+ on the file.
+
+ One more quirk. The DM_EVENT_UNMOUNT event is defined to be synchronous
+   when its behavior is asynchronous. If an unmount event arrives with
+ rights, the event should return with the same rights and should NOT leave
+ any rights in the dm_tokdata_t where the application could use them.
+*/
+
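+/* For orientation, the application-side usage that the rights framework
+   is meant to back looks roughly like this (an illustrative sketch using
+   the standard XDSM library calls; none of this is kernel code):
+
+	dm_token_t token;
+
+	dm_create_userevent(sid, 0, NULL, &token);
+	dm_request_right(sid, hanp, hlen, token, DM_RR_WAIT, DM_RIGHT_EXCL);
+
+	... any thread presenting the same session/token/hanp/hlen quad
+	may now access the object under the exclusive right ...
+
+	dm_release_right(sid, hanp, hlen, token);
+	dm_respond_event(sid, token, DM_RESP_CONTINUE, 0, 0, NULL);
+*/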
+
+#define GETNEXTOFF(vdat) ((vdat).vd_offset + (vdat).vd_length)
+#define HANDLE_SIZE(tdp) \
+ ((tdp)->td_type & DM_TDT_FS ? FSHSIZE : JFS_HSIZE((tdp)->td_handle))
+
+
+/* Given an inode pointer in a filesystem known to support DMAPI,
+ build a tdp structure for the corresponding inode.
+*/
+
+static dm_tokdata_t *
+dm_ip_data(
+ struct inode *ip,
+ dm_right_t right,
+ int referenced) /* != 0, caller holds inode reference */
+{
+ int error;
+ dm_tokdata_t *tdp;
+ int filetype;
+
+ tdp = kmem_cache_alloc(dm_tokdata_cachep, SLAB_KERNEL);
+ if (tdp == NULL) {
+ printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return NULL;
+ }
+
+ tdp->td_next = NULL;
+ tdp->td_tevp = NULL;
+ tdp->td_app_ref = 0;
+ tdp->td_orig_right = right;
+ tdp->td_right = right;
+ tdp->td_flags = DM_TDF_ORIG;
+ if (referenced) {
+ tdp->td_flags |= DM_TDF_EVTREF;
+ }
+
+ filetype = ip->i_mode & S_IFMT;
+ if (filetype == S_IFREG) {
+ tdp->td_type = DM_TDT_REG;
+ } else if (filetype == S_IFDIR) {
+ tdp->td_type = DM_TDT_DIR;
+ } else if (filetype == S_IFLNK) {
+ tdp->td_type = DM_TDT_LNK;
+ } else {
+ tdp->td_type = DM_TDT_OTH;
+ }
+
+ if (referenced) {
+ tdp->td_ip = ip;
+ } else {
+ tdp->td_ip = NULL;
+ }
+ tdp->td_icount = 0;
+
+ if ((error = dm_ip_to_handle(ip, &tdp->td_handle)) != 0) {
+ panic("dm_ip_data: dm_ip_to_handle failed for ip %p in "
+ "a DMAPI filesystem, errno %d\n", ip, error);
+ }
+
+ return(tdp);
+}
+
+
+/* Given a super_block pointer to a filesystem known to support DMAPI, build a
+ * tdp structure for that sbp.
+*/
+static dm_tokdata_t *
+dm_sbp_data(
+ struct super_block *sbp,
+ struct inode *ip, /* will be NULL for DM_EVENT_UNMOUNT */
+ dm_right_t right)
+{
+ dm_tokdata_t *tdp;
+ struct jfs_sb_info *sbi = JFS_SBI(sbp);
+
+ tdp = kmem_cache_alloc(dm_tokdata_cachep, SLAB_KERNEL);
+ if (tdp == NULL) {
+ printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return NULL;
+ }
+
+ tdp->td_next = NULL;
+ tdp->td_tevp = NULL;
+ tdp->td_app_ref = 0;
+ tdp->td_orig_right = right;
+ tdp->td_right = right;
+ tdp->td_flags = DM_TDF_ORIG;
+ if (ip) {
+ tdp->td_flags |= DM_TDF_EVTREF;
+ }
+ tdp->td_type = DM_TDT_FS;
+ if (ip) {
+ tdp->td_ip = ip;
+ } else {
+ tdp->td_ip = NULL;
+ }
+ tdp->td_icount = 0;
+
+ memcpy(&tdp->td_handle.ha_fsid, &sbi->dm_fsid, sizeof(jfs_fsid_t));
+ memset((char *)&tdp->td_handle.ha_fsid + sizeof(jfs_fsid_t), 0,
+ sizeof(tdp->td_handle) - sizeof(jfs_fsid_t));
+
+ return(tdp);
+}
+
+
+/* Link a tdp structure into the tevp. */
+
+static void
+dm_add_handle_to_event(
+ dm_tokevent_t *tevp,
+ dm_tokdata_t *tdp)
+{
+ tdp->td_next = tevp->te_tdp;
+ tevp->te_tdp = tdp;
+ tdp->td_tevp = tevp;
+}
+
+
+/* Generate the given data event for the inode, and wait for a reply. The
+ caller must guarantee that the inode's reference count is greater than zero
+ so that the filesystem can't disappear while the request is outstanding.
+*/
+
+int
+dm_send_data_event(
+ dm_eventtype_t event,
+ struct inode *ip,
+ dm_right_t ip_right, /* current right for ip */
+ dm_off_t offset,
+ size_t length,
+ int flags) /* 0 or DM_FLAGS_NDELAY */
+{
+ dm_data_event_t *datap;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp;
+ int error;
+
+ tdp = dm_ip_data(ip, ip_right, /* reference held */ 1);
+ if (tdp == NULL)
+ return -ENOMEM;
+
+ /* Calculate the size of the event in bytes, create an event structure
+ for it, and insert the file's handle into the event.
+ */
+
+ tevp = dm_evt_create_tevp(event, HANDLE_SIZE(tdp), (void **)&datap);
+ if (tevp == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ return(-ENOMEM);
+ }
+ dm_add_handle_to_event(tevp, tdp);
+
+ /* Now fill in all the dm_data_event_t fields. */
+
+ datap->de_handle.vd_offset = sizeof(*datap);
+ datap->de_handle.vd_length = HANDLE_SIZE(tdp);
+ memcpy((char *)datap + datap->de_handle.vd_offset, &tdp->td_handle,
+ datap->de_handle.vd_length);
+ datap->de_offset = offset;
+ datap->de_length = length;
+
+ /* Queue the message and wait for the reply. */
+
+ error = dm_enqueue_normal_event(ip->i_sb, tevp, flags);
+
+ /* If no errors occurred, we must leave with the same rights we had
+ upon entry. If errors occurred, we must leave with no rights.
+ */
+
+ dm_evt_rele_tevp(tevp, error);
+
+	return(error > 0 ? -error : error);	/* XFS BUG #40 */
+}
+
+
+/* Generate the destroy event for the inode and wait until the request has been
+   queued. The caller does not hold an inode reference or a right on the inode,
+ but it must otherwise lock down the inode such that the filesystem can't
+ disappear while the request is waiting to be queued. While waiting to be
+ queued, the inode must not be referenceable either by path or by a call
+ to dm_handle_to_ip().
+*/
+
+int
+dm_send_destroy_event(
+ struct inode *ip,
+ dm_right_t ip_right) /* always DM_RIGHT_NULL */
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp;
+ dm_destroy_event_t *destp;
+ dm_attrname_t attrname;
+ char *value;
+ int value_len;
+ int error;
+
+ tdp = dm_ip_data(ip, ip_right, /* no reference held */ 0);
+ if (tdp == NULL)
+ return -ENOMEM;
+
+	if ((error = dm_waitfor_destroy_attrname(ip->i_sb, &attrname)) != 0) {
+		kmem_cache_free(dm_tokdata_cachep, tdp);	/* don't leak tdp */
+		return(error);
+	}
+
+ /* If a return-on-destroy attribute name exists for this filesystem,
+ see if the object being deleted has this attribute. If the object
+ doesn't have the attribute or if we encounter an error, then send
+ the event without the attribute.
+ */
+
+ value_len = -1; /* because zero is a valid attribute length */
+ if (attrname.an_chars[0] != '\0') {
+ fsys_vector = dm_fsys_vector(ip);
+ error = fsys_vector->get_destroy_dmattr(ip, ip_right, &attrname,
+ &value, &value_len);
+		if (error) {
+			kmem_cache_free(dm_tokdata_cachep, tdp);	/* don't leak tdp */
+			return error;
+		}
+ }
+
+ /* Now that we know the size of the attribute value, if any, calculate
+ the size of the event in bytes, create an event structure for it,
+ and insert the handle into the event.
+ */
+
+ tevp = dm_evt_create_tevp(DM_EVENT_DESTROY,
+ HANDLE_SIZE(tdp) + (value_len >= 0 ? value_len : 0),
+ (void **)&destp);
+ if (tevp == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ if (value_len > 0)
+ kfree(value);
+ return(-ENOMEM);
+ }
+ dm_add_handle_to_event(tevp, tdp);
+
+ /* Now fill in all the dm_destroy_event_t fields. */
+
+ destp->ds_handle.vd_offset = sizeof(*destp);
+ destp->ds_handle.vd_length = HANDLE_SIZE(tdp);
+ memcpy((char *)destp + destp->ds_handle.vd_offset, &tdp->td_handle,
+ destp->ds_handle.vd_length);
+ if (value_len >= 0) {
+ destp->ds_attrname = attrname;
+ destp->ds_attrcopy.vd_length = value_len;
+ if (value_len == 0) {
+ destp->ds_attrcopy.vd_offset = 0;
+ } else {
+ destp->ds_attrcopy.vd_offset = GETNEXTOFF(destp->ds_handle);
+ memcpy((char *)destp + destp->ds_attrcopy.vd_offset, value,
+ value_len);
+ kfree(value);
+ }
+ }
+
+ /* Queue the message asynchronously. */
+
+ error = dm_enqueue_normal_event(ip->i_sb, tevp, 0);
+
+ /* Since we had no rights upon entry, we have none to reobtain before
+ leaving.
+ */
+
+ dm_evt_rele_tevp(tevp, 1);
+
+	return(error > 0 ? -error : error);	/* XFS BUG #40 */
+}
+
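+/* The destroy-time attribute consulted above is the one an application
+   registers beforehand with dm_set_return_on_destroy(). An illustrative
+   application-side sketch (the attribute name "backup_id" is only an
+   example):
+
+	dm_attrname_t an;
+
+	memset(&an, 0, sizeof(an));
+	strncpy((char *)an.an_chars, "backup_id", sizeof(an.an_chars));
+	dm_set_return_on_destroy(sid, fs_hanp, fs_hlen, DM_NO_TOKEN,
+			&an, DM_TRUE);
+
+   From then on, each DM_EVENT_DESTROY for the filesystem carries the dying
+   object's "backup_id" DM attribute, if present, in ds_attrname and
+   ds_attrcopy.
+*/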
+
+/* The dm_mount_event_t event is sent in turn to all sessions that have asked
+ for it until one either rejects it or accepts it. The filesystem is not
+ going anywhere because the mount is blocked until the event is answered.
+*/
+
+int
+dm_send_mount_event(
+ struct super_block *sbp, /* filesystem being mounted */
+ dm_right_t sbp_right,
+ struct inode *ip, /* mounted on directory */
+ dm_right_t ip_right,
+ struct inode *rootip,
+ dm_right_t rootip_right,
+ char *name1, /* mount path */
+ char *name2) /* filesystem device name */
+{
+ int error;
+ dm_tokevent_t *tevp = NULL;
+ dm_tokdata_t *tdp1 = NULL; /* filesystem handle for event */
+ dm_tokdata_t *tdp2 = NULL; /* file handle for mounted-on dir. */
+ dm_tokdata_t *tdp3 = NULL; /* file handle for root inode */
+ dm_mount_event_t *mp;
+ size_t nextoff;
+
+ /* Convert the sbp to a filesystem handle, and ip and rootip into
+ file handles. ip (the mounted-on directory) may not have a handle
+ if it is a different filesystem type such as EFS which does not
+ support DMAPI.
+ */
+
+ tdp1 = dm_sbp_data(sbp, rootip, sbp_right);
+ if (tdp1 == NULL)
+ goto out_nomem;
+
+ if ((ip == NULL) || dm_check_dmapi_ip(ip)) {
+		ip = NULL;	/* assume we are mounting on a non-JFS fs */
+ } else {
+ tdp2 = dm_ip_data(ip, ip_right, /* reference held */ 1);
+ if (tdp2 == NULL)
+ goto out_nomem;
+ }
+
+ tdp3 = dm_ip_data(rootip, rootip_right, /* reference held */ 1);
+ if (tdp3 == NULL)
+ goto out_nomem;
+
+ /* Calculate the size of the event in bytes, create an event structure
+ for it, and insert the handles into the event.
+ */
+
+ tevp = dm_evt_create_tevp(DM_EVENT_MOUNT,
+ HANDLE_SIZE(tdp1) + (ip ? HANDLE_SIZE(tdp2) : 0) +
+ HANDLE_SIZE(tdp3) + strlen(name1) + 1 +
+ strlen(name2) + 1, (void **)&mp);
+ if (tevp == NULL)
+ goto out_nomem;
+
+ dm_add_handle_to_event(tevp, tdp1);
+ if (ip)
+ dm_add_handle_to_event(tevp, tdp2);
+ dm_add_handle_to_event(tevp, tdp3);
+
+ /* Now fill in all the dm_mount_event_t fields. */
+
+ mp->me_handle1.vd_offset = sizeof(*mp);
+ mp->me_handle1.vd_length = HANDLE_SIZE(tdp1);
+ memcpy((char *) mp + mp->me_handle1.vd_offset, &tdp1->td_handle,
+ mp->me_handle1.vd_length);
+ nextoff = GETNEXTOFF(mp->me_handle1);
+
+ if (ip) {
+ mp->me_handle2.vd_offset = nextoff;
+ mp->me_handle2.vd_length = HANDLE_SIZE(tdp2);
+ memcpy((char *)mp + mp->me_handle2.vd_offset, &tdp2->td_handle,
+ mp->me_handle2.vd_length);
+ nextoff = GETNEXTOFF(mp->me_handle2);
+ }
+
+ mp->me_name1.vd_offset = nextoff;
+ mp->me_name1.vd_length = strlen(name1) + 1;
+ memcpy((char *)mp + mp->me_name1.vd_offset, name1, mp->me_name1.vd_length);
+ nextoff = GETNEXTOFF(mp->me_name1);
+
+ mp->me_name2.vd_offset = nextoff;
+ mp->me_name2.vd_length = strlen(name2) + 1;
+ memcpy((char *)mp + mp->me_name2.vd_offset, name2, mp->me_name2.vd_length);
+ nextoff = GETNEXTOFF(mp->me_name2);
+
+ mp->me_roothandle.vd_offset = nextoff;
+ mp->me_roothandle.vd_length = HANDLE_SIZE(tdp3);
+ memcpy((char *)mp + mp->me_roothandle.vd_offset, &tdp3->td_handle,
+ mp->me_roothandle.vd_length);
+
+ mp->me_mode = (sbp->s_flags & MS_RDONLY ? DM_MOUNT_RDONLY : 0);
+
+ /* Queue the message and wait for the reply. */
+
+ error = dm_enqueue_mount_event(sbp, tevp);
+
+ /* If no errors occurred, we must leave with the same rights we had
+ upon entry. If errors occurred, we must leave with no rights.
+ */
+
+ dm_evt_rele_tevp(tevp, error);
+
+	return(error > 0 ? -error : error);	/* XFS BUG #40 */
+
+out_nomem:
+ if (tevp)
+ kfree(tevp);
+ if (tdp1)
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ if (tdp2)
+ kmem_cache_free(dm_tokdata_cachep, tdp2);
+ if (tdp3)
+ kmem_cache_free(dm_tokdata_cachep, tdp3);
+ return -ENOMEM;
+}
+
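+/* For reference, the message built above is a single contiguous buffer:
+   the fixed dm_mount_event_t followed by the variable-length pieces, each
+   located by a (vd_offset, vd_length) pair and packed back to back via
+   GETNEXTOFF(). Roughly (me_handle2 is omitted when ip is NULL):
+
+	+--------------------+  <- offset 0
+	| dm_mount_event_t   |
+	+--------------------+  <- me_handle1.vd_offset
+	| filesystem handle  |
+	+--------------------+  <- me_handle2.vd_offset
+	| mounted-on dir     |
+	+--------------------+  <- me_name1.vd_offset
+	| mount path + '\0'  |
+	+--------------------+  <- me_name2.vd_offset
+	| device name + '\0' |
+	+--------------------+  <- me_roothandle.vd_offset
+	| root dir handle    |
+	+--------------------+
+*/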
+
+/* Generate a DM_EVENT_UNMOUNT event and wait for a reply. The 'retcode'
+ field indicates whether this is a successful or unsuccessful unmount.
+ If successful, the filesystem is already unmounted, and any pending handle
+ reference to the filesystem will be failed. If the unmount was
+ unsuccessful, then the filesystem will be placed back into full service.
+
+ The DM_EVENT_UNMOUNT event should really be asynchronous, because the
+ application has no control over whether or not the unmount succeeds. (The
+ DMAPI spec defined it that way because asynchronous events aren't always
+ guaranteed to be delivered.)
+
+ Since the filesystem is already unmounted in the successful case, the
+ DM_EVENT_UNMOUNT event can't make available any vnode to be used in
+ subsequent sid/hanp/hlen/token calls by the application. The event will
+ hang around until the application does a DM_RESP_CONTINUE, but the handle
+ within the event is unusable by the application.
+*/
+
+void
+dm_send_unmount_event(
+ struct super_block *sbp,
+ struct inode *ip, /* NULL if unmount successful */
+ dm_right_t vfsp_right,
+ mode_t mode,
+ int retcode, /* errno, if unmount failed */
+ int flags)
+{
+ dm_namesp_event_t *np;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp1;
+
+ /* If the unmount failed, put the filesystem back into full service,
+ allowing blocked handle references to finish. If it succeeded, put
+ the filesystem into the DM_STATE_UNMOUNTED state and fail all
+ blocked DM_NO_TOKEN handle accesses.
+ */
+
+ if (retcode != 0) { /* unmount was unsuccessful */
+ dm_change_fsys_entry(sbp, DM_STATE_MOUNTED);
+ } else {
+ dm_change_fsys_entry(sbp, DM_STATE_UNMOUNTED);
+ }
+
+ /* If the event wasn't in the filesystem dm_eventset_t, just remove
+ the filesystem from the list of DMAPI filesystems and return.
+ */
+
+ if (flags & DM_FLAGS_UNWANTED) {
+ if (retcode == 0)
+ dm_remove_fsys_entry(sbp);
+ return;
+ }
+
+ /* Calculate the size of the event in bytes and allocate zeroed memory
+ for it.
+ */
+
+ tdp1 = dm_sbp_data(sbp, ip, vfsp_right);
+ if (tdp1 == NULL)
+ return;
+
+ tevp = dm_evt_create_tevp(DM_EVENT_UNMOUNT, HANDLE_SIZE(tdp1),
+ (void **)&np);
+ if (tevp == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ return;
+ }
+
+ dm_add_handle_to_event(tevp, tdp1);
+
+ /* Now copy in all the dm_namesp_event_t specific fields. */
+
+ np->ne_handle1.vd_offset = sizeof(*np);
+ np->ne_handle1.vd_length = HANDLE_SIZE(tdp1);
+ memcpy((char *) np + np->ne_handle1.vd_offset, &tdp1->td_handle,
+ np->ne_handle1.vd_length);
+ np->ne_mode = mode;
+ np->ne_retcode = retcode;
+
+ /* Since DM_EVENT_UNMOUNT is effectively asynchronous, queue the
+ message and ignore any error return for DM_EVENT_UNMOUNT.
+ */
+
+ (void)dm_enqueue_normal_event(sbp, tevp, flags);
+
+ if (retcode == 0)
+ dm_remove_fsys_entry(sbp);
+
+ dm_evt_rele_tevp(tevp, 0);
+}
+
+
+/* Generate the given namespace event and wait for a reply (if synchronous) or
+ until the event has been queued (asynchronous). The caller must guarantee
+ that at least one inode within the filesystem has had its reference count
+ bumped so that the filesystem can't disappear while the event is
+ outstanding.
+*/
+
+int
+dm_send_namesp_event(
+ dm_eventtype_t event,
+ struct inode *ip1,
+ dm_right_t ip1_right,
+ struct inode *ip2,
+ dm_right_t ip2_right,
+ char *name1,
+ char *name2,
+ mode_t mode,
+ int retcode,
+ int flags)
+{
+ dm_namesp_event_t *np;
+ dm_tokevent_t *tevp;
+ dm_tokdata_t *tdp1 = NULL; /* primary handle for event */
+ dm_tokdata_t *tdp2 = NULL; /* additional handle for event */
+ size_t nextoff;
+ int error;
+
+ switch (event) {
+ case DM_EVENT_PREUNMOUNT:
+ /*
+ * PREUNMOUNT - Send the file system handle in handle1,
+ * and the handle for the root dir in the second. Otherwise
+ * it's a normal sync message; i.e. succeeds or fails
+ * depending on the app's return code.
+ * ip1 and ip2 are both the root dir of mounted FS
+ * ip1_right is the filesystem right.
+ * ip2_right is the root inode right.
+ */
+
+ if (flags & DM_FLAGS_UNWANTED) {
+ dm_change_fsys_entry(ip1->i_sb, DM_STATE_UNMOUNTING);
+ return(0);
+ }
+ tdp1 = dm_sbp_data(ip1->i_sb, ip1, ip1_right);
+ if (tdp1 == NULL)
+ return -ENOMEM;
+ tdp2 = dm_ip_data(ip2, ip2_right, /* reference held */ 1);
+ if (tdp2 == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ return -ENOMEM;
+ }
+ break;
+
+ case DM_EVENT_NOSPACE:
+ /* ip1_right is the filesystem right. */
+
+ tdp1 = dm_sbp_data(ip1->i_sb, ip1, ip1_right);
+ if (tdp1 == NULL)
+ return -ENOMEM;
+ tdp2 = dm_ip_data(ip2, ip2_right, /* reference held */ 1); /* additional info - not in the spec */
+ if (tdp2 == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ return -ENOMEM;
+ }
+ break;
+
+ default:
+ /* All other events only pass in inodes and don't require any
+ special cases.
+ */
+
+ tdp1 = dm_ip_data(ip1, ip1_right, /* reference held */ 1);
+ if (tdp1 == NULL)
+ return -ENOMEM;
+ if (ip2) {
+ tdp2 = dm_ip_data(ip2, ip2_right, /* reference held */ 1);
+ if (tdp2 == NULL) {
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ /* Calculate the size of the event in bytes and allocate zeroed memory
+ for it.
+ */
+
+ tevp = dm_evt_create_tevp(event,
+ HANDLE_SIZE(tdp1) + (ip2 ? HANDLE_SIZE(tdp2) : 0) +
+ (name1 ? strlen(name1) + 1 : 0) +
+ (name2 ? strlen(name2) + 1 : 0), (void **)&np);
+ if (tevp == NULL) {
+ if (tdp1)
+ kmem_cache_free(dm_tokdata_cachep, tdp1);
+ if (tdp2)
+ kmem_cache_free(dm_tokdata_cachep, tdp2);
+ return(-ENOMEM);
+ }
+
+ dm_add_handle_to_event(tevp, tdp1);
+ if (ip2)
+ dm_add_handle_to_event(tevp, tdp2);
+
+ /* Now copy in all the dm_namesp_event_t specific fields. */
+
+ np->ne_handle1.vd_offset = sizeof(*np);
+ np->ne_handle1.vd_length = HANDLE_SIZE(tdp1);
+ memcpy((char *) np + np->ne_handle1.vd_offset, &tdp1->td_handle,
+ np->ne_handle1.vd_length);
+ nextoff = GETNEXTOFF(np->ne_handle1);
+ if (ip2) {
+ np->ne_handle2.vd_offset = nextoff;
+ np->ne_handle2.vd_length = HANDLE_SIZE(tdp2);
+ memcpy((char *)np + np->ne_handle2.vd_offset, &tdp2->td_handle,
+ np->ne_handle2.vd_length);
+ nextoff = GETNEXTOFF(np->ne_handle2);
+ }
+ if (name1) {
+ np->ne_name1.vd_offset = nextoff;
+ np->ne_name1.vd_length = strlen(name1) + 1;
+ memcpy((char *)np + np->ne_name1.vd_offset, name1,
+ np->ne_name1.vd_length);
+ nextoff = GETNEXTOFF(np->ne_name1);
+ }
+ if (name2) {
+ np->ne_name2.vd_offset = nextoff;
+ np->ne_name2.vd_length = strlen(name2) + 1;
+ memcpy((char *)np + np->ne_name2.vd_offset, name2,
+ np->ne_name2.vd_length);
+ }
+ np->ne_mode = mode;
+ np->ne_retcode = retcode;
+
+ /* Queue the message and wait for the reply. */
+
+ error = dm_enqueue_normal_event(ip1->i_sb, tevp, flags);
+
+ /* If no errors occurred, we must leave with the same rights we had
+ upon entry. If errors occurred, we must leave with no rights.
+ */
+
+ dm_evt_rele_tevp(tevp, error);
+
+ if (!error && event == DM_EVENT_PREUNMOUNT) {
+ dm_change_fsys_entry(ip1->i_sb, DM_STATE_UNMOUNTING);
+ }
+
+	return(error > 0 ? -error : error);	/* XFS BUG #40 */
+}
+
+
+/*
+ * Send a message of type "DM_EVENT_USER". Since no inode is involved, we
+ * don't have to worry about rights here.
+ */
+
+int
+dm_send_msg(
+ dm_sessid_t targetsid,
+ dm_msgtype_t msgtype, /* SYNC or ASYNC */
+ size_t buflen,
+ void *bufp)
+{
+ dm_tokevent_t *tevp;
+ int sync;
+ void *msgp;
+ int error;
+
+ if (buflen > DM_MAX_MSG_DATA)
+ return(-E2BIG);
+ if (msgtype == DM_MSGTYPE_ASYNC) {
+ sync = 0;
+ } else if (msgtype == DM_MSGTYPE_SYNC) {
+ sync = 1;
+ } else {
+ return(-EINVAL);
+ }
+
+ tevp = dm_evt_create_tevp(DM_EVENT_USER, buflen, (void **)&msgp);
+ if (tevp == NULL)
+ return -ENOMEM;
+
+ if (buflen && copy_from_user(msgp, bufp, buflen)) {
+ dm_evt_rele_tevp(tevp, 0);
+ return(-EFAULT);
+ }
+
+ /* Enqueue the request and wait for the reply. */
+
+ error = dm_enqueue_sendmsg_event(targetsid, tevp, sync);
+
+ /* Destroy the tevp and return the reply. (dm_pending is not
+ supported here.)
+ */
+
+ dm_evt_rele_tevp(tevp, error);
+
+	return(error > 0 ? -error : error);	/* XFS BUG #39, #40 */
+}
+
+
+/*
+ * Create a pending message of type "DM_EVENT_USER" and return its token.
+ * Since no inode is involved, we don't have to worry about rights here.
+ */
+
+int
+dm_create_userevent(
+ dm_sessid_t sid,
+ size_t msglen,
+ void *msgdatap,
+ dm_token_t *tokenp) /* return token created */
+{
+ dm_tokevent_t *tevp;
+ dm_token_t token;
+ int error;
+ void *msgp;
+
+ if (msglen > DM_MAX_MSG_DATA)
+ return(-E2BIG);
+
+ tevp = dm_evt_create_tevp(DM_EVENT_USER, msglen, (void **)&msgp);
+ if (tevp == NULL)
+ return(-ENOMEM);
+
+ if (msglen && copy_from_user(msgp, msgdatap, msglen)) {
+ dm_evt_rele_tevp(tevp, 0);
+ return(-EFAULT);
+ }
+
+ /* Queue the message. If that didn't work, free the tevp structure. */
+
+ if ((error = dm_enqueue_user_event(sid, tevp, &token)) != 0)
+ dm_evt_rele_tevp(tevp, 0);
+
+ if (!error && copy_to_user(tokenp, &token, sizeof(token))) {
+		dm_dequeue_user_event(sid, tevp, token);	/* XFS BUG #11 */
+		dm_evt_rele_tevp(tevp, 0);			/* XFS BUG #11 */
+ error = -EFAULT;
+ }
+
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_handle.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_handle.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_handle.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_handle.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+
+int
+dm_create_by_handle(
+ dm_sessid_t sid,
+ void *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen,
+ char *cname)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->create_by_handle(tdp->td_ip, tdp->td_right,
+ hanp, hlen, cname);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_mkdir_by_handle(
+ dm_sessid_t sid,
+ void *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen,
+ char *cname)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->mkdir_by_handle(tdp->td_ip, tdp->td_right,
+ hanp, hlen, cname);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_symlink_by_handle(
+ dm_sessid_t sid,
+ void *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen,
+ char *cname,
+ char *path)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, dirhanp, dirhlen, token, DM_TDT_DIR,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->symlink_by_handle(tdp->td_ip, tdp->td_right,
+ hanp, hlen, cname, path);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_hole.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_hole.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_hole.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_hole.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+
+int
+dm_get_allocinfo_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t *offp,
+ u_int nelem,
+ dm_extent_t *extentp,
+ u_int *nelemp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_allocinfo_rvp(tdp->td_ip, tdp->td_right,
+ offp, nelem, extentp, nelemp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_probe_hole(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ dm_off_t *roffp,
+ dm_size_t *rlenp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->probe_hole(tdp->td_ip, tdp->td_right,
+ off, len, roffp, rlenp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_punch_hole(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->punch_hole(tdp->td_ip, tdp->td_right, off, len);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_io.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_io.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_io.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_io.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+
+int
+dm_read_invis_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->read_invis_rvp(tdp->td_ip, tdp->td_right,
+ off, len, bufp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_write_invis_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->write_invis_rvp(tdp->td_ip, tdp->td_right,
+ flags, off, len, bufp, rvp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_sync_by_handle(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->sync_by_handle(tdp->td_ip, tdp->td_right);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_jfs.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_jfs.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_jfs.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_jfs.c 2004-05-28 10:36:13.000000000 -0500
@@ -0,0 +1,3841 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) International Business Machines Corp., 2000-2004
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/seq_file.h>
+#include <linux/xattr.h>
+#include <linux/slab.h>
+#include <linux/namespace.h>
+#include <linux/buffer_head.h>
+#include <linux/mm.h>
+#include <linux/security.h>
+#ifdef CONFIG_COMPAT
+#include <linux/ioctl.h>
+#include <linux/ioctl32.h>
+#endif
+#include "dmapi_private.h"
+#include "jfs_debug.h"
+#include "jfs_incore.h"
+#include "jfs_filsys.h"
+#include "jfs_xattr.h"
+#include "jfs_txnmgr.h"
+#include "jfs_dmap.h"
+#include "jfs_dmapi.h"
+
+/* Here's what's left to be done:
+ * Figure out locktype in jfs_dm_send_data_event()
+ * Figure out how to get name of mounted dir for mount event without stupid
+ * mount option (mtpt=)
+ * Add jfs_dm_get_bulkattr (although unused by TSM)
+ * Add DM_EVENT_NOSPACE (VERY intrusive to JFS code)
+ * Finish up dt_change (may not cover all cases yet)
+ * ? Whazzup with the dump under sleep-inside-spinlock checking?
+ * ? Whazzup with unmount hang under spinlock debugging?
+ */
+
+/* XFS bugs fixed from original port
+ * #1 - dm_create_session truncated sessinfo bigger than DM_SESSION_INFO_LEN, should return E2BIG
+ * #2 - dm_create_session returned 0 with invalid sessinfo, should return EFAULT
+ * #3 - dm_path_to_handle returned ENOENT with invalid path, should return EFAULT
+ * #4 - dm_path_to_handle returned EINVAL with non-DMAPI path, should return ENXIO
+ * #5 - dm_path_to_fshandle returned ENOENT with invalid path, should return EFAULT
+ * #6 - dm_path_to_fshandle returned EINVAL with non-DMAPI path, should return ENXIO
+ * #7 - dm_get_allocinfo returned EOPNOTSUPP with non-regular file, should return EINVAL
+ * #8 - dm_probe_hole returned 0 with off+len past EOF, should return E2BIG
+ * #9 - dm_read_invis returned 0 with off>EOF, should return EINVAL
+ * #10 - dm_set_dmattr returned EINVAL with invalid path, should return EFAULT
+ * #11 - dm_create_userevent returned EINVAL with invalid tokenp but left tevp on queue
+ * #12 - dm_handle_to_path only worked if target object in current directory
+ * #13 - jfs_copyin_attrname did not null-terminate attrname
+ * #14 - dm_set_return_on_destroy returned non-zero copy_from_user return code, should return EFAULT
+ * #15 - dm_set_eventlist returned EBADF for global handle, should return EINVAL
+ * #16 - dm_get_eventlist returned 0 for dir handle, should return EINVAL
+ * #17 - dm_get_eventlist did not handle number of elements in/out properly
+ * #18 - dm_get_config_events did not handle number of elements in/out properly
+ * #19 - dm_get_config_events did not return DM_EVENT_USER even though it is supported
+ * #20 - dm_set_fileattr returned 0 for invalid mask, should return EINVAL
+ * #21 - dm_set_fileattr changed ctime when DM_AT_DTIME set and no DM attributes
+ * #22 - dm_get_fileattr returned 0 for invalid mask, should return EINVAL
+ * #23 - dm_get_fileattr returned ctime when DM_AT_DTIME set and no DM attributes
+ * #24 - dm_get_dirattrs returned 0 for invalid mask, should return EINVAL
+ * #25 - dm_get_dirattrs returned ctime when DM_AT_DTIME set and no DM attributes
+ * #26 - dm_get_dirattrs returned E2BIG for zero buflen, should return 1
+ * #27 - dm_fd_to_handle returned EBADF with non-DMAPI path, should return ENXIO
+ * #28 - dm_get_dirattrs returned handle with DM_AT_HANDLE clear in mask
+ * #29 - dm_request_right returned 0 for invalid right, should return EINVAL
+ * #30 - dm_request_right returned 0 for DM_RIGHT_EXCL and DM_RR_WAIT set, should return EACCES
+ * #31 - dm_upgrade_right returned EACCES for DM_RIGHT_NULL, should return EPERM
+ * #32 - dm_downgrade_right returned EACCES for DM_RIGHT_NULL, should return EPERM
+ * #33 - dm_send_mmap_event sent offset 0, length EOF instead of actual page-aligned region
+ * #34 - dm_move_event returned ESRCH for dm_find_msg failure, should return ENOENT
+ * #35 - dm_find_eventmsg returned ESRCH for dm_find_msg failure, should return EINVAL
+ * #36 - dm_move_event moved token to targetsid despite returning EFAULT for invalid rlenp
+ * #37 - dm_respond_event returned 0 with invalid buflen, should return E2BIG
+ * #38 - dm_pending returned 0 with invalid delay, should return EFAULT
+ * #39 - dm_send_msg returned dm_respond_event's reterror instead of -1 and errno = reterror
+ * #40 - dm_send_xxx_event returned |errno| instead of -1 and errno = reterror if error occurred
+ * #41 - dm_handle_cmp faulted instead of returning 0 for global handle
+ */
+
+#define MAXNAMLEN MAXNAMELEN
+
+#define NBBY		8	/* Number of bits per byte */
+#define MODEMASK 07777 /* mode bits plus permission bits */
+
+#ifdef DEBUG
+#define STATIC static
+#else
+#define STATIC
+#endif
+
+#define BULKSTAT_RV_NOTHING 0
+#define BULKSTAT_RV_DIDONE 1
+#define BULKSTAT_RV_GIVEUP 2
+
+extern void jfs_truncate_nolock(struct inode *, loff_t);
+extern int xtPunchHole(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag);
+
+/* This structure must match the one described in ../xattr.c */
+struct ea_buffer {
+ int flag; /* Indicates what storage xattr points to */
+ int max_size; /* largest xattr that fits in current buffer */
+ dxd_t new_ea; /* dxd to replace ea when modifying xattr */
+ struct metapage *mp; /* metapage containing ea list */
+ struct jfs_ea_list *xattr; /* buffer containing ea list */
+};
+
+extern int jfs_ea_get(struct inode *, struct ea_buffer *, int);
+extern void jfs_ea_release(struct inode *, struct ea_buffer *);
+int jfs_dm_write_pers_data(struct jfs_inode_info *jfs_ip);
+
+/* Structure used to hold the on-disk version of a dm_attrname_t. All
+ on-disk attribute names start with the 9-byte string "user.dmi.".
+*/
+
+typedef struct {
+ char dan_chars[DMATTR_PREFIXLEN + DM_ATTR_NAME_SIZE + 1];
+} dm_dkattrname_t;
+
+/* In the on-disk inode, DMAPI attribute names consist of the user-provided
+ name with the DMATTR_PREFIXSTRING pre-pended. This string must NEVER be
+ changed!
+*/
+
+STATIC const char dmattr_prefix[DMATTR_PREFIXLEN + 1] = DMATTR_PREFIXSTRING;
+
+STATIC dm_size_t dm_min_dio_xfer = 0; /* direct I/O disabled for now */
+
+
+/* See jfs_dm_get_dmattr() for a description of why this is needed. */
+
+#define DM_MAX_ATTR_BYTES_ON_DESTROY 256
+
+#define DM_STAT_SIZE(mask, namelen) \
+ (sizeof(dm_stat_t) + \
+ (((mask) & DM_AT_HANDLE) ? sizeof(jfs_handle_t) : 0) + (namelen))
+#define MAX_DIRENT_SIZE (sizeof(dirent_t) + JFS_NAME_MAX + 1)
+
+#define DM_STAT_ALIGN (sizeof(u64))
+
+/* DMAPI's E2BIG == EA's ERANGE */
+/* DMAPI's ENOENT == EA's ENODATA */
+#define DM_EA_XLATE_ERR(err) do { if (err == -ERANGE) err = -E2BIG; else if (err == -ENODATA) err = -ENOENT; } while (0)
+
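+/* Shift count that lines the DM_REGION_* flag bits up with the corresponding
+   DM_EVENT_* bit positions; READ, WRITE and TRUNCATE are consecutive in both
+   name spaces, so one shift maps all three. */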
+#define REGION_MASK_TO_EVENT_MASK (DM_EVENT_READ - DM_REGION_READ + 1)
+
+#define MAX_MANAGED_REGIONS 0x7fffffff
+
+/*
+ * jfs_dm_send_data_event()
+ *
+ * Send data event to DMAPI. Drop IO lock (if specified) before
+ * the dm_send_data_event() call and reacquire it afterwards.
+ */
+int
+jfs_dm_send_data_event(
+ dm_eventtype_t event,
+ struct inode *ip,
+ dm_off_t offset,
+ size_t length,
+ int flags,
+ int/*vrwlock_t*/ *locktype)
+{
+ int error = 0;
+ uint16_t dmstate;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+ ASSERT(IP_IS_JFS(ip));
+
+#ifndef DM_SUPPORT_ONE_MANAGED_REGION
+ if (jfs_ip->dmnumrgns > 0) {
+ /* There are managed regions, check each one to see if event
+ * matches, abort if not.
+ */
+ int i;
+ dm_off_t rgnbeg;
+ dm_off_t rgnend;
+ dm_off_t filbeg = offset;
+ dm_off_t filend = filbeg + length - 1;
+ unsigned int rgnflags;
+
+ /* Quick check to make sure at least one region wants event */
+ if (!DMEV_ISSET(event, jfs_ip->dmattrs.da_dmevmask))
+ return 0;
+
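+		/* Scan the managed regions; the event is delivered only if
+		   the I/O range overlaps a region whose flags request it. */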
+ for (i = 0; i < jfs_ip->dmnumrgns; i++) {
+ rgnbeg = jfs_ip->dmrgns[i].rg_offset;
+ rgnflags = jfs_ip->dmrgns[i].rg_flags;
+
+ /* Region goes to end of file */
+ if (jfs_ip->dmrgns[i].rg_size == 0) {
+				/* region runs to EOF: it overlaps iff it
+				   starts at or before the I/O's last byte */
+				if (rgnbeg <= filend) {
+ if ((1 << event) & (rgnflags << REGION_MASK_TO_EVENT_MASK)) {
+ break;
+ }
+ }
+ } else {
+ rgnend = rgnbeg + jfs_ip->dmrgns[i].rg_size - 1;
+
+			/* Region overlaps the range of this I/O request */
+ if (((rgnbeg >= filbeg) && (rgnbeg <= filend)) ||
+ ((rgnend >= filbeg) && (rgnend <= filend)) ||
+ ((rgnbeg < filbeg) && (rgnend > filend))) {
+ if ((1 << event) & (rgnflags << REGION_MASK_TO_EVENT_MASK)) {
+ break;
+ }
+ }
+ }
+
+ /* SPECIAL CASE: truncation prior to DM_REGION_TRUNCATE
+ * region still generates event!
+ */
+ if ((rgnflags & DM_REGION_TRUNCATE) &&
+ (event == DM_EVENT_TRUNCATE) &&
+ (offset <= rgnbeg)) {
+ break;
+ }
+ }
+
+ if (i >= jfs_ip->dmnumrgns) {
+ return 0;
+ }
+ }
+#endif
+
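+	/* Re-send the event if the file's DM state changed while we were
+	   asleep in dm_send_data_event() -- e.g. jfs_dm_punch_hole() bumps
+	   da_dmstate -- since the data may have moved under us. */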
+ do {
+ dmstate = jfs_ip->dmattrs.da_dmstate;
+// if (locktype)
+// xfs_rwunlock(bdp, *locktype);
+ error = dm_send_data_event(event, ip, DM_RIGHT_NULL,
+ offset, length, flags);
+// if (locktype)
+// xfs_rwlock(bdp, *locktype);
+ } while (!error && (jfs_ip->dmattrs.da_dmstate != dmstate));
+
+ return error;
+}
+
+
+#ifdef DM_SUPPORT_ONE_MANAGED_REGION
+
+/* prohibited_mr_events
+ *
+ * Return event bits representing any events which cannot have managed
+ * region events set due to memory mapping of the file. Any executable
+ * (VM_EXEC) mapping prohibits the READ, WRITE and TRUNCATE events;
+ * otherwise a readable mapping prohibits READ events and a writable
+ * mapping prohibits WRITE events.
+ *
+ */
+
+STATIC int
+prohibited_mr_events(
+ struct inode *ip)
+{
+ struct address_space *mapping;
+ struct vm_area_struct *vma = NULL;
+ struct prio_tree_iter iter;
+ int prohibited = 0;
+
+ mapping = ip->i_mapping;
+
+ spin_lock(&mapping->i_mmap_lock);
+ if (!prio_tree_empty(&mapping->i_mmap)) {
+ while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
+ &iter, 0, ip->i_size)) != NULL) {
+ /* SPECIAL CASE: all events prohibited if any mmap
+ * areas with VM_EXEC
+ */
+ if (vma->vm_flags & VM_EXEC) {
+ prohibited |= ((1 << DM_EVENT_READ) |
+ (1 << DM_EVENT_WRITE) |
+ (1 << DM_EVENT_TRUNCATE));
+ break;
+ }
+
+ if (vma->vm_flags & VM_READ) {
+ prohibited |= 1 << DM_EVENT_READ;
+ }
+ if (vma->vm_flags & VM_WRITE) {
+ prohibited |= 1 << DM_EVENT_WRITE;
+ }
+ }
+ }
+ spin_unlock(&mapping->i_mmap_lock);
+ return prohibited;
+}
+
+#else
+
+STATIC int
+prohibited_mr_events(
+ struct inode *ip,
+ dm_region_t *rgn)
+{
+ struct address_space *mapping;
+ struct vm_area_struct *vma = NULL;
+ struct prio_tree_iter iter;
+ int prohibited = 0;
+ dm_off_t rgnbeg = rgn->rg_offset;
+ dm_off_t rgnend = rgn->rg_size ? rgnbeg + rgn->rg_size
+ : ip->i_size;
+ dm_off_t mmbeg;
+ dm_off_t mmend;
+
+ mapping = ip->i_mapping;
+
+ spin_lock(&mapping->i_mmap_lock);
+ if (!prio_tree_empty(&mapping->i_mmap)) {
+ while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap,
+ &iter, 0, ip->i_size)) != NULL) {
+ /* SPECIAL CASE: all events prohibited if any mmap
+ * areas with VM_EXEC
+ */
+ if (vma->vm_flags & VM_EXEC) {
+ prohibited |= ((1 << DM_EVENT_READ) |
+ (1 << DM_EVENT_WRITE) |
+ (1 << DM_EVENT_TRUNCATE));
+ break;
+ }
+
+ mmbeg = vma->vm_pgoff * PAGE_SIZE;
+ mmend = mmbeg + (vma->vm_end - vma->vm_start);
+
+ /* Region intersects memory-mapped area */
+ if (((rgnbeg > mmbeg) && (rgnbeg < mmend)) ||
+ ((rgnend > mmbeg) && (rgnend < mmend)) ||
+ ((rgnbeg <= mmbeg) && (rgnend >= mmend))) {
+ if (vma->vm_flags & VM_READ) {
+ prohibited |= 1 << DM_EVENT_READ;
+ }
+ if (vma->vm_flags & VM_WRITE) {
+ prohibited |= 1 << DM_EVENT_WRITE;
+ }
+ }
+ }
+ }
+ spin_unlock(&mapping->i_mmap_lock);
+ return prohibited;
+}
+#endif
+
+
+#ifdef DEBUG_RIGHTS
+STATIC int
+jfs_ip_to_hexhandle(
+ struct inode *ip,
+ u_int type,
+ char *buffer)
+{
+ jfs_handle_t handle;
+ u_char *chp;
+ int length;
+ int error;
+ int i;
+
+ if ((error = dm_ip_to_handle(ip, &handle)))
+ return(error);
+
+ if (type == DM_FSYS_OBJ) { /* a filesystem handle */
+ length = FSHSIZE;
+ } else {
+ length = JFS_HSIZE(handle);
+ }
+ for (chp = (u_char *)&handle, i = 0; i < length; i++) {
+ *buffer++ = "0123456789abcdef"[chp[i] >> 4];
+ *buffer++ = "0123456789abcdef"[chp[i] & 0xf];
+ }
+ *buffer = '\0';
+ return(0);
+}
+#endif /* DEBUG_RIGHTS */
+
+/* Copy in and validate an attribute name from user space. It should be a
+ string of at least one and at most DM_ATTR_NAME_SIZE characters. Because
+ the dm_attrname_t structure doesn't provide room for the trailing NULL
+ byte, we just copy in one extra character and then zero it if it
+ happens to be non-NULL.
+*/
+
+STATIC int
+jfs_copyin_attrname(
+ dm_attrname_t *from, /* dm_attrname_t in user space */
+ dm_dkattrname_t *to) /* name buffer in kernel space */
+{
+	size_t		len;
+
+ strcpy(to->dan_chars, dmattr_prefix);
+
+ len = strnlen_user((char*)from, DM_ATTR_NAME_SIZE);
+ if (len == 0) // XFS BUG #10
+ return(-EFAULT); // XFS BUG #10
+	/* copy_from_user() returns the number of bytes it could NOT copy,
+	   never an errno, so map any failure to EFAULT. */
+	if (copy_from_user(&to->dan_chars[DMATTR_PREFIXLEN], from, len))
+		return(-EFAULT);
+
+	if (to->dan_chars[DMATTR_PREFIXLEN] == '\0')
+		return(-EINVAL);
+
+	/* Overlong names are truncated here; strnlen_user() caps len, so
+	   the terminator always lands within the buffer. */
+	to->dan_chars[DMATTR_PREFIXLEN + len - 1] = '\0';	// XFS BUG #13
+	return(0);
+}
+
+STATIC int
+jfs_dmattr_exist(struct inode *ip)
+{
+ int exist = 0;
+ struct ea_buffer eabuf;
+
+ /* This will block all get/set EAs */
+ down_read(&JFS_IP(ip)->xattr_sem);
+
+ /* Check if EAs exist */
+ if (jfs_ea_get(ip, &eabuf, 0) > 0) {
+ struct jfs_ea_list *ealist = (struct jfs_ea_list *)eabuf.xattr;
+ struct jfs_ea *ea;
+
+ for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
+ char *user_name;
+
+ /* Skip over all non-DMAPI attributes. If the
+ attribute name is too long, we assume it is
+ non-DMAPI even if it starts with the correct
+ prefix.
+ */
+ if (strncmp(ea->name, dmattr_prefix, DMATTR_PREFIXLEN))
+ continue;
+ user_name = &ea->name[DMATTR_PREFIXLEN];
+ if (strlen(user_name) > DM_ATTR_NAME_SIZE)
+ continue;
+
+ /* We have a valid DMAPI attribute. */
+ exist = 1;
+ break;
+ }
+
+ jfs_ea_release(ip, &eabuf);
+ }
+
+ /* This will unblock all get/set EAs */
+ up_read(&JFS_IP(ip)->xattr_sem);
+
+ return(exist);
+}
+
+/* This copies selected fields in an inode into a dm_stat structure.
+
+ The inode must be kept locked SHARED by the caller.
+*/
+
+STATIC void
+jfs_ip_to_stat(
+ struct inode *ip,
+ u_int mask,
+ dm_stat_t *buf)
+{
+ int filetype;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+ if (mask & DM_AT_STAT) {
+ buf->dt_size = ip->i_size;
+ buf->dt_dev = old_encode_dev(ip->i_sb->s_dev);
+
+ buf->dt_ino = ip->i_ino;
+
+ /*
+ * Copy from in-core inode.
+ */
+ buf->dt_mode = (ip->i_mode & S_IFMT) | (ip->i_mode & MODEMASK);
+ buf->dt_uid = ip->i_uid;
+ buf->dt_gid = ip->i_gid;
+ buf->dt_nlink = ip->i_nlink;
+ /*
+ * Minor optimization, check the common cases first.
+ */
+ filetype = ip->i_mode & S_IFMT;
+ if ((filetype == S_IFREG) || (filetype == S_IFDIR)) {
+ buf->dt_rdev = 0;
+ } else if ((filetype == S_IFCHR) || (filetype == S_IFBLK) ) {
+ buf->dt_rdev = old_encode_dev(ip->i_rdev);
+ } else {
+			buf->dt_rdev = 0;	/* not a block or char special file */
+ }
+
+ buf->dt_atime = ip->i_atime.tv_sec;
+ buf->dt_mtime = ip->i_mtime.tv_sec;
+ buf->dt_ctime = ip->i_ctime.tv_sec;
+
+ /*
+ * We use the read buffer size as a recommended I/O
+ * size. This should always be larger than the
+ * write buffer size, so it should be OK.
+ * The value returned is in bytes.
+ */
+ buf->dt_blksize = ip->i_blksize;
+ buf->dt_blocks = ip->i_blocks;
+ }
+
+ if (mask & DM_AT_EMASK) {
+ buf->dt_emask = (dm_eventset_t)jfs_ip->dmattrs.da_dmevmask;
+ buf->dt_nevents = DM_EVENT_MAX;
+ }
+
+ if (mask & DM_AT_PATTR) {
+ buf->dt_pers = jfs_dmattr_exist(ip);
+ }
+
+ if (mask & DM_AT_CFLAG) {
+ buf->dt_change = ip->i_version;
+ }
+ if ((mask & DM_AT_DTIME) && jfs_dmattr_exist(ip)) // XFS BUG #25
+ buf->dt_dtime = ip->i_ctime.tv_sec;
+
+ if (mask & DM_AT_PMANR) {
+#ifdef DM_SUPPORT_ONE_MANAGED_REGION
+ /* Set if one of READ, WRITE or TRUNCATE bits is set in emask */
+ buf->dt_pmanreg = ( DMEV_ISSET(DM_EVENT_READ, buf->dt_emask) ||
+ DMEV_ISSET(DM_EVENT_WRITE, buf->dt_emask) ||
+ DMEV_ISSET(DM_EVENT_TRUNCATE, buf->dt_emask) ) ? 1 : 0;
+#else
+ buf->dt_pmanreg = (jfs_ip->dmnumrgns ? 1 : 0);
+#endif
+ }
+}
+
+
+/*
+ * This is used by dm_get_bulkattr() as well as dm_get_dirattrs().
+ * Given an inumber, it igets the inode and fills the given buffer
+ * with the dm_stat structure for the file.
+ */
+/* ARGSUSED */
+STATIC int
+jfs_dm_bulkstat_one(
+ struct inode *mp, /* mount point for filesystem */
+ u_int mask, /* fields mask */
+	tid_t		tid,		/* transaction id (unused here) */
+ dm_ino_t ino, /* inode number to get data for */
+ void *buffer, /* buffer to place output in */
+ int *res) /* bulkstat result code */
+{
+ struct inode *ip;
+ dm_stat_t *buf;
+ jfs_handle_t handle;
+ u_int statstruct_sz;
+
+ buf = (dm_stat_t *)buffer;
+
+ ip = iget(mp->i_sb, ino);
+ if (!ip) {
+ *res = BULKSTAT_RV_NOTHING;
+ return(-EIO);
+ }
+
+ if (is_bad_inode(ip)) {
+ iput(ip);
+ *res = BULKSTAT_RV_NOTHING;
+ return(-EIO);
+ }
+
+ if (ip->i_mode == 0) {
+ iput(ip);
+ *res = BULKSTAT_RV_NOTHING;
+ return(-ENOENT);
+ }
+
+ /*
+ * copy everything to the dm_stat buffer
+ */
+ jfs_ip_to_stat(ip, mask, buf);
+
+ /*
+ * Make the handle and the link to the next dm_stat buffer
+ */
+ // XFS BUG #28 BEGIN
+ statstruct_sz = DM_STAT_SIZE(mask, 0);
+ if (mask & DM_AT_HANDLE) {
+ dm_ip_to_handle(ip, &handle);
+ memcpy(buf+1, &handle, sizeof(handle)); /* handle follows stat struct */
+
+ buf->dt_handle.vd_offset = (int) sizeof(dm_stat_t);
+ buf->dt_handle.vd_length = (unsigned int) JFS_HSIZE(handle);
+
+ statstruct_sz = (statstruct_sz+(DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1);
+ } else {
+ memset((void *)&buf->dt_handle, 0, sizeof(dm_vardata_t));
+ }
+ // XFS BUG #28 END
+ buf->_link = statstruct_sz;
+
+ /*
+ * This is unused in bulkstat - so we zero it out.
+ */
+ memset((void *) &buf->dt_compname, 0, sizeof(dm_vardata_t));
+
+ iput(ip);
+
+ *res = BULKSTAT_RV_DIDONE;
+ return(0);
+}
+
+
+struct filldir_pos {
+ void *addr;
+ size_t len;
+};
+
+/* This structure must match the one defined in ../jfs_dtree.c */
+struct jfs_dirent {
+ loff_t position;
+ int ino;
+ u16 name_len;
+ char name[0];
+};
+
+
+STATIC int
+jfs_dm_filldir(
+ void *bufp,
+ const char *name,
+ int namelen,
+ loff_t off,
+ ino_t ino,
+ unsigned type)
+{
+ struct filldir_pos *fdposp = bufp;
+ struct jfs_dirent *jdp;
+ int entlen = sizeof(struct jfs_dirent) + namelen + 1;
+
+ /* Not enough space left for this entry */
+ if (fdposp->len < entlen)
+ return 1;
+
+ jdp = (struct jfs_dirent *)fdposp->addr;
+ jdp->position = off;
+ jdp->ino = ino;
+ jdp->name_len = namelen;
+	/* 'name' is a counted string from readdir, not NUL-terminated */
+	memcpy(jdp->name, name, namelen);
+	jdp->name[namelen] = '\0';
+
+ fdposp->len -= entlen;
+ fdposp->addr = (char *)fdposp->addr + entlen;
+
+ return 0;
+}
+
+
+STATIC int
+jfs_get_dirents(
+ struct inode *dirp,
+ void *bufp,
+ size_t bufsz,
+ dm_off_t *locp,
+ size_t *nreadp)
+{
+ int rval;
+ struct file tempfile;
+ struct dentry tempdentry;
+ struct filldir_pos fdpos;
+
+ /* Simulate the necessary info for jfs_readdir */
+ tempdentry.d_inode = dirp;
+ tempfile.f_dentry = &tempdentry;
+ tempfile.f_pos = *locp;
+
+ fdpos.addr = bufp;
+ fdpos.len = bufsz;
+
+ *nreadp = 0;
+
+ rval = jfs_readdir(&tempfile, &fdpos, jfs_dm_filldir);
+ if (! rval) {
+ /*
+ * number of bytes read into the dirent buffer
+ */
+ *nreadp = bufsz - fdpos.len;
+
+ /*
+ * Nothing read and loc passed in not end of dir, must be
+ * invalid loc; otherwise save new loc
+ */
+ if ((*nreadp == 0) && (*locp != DIREND))
+ rval = -EINVAL;
+ else
+ *locp = tempfile.f_pos;
+ }
+ return(rval);
+}
+
+
+STATIC int
+jfs_dirents_to_stats(
+ struct inode *ip,
+ u_int mask, /* fields mask */
+ struct jfs_dirent *direntp, /* array of dirent structs */
+ void *bufp, /* buffer to fill */
+ size_t *direntbufsz, /* sz of filled part of dirent buf */
+ size_t *spaceleftp, /* IO - space left in user buffer */
+ size_t *nwrittenp, /* number of bytes written to 'bufp' */
+ dm_off_t *locp,
+ size_t *offlastlinkp) /* offset of last stat's _link */
+{
+ struct jfs_dirent *p;
+ dm_stat_t *statp;
+ size_t reclen;
+ size_t namelen;
+ size_t spaceleft;
+ dm_off_t prevoff, offlastlink;
+ int res;
+
+ spaceleft = *spaceleftp;
+ *nwrittenp = 0;
+
+ /*
+ * Make sure the first entry will fit in buffer before doing any
+ * processing
+ */
+ if (spaceleft <= DM_STAT_SIZE(mask, direntp->name_len + 1)) {
+ return 0;
+ }
+
+ *spaceleftp = 0;
+ prevoff = 0;
+ offlastlink = 0;
+
+ /*
+ * Go thru all the dirent records, making dm_stat structures from
+ * them, one by one, until dirent buffer is empty or stat buffer
+ * is full.
+ */
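+	/* Each dm_stat_t is chained to the next through its _link field,
+	   which holds the byte offset from the start of the current record
+	   to the start of the next one; a _link of zero ends the chain. */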
+ p = direntp;
+ statp = (dm_stat_t *) bufp;
+ for (reclen = (size_t) sizeof(struct jfs_dirent) + p->name_len + 1;
+ *direntbufsz > 0;
+ *direntbufsz -= reclen,
+ p = (struct jfs_dirent *) ((char *) p + reclen),
+ reclen = (size_t) sizeof(struct jfs_dirent) + p->name_len + 1) {
+
+ namelen = p->name_len + 1;
+
+ /*
+ * Make sure we have enough space.
+ */
+ if (spaceleft <= DM_STAT_SIZE(mask, namelen)) {
+ /*
+ * d_off field in dirent_t points at the next entry.
+ */
+ *locp = p->position;
+ *spaceleftp = 0;
+
+ /*
+ * The last link is NULL.
+ */
+ statp->_link = 0;
+ return(0);
+ }
+
+ statp = (dm_stat_t *) bufp;
+
+ (void)jfs_dm_bulkstat_one(ip, mask, 0, p->ino, statp, &res);
+ if (res != BULKSTAT_RV_DIDONE)
+ continue;
+
+ /*
+ * On return from jfs_dm_bulkstat_one(), stap->_link points
+ * at the end of the handle in the stat structure.
+ */
+ statp->dt_compname.vd_offset = statp->_link;
+ statp->dt_compname.vd_length = namelen;
+ /*
+ * Directory entry name is guaranteed to be
+ * null terminated; the copy gets the '\0' too.
+ */
+ memcpy((char *) statp + statp->_link, p->name, namelen);
+
+ /* Word-align the record */
+ statp->_link = (statp->_link + namelen + (DM_STAT_ALIGN - 1))
+ & ~(DM_STAT_ALIGN - 1);
+
+ spaceleft -= statp->_link;
+ *nwrittenp += statp->_link;
+
+ offlastlink += prevoff;
+ prevoff = statp->_link;
+
+ bufp = (char *)statp + statp->_link;
+ *locp = p->position;
+
+ /*
+ * If we just happen to use the very last byte, bump by one
+ * to let logic at beginning of loop above handle it
+ */
+ if (!spaceleft) {
+ spaceleft++;
+ }
+ }
+ /* statp->_link = 0; this is handled in get_dirattrs if no more left */
+ *offlastlinkp = (size_t)offlastlink;
+
+ /*
+ * If there's space left to put in more, caller should know that..
+ */
+ *spaceleftp = spaceleft;
+
+ return(0);
+}
+
+
+// XFS BUG #17 START
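+/* Return one more than the index of the highest set event in *eventsetp
+   (i.e. the smallest nelem that lets a caller see every set bit), or
+   zero if no events are set. */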
+STATIC int
+jfs_dm_get_high_set_event(dm_eventset_t *eventsetp)
+{
+ int i;
+
+ for (i = DM_EVENT_MAX-1; i >= 0 && !DMEV_ISSET(i, *eventsetp); i--);
+ return i + 1;
+}
+// XFS BUG #17 END
+
+
+/* jfs_dm_f_get_eventlist - return the dm_eventset_t mask for inode ip. */
+
+STATIC int
+jfs_dm_f_get_eventlist(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int *nelemp) /* in kernel space! */
+{
+ int highSetEvent; // XFS BUG #17
+ dm_eventset_t eventset;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ /* Note that we MUST return a regular file's managed region bits as
+ part of the mask because dm_get_eventlist is supposed to return the
+ union of all managed region flags in those bits. Since we only
+ support one region, we can just return the bits as they are. For
+ all other object types, the bits will already be zero. Handy, huh?
+ */
+
+ eventset = jfs_ip->dmattrs.da_dmevmask;
+
+ // XFS BUG #17 START
+ highSetEvent = jfs_dm_get_high_set_event(&eventset);
+ if (highSetEvent > nelem) {
+ *nelemp = highSetEvent;
+ return(-E2BIG);
+ }
+ // XFS BUG #17 END
+
+	/* Now copy the event mask and event count back to the caller. The
+	   mask is clipped to the lesser of nelem and DM_EVENT_MAX; the
+	   count returned is the highest set event (XFS BUG #17).
+	*/
+
+ if (nelem > DM_EVENT_MAX)
+ nelem = DM_EVENT_MAX;
+ eventset &= (1 << nelem) - 1;
+
+ *eventsetp = eventset;
+ *nelemp = highSetEvent; // XFS BUG #17
+ return(0);
+}
+
+
+/* jfs_dm_f_set_eventlist - update the dm_eventset_t mask in the inode ip. Only the
+ bits from zero to maxevent-1 are being replaced; higher bits are preserved.
+*/
+
+STATIC int
+jfs_dm_f_set_eventlist(
+ struct inode *ip,
+ dm_right_t right,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int maxevent)
+{
+ dm_eventset_t eventset;
+ dm_eventset_t max_mask;
+ dm_eventset_t valid_events;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ eventset = *eventsetp;
+ if (maxevent >= sizeof(jfs_ip->dmattrs.da_dmevmask) * NBBY)
+ return(-EINVAL);
+ max_mask = (1 << maxevent) - 1;
+
+ if (S_ISDIR(ip->i_mode)) {
+ valid_events = DM_JFS_VALID_DIRECTORY_EVENTS;
+ } else { /* file or symlink */
+ valid_events = DM_JFS_VALID_FILE_EVENTS;
+ }
+ if ((eventset & max_mask) & ~valid_events)
+ return(-EINVAL);
+
+ /* Adjust the event mask so that the managed region bits will not
+ be altered.
+ */
+
+	max_mask &= ~(1 << DM_EVENT_READ);	/* preserve current MR bits */
+	max_mask &= ~(1 << DM_EVENT_WRITE);
+	max_mask &= ~(1 << DM_EVENT_TRUNCATE);
+
+ jfs_ip->dmattrs.da_dmevmask = (eventset & max_mask) |
+ (jfs_ip->dmattrs.da_dmevmask & ~max_mask);
+
+ mark_inode_dirty(ip);
+
+ return(0);
+}
+
+
+/* jfs_dm_fs_get_eventlist - return the dm_eventset_t mask for filesystem ip. */
+
+STATIC int
+jfs_dm_fs_get_eventlist(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int *nelemp) /* in kernel space! */
+{
+ int highSetEvent; // XFS BUG #17
+ dm_eventset_t eventset;
+ struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ eventset = sbi->dm_evmask;
+
+ // XFS BUG #17 START
+ highSetEvent = jfs_dm_get_high_set_event(&eventset);
+ if (highSetEvent > nelem) {
+ *nelemp = highSetEvent;
+ return(-E2BIG);
+ }
+ // XFS BUG #17 END
+
+	/* Now copy the event mask and event count back to the caller. The
+	   mask is clipped to the lesser of nelem and DM_EVENT_MAX; the
+	   count returned is the highest set event (XFS BUG #17).
+	*/
+
+ if (nelem > DM_EVENT_MAX)
+ nelem = DM_EVENT_MAX;
+ eventset &= (1 << nelem) - 1;
+
+ *eventsetp = eventset;
+ *nelemp = highSetEvent; // XFS BUG #17
+ return(0);
+}
+
+
+/* jfs_dm_fs_set_eventlist - update the dm_eventset_t mask in the mount structure for
+ filesystem ip. Only the bits from zero to maxevent-1 are being replaced;
+ higher bits are preserved.
+*/
+
+STATIC int
+jfs_dm_fs_set_eventlist(
+ struct inode *ip,
+ dm_right_t right,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int maxevent)
+{
+ dm_eventset_t eventset;
+ dm_eventset_t max_mask;
+ struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ eventset = *eventsetp;
+
+ if (maxevent >= sizeof(sbi->dm_evmask) * NBBY)
+ return(-EINVAL);
+ max_mask = (1 << maxevent) - 1;
+
+ if ((eventset & max_mask) & ~DM_JFS_VALID_FS_EVENTS)
+ return(-EINVAL);
+
+ sbi->dm_evmask = (eventset & max_mask) | (sbi->dm_evmask & ~max_mask);
+ return(0);
+}
+
+
+STATIC int
+jfs_dm_direct_ok(
+ struct inode *ip,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp)
+{
+ /* Realtime files can ONLY do direct I/O. */
+
+ /* If direct I/O is disabled, or if the request is too small, use
+ buffered I/O.
+ */
+
+ if (!dm_min_dio_xfer || len < dm_min_dio_xfer)
+ return(0);
+
+#if 0 /* SGI's exclusion, not IBM's */
+ /* If the request is not well-formed or is too large, use
+ buffered I/O.
+ */
+
+ if ((__psint_t)bufp & scache_linemask) /* if buffer not aligned */
+ return(0);
+ if (off & mp->m_blockmask) /* if file offset not aligned */
+ return(0);
+ if (len & mp->m_blockmask) /* if xfer length not aligned */
+ return(0);
+ if (len > ctooff(v.v_maxdmasz - 1)) /* if transfer too large */
+ return(0);
+
+ /* A valid direct I/O candidate. */
+
+ return(1);
+#else
+ return(0);
+#endif
+}
+
+
+/* We need to be able to select various combinations of FINVIS, O_NONBLOCK,
+ O_DIRECT, and O_SYNC, yet we don't have a file descriptor and we don't have
+ the file's pathname. All we have is a handle.
+*/
+
+STATIC int
+jfs_dm_rdwr(
+ struct inode *ip,
+ uint fflag,
+ mode_t fmode,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp)
+{
+ int error;
+ int oflags;
+ ssize_t xfer;
+ struct file file;
+ struct dentry *dentry;
+ boolean_t bCloseFile = FALSE;
+ struct jfs_inode_info *ji = JFS_IP(ip);
+
+ if (off < 0 || !S_ISREG(ip->i_mode))
+ return(-EINVAL);
+
+ if (fmode & FMODE_READ) {
+ oflags = O_RDONLY;
+ } else {
+ oflags = O_WRONLY;
+ }
+
+ /* Build file descriptor flags and I/O flags. O_NONBLOCK is needed so
+ that we don't block on mandatory file locks. FINVIS is needed so
+ that we don't change any file timestamps.
+ */
+
+ fmode |= FINVIS;
+ oflags |= O_NONBLOCK;
+ if (jfs_dm_direct_ok(ip, off, len, bufp))
+ oflags |= O_DIRECT;
+
+ if (fflag & O_SYNC)
+ oflags |= O_SYNC;
+
+	if (ip->i_fop == NULL) {
+		/* no iput; caller did get, and will do put */
+		return(-EINVAL);
+	}
+ igrab(ip);
+
+ /* Find a dentry. Get a well-connected one, if possible. */
+ dentry = d_alloc_root(ip);
+ if (dentry == NULL) {
+ iput(ip);
+ return -ENOMEM;
+ }
+
+	if (ip->i_ino != dentry->d_inode->i_ino) {
+		dput(dentry);
+		return -EINVAL;
+	}
+
+ if (fmode & FMODE_WRITE) {
+ error = get_write_access(ip);
+ if (error) {
+ dput(dentry);
+ return(error);
+ }
+ }
+
+	error = open_private_file(&file, dentry, oflags);
+	if (error) {
+ if (error == -EFBIG) {
+ /* try again */
+ oflags |= O_LARGEFILE;
+ file.f_flags = oflags;
+ error = file.f_op->open( dentry->d_inode, &file );
+ }
+ if (error) {
+ if (fmode & FMODE_WRITE)
+ put_write_access(ip);
+ dput(dentry);
+ return(-EINVAL);
+ }
+ } else {
+ bCloseFile = TRUE;
+ }
+
+ /* file.f_flags = oflags; handled by open_private_file now */
+
+ if (fmode & FMODE_READ) {
+ /* generic_file_read updates the atime but we need to
+ * undo that because this I/O was supposed to be invisible.
+ */
+ struct timespec save_atime = ip->i_atime;
+ xfer = generic_file_read(&file, bufp, len, &off);
+ ip->i_atime = save_atime;
+ mark_inode_dirty(ip);
+ } else {
+ /* generic_file_write updates the mtime/ctime but we need
+ * to undo that because this I/O was supposed to be
+ * invisible.
+ */
+ struct timespec save_mtime = ip->i_mtime;
+ struct timespec save_ctime = ip->i_ctime;
+ xfer = generic_file_write(&file, bufp, len, &off);
+ ip->i_mtime = save_mtime;
+ ip->i_ctime = save_ctime;
+ mark_inode_dirty(ip);
+ }
+ if (xfer >= 0) {
+ *rvp = xfer;
+ error = 0;
+ } else {
+ error = (int)xfer;
+ }
+
+ if (file.f_mode & FMODE_WRITE)
+ put_write_access(ip);
+	if (bCloseFile) {
+		/* Calling close_private_file results in calling jfs_release,
+		 * which results in a DM_EVENT_CLOSE event; so do necessary
+		 * jfs_release stuff here and suppress release.  Clear
+		 * ->release in a private copy of the file_operations;
+		 * clearing it through the shared f_op pointer would disable
+		 * jfs_release() for every other open file as well.
+		 */
+		struct file_operations fops_nr = *file.f_op;
+
+		if (ji->active_ag != -1) {
+			struct bmap *bmap = JFS_SBI(ip->i_sb)->bmap;
+			atomic_dec(&bmap->db_active[ji->active_ag]);
+			ji->active_ag = -1;
+		}
+		fops_nr.release = NULL;
+		file.f_op = &fops_nr;
+		close_private_file(&file);
+	}
+ dput(dentry);
+ return error;
+}
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_clear_inherit(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep)
+{
+ return(-ENOSYS);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_create_by_handle(
+ struct inode *ip,
+ dm_right_t right,
+ void *hanp,
+ size_t hlen,
+ char *cname)
+{
+ return(-ENOSYS);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_downgrade_right(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type) /* DM_FSYS_OBJ or zero */
+{
+#ifdef DEBUG_RIGHTS
+ char buffer[sizeof(jfs_handle_t) * 2 + 1];
+
+ if (!jfs_ip_to_hexhandle(ip, type, buffer)) {
+ printf("dm_downgrade_right: old %d new %d type %d handle %s\n",
+ right, DM_RIGHT_SHARED, type, buffer);
+ } else {
+ printf("dm_downgrade_right: old %d new %d type %d handle "
+ "<INVALID>\n", right, DM_RIGHT_SHARED, type);
+ }
+#endif /* DEBUG_RIGHTS */
+ return(0);
+}
+
+#define NUM_XADS_PER_QUERY 10
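+/* (NUM_XADS_PER_QUERY is currently unused; the lookup loop below grows
+   its xad array in 16-entry / 256-byte steps instead.) */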
+
+/* Note: jfs_dm_get_allocinfo() makes no attempt to coalesce two adjacent
+ extents when both are of type DM_EXTENT_RES; this is left to the caller.
+ JFS guarantees that there will never be two adjacent DM_EXTENT_HOLE extents.
+*/
+
+STATIC int
+jfs_dm_get_allocinfo_rvp(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t *offp,
+ u_int nelem,
+ dm_extent_t *extentp,
+ u_int *nelemp,
+ int *rvp)
+{
+ dm_off_t fsb_offset;
+ dm_size_t fsb_length;
+ int elem;
+
+ dm_extent_t extent;
+ struct lxdlist lxdlist;
+ lxd_t lxd;
+ struct xadlist xadlist;
+ xad_t *pxad_array = NULL;
+ int alloc_size = 0;
+ int i, error;
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ if ((ip->i_mode & S_IFMT) != S_IFREG)
+ return(-EINVAL);
+
+ if (nelem == 0) {
+ if (put_user(0, nelemp))
+ return(-EFAULT);
+ return(-EINVAL);
+ }
+
+ if (copy_from_user( &fsb_offset, offp, sizeof(fsb_offset)))
+ return(-EFAULT);
+
+ /* Real extent can't start on anything other than blocksize boundary */
+ if (fsb_offset & (ip->i_blksize-1))
+ return(-EINVAL);
+
+	/* Compare the offset against i_size rather than testing the
+	   difference for <= 0; dm_size_t is unsigned, so the subtraction
+	   can never go negative. */
+	if (fsb_offset >= ip->i_size) {
+		if (put_user(0, nelemp))
+			return(-EFAULT);
+		*rvp = 0;
+		return(0);
+	}
+
+	fsb_length = ip->i_size - fsb_offset;
+
+ elem = 0;
+
+ /* Obtain single array of xads that covers entire hole */
+ do {
+ /* Free prior xad array if one exists */
+ if (pxad_array != NULL) {
+ kmem_free(pxad_array, alloc_size);
+ }
+
+ elem += 16; /* 256-byte chunk */
+ alloc_size = elem * sizeof(xad_t);
+ pxad_array = kmem_alloc(alloc_size, KM_SLEEP);
+
+ if (pxad_array == NULL)
+ return -ENOMEM;
+
+ lxdlist.maxnlxd = lxdlist.nlxd = 1;
+ LXDlength(&lxd, (fsb_length + (ip->i_blksize-1)) >> ip->i_blkbits);
+ LXDoffset(&lxd, fsb_offset >> ip->i_blkbits);
+ lxdlist.lxd = &lxd;
+
+ xadlist.maxnxad = xadlist.nxad = elem;
+ xadlist.xad = pxad_array;
+
+ IREAD_LOCK(ip);
+ error = xtLookupList(ip, &lxdlist, &xadlist, 0);
+ IREAD_UNLOCK(ip);
+
+ if (error) {
+ if (pxad_array != NULL)
+ kmem_free(pxad_array, alloc_size);
+ return error;
+ }
+ } while ((xadlist.nxad == elem) &&
+ ((offsetXAD(&xadlist.xad[elem-1]) + lengthXAD(&xadlist.xad[elem-1])) < fsb_offset + fsb_length));
+
+ elem = 0;
+
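+	/* Walk the xad array, coalescing adjacent xads of the same kind
+	   (allocated vs. not-recorded) into dm_extent_t records; each
+	   completed record is flushed to the user's buffer as the next
+	   one begins. */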
+ if (xadlist.nxad > 0) {
+ extent.ex_type = DM_EXTENT_INVALID;
+ extent.ex_length = 0;
+ extent.ex_offset = fsb_offset;
+
+ for (i = 0; (i < xadlist.nxad) && (elem < nelem) && (fsb_length > 0); i++) {
+ dm_off_t xad_off;
+ dm_size_t xad_len;
+ xad_t *pxad;
+
+ pxad = &xadlist.xad[i];
+ xad_off = offsetXAD(pxad) << ip->i_blkbits;
+ xad_len = lengthXAD(pxad) << ip->i_blkbits;
+ if (xad_off == extent.ex_offset + extent.ex_length) {
+ /* found contiguous extent */
+ if (extent.ex_type == DM_EXTENT_INVALID) {
+ extent.ex_type = pxad->flag & XAD_NOTRECORDED ? DM_EXTENT_HOLE : DM_EXTENT_RES;
+ } else if (!(((pxad->flag & XAD_NOTRECORDED) &&
+ (extent.ex_type == DM_EXTENT_HOLE)) ||
+ ((!(pxad->flag & XAD_NOTRECORDED)) &&
+ (extent.ex_type == DM_EXTENT_RES)))) {
+ /* done with current extent */
+					/* don't leak the xad array on a
+					   copyout fault */
+					if (copy_to_user( extentp, &extent, sizeof(extent))) {
+						kmem_free(pxad_array, alloc_size);
+						return(-EFAULT);
+					}
+
+ fsb_offset += extent.ex_length;
+ fsb_length -= extent.ex_length;
+ elem++;
+ extentp++;
+
+ /* initialize new extent */
+ extent.ex_type = pxad->flag & XAD_NOTRECORDED ? DM_EXTENT_HOLE : DM_EXTENT_RES;
+ extent.ex_offset += extent.ex_length;
+ extent.ex_length = 0;
+ }
+
+ extent.ex_length += xad_len;
+ } else {
+ /* found non-contiguous extent (hole) */
+ dm_size_t holelen = xad_off - (extent.ex_offset + extent.ex_length);
+
+ if (extent.ex_type == DM_EXTENT_RES) {
+ /* done with current extent */
+					if (copy_to_user(extentp, &extent, sizeof(extent))) {
+						kmem_free(pxad_array, alloc_size);
+						return(-EFAULT);
+					}
+
+ fsb_offset += extent.ex_length;
+ fsb_length -= extent.ex_length;
+ elem++;
+ extentp++;
+
+ /* initialize hole extent */
+ extent.ex_type = DM_EXTENT_HOLE;
+ extent.ex_offset += extent.ex_length;
+ extent.ex_length = holelen;
+ } else {
+ extent.ex_type = DM_EXTENT_HOLE;
+ extent.ex_length += holelen;
+ }
+
+ if (pxad->flag & XAD_NOTRECORDED) {
+ /* add to hole */
+ extent.ex_length += xad_len;
+ } else {
+ /* done with hole */
+ if (elem >= nelem)
+ break;
+
+					if (copy_to_user(extentp, &extent, sizeof(extent))) {
+						kmem_free(pxad_array, alloc_size);
+						return(-EFAULT);
+					}
+
+ fsb_offset += extent.ex_length;
+ fsb_length -= extent.ex_length;
+ elem++;
+ extentp++;
+
+ /* initialize resident extent */
+ extent.ex_type = DM_EXTENT_RES;
+ extent.ex_offset = xad_off;
+ extent.ex_length = xad_len;
+ }
+ }
+
+ /* current extent has gone past end of file */
+ if (extent.ex_offset + extent.ex_length > fsb_offset + fsb_length) {
+ dm_size_t len = (extent.ex_offset + extent.ex_length) - (fsb_offset + fsb_length);
+ extent.ex_length -= len;
+ break;
+ }
+ }
+ } else if (fsb_offset < ip->i_size) {
+ extent.ex_type = DM_EXTENT_HOLE;
+ extent.ex_length = fsb_length;
+ extent.ex_offset = fsb_offset;
+ }
+
+ /* put current extent if room is available */
+ if (elem < nelem && fsb_length > 0) {
+ if (fsb_offset + extent.ex_length < ip->i_size) {
+ /* hole at end of file */
+ if (extent.ex_type == DM_EXTENT_HOLE) {
+ extent.ex_length += (ip->i_size - fsb_offset - extent.ex_length);
+ } else {
+				if (copy_to_user( extentp, &extent, sizeof(extent))) {
+					kmem_free(pxad_array, alloc_size);
+					return(-EFAULT);
+				}
+
+ fsb_offset += extent.ex_length;
+ fsb_length -= extent.ex_length;
+ elem++;
+ extentp++;
+
+ /* initialize hole extent */
+ extent.ex_type = DM_EXTENT_HOLE;
+ extent.ex_offset = fsb_offset;
+ extent.ex_length = fsb_length;
+ }
+ }
+
+ if (elem < nelem && fsb_length > 0) {
+			if (copy_to_user( extentp, &extent, sizeof(extent))) {
+				kmem_free(pxad_array, alloc_size);
+				return(-EFAULT);
+			}
+
+ fsb_offset += extent.ex_length;
+ fsb_length -= extent.ex_length;
+ elem++;
+ extentp++;
+ }
+ }
+
+ if (pxad_array != NULL)
+ kmem_free(pxad_array, alloc_size);
+
+ if (copy_to_user( offp, &fsb_offset, sizeof(fsb_offset)))
+ return(-EFAULT);
+
+ if (copy_to_user( nelemp, &elem, sizeof(elem)))
+ return(-EFAULT);
+
+ *rvp = (fsb_length == 0 ? 0 : 1);
+
+ return(0);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_get_bulkall_rvp(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_attrname_t *attrnamep,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp, /* address of buffer in user space */
+ size_t *rlenp, /* user space address */
+ int *rvalp)
+{
+ return(-ENOSYS);
+}
+
+
+/*
+ * TBD, although unused by TSM
+ */
+/* ARGSUSED */
+STATIC int
+jfs_dm_get_bulkattr_rvp(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvalp)
+{
+#if 0
+ int error, done;
+ int nelems;
+ u_int statstruct_sz;
+ dm_attrloc_t loc;
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ if (copy_from_user( &loc, locp, sizeof(loc)))
+ return(-EFAULT);
+
+ /* Because we will write directly to the user's buffer, make sure that
+ the buffer is properly aligned.
+ */
+
+ if (((__psint_t)bufp & (DM_STAT_ALIGN - 1)) != 0)
+ return(-EFAULT);
+
+ /* size of the handle is constant for this function */
+
+ statstruct_sz = DM_STAT_SIZE(mask, 0);
+ statstruct_sz = (statstruct_sz+(DM_STAT_ALIGN-1)) & ~(DM_STAT_ALIGN-1);
+
+ nelems = buflen / statstruct_sz;
+ if (nelems < 1) {
+ if (put_user( statstruct_sz, rlenp ))
+ return(-EFAULT);
+ return(-E2BIG);
+ }
+
+ /*
+ * fill the buffer with dm_stat_t's
+ */
+ // TBD
+
+ if (error)
+ return(error);
+ if (!done) {
+ *rvalp = 1;
+ } else {
+ *rvalp = 0;
+ }
+
+ if (put_user( statstruct_sz * nelems, rlenp ))
+ return(-EFAULT);
+
+ if (copy_to_user( locp, &loc, sizeof(loc)))
+ return(-EFAULT);
+
+ /*
+ * If we didn't do any, we must not have any more to do.
+ */
+ if (nelems < 1)
+ return(0);
+ /* set _link in the last struct to zero */
+ if (put_user( 0,
+ &((dm_stat_t *)((char *)bufp + statstruct_sz*(nelems-1)))->_link)
+ )
+ return(-EFAULT);
+#endif
+ return(-ENOSYS);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_get_config(
+ struct inode *ip,
+ dm_right_t right,
+ dm_config_t flagname,
+ dm_size_t *retvalp)
+{
+ dm_size_t retval;
+
+ switch (flagname) {
+ case DM_CONFIG_DTIME_OVERLOAD:
+ case DM_CONFIG_PERS_ATTRIBUTES:
+ case DM_CONFIG_PERS_MANAGED_REGIONS:
+ case DM_CONFIG_PUNCH_HOLE:
+ case DM_CONFIG_WILL_RETRY:
+ retval = DM_TRUE;
+ break;
+
+ case DM_CONFIG_CREATE_BY_HANDLE: /* these will never be done */
+ case DM_CONFIG_LOCK_UPGRADE:
+ case DM_CONFIG_PERS_EVENTS:
+ case DM_CONFIG_PERS_INHERIT_ATTRIBS:
+ retval = DM_FALSE;
+ break;
+
+ case DM_CONFIG_BULKALL: /* these will be done someday */
+ retval = DM_FALSE;
+ break;
+ case DM_CONFIG_MAX_ATTR_ON_DESTROY:
+ retval = DM_MAX_ATTR_BYTES_ON_DESTROY;
+ break;
+
+ case DM_CONFIG_MAX_ATTRIBUTE_SIZE:
+ retval = MAXEASIZE;
+ break;
+
+ case DM_CONFIG_MAX_HANDLE_SIZE:
+ retval = DM_MAX_HANDLE_SIZE;
+ break;
+
+ case DM_CONFIG_MAX_MANAGED_REGIONS:
+#ifdef DM_SUPPORT_ONE_MANAGED_REGION
+ retval = 1;
+#else
+ retval = MAX_MANAGED_REGIONS; /* actually it's unlimited */
+#endif
+ break;
+
+ case DM_CONFIG_TOTAL_ATTRIBUTE_SPACE:
+ retval = 0x7fffffff; /* actually it's unlimited */
+ break;
+
+ default:
+ return(-EINVAL);
+ }
+
+ /* Copy the results back to the user. */
+
+ if (copy_to_user( retvalp, &retval, sizeof(retval)))
+ return(-EFAULT);
+ return(0);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_get_config_events(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_eventset_t *eventsetp,
+ u_int *nelemp)
+{
+ int highSetEvent; // XFS BUG #18
+ dm_eventset_t eventset;
+
+ //if (nelem == 0) // XFS BUG #18
+ // return(-EINVAL); // XFS BUG #18
+
+ eventset = DM_JFS_SUPPORTED_EVENTS;
+
+ // XFS BUG #18 START
+ highSetEvent = jfs_dm_get_high_set_event(&eventset);
+ if (highSetEvent > nelem) {
+ *nelemp = highSetEvent;
+ return(-E2BIG);
+ }
+ // XFS BUG #18 END
+
+	/* Now copy the event mask and event count back to the caller. The
+	   mask is clipped to the lesser of nelem and DM_EVENT_MAX; the
+	   count returned is the highest set event (XFS BUG #18).
+	*/
+
+ if (nelem > DM_EVENT_MAX)
+ nelem = DM_EVENT_MAX;
+ eventset &= (1 << nelem) - 1;
+
+ if (copy_to_user( eventsetp, &eventset, sizeof(eventset)))
+ return(-EFAULT);
+
+ if (put_user(highSetEvent, nelemp)) // XFS BUG #18
+ return(-EFAULT);
+ return(0);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_get_destroy_dmattr(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ char **valuepp,
+ int *vlenp)
+{
+ char buffer[128];
+ dm_dkattrname_t dkattrname;
+ int alloc_size;
+ int value_len;
+ char *value;
+ int bytes_read;
+
+ *vlenp = -1; /* assume failure by default */
+
+ if (attrnamep->an_chars[0] == '\0')
+ return(-EINVAL);
+
+ /* Build the on-disk version of the attribute name. */
+
+ strcpy(dkattrname.dan_chars, dmattr_prefix);
+ strncpy(&dkattrname.dan_chars[DMATTR_PREFIXLEN],
+ (char *)attrnamep->an_chars, DM_ATTR_NAME_SIZE + 1);
+ dkattrname.dan_chars[sizeof(dkattrname.dan_chars) - 1] = '\0';
+
+ alloc_size = 0;
+ value_len = sizeof(buffer); /* in/out parameter */
+ value = buffer;
+
+ bytes_read = __jfs_getxattr(ip, dkattrname.dan_chars, value, value_len);
+
+ if (bytes_read == -ERANGE) {
+ alloc_size = MAXEASIZE;
+ value = kmalloc(alloc_size, SLAB_KERNEL);
+ if (value == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return(-ENOMEM);
+ }
+
+ bytes_read = __jfs_getxattr(ip, dkattrname.dan_chars, value,
+ alloc_size);
+ DM_EA_XLATE_ERR(bytes_read);
+ }
+ if (bytes_read < 0) {
+ if (alloc_size)
+ kfree(value);
+ DM_EA_XLATE_ERR(bytes_read);
+ return(bytes_read);
+ } else
+ value_len = bytes_read;
+
+ /* The attribute exists and has a value. Note that a value_len of
+ zero is valid!
+ */
+
+ if (value_len == 0) {
+ *vlenp = 0;
+ return(0);
+ }
+
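+	/* Cap what we hand back at DM_MAX_ATTR_BYTES_ON_DESTROY, the same
+	   limit jfs_dm_get_config() reports for DM_CONFIG_MAX_ATTR_ON_DESTROY. */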
+ if (!alloc_size) {
+ value = kmalloc(value_len, SLAB_KERNEL);
+ if (value == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return(-ENOMEM);
+ }
+ memcpy(value, buffer, value_len);
+ } else if (value_len > DM_MAX_ATTR_BYTES_ON_DESTROY) {
+ int value_len2 = DM_MAX_ATTR_BYTES_ON_DESTROY;
+ char *value2;
+
+ value2 = kmalloc(value_len2, SLAB_KERNEL);
+ if (value2 == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ kfree(value);
+ return(-ENOMEM);
+ }
+ memcpy(value2, value, value_len2);
+ kfree(value);
+ value = value2;
+ value_len = value_len2;
+ }
+ *vlenp = value_len;
+ *valuepp = value;
+ return(0);
+}
+
+
+STATIC int
+jfs_dm_get_dirattrs_rvp(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp, /* address of buffer in user space */
+ size_t *rlenp, /* user space address */
+ int *rvp)
+{
+ size_t direntbufsz, statbufsz;
+ size_t nread, spaceleft, nwritten=0, totnwritten=0;
+ void *direntp, *statbufp;
+ int error;
+ dm_attrloc_t loc, dirloc;
+ size_t offlastlink;
+	int *lastlink = NULL;
+	ulong dir_gen = 0;	/* must survive loop iterations for the
+				   generation check below to work */
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ if (mask & ~(DM_AT_HANDLE|DM_AT_EMASK|DM_AT_PMANR|DM_AT_PATTR|DM_AT_DTIME|DM_AT_CFLAG|DM_AT_STAT)) // XFS BUG #24
+ return(-EINVAL); // XFS BUG #24
+
+ if (copy_from_user( &loc, locp, sizeof(loc)))
+ return(-EFAULT);
+
+/* if ((buflen / DM_STAT_SIZE(mask, 2))== 0) {
+ if (put_user( DM_STAT_SIZE(mask, 2), rlenp ))
+ return(-EFAULT);
+ return(-E2BIG);
+ } XFS BUG #26 */
+
+ if ((ip->i_mode & S_IFMT) != S_IFDIR)
+ return(-ENOTDIR);
+
+ /*
+ * Don't get more dirents than are guaranteed to fit.
+ * The minimum that the stat buf holds is the buf size over
+ * maximum entry size. That times the minimum dirent size
+ * is an overly conservative size for the dirent buf.
+ */
+ statbufsz = PAGE_SIZE;
+ direntbufsz = (PAGE_SIZE / DM_STAT_SIZE(mask, JFS_NAME_MAX + 1))
+ * sizeof(struct jfs_dirent);
+
+ direntp = kmem_alloc(direntbufsz, KM_SLEEP);
+ statbufp = kmem_alloc(statbufsz, KM_SLEEP);
+ error = 0;
+ spaceleft = buflen;
+ /*
+ * Keep getting dirents until the ubuffer is packed with
+ * dm_stat structures.
+ */
+	do {
+
+ down(&ip->i_sem);
+
+ /* See if the directory was removed after it was opened. */
+ if (ip->i_nlink <= 0) {
+ up(&ip->i_sem);
+ error = -ENOENT;
+ break;
+ }
+ if (dir_gen == 0)
+ dir_gen = ip->i_generation;
+ else if (dir_gen != ip->i_generation) {
+ /* if dir changed, quit. May be overzealous... */
+ up(&ip->i_sem);
+ break;
+ }
+ dirloc = loc;
+ error = jfs_get_dirents(ip, direntp, direntbufsz,
+ (dm_off_t *)&dirloc, &nread);
+ up(&ip->i_sem);
+
+ if (error) {
+ break;
+ }
+ if (nread == 0)
+ break;
+ /*
+ * Now iterate thru them and call bulkstat_one() on all
+ * of them
+ */
+ error = jfs_dirents_to_stats(ip,
+ mask,
+ (struct jfs_dirent *) direntp,
+ statbufp,
+ &nread,
+ &spaceleft,
+ &nwritten,
+ (dm_off_t *)&loc,
+ &offlastlink);
+ if (error) {
+ break;
+ }
+
+ if (nwritten) {
+ if (copy_to_user( bufp, statbufp, nwritten)) {
+ error = -EFAULT;
+ break;
+ }
+
+ lastlink = (int *)((char *)bufp + offlastlink);
+ bufp = (char *)bufp + nwritten;
+ totnwritten += nwritten;
+ }
+
+ /*
+ * Done if dirents_to_stats unable to convert all entries
+ * returned by get_dirents
+ */
+ if (nread > 0)
+ break;
+ else
+ loc = dirloc;
+ } while (spaceleft && (dirloc != DIREND));
+
+ /*
+ * Need to terminate (set _link to 0) last entry if there's room for
+ * more (if we ran out of room in user buffer, dirents_to_stats set
+ * _link to 0 already)
+ */
+ if (!error && spaceleft && lastlink) {
+ int i = 0;
+ if (copy_to_user( lastlink, &i, sizeof(i))) {
+ error = -EFAULT;
+ }
+ }
+
+ /*
+ * If jfs_dirents_to_stats found anything, there might be more to do.
+ * If it didn't read anything, signal all done (rval == 0).
+ * (Doesn't matter either way if there was an error.)
+ */
+ if (nread || dirloc != DIREND) {
+ *rvp = 1;
+ } else {
+ loc = DIREND;
+ *rvp = 0;
+ }
+
+ kmem_free(statbufp, statbufsz);
+ kmem_free(direntp, direntbufsz);
+ if (!error){
+ if (put_user(totnwritten, rlenp))
+ return(-EFAULT);
+ }
+
+ if (!error && copy_to_user(locp, &loc, sizeof(loc)))
+ error = -EFAULT;
+ return(error);
+}
+
+
+STATIC int
+jfs_dm_get_dmattr(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_dkattrname_t name;
+ char *value;
+ int value_len;
+ int alloc_size;
+ int bytes_read;
+ int error;
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ if ((error = jfs_copyin_attrname(attrnamep, &name)) != 0)
+ return(error);
+
+ /* Allocate a buffer to receive the attribute's value. We allocate
+ at least one byte even if the caller specified a buflen of zero.
+ (A buflen of zero is considered valid.)
+ */
+
+	alloc_size = buflen;
+	if ((alloc_size <= 0) || (alloc_size > MAXEASIZE))
+		alloc_size = MAXEASIZE;
+ value = kmem_alloc(alloc_size, KM_SLEEP);
+
+ /* Get the attribute's value. */
+
+ value_len = alloc_size; /* in/out parameter */
+
+ bytes_read = __jfs_getxattr(ip, name.dan_chars, value, value_len);
+
+ /* Bump up buffer size and try again if user buffer too small */
+ if (bytes_read == -ERANGE) {
+ if (value != NULL)
+ kmem_free(value, alloc_size);
+
+ alloc_size = MAXEASIZE;
+ value = kmem_alloc(alloc_size, KM_SLEEP);
+
+ value_len = alloc_size; /* in/out parameter */
+ bytes_read = __jfs_getxattr(ip, name.dan_chars, value,
+ value_len);
+ }
+
+ if (bytes_read >= 0) {
+ error = 0;
+ value_len = bytes_read;
+ } else {
+ error = bytes_read;
+ DM_EA_XLATE_ERR(error);
+ }
+
+	/* DMAPI requires an errno of ENOENT if an attribute does not exist;
+	   DM_EA_XLATE_ERR() above already remapped ENODATA for us.
+	*/
+
+ if (!error && value_len > buflen)
+ error = -E2BIG;
+ if (!error && copy_to_user(bufp, value, value_len))
+ error = -EFAULT;
+ if (!error || error == -E2BIG) {
+ if (put_user(value_len, rlenp))
+ error = -EFAULT;
+ }
+
+ kmem_free(value, alloc_size);
+ return(error);
+}
+
+STATIC int
+jfs_dm_get_eventlist(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type,
+ u_int nelem,
+ dm_eventset_t *eventsetp,
+ u_int *nelemp)
+{
+ int error;
+
+ if (type == DM_FSYS_OBJ) {
+ error = jfs_dm_fs_get_eventlist(ip, right, nelem,
+ eventsetp, nelemp);
+ } else {
+ error = jfs_dm_f_get_eventlist(ip, right, nelem,
+ eventsetp, nelemp);
+ }
+ return(error);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_get_fileattr(
+ struct inode *ip,
+ dm_right_t right,
+	u_int		mask,		/* fields mask */
+ dm_stat_t *statp)
+{
+ dm_stat_t stat;
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ if (mask & ~(DM_AT_EMASK|DM_AT_PMANR|DM_AT_PATTR|DM_AT_DTIME|DM_AT_CFLAG|DM_AT_STAT)) // XFS BUG #22
+ return(-EINVAL); // XFS BUG #22
+
+// XFS BUG #23 START
+ /* don't update dtime if there are no DM attrs, and initialize dtime
+ field so user will see it didn't change as there is no error
+ indication returned */
+ if ((mask & DM_AT_DTIME) && (!jfs_dmattr_exist(ip))) {
+ mask = mask & (~DM_AT_DTIME);
+ if (copy_from_user(&stat.dt_dtime,
+ &statp->dt_dtime,
+ sizeof(stat.dt_dtime)))
+ return(-EFAULT);
+ }
+// XFS BUG #23 END
+
+ jfs_ip_to_stat(ip, mask, &stat);
+
+ if (copy_to_user( statp, &stat, sizeof(stat)))
+ return(-EFAULT);
+ return(0);
+}
+
+
+#ifdef DM_SUPPORT_ONE_MANAGED_REGION
+
+/* We currently only support a maximum of one managed region per file, and
+ use the DM_EVENT_READ, DM_EVENT_WRITE, and DM_EVENT_TRUNCATE events in
+ the file's dm_eventset_t event mask to implement the DM_REGION_READ,
+ DM_REGION_WRITE, and DM_REGION_TRUNCATE flags for that single region.
+*/
+
+STATIC int
+jfs_dm_get_region(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_region_t *regbufp,
+ u_int *nelemp)
+{
+ dm_eventset_t evmask;
+ dm_region_t region;
+ u_int elem;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ evmask = jfs_ip->dmattrs.da_dmevmask; /* read the mask "atomically" */
+
+ /* Get the file's current managed region flags out of the
+ dm_eventset_t mask and use them to build a managed region that
+ covers the entire file, i.e. set rg_offset and rg_size to zero.
+ */
+
+	memset((char *)&region, 0, sizeof(region));
+
+ if (evmask & (1 << DM_EVENT_READ))
+ region.rg_flags |= DM_REGION_READ;
+ if (evmask & (1 << DM_EVENT_WRITE))
+ region.rg_flags |= DM_REGION_WRITE;
+ if (evmask & (1 << DM_EVENT_TRUNCATE))
+ region.rg_flags |= DM_REGION_TRUNCATE;
+
+ elem = (region.rg_flags ? 1 : 0);
+
+ if (copy_to_user( nelemp, &elem, sizeof(elem)))
+ return(-EFAULT);
+ if (elem > nelem)
+ return(-E2BIG);
+	if (elem && copy_to_user(regbufp, &region, sizeof(region)))
+ return(-EFAULT);
+ return(0);
+}
+
+#else
+
+STATIC int
+jfs_dm_get_region(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_region_t *regbufp,
+ u_int *nelemp)
+{
+ u_int elem;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ elem = jfs_ip->dmnumrgns;
+
+ if (copy_to_user( nelemp, &elem, sizeof(elem)))
+ return(-EFAULT);
+ if (elem > nelem)
+ return(-E2BIG);
+ if (elem && copy_to_user(regbufp, jfs_ip->dmrgns,
+ elem * sizeof(dm_region_t)))
+ return(-EFAULT);
+ return(0);
+}
+#endif
+
+
+STATIC int
+jfs_dm_getall_dmattr(
+ struct inode *ip,
+ dm_right_t right,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_attrlist_t *ulist;
+ int *last_link;
+ int alignment;
+ int error;
+ int eabuf_size;
+ ssize_t req_size = 0;
+ struct jfs_ea_list *ealist;
+ struct jfs_ea *ea;
+ struct ea_buffer eabuf;
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ /* Verify that the user gave us a buffer that is 4-byte aligned, lock
+ it down, and work directly within that buffer. As a side-effect,
+ values of buflen < sizeof(int) return EINVAL.
+ */
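+	/* NOTE: like the XFS original, the list below is built with direct
+	   stores through the user pointer rather than copy_to_user(); without
+	   a working useracc() this relies on the buffer staying resident and
+	   writable. */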
+
+ alignment = sizeof(int) - 1;
+ if (((__psint_t)bufp & alignment) != 0) {
+ return(-EFAULT);
+ }
+ buflen &= ~alignment; /* round down the alignment */
+
+#if defined(HAVE_USERACC)
+ if ((error = useracc(bufp, buflen, B_READ, NULL)) != 0)
+ return error;
+#endif
+
+ /* Get a buffer full of attribute names. If there aren't any
+ more or if we encounter an error, then finish up.
+ */
+
+ /* This will block all get/set EAs */
+ down_read(&JFS_IP(ip)->xattr_sem);
+
+ /* Get all EAs */
+ eabuf_size = jfs_ea_get(ip, &eabuf, 0);
+
+ /* Quit if error occurred */
+ if (eabuf_size < 0) {
+ error = eabuf_size;
+ goto error_return;
+ }
+
+ /* Quit if no EAs exist */
+ if (eabuf_size == 0) {
+ error = req_size = 0;
+ goto error_return_release;
+ }
+
+ ealist = (struct jfs_ea_list *) eabuf.xattr;
+
+ /* compute required size of list */
+ req_size = 0;
+ for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
+ char *user_name;
+ int size_needed;
+
+ /* Skip over all non-DMAPI attributes. If the
+ attribute name is too long, we assume it is
+ non-DMAPI even if it starts with the correct
+ prefix.
+ */
+ if (strncmp(ea->name, dmattr_prefix, DMATTR_PREFIXLEN))
+ continue;
+ user_name = &ea->name[DMATTR_PREFIXLEN];
+ if (strlen(user_name) > DM_ATTR_NAME_SIZE)
+ continue;
+
+ /* We have a valid DMAPI attribute to return. If it
+ won't fit in the user's buffer, we still need to
+ keep track of the number of bytes for the user's
+ next call.
+ */
+ size_needed = sizeof(*ulist) + le16_to_cpu(ea->valuelen);
+ size_needed = (size_needed + alignment) & ~alignment;
+
+ req_size += size_needed;
+ }
+
+ /* Quit if no buffer for dm_attrlist or buffer too small */
+ if ((!bufp) || (req_size > buflen)) {
+ error = -E2BIG;
+ goto error_return_release;
+ }
+
+ /* copy contents into list */
+ ulist = (dm_attrlist_t *)bufp;
+ last_link = NULL;
+ for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
+ char *user_name;
+ int size_needed;
+
+ /* Skip over all non-DMAPI attributes. If the
+ attribute name is too long, we assume it is
+ non-DMAPI even if it starts with the correct
+ prefix.
+ */
+ if (strncmp(ea->name, dmattr_prefix, DMATTR_PREFIXLEN))
+ continue;
+ user_name = &ea->name[DMATTR_PREFIXLEN];
+ if (strlen(user_name) > DM_ATTR_NAME_SIZE)
+ continue;
+
+ /* We have a valid DMAPI attribute to return. If it
+ won't fit in the user's buffer, we still need to
+ keep track of the number of bytes for the user's
+ next call.
+ */
+ size_needed = sizeof(*ulist) + le16_to_cpu(ea->valuelen);
+ size_needed = (size_needed + alignment) & ~alignment;
+
+ strncpy((char *)ulist->al_name.an_chars, user_name,
+ DM_ATTR_NAME_SIZE);
+ ulist->al_data.vd_offset = sizeof(*ulist);
+ ulist->al_data.vd_length = le16_to_cpu(ea->valuelen);
+ ulist->_link = size_needed;
+ last_link = &ulist->_link;
+
+ /* Next read the attribute's value into its correct
+ location after the dm_attrlist structure. Any sort
+ of error indicates that the data is moving under us,
+ so we return EIO to let the user know.
+ */
+
+		/* sizeof(*ea), not sizeof(ea): skip the jfs_ea header
+		   (flag/namelen/valuelen) plus the name and its NUL */
+		memcpy((void *)(ulist + 1),
+		       (char *)ea + sizeof(*ea) + ea->namelen + 1,
+		       le16_to_cpu(ea->valuelen));
+
+ ulist = (dm_attrlist_t *)((char *)ulist + ulist->_link);
+ }
+
+ if (last_link)
+ *last_link = 0;
+ error = 0;
+
+error_return_release:
+ jfs_ea_release(ip, &eabuf);
+
+error_return:
+ /* This will unblock all get/set EAs */
+ up_read(&JFS_IP(ip)->xattr_sem);
+
+ if (!error || error == -E2BIG) {
+ if (put_user(req_size, rlenp))
+ error = -EFAULT;
+ }
+
+ return(error);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_getall_inherit(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_inherit_t *inheritbufp,
+ u_int *nelemp)
+{
+ return(-ENOSYS);
+}
+
+
+/* Initialize location pointer for subsequent dm_get_dirattrs,
+ dm_get_bulkattr, and dm_get_bulkall calls. The same initialization must
+ work for vnode-based routines (dm_get_dirattrs) and filesystem-based
+ routines (dm_get_bulkattr and dm_get_bulkall). Filesystem-based functions
+ call this routine using the filesystem's root vnode.
+*/
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_init_attrloc(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrloc_t *locp)
+{
+ dm_attrloc_t loc = 0;
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ if (copy_to_user( locp, &loc, sizeof(loc)))
+ return(-EFAULT);
+ return(0);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_mkdir_by_handle(
+ struct inode *ip,
+ dm_right_t right,
+ void *hanp,
+ size_t hlen,
+ char *cname)
+{
+ return(-ENOSYS);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_probe_hole(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+	dm_size_t	len,		/* length of region to probe */
+ dm_off_t *roffp,
+ dm_size_t *rlenp)
+{
+ dm_off_t roff;
+ dm_size_t rlen;
+ loff_t realsize;
+ u_int bsize;
+
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ if ((ip->i_mode & S_IFMT) != S_IFREG)
+ return(-EINVAL);
+
+ bsize = ip->i_sb->s_blocksize;
+
+ realsize = ip->i_size;
+
+ if ((off >= realsize) || (off + len > realsize)) // XFS BUG #8
+ return(-E2BIG);
+
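+	/* Round the hole start up and its end down to filesystem block
+	   boundaries; only whole blocks can actually be punched. */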
+	roff = (off + bsize-1) & ~((dm_off_t)bsize-1);	/* cast keeps the
+							   mask 64-bit */
+	if ((len == 0) || (off + len == realsize)) {
+		rlen = 0;
+	} else {
+		rlen = ((off + len) & ~((dm_off_t)bsize-1)) - roff;
+ if (rlen <= 0) /* hole doesn't exist! */
+ return(-EINVAL);
+ }
+
+ if (copy_to_user( roffp, &roff, sizeof(roff)))
+ return(-EFAULT);
+ if (copy_to_user( rlenp, &rlen, sizeof(rlen)))
+ return(-EFAULT);
+ return(0);
+}
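+
+/* Worked example for the rounding above (illustrative numbers): with
+   bsize == 4096, off == 1000 and len == 10000 on a large enough file,
+   the punchable hole is rounded inward to whole blocks:
+
+	roff = (1000 + 4095) & ~4095  = 4096
+	rend = (1000 + 10000) & ~4095 = 8192
+	rlen = 8192 - 4096            = 4096
+
+   i.e. one aligned block out of the ~10000 bytes requested.
+*/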
+
+
+STATIC int
+jfs_dm_punch_hole(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+ dm_size_t len)
+{
+ u_int bsize;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+ loff_t realsize;
+ tid_t tid;
+ int error = 0;
+#ifndef DM_SUPPORT_ONE_MANAGED_REGION
+ dm_region_t rgn = { .rg_offset = off, .rg_size = len };
+#endif
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ if (!S_ISREG(ip->i_mode))
+ return(-EINVAL);
+#ifdef DM_SUPPORT_ONE_MANAGED_REGION
+	if (prohibited_mr_events(ip))
+#else
+	if (prohibited_mr_events(ip, &rgn))
+#endif
+		return(-EBUSY);
+
+ IWRITE_LOCK(ip);
+
+ bsize = ip->i_sb->s_blocksize;
+
+ realsize = ip->i_size;
+
+ if ((off >= realsize) || (off + len > realsize)) {
+ IWRITE_UNLOCK(ip);
+ return(-E2BIG);
+ }
+
+ /* hole begin and end must be aligned on blocksize if not truncate */
+ if (off + len == realsize)
+ len = 0;
+ if ((off & (bsize-1)) || (len & (bsize-1))) {
+ IWRITE_UNLOCK(ip);
+ return(-EAGAIN);
+ }
+
+ tid = txBegin(ip->i_sb, 0);
+ down(&JFS_IP(ip)->commit_sem);
+ error = xtPunchHole(tid, ip, off, len, 0);
+ if (!error)
+ error = txCommit(tid, 1, &ip, 0);
+ else
+ txAbort(tid, 1);
+ txEnd(tid);
+ up(&JFS_IP(ip)->commit_sem);
+ IWRITE_UNLOCK(ip);
+
+ /* Let threads in send_data_event know we punched the file. */
+ if (!error)
+ jfs_ip->dmattrs.da_dmstate++;
+
+ return(error);
+}
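+
+/* The alignment rule above, by example (illustrative numbers): with
+   bsize == 4096 on a 16384-byte file, off == 4096 / len == 8192
+   punches the two middle blocks; off == 4096 / len == 12288 reaches
+   EOF, so len is treated as 0 and the file is truncated at 4096; and
+   off == 100 fails with EAGAIN because it is not block-aligned.
+*/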
+
+
+STATIC int
+jfs_dm_read_invis_rvp(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp)
+{
+ if (right < DM_RIGHT_SHARED)
+ return(-EACCES);
+
+ if (off > ip->i_size) // XFS BUG #9
+ return(-EINVAL); // XFS BUG #9
+
+ return(jfs_dm_rdwr(ip, 0, FMODE_READ, off, len, bufp, rvp));
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_release_right(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type) /* DM_FSYS_OBJ or zero */
+{
+#ifdef DEBUG_RIGHTS
+ char buffer[sizeof(jfs_handle_t) * 2 + 1];
+
+ if (!jfs_ip_to_hexhandle(ip, type, buffer)) {
+ printf("dm_release_right: old %d type %d handle %s\n",
+ right, type, buffer);
+ } else {
+ printf("dm_release_right: old %d type %d handle "
+ " <INVALID>\n", right, type);
+ }
+#endif /* DEBUG_RIGHTS */
+ return(0);
+}
+
+
+STATIC int
+jfs_dm_remove_dmattr(
+ struct inode *ip,
+ dm_right_t right,
+ int setdtime,
+ dm_attrname_t *attrnamep)
+{
+ dm_dkattrname_t name;
+ int error;
+ struct timespec save_ctime;
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ if ((error = jfs_copyin_attrname(attrnamep, &name)) != 0)
+ return(error);
+
+ if (!setdtime) {
+ save_ctime = ip->i_ctime;
+ }
+
+ /* Remove the attribute from the object. */
+
+ error = __jfs_setxattr(ip, name.dan_chars, 0, 0, XATTR_REPLACE);
+
+ if (!setdtime) {
+ ip->i_ctime = save_ctime;
+ mark_inode_dirty(ip);
+ }
+
+ /* Persistent attribute change */
+ if (!error && setdtime) {
+ ip->i_version++;
+ mark_inode_dirty(ip);
+ }
+
+ DM_EA_XLATE_ERR(error);
+
+ return(error);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_request_right(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type, /* DM_FSYS_OBJ or zero */
+ u_int flags,
+ dm_right_t newright)
+{
+#ifdef DEBUG_RIGHTS
+ char buffer[sizeof(jfs_handle_t) * 2 + 1];
+
+ if (!jfs_ip_to_hexhandle(ip, type, buffer)) {
+ printf("dm_request_right: old %d new %d type %d flags 0x%x "
+ "handle %s\n", right, newright, type, flags, buffer);
+ } else {
+ printf("dm_request_right: old %d new %d type %d flags 0x%x "
+ "handle <INVALID>\n", right, newright, type, flags);
+ }
+#endif /* DEBUG_RIGHTS */
+ return(0);
+}
+
+
+STATIC int
+jfs_dm_set_dmattr(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ int setdtime,
+ size_t buflen,
+ void *bufp)
+{
+ dm_dkattrname_t name;
+ char *value;
+ int alloc_size;
+ int error;
+ struct timespec save_ctime;
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ if ((error = jfs_copyin_attrname(attrnamep, &name)) != 0)
+ return(error);
+ if (buflen > MAXEASIZE)
+ return(-E2BIG);
+
+ /* Copy in the attribute's value and store the <name,value> pair in
+ the object. We allocate a buffer of at least one byte even if the
+ caller specified a buflen of zero. (A buflen of zero is considered
+ valid.)
+ */
+
+ alloc_size = (buflen == 0) ? 1 : buflen;
+ value = kmem_alloc(alloc_size, KM_SLEEP);
+ if (copy_from_user( value, bufp, buflen)) {
+ error = -EFAULT;
+ } else {
+ if (!setdtime) {
+ save_ctime = ip->i_ctime;
+ }
+
+ error = __jfs_setxattr(ip, name.dan_chars, value, buflen, 0);
+
+ if (!setdtime) {
+ ip->i_ctime = save_ctime;
+ mark_inode_dirty(ip);
+ }
+
+ DM_EA_XLATE_ERR(error);
+ }
+ kmem_free(value, alloc_size);
+
+ /* Persistent attribute change */
+ if (!error) {
+ ip->i_version++;
+ mark_inode_dirty(ip);
+ }
+
+ return(error);
+}
+
+STATIC int
+jfs_dm_set_eventlist(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int maxevent)
+{
+ int error;
+
+ if (type == DM_FSYS_OBJ) {
+ error = jfs_dm_fs_set_eventlist(ip, right, eventsetp, maxevent);
+ } else {
+ error = jfs_dm_f_set_eventlist(ip, right, eventsetp, maxevent);
+ }
+ return(error);
+}
+
+
+/*
+ * jfs_dm_setattr
+ */
+STATIC int
+jfs_dm_setattr(
+ struct inode *ip,
+ struct iattr *iap)
+{
+ int mask;
+ int code;
+ uid_t uid=0, iuid=0;
+ gid_t gid=0, igid=0;
+ int file_owner;
+
+ /*
+ * Cannot set certain attributes.
+ */
+ mask = iap->ia_valid;
+
+//Q /*
+//U * If disk quotas is on, we make sure that the dquots do exist on disk,
+//O * before we start any other transactions. Trying to do this later
+//T * is messy. We don't care to take a readlock to look at the ids
+//A * in inode here, because we can't hold it across the trans_reserve.
+// * If the IDs do change before we take the ilock, we're covered
+// * because the i_*dquot fields will get updated anyway.
+// */
+// if (XFS_IS_QUOTA_ON(mp) && (mask & (XFS_AT_UID|XFS_AT_GID))) {
+// uint qflags = 0;
+//
+// if (mask & XFS_AT_UID) {
+// uid = vap->va_uid;
+// qflags |= XFS_QMOPT_UQUOTA;
+// } else {
+// uid = ip->i_d.di_uid;
+// }
+// if (mask & XFS_AT_GID) {
+// gid = vap->va_gid;
+// qflags |= XFS_QMOPT_GQUOTA;
+// } else {
+// gid = ip->i_d.di_gid;
+// }
+// /*
+// * We take a reference when we initialize udqp and gdqp,
+// * so it is important that we never blindly double trip on
+// * the same variable. See xfs_create() for an example.
+// */
+// ASSERT(udqp == NULL);
+// ASSERT(gdqp == NULL);
+// code = XFS_QM_DQVOPALLOC(mp, ip, uid,gid, qflags, &udqp, &gdqp);
+// if (code)
+// return (code);
+// }
+
+ IWRITE_LOCK(ip);
+
+ /* boolean: are we the file owner? */
+ file_owner = (current->fsuid == ip->i_uid);
+
+ /*
+ * Change various properties of a file.
+ * Only the owner or users with CAP_FOWNER
+ * capability may do these things.
+ */
+ if (mask &
+ (ATTR_MODE|ATTR_UID|ATTR_GID)) {
+ /*
+ * CAP_FOWNER overrides the following restrictions:
+ *
+ * The user ID of the calling process must be equal
+ * to the file owner ID, except in cases where the
+ * CAP_FSETID capability is applicable.
+ */
+ if (!file_owner && !capable(CAP_FOWNER)) {
+ code = -EPERM;
+ goto error_return;
+ }
+
+ /*
+ * CAP_FSETID overrides the following restrictions:
+ *
+ * The effective user ID of the calling process shall match
+ * the file owner when setting the set-user-ID and
+ * set-group-ID bits on that file.
+ *
+ * The effective group ID or one of the supplementary group
+ * IDs of the calling process shall match the group owner of
+ * the file when setting the set-group-ID bit on that file
+ */
+ if (mask & ATTR_MODE) {
+ mode_t m = 0;
+
+ if ((iap->ia_mode & S_ISUID) && !file_owner)
+ m |= S_ISUID;
+ if ((iap->ia_mode & S_ISGID) &&
+ !in_group_p((gid_t)ip->i_gid))
+ m |= S_ISGID;
+ if (m && !capable(CAP_FSETID))
+ iap->ia_mode &= ~m;
+ }
+ }
+
+ /*
+ * Change file ownership. Must be the owner or privileged.
+ * If the system was configured with the "restricted_chown"
+ * option, the owner is not permitted to give away the file,
+ * and can change the group id only to a group of which he
+ * or she is a member.
+ */
+ if (mask & (ATTR_UID|ATTR_GID)) {
+ /*
+ * These IDs could have changed since we last looked at them.
+ * But, we're assured that if the ownership did change
+ * while we didn't have the inode locked, inode's dquot(s)
+ * would have changed also.
+ */
+ iuid = ip->i_uid;
+ igid = ip->i_gid;
+ gid = (mask & ATTR_GID) ? iap->ia_gid : igid;
+ uid = (mask & ATTR_UID) ? iap->ia_uid : iuid;
+
+//Q /*
+//U * Do a quota reservation only if uid or gid is actually
+//O * going to change.
+//T */
+//A if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
+// (XFS_IS_GQUOTA_ON(mp) && igid != gid)) {
+// ASSERT(tp);
+// code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
+// capable(CAP_FOWNER) ?
+// XFS_QMOPT_FORCE_RES : 0);
+// if (code) /* out of quota */
+// goto error_return;
+// }
+ }
+
+ /*
+ * Change file size. Must have write permission and not be a directory.
+ */
+ if (mask & ATTR_SIZE) {
+ if ((ip->i_mode & S_IFMT) == S_IFDIR) {
+ code = -EISDIR;
+ goto error_return;
+ } else if ((ip->i_mode & S_IFMT) != S_IFREG) {
+ code = -EINVAL;
+ goto error_return;
+ }
+
+//Q /*
+//U * Make sure that the dquots are attached to the inode.
+//O */
+//T if ((code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED)))
+//A goto error_return;
+ }
+
+ /*
+ * Change file access modes.
+ */
+ if (mask & ATTR_MODE) {
+ ip->i_mode &= S_IFMT;
+ ip->i_mode |= iap->ia_mode & ~S_IFMT;
+ }
+
+ /*
+ * Change file ownership. Must be the owner or privileged.
+ * If the system was configured with the "restricted_chown"
+ * option, the owner is not permitted to give away the file,
+ * and can change the group id only to a group of which he
+ * or she is a member.
+ */
+ if (mask & (ATTR_UID|ATTR_GID)) {
+ /*
+ * CAP_FSETID overrides the following restrictions:
+ *
+ * The set-user-ID and set-group-ID bits of a file will be
+ * cleared upon successful return from chown()
+ */
+ if ((ip->i_mode & (S_ISUID|S_ISGID)) &&
+ !capable(CAP_FSETID)) {
+ ip->i_mode &= ~(S_ISUID|S_ISGID);
+ }
+
+ /*
+ * Change the ownerships and register quota modifications
+ * in the transaction.
+ */
+ if (iuid != uid) {
+//Q if (XFS_IS_UQUOTA_ON(mp)) {
+//U ASSERT(mask & XFS_AT_UID);
+//O ASSERT(udqp);
+//T olddquot1 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
+//A &ip->i_udquot, udqp);
+// }
+ ip->i_uid = uid;
+ }
+ if (igid != gid) {
+//Q if (XFS_IS_GQUOTA_ON(mp)) {
+//U ASSERT(mask & XFS_AT_GID);
+//O ASSERT(gdqp);
+//T olddquot2 = XFS_QM_DQVOPCHOWN(mp, tp, ip,
+//A &ip->i_gdquot, gdqp);
+// }
+ ip->i_gid = gid;
+ }
+ }
+
+
+ /*
+ * Change file access or modified times.
+ */
+ if (mask & ATTR_ATIME) {
+ ip->i_atime = iap->ia_atime;
+ }
+ if (mask & ATTR_MTIME) {
+ ip->i_mtime = iap->ia_mtime;
+ }
+
+	/*
+	 * Change file inode change time only if ATTR_CTIME is set.
+	 * (This entry point is only reached through DMI functions,
+	 * which are allowed to set ctime explicitly.)
+	 */
+ if (mask & ATTR_CTIME) {
+ ip->i_ctime = iap->ia_ctime;
+ }
+
+ /*
+ * Change file size.
+ */
+
+	if (mask & ATTR_SIZE) {
+		loff_t old_size = ip->i_size;
+
+		ip->i_size = iap->ia_size;
+
+		if (iap->ia_size >= old_size) {
+			struct timespec curtime = CURRENT_TIME;
+			if (!(mask & ATTR_MTIME)) {
+				ip->i_mtime = curtime;
+			}
+			if (!(mask & ATTR_CTIME)) {
+				ip->i_ctime = curtime;
+			}
+			mark_inode_dirty(ip);
+		} else /* iap->ia_size < old_size */ {
+			nobh_truncate_page(ip->i_mapping, ip->i_size);
+			jfs_truncate_nolock(ip, ip->i_size);
+		}
+ } else
+ mark_inode_dirty(ip);
+
+ IWRITE_UNLOCK(ip);
+
+//Q /*
+//U * Release any dquot(s) the inode had kept before chown.
+//O */
+//T XFS_QM_DQRELE(mp, olddquot1);
+//A XFS_QM_DQRELE(mp, olddquot2);
+// XFS_QM_DQRELE(mp, udqp);
+// XFS_QM_DQRELE(mp, gdqp);
+
+ return 0;
+
+ error_return:
+//QUOTA XFS_QM_DQRELE(mp, udqp);
+// XFS_QM_DQRELE(mp, gdqp);
+ IWRITE_UNLOCK(ip);
+
+ return code;
+}
+
+
+/*
+ * This turned out not to be XFS-specific, but leave it here with
+ * get_fileattr.
+ */
+
+STATIC int
+jfs_dm_set_fileattr(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_fileattr_t *statp)
+{
+ dm_fileattr_t stat;
+ struct iattr at;
+ int error;
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ if (mask & ~(DM_AT_ATIME|DM_AT_MTIME|DM_AT_CTIME|DM_AT_DTIME|DM_AT_UID|DM_AT_GID|DM_AT_MODE|DM_AT_SIZE)) // XFS BUG #20
+ return(-EINVAL); // XFS BUG #20
+
+ if (copy_from_user( &stat, statp, sizeof(stat)))
+ return(-EFAULT);
+
+ at.ia_valid = 0;
+
+ if (mask & DM_AT_MODE) {
+ at.ia_valid |= ATTR_MODE;
+ at.ia_mode = stat.fa_mode;
+ }
+ if (mask & DM_AT_UID) {
+ at.ia_valid |= ATTR_UID;
+ at.ia_uid = stat.fa_uid;
+ }
+ if (mask & DM_AT_GID) {
+ at.ia_valid |= ATTR_GID;
+ at.ia_gid = stat.fa_gid;
+ }
+ if (mask & DM_AT_ATIME) {
+ at.ia_valid |= ATTR_ATIME;
+ at.ia_atime.tv_sec = stat.fa_atime;
+ at.ia_atime.tv_nsec = 0;
+ }
+ if (mask & DM_AT_MTIME) {
+ at.ia_valid |= ATTR_MTIME;
+ at.ia_mtime.tv_sec = stat.fa_mtime;
+ at.ia_mtime.tv_nsec = 0;
+ }
+ if (mask & DM_AT_CTIME) {
+ at.ia_valid |= ATTR_CTIME;
+ at.ia_ctime.tv_sec = stat.fa_ctime;
+ at.ia_ctime.tv_nsec = 0;
+ }
+
+ /* DM_AT_DTIME only takes effect if DM_AT_CTIME is not specified. We
+ overload ctime to also act as dtime, i.e. DM_CONFIG_DTIME_OVERLOAD.
+ */
+
+ if ((mask & DM_AT_DTIME) && jfs_dmattr_exist(ip) && !(mask & DM_AT_CTIME)) { // XFS BUG #21
+ at.ia_valid |= ATTR_CTIME;
+ at.ia_ctime.tv_sec = stat.fa_dtime;
+ at.ia_ctime.tv_nsec = 0;
+ }
+ if (mask & DM_AT_SIZE) {
+ at.ia_valid |= ATTR_SIZE;
+ at.ia_size = stat.fa_size;
+ }
+
+ error = jfs_dm_setattr(ip, &at);
+ return(error);
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_set_inherit(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ mode_t mode)
+{
+ return(-ENOSYS);
+}
+
+
+#ifdef DM_SUPPORT_ONE_MANAGED_REGION
+
+STATIC int
+jfs_dm_set_region(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_region_t *regbufp,
+ dm_boolean_t *exactflagp)
+{
+ dm_region_t region;
+ dm_eventset_t new_mask;
+ dm_eventset_t mr_mask;
+ u_int exactflag;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+	/* If the caller gave us more than one dm_region_t structure,
+	   complain.  (The caller has to use dm_get_config() to find out
+	   what our limit is.)
+	*/
+
+ if (nelem > 1)
+ return(-E2BIG);
+
+ /* If the user provided a dm_region_t structure, then copy it in,
+ validate it, and convert its flags to the corresponding bits in a
+ dm_set_eventlist() event mask. A call with zero regions is
+ equivalent to clearing all region flags.
+ */
+
+ new_mask = 0;
+ if (nelem == 1) {
+		if (copy_from_user( &region, regbufp, sizeof(region)))
+ return(-EFAULT);
+
+ if (region.rg_flags & ~(DM_REGION_READ|DM_REGION_WRITE|DM_REGION_TRUNCATE))
+ return(-EINVAL);
+ if (region.rg_flags & DM_REGION_READ)
+ new_mask |= 1 << DM_EVENT_READ;
+ if (region.rg_flags & DM_REGION_WRITE)
+ new_mask |= 1 << DM_EVENT_WRITE;
+ if (region.rg_flags & DM_REGION_TRUNCATE)
+ new_mask |= 1 << DM_EVENT_TRUNCATE;
+ }
+ if ((new_mask & prohibited_mr_events(ip)) != 0)
+ return(-EBUSY);
+ mr_mask = DM_JFS_VALID_REGION_EVENTS;
+
+ /* Get the file's existing event mask, clear the old managed region
+ bits, add in the new ones, and update the file's mask.
+ */
+
+ jfs_ip->dmattrs.da_dmevmask = (jfs_ip->dmattrs.da_dmevmask & ~mr_mask)
+ | new_mask;
+
+ igrab(ip);
+ mark_inode_dirty(ip);
+
+ /* Return the proper value for *exactflagp depending upon whether or not
+ we "changed" the user's managed region. In other words, if the user
+ specified a non-zero value for either rg_offset or rg_size, we
+ round each of those values back to zero.
+ */
+
+ if (nelem && (region.rg_offset || region.rg_size)) {
+ exactflag = DM_FALSE; /* user region was changed */
+ } else {
+ exactflag = DM_TRUE; /* user region was unchanged */
+ }
+ if (copy_to_user( exactflagp, &exactflag, sizeof(exactflag)))
+ return(-EFAULT);
+ return(0);
+}
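+
+/* Example of the exactflag semantics above (illustrative numbers): a
+   caller passing { rg_offset = 512, rg_size = 100, rg_flags =
+   DM_REGION_READ } gets DM_FALSE back, because this single-region
+   implementation always treats the entire file as the managed region
+   and so has effectively rounded both fields to zero.
+*/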
+
+#else
+
+STATIC int
+jfs_dm_set_region(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_region_t *regbufp,
+ dm_boolean_t *exactflagp)
+{
+ dm_eventset_t new_mask;
+ dm_eventset_t new_mrevmask = 0;
+ u_int exactflag;
+ u_int changeflag;
+ int size_array = 0;
+ dm_region_t *newrgns = NULL;
+ struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ if (nelem > MAX_MANAGED_REGIONS)
+ return(-E2BIG);
+
+ /* If the user provided dm_region_t structure(s), then copy in and
+ validate. A call with zero regions is equivalent to clearing all
+ regions.
+ */
+
+ if (nelem != 0) {
+ int i;
+ size_array = nelem * sizeof(dm_region_t);
+ newrgns = kmalloc(size_array, SLAB_KERNEL);
+
+ if (newrgns == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(newrgns, regbufp, size_array)) {
+ kfree(newrgns);
+ return(-EFAULT);
+ }
+
+ for (i = 0; i < nelem; i++) {
+ if (newrgns[i].rg_flags & ~(DM_REGION_READ|DM_REGION_WRITE|DM_REGION_TRUNCATE)) {
+ kfree(newrgns);
+ return(-EINVAL);
+ }
+
+ new_mask = 0;
+
+ /* No checking required if no events for region */
+ if (newrgns[i].rg_flags != DM_REGION_NOEVENT) {
+ if (newrgns[i].rg_flags & DM_REGION_READ)
+ new_mask |= 1 << DM_EVENT_READ;
+ if (newrgns[i].rg_flags & DM_REGION_WRITE)
+ new_mask |= 1 << DM_EVENT_WRITE;
+ if (newrgns[i].rg_flags & DM_REGION_TRUNCATE)
+ new_mask |= 1 << DM_EVENT_TRUNCATE;
+
+ if ((new_mask &
+ prohibited_mr_events(ip, &newrgns[i])) != 0) {
+ kfree(newrgns);
+ return(-EBUSY);
+ }
+ }
+
+ new_mrevmask |= new_mask;
+ }
+ }
+
+ /* Determine if regions are same */
+ if ((nelem == jfs_ip->dmnumrgns) &&
+ ((nelem == 0) ||
+ (memcmp(newrgns, jfs_ip->dmrgns, size_array) == 0))) {
+ changeflag = 0;
+ } else {
+ changeflag = 1;
+ }
+
+ jfs_ip->dmattrs.da_dmevmask =
+ (jfs_ip->dmattrs.da_dmevmask & ~DM_JFS_VALID_REGION_EVENTS)
+ | new_mrevmask;
+
+ jfs_ip->dmnumrgns = nelem;
+ if (jfs_ip->dmrgns) {
+ kfree(jfs_ip->dmrgns);
+ }
+ jfs_ip->dmrgns = newrgns;
+
+ mark_inode_dirty(ip);
+
+ /* We never change the user's managed region. */
+ exactflag = DM_TRUE;
+ if (copy_to_user( exactflagp, &exactflag, sizeof(exactflag)))
+ return(-EFAULT);
+
+ if (changeflag) {
+ int error = jfs_dm_write_pers_data(jfs_ip);
+
+ if (error) {
+ jfs_ip->dmnumrgns = 0;
+ if (jfs_ip->dmrgns) {
+ kfree(jfs_ip->dmrgns);
+ }
+ jfs_ip->dmrgns = NULL;
+
+ return(error);
+ }
+ }
+ return(0);
+}
+#endif
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_symlink_by_handle(
+ struct inode *ip,
+ dm_right_t right,
+ void *hanp,
+ size_t hlen,
+ char *cname,
+ char *path)
+{
+ return(-ENOSYS);
+}
+
+
+extern int jfs_commit_inode(struct inode *, int);
+
+STATIC int
+jfs_dm_sync_by_handle (
+ struct inode *ip,
+ dm_right_t right)
+{
+ int rc = 0;
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ /* The following is basically jfs_fsync, but since we don't have
+ * the file * or dentry * the code is repeated here.
+ */
+ if (!(ip->i_state & I_DIRTY)) {
+ jfs_flush_journal(JFS_SBI(ip->i_sb)->log, 1);
+ return rc;
+ }
+
+ rc |= jfs_commit_inode(ip, 1);
+
+ return rc ? -EIO : 0;
+}
+
+
+/* ARGSUSED */
+STATIC int
+jfs_dm_upgrade_right(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type) /* DM_FSYS_OBJ or zero */
+{
+#ifdef DEBUG_RIGHTS
+ char buffer[sizeof(jfs_handle_t) * 2 + 1];
+
+ if (!jfs_ip_to_hexhandle(ip, type, buffer)) {
+ printf("dm_upgrade_right: old %d new %d type %d handle %s\n",
+ right, DM_RIGHT_EXCL, type, buffer);
+ } else {
+ printf("dm_upgrade_right: old %d new %d type %d handle "
+ "<INVALID>\n", right, DM_RIGHT_EXCL, type);
+ }
+#endif /* DEBUG_RIGHTS */
+ return(0);
+}
+
+
+STATIC int
+jfs_dm_write_invis_rvp(
+ struct inode *ip,
+ dm_right_t right,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp)
+{
+ int fflag = 0;
+
+ if (right < DM_RIGHT_EXCL)
+ return(-EACCES);
+
+ if ((off > MAXFILESIZE) || (len > MAXFILESIZE) || (off > MAXFILESIZE - len))
+ return(-EFBIG);
+
+ if (flags & DM_WRITE_SYNC)
+ fflag |= O_SYNC;
+ return(jfs_dm_rdwr(ip, fflag, FMODE_WRITE, off, len, bufp, rvp));
+}
+
+
+STATIC void
+jfs_dm_obj_ref_hold(
+ struct inode *ip)
+{
+ struct inode *inode;
+
+ inode = igrab(ip);
+ ASSERT(inode);
+}
+
+
+STATIC fsys_function_vector_t jfs_fsys_vector[DM_FSYS_MAX];
+
+
+int
+jfs_dm_get_fsys_vector(
+ struct inode *ip,
+ caddr_t addr)
+{
+ static int initialized = 0;
+ dm_fcntl_vector_t *vecrq;
+ fsys_function_vector_t *vecp;
+ int i = 0;
+
+ vecrq = (dm_fcntl_vector_t *)addr;
+ vecrq->count =
+ sizeof(jfs_fsys_vector) / sizeof(jfs_fsys_vector[0]);
+ vecrq->vecp = jfs_fsys_vector;
+	vecrq->code_level = DM_CLVL_XOPEN;
+	if (initialized)
+		return(0);
+	vecp = jfs_fsys_vector;
+
+ vecp[i].func_no = DM_FSYS_CLEAR_INHERIT;
+ vecp[i++].u_fc.clear_inherit = jfs_dm_clear_inherit;
+ vecp[i].func_no = DM_FSYS_CREATE_BY_HANDLE;
+ vecp[i++].u_fc.create_by_handle = jfs_dm_create_by_handle;
+ vecp[i].func_no = DM_FSYS_DOWNGRADE_RIGHT;
+ vecp[i++].u_fc.downgrade_right = jfs_dm_downgrade_right;
+ vecp[i].func_no = DM_FSYS_GET_ALLOCINFO_RVP;
+ vecp[i++].u_fc.get_allocinfo_rvp = jfs_dm_get_allocinfo_rvp;
+ vecp[i].func_no = DM_FSYS_GET_BULKALL_RVP;
+ vecp[i++].u_fc.get_bulkall_rvp = jfs_dm_get_bulkall_rvp;
+ vecp[i].func_no = DM_FSYS_GET_BULKATTR_RVP;
+ vecp[i++].u_fc.get_bulkattr_rvp = jfs_dm_get_bulkattr_rvp;
+ vecp[i].func_no = DM_FSYS_GET_CONFIG;
+ vecp[i++].u_fc.get_config = jfs_dm_get_config;
+ vecp[i].func_no = DM_FSYS_GET_CONFIG_EVENTS;
+ vecp[i++].u_fc.get_config_events = jfs_dm_get_config_events;
+ vecp[i].func_no = DM_FSYS_GET_DESTROY_DMATTR;
+ vecp[i++].u_fc.get_destroy_dmattr = jfs_dm_get_destroy_dmattr;
+ vecp[i].func_no = DM_FSYS_GET_DIRATTRS_RVP;
+ vecp[i++].u_fc.get_dirattrs_rvp = jfs_dm_get_dirattrs_rvp;
+ vecp[i].func_no = DM_FSYS_GET_DMATTR;
+ vecp[i++].u_fc.get_dmattr = jfs_dm_get_dmattr;
+ vecp[i].func_no = DM_FSYS_GET_EVENTLIST;
+ vecp[i++].u_fc.get_eventlist = jfs_dm_get_eventlist;
+ vecp[i].func_no = DM_FSYS_GET_FILEATTR;
+ vecp[i++].u_fc.get_fileattr = jfs_dm_get_fileattr;
+ vecp[i].func_no = DM_FSYS_GET_REGION;
+ vecp[i++].u_fc.get_region = jfs_dm_get_region;
+ vecp[i].func_no = DM_FSYS_GETALL_DMATTR;
+ vecp[i++].u_fc.getall_dmattr = jfs_dm_getall_dmattr;
+ vecp[i].func_no = DM_FSYS_GETALL_INHERIT;
+ vecp[i++].u_fc.getall_inherit = jfs_dm_getall_inherit;
+ vecp[i].func_no = DM_FSYS_INIT_ATTRLOC;
+ vecp[i++].u_fc.init_attrloc = jfs_dm_init_attrloc;
+ vecp[i].func_no = DM_FSYS_MKDIR_BY_HANDLE;
+ vecp[i++].u_fc.mkdir_by_handle = jfs_dm_mkdir_by_handle;
+ vecp[i].func_no = DM_FSYS_PROBE_HOLE;
+ vecp[i++].u_fc.probe_hole = jfs_dm_probe_hole;
+ vecp[i].func_no = DM_FSYS_PUNCH_HOLE;
+ vecp[i++].u_fc.punch_hole = jfs_dm_punch_hole;
+ vecp[i].func_no = DM_FSYS_READ_INVIS_RVP;
+ vecp[i++].u_fc.read_invis_rvp = jfs_dm_read_invis_rvp;
+ vecp[i].func_no = DM_FSYS_RELEASE_RIGHT;
+ vecp[i++].u_fc.release_right = jfs_dm_release_right;
+ vecp[i].func_no = DM_FSYS_REMOVE_DMATTR;
+ vecp[i++].u_fc.remove_dmattr = jfs_dm_remove_dmattr;
+ vecp[i].func_no = DM_FSYS_REQUEST_RIGHT;
+ vecp[i++].u_fc.request_right = jfs_dm_request_right;
+ vecp[i].func_no = DM_FSYS_SET_DMATTR;
+ vecp[i++].u_fc.set_dmattr = jfs_dm_set_dmattr;
+ vecp[i].func_no = DM_FSYS_SET_EVENTLIST;
+ vecp[i++].u_fc.set_eventlist = jfs_dm_set_eventlist;
+ vecp[i].func_no = DM_FSYS_SET_FILEATTR;
+ vecp[i++].u_fc.set_fileattr = jfs_dm_set_fileattr;
+ vecp[i].func_no = DM_FSYS_SET_INHERIT;
+ vecp[i++].u_fc.set_inherit = jfs_dm_set_inherit;
+ vecp[i].func_no = DM_FSYS_SET_REGION;
+ vecp[i++].u_fc.set_region = jfs_dm_set_region;
+ vecp[i].func_no = DM_FSYS_SYMLINK_BY_HANDLE;
+ vecp[i++].u_fc.symlink_by_handle = jfs_dm_symlink_by_handle;
+ vecp[i].func_no = DM_FSYS_SYNC_BY_HANDLE;
+ vecp[i++].u_fc.sync_by_handle = jfs_dm_sync_by_handle;
+ vecp[i].func_no = DM_FSYS_UPGRADE_RIGHT;
+ vecp[i++].u_fc.upgrade_right = jfs_dm_upgrade_right;
+ vecp[i].func_no = DM_FSYS_WRITE_INVIS_RVP;
+ vecp[i++].u_fc.write_invis_rvp = jfs_dm_write_invis_rvp;
+ vecp[i].func_no = DM_FSYS_OBJ_REF_HOLD;
+ vecp[i++].u_fc.obj_ref_hold = jfs_dm_obj_ref_hold;
+
+	initialized = 1;
+
+	return(0);
+}
+
+
+/* jfs_dm_mapevent - send events needed for memory mapping a file.
+ *
+ * DMAPI events are not being generated at a low enough level
+ * in the kernel for page reads/writes to generate the correct events.
+ * So for memory-mapped files we generate read or write events for the
+ * whole byte range being mapped. If the mmap call can never cause a
+ * write to the file, then only a read event is sent.
+ *
+ * Code elsewhere prevents adding managed regions to a file while it
+ * is still mapped.
+ */
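+
+/* For example (illustrative numbers): mapping the first three pages of
+ * a managed file with PROT_WRITE, where DM_EVENT_WRITE is enabled,
+ * generates a single DM_EVENT_WRITE data event up front covering the
+ * whole mapped byte range (clipped to the file size), rather than an
+ * event per faulting page.
+ */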
+
+/* ARGSUSED */
+static int
+jfs_dm_mapevent(
+ struct inode *ip,
+ int flags,
+ loff_t offset,
+ dm_fcntl_mapevent_t *mapevp)
+{
+ loff_t filesize; /* event read/write "size" */
+ loff_t end_of_area, evsize;
+ struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+
+ /* exit immediately if not regular file in a DMAPI file system */
+
+ mapevp->error = 0; /* assume success */
+
+ if ((!S_ISREG(ip->i_mode)) || !(sbi->flag & JFS_DMI))
+ return 0;
+
+ if (mapevp->max_event != DM_EVENT_WRITE &&
+ mapevp->max_event != DM_EVENT_READ)
+ return 0;
+
+ /* Set file size to work with. */
+
+ filesize = ip->i_size;
+
+ /* Set first byte number beyond the map area. */
+
+ if (mapevp->length) {
+ end_of_area = offset + mapevp->length;
+ if (end_of_area > filesize)
+ end_of_area = filesize;
+ } else {
+ end_of_area = filesize;
+ }
+
+ /* Set the real amount being mapped. */
+ evsize = end_of_area - offset;
+ if (evsize < 0)
+ evsize = 0;
+
+ /* If write possible, try a DMAPI write event */
+ if (mapevp->max_event == DM_EVENT_WRITE &&
+ DM_EVENT_ENABLED (ip, DM_EVENT_WRITE)) {
+ mapevp->error = JFS_SEND_DATA(DM_EVENT_WRITE, ip,
+ offset, evsize, 0, NULL);
+ return(0);
+ }
+
+ /* Try a read event if max_event was != DM_EVENT_WRITE or if it
+ * was DM_EVENT_WRITE but the WRITE event was not enabled.
+ */
+ if (DM_EVENT_ENABLED (ip, DM_EVENT_READ)) {
+ mapevp->error = JFS_SEND_DATA(DM_EVENT_READ, ip,
+ offset, evsize, 0, NULL);
+ }
+
+ return 0;
+}
+
+int
+jfs_dm_send_mmap_event(
+ struct vm_area_struct *vma,
+ unsigned int wantflag)
+{
+ struct inode *ip;
+ int ret = 0;
+ struct jfs_sb_info *sbi;
+
+ dm_fcntl_mapevent_t maprq;
+ dm_eventtype_t max_event = DM_EVENT_READ;
+
+ if (!vma->vm_file)
+ return 0;
+
+ ip = vma->vm_file->f_dentry->d_inode;
+ ASSERT(ip);
+
+ sbi = JFS_SBI(ip->i_sb);
+
+ if ((!S_ISREG(ip->i_mode)) || !(sbi->flag & JFS_DMI))
+ return 0;
+
+ /* If they specifically asked for 'read', then give it to them.
+ * Otherwise, see if it's possible to give them 'write'.
+ */
+ if( wantflag & VM_READ ){
+ max_event = DM_EVENT_READ;
+ }
+ else if( ! (vma->vm_flags & VM_DENYWRITE) ) {
+ if((wantflag & VM_WRITE) || (vma->vm_flags & VM_WRITE))
+ max_event = DM_EVENT_WRITE;
+ }
+
+ if( (wantflag & VM_WRITE) && (max_event != DM_EVENT_WRITE) ){
+ return -EACCES;
+ }
+
+ maprq.max_event = max_event;
+
+ /* Figure out how much of the file is being requested by the user. */
+ maprq.length = vma->vm_end - vma->vm_start; // XFS BUG #33
+
+ if(DM_EVENT_ENABLED(ip, max_event)){
+ jfs_dm_mapevent(ip, 0, vma->vm_pgoff << ip->i_blkbits, &maprq); // XFS BUG #33
+ ret = maprq.error;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static struct {
+ unsigned int cmd;
+ int reg;
+} ioctl32_cmds[] = {
+ { .cmd = JFS_DM_IOC_CLEAR_INHERIT },
+ { .cmd = JFS_DM_IOC_CREATE_BY_HANDLE },
+ { .cmd = JFS_DM_IOC_CREATE_SESSION },
+ { .cmd = JFS_DM_IOC_CREATE_USEREVENT },
+ { .cmd = JFS_DM_IOC_DESTROY_SESSION },
+ { .cmd = JFS_DM_IOC_DOWNGRADE_RIGHT },
+ { .cmd = JFS_DM_IOC_FD_TO_HANDLE },
+ { .cmd = JFS_DM_IOC_FIND_EVENTMSG },
+ { .cmd = JFS_DM_IOC_GET_ALLOCINFO },
+ { .cmd = JFS_DM_IOC_GET_BULKALL },
+ { .cmd = JFS_DM_IOC_GET_BULKATTR },
+ { .cmd = JFS_DM_IOC_GET_CONFIG },
+ { .cmd = JFS_DM_IOC_GET_CONFIG_EVENTS },
+ { .cmd = JFS_DM_IOC_GET_DIRATTRS },
+ { .cmd = JFS_DM_IOC_GET_DMATTR },
+ { .cmd = JFS_DM_IOC_GET_EVENTLIST },
+ { .cmd = JFS_DM_IOC_GET_EVENTS },
+ { .cmd = JFS_DM_IOC_GET_FILEATTR },
+ { .cmd = JFS_DM_IOC_GET_MOUNTINFO },
+ { .cmd = JFS_DM_IOC_GET_REGION },
+ { .cmd = JFS_DM_IOC_GETALL_DISP },
+ { .cmd = JFS_DM_IOC_GETALL_DMATTR },
+ { .cmd = JFS_DM_IOC_GETALL_INHERIT },
+ { .cmd = JFS_DM_IOC_GETALL_SESSIONS },
+ { .cmd = JFS_DM_IOC_GETALL_TOKENS },
+ { .cmd = JFS_DM_IOC_INIT_ATTRLOC },
+ { .cmd = JFS_DM_IOC_MKDIR_BY_HANDLE },
+ { .cmd = JFS_DM_IOC_MOVE_EVENT },
+ { .cmd = JFS_DM_IOC_OBJ_REF_HOLD },
+ { .cmd = JFS_DM_IOC_OBJ_REF_QUERY },
+ { .cmd = JFS_DM_IOC_OBJ_REF_RELE },
+ { .cmd = JFS_DM_IOC_PATH_TO_FSHANDLE },
+ { .cmd = JFS_DM_IOC_PATH_TO_HANDLE },
+ { .cmd = JFS_DM_IOC_PENDING },
+ { .cmd = JFS_DM_IOC_PROBE_HOLE },
+ { .cmd = JFS_DM_IOC_PUNCH_HOLE },
+ { .cmd = JFS_DM_IOC_QUERY_RIGHT },
+ { .cmd = JFS_DM_IOC_QUERY_SESSION },
+ { .cmd = JFS_DM_IOC_READ_INVIS },
+ { .cmd = JFS_DM_IOC_RELEASE_RIGHT },
+ { .cmd = JFS_DM_IOC_REMOVE_DMATTR },
+ { .cmd = JFS_DM_IOC_REQUEST_RIGHT },
+ { .cmd = JFS_DM_IOC_RESPOND_EVENT },
+ { .cmd = JFS_DM_IOC_SEND_MSG },
+ { .cmd = JFS_DM_IOC_SET_DISP },
+ { .cmd = JFS_DM_IOC_SET_DMATTR },
+ { .cmd = JFS_DM_IOC_SET_EVENTLIST },
+ { .cmd = JFS_DM_IOC_SET_FILEATTR },
+ { .cmd = JFS_DM_IOC_SET_INHERIT },
+ { .cmd = JFS_DM_IOC_SET_REGION },
+ { .cmd = JFS_DM_IOC_SET_RETURN_ON_DESTROY },
+ { .cmd = JFS_DM_IOC_SYMLINK_BY_HANDLE },
+ { .cmd = JFS_DM_IOC_SYNC_BY_HANDLE },
+ { .cmd = JFS_DM_IOC_UPGRADE_RIGHT },
+ { .cmd = JFS_DM_IOC_WRITE_INVIS },
+ { .cmd = JFS_DM_IOC_OPEN_BY_HANDLE },
+ { .cmd = JFS_DM_IOC_HANDLE_TO_PATH },
+};
+#endif
+
+void __init
+jfs_dm_init(void)
+{
+#ifdef CONFIG_COMPAT
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(ioctl32_cmds); i++) {
+ err = register_ioctl32_conversion(ioctl32_cmds[i].cmd, NULL);
+ if (err >= 0)
+ ioctl32_cmds[i].reg++;
+ else
+ printk(KERN_ERR "jfs_dm_init: unable to register ioctl %x, err = %d\n", ioctl32_cmds[i].cmd, err);
+ }
+#endif
+}
+
+void __exit
+jfs_dm_exit(void)
+{
+#ifdef CONFIG_COMPAT
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ioctl32_cmds); i++) {
+ if (ioctl32_cmds[i].reg) {
+ unregister_ioctl32_conversion(ioctl32_cmds[i].cmd);
+ ioctl32_cmds[i].reg--;
+ }
+ }
+#endif
+}
+
+/*
+ * jfs_iget - called by DMAPI to get inode from file handle
+ */
+int
+jfs_iget(
+ struct super_block *sbp,
+ struct inode **ipp,
+ fid_t *fidp)
+{
+ jfs_fid_t *jfid;
+ struct inode *ip;
+ u32 ino;
+ unsigned int igen;
+
+ jfid = (struct jfs_fid *)fidp;
+ if (jfid->fid_len == 0) {
+ ino = ROOT_I;
+ igen = 0;
+ } else if (jfid->fid_len == sizeof(*jfid) - sizeof(jfid->fid_len)) {
+ ino = jfid->fid_ino;
+ igen = jfid->fid_gen;
+ } else {
+ /*
+ * Invalid. Since handles can be created in user space
+ * and passed in via gethandle(), this is not cause for
+ * a panic.
+ */
+ return -EINVAL;
+ }
+
+ ip = iget(sbp, ino);
+ if (!ip) {
+ *ipp = NULL;
+ return -EIO;
+ }
+
+ if (is_bad_inode(ip)) {
+ iput(ip);
+ *ipp = NULL;
+ return -EIO;
+ }
+
+ if ((ip->i_mode & S_IFMT) == 0 ||
+ (igen && (ip->i_generation != igen)) ||
+ (ip->i_nlink == 0)) {
+ iput(ip);
+ *ipp = NULL;
+ return -ENOENT;
+ }
+
+ *ipp = ip;
+ return 0;
+}
+
+
+/*
+ * jfs_dm_read_pers_data - called by JFS to get DMAPI persistent data from
+ * extended data and copy into inode
+ */
+int
+jfs_dm_read_pers_data(
+ struct jfs_inode_info *jfs_ip)
+{
+#ifndef DM_SUPPORT_ONE_MANAGED_REGION
+ ssize_t size;
+
+ /* See if there are any managed regions */
+ size = __jfs_getxattr(&jfs_ip->vfs_inode, DMATTR_PERS_REGIONS,
+ NULL, 0);
+
+ /* Initialize any managed regions */
+ if (size > 0) {
+ ssize_t bytes_read;
+
+ if ((size % sizeof(dm_region_t)) != 0) {
+ jfs_ip->dmnumrgns = 0;
+ jfs_ip->dmrgns = NULL;
+ return -EINVAL;
+ }
+
+ jfs_ip->dmnumrgns = size / sizeof(dm_region_t);
+
+ jfs_ip->dmrgns = kmalloc(size, SLAB_KERNEL);
+ if (jfs_ip->dmrgns == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ jfs_ip->dmnumrgns = 0;
+ return -ENOMEM;
+ }
+
+ bytes_read = __jfs_getxattr(&jfs_ip->vfs_inode, DMATTR_PERS_REGIONS,
+ jfs_ip->dmrgns, size);
+
+ if (bytes_read != size) {
+ jfs_ip->dmnumrgns = 0;
+ kfree(jfs_ip->dmrgns);
+ jfs_ip->dmrgns = NULL;
+ return -ENOMEM;
+ } else {
+ int i;
+ for (i = 0; i < jfs_ip->dmnumrgns; i++) {
+ jfs_ip->dmattrs.da_dmevmask |= (jfs_ip->dmrgns[i].rg_flags << REGION_MASK_TO_EVENT_MASK);
+ }
+ }
+ } else {
+ jfs_ip->dmnumrgns = 0;
+ jfs_ip->dmrgns = NULL;
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * jfs_dm_write_pers_data - called by JFS to get DMAPI persistent data from
+ * inode and copy into extended data
+ */
+int
+jfs_dm_write_pers_data(
+ struct jfs_inode_info *jfs_ip)
+{
+ int error = 0;
+
+#ifndef DM_SUPPORT_ONE_MANAGED_REGION
+ /* Save or clear any managed regions */
+ if (jfs_ip->dmnumrgns) {
+ error = __jfs_setxattr(&jfs_ip->vfs_inode, DMATTR_PERS_REGIONS,
+ jfs_ip->dmrgns,
+ jfs_ip->dmnumrgns * sizeof(dm_region_t), 0);
+ } else {
+ error = __jfs_setxattr(&jfs_ip->vfs_inode, DMATTR_PERS_REGIONS,
+ 0, 0, XATTR_REPLACE);
+ }
+#endif
+
+ return error;
+}
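+
+/* The persistent format is deliberately simple: the DMATTR_PERS_REGIONS
+ * extended attribute holds a packed array of dm_region_t structures, so
+ * for example two managed regions occupy exactly 2 * sizeof(dm_region_t)
+ * bytes, written and re-read verbatim by the two routines above.
+ */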
+
+#if 0
+/* This strategy behind this doesn't work because the name of the JFS mount
+ * point is not yet in the namespace.
+ */
+struct vfsmount *
+jfs_find_vfsmount(struct dentry *d)
+{
+ struct dentry *root = dget(d->d_sb->s_root);
+ struct namespace *ns = current->namespace;
+ struct list_head *head;
+ struct vfsmount *mnt = NULL;
+
+ down_read(&ns->sem);
+ list_for_each(head, &ns->list) {
+ mnt = list_entry(head, struct vfsmount, mnt_list);
+ jfs_info("jfs_find_vfsmount: found %s\n", mnt->mnt_devname);
+ if (mnt->mnt_root == root) {
+ mntget(mnt);
+ break;
+ } else {
+ mnt = NULL;
+ }
+ }
+ up_read(&ns->sem);
+ dput(root);
+ return mnt;
+}
+#endif
+
+int
+jfs_dm_mount(struct super_block *sb)
+{
+ int rc = 0;
+ char *name = JFS_SBI(sb)->dm_mtpt;
+ char b[BDEVNAME_SIZE];
+ /* char *name = (char *)sb->s_root->d_name.name; */
+ /* struct vfsmount *mnt = jfs_find_vfsmount(sb->s_root); */
+
+ rc = dm_send_mount_event(sb, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, sb->s_root->d_inode,
+ DM_RIGHT_NULL, name, (char *)__bdevname(sb->s_dev, b));
+
+ /* Needed because s_root is set to null before preunmount/unmount */
+ if (!rc)
+ JFS_SBI(sb)->dm_root = sb->s_root->d_inode;
+
+ return rc;
+}
+
+int
+jfs_dm_preunmount(struct super_block *sb)
+{
+ struct jfs_sb_info *sbi = JFS_SBI(sb);
+
+ return JFS_SEND_NAMESP(DM_EVENT_PREUNMOUNT, sbi->dm_root,
+ DM_RIGHT_NULL, sbi->dm_root, DM_RIGHT_NULL,
+ NULL, NULL, 0, 0,
+ ((sbi->dm_evmask & (1<<DM_EVENT_PREUNMOUNT)) ?
+ 0 : DM_FLAGS_UNWANTED) |
+ ((sbi->mntflag & JFS_UNMOUNT_FORCE) ?
+ DM_UNMOUNT_FORCE : 0));
+}
+
+void
+jfs_dm_unmount(struct super_block *sb, int rc)
+{
+ struct jfs_sb_info *sbi = JFS_SBI(sb);
+
+ JFS_SEND_UNMOUNT(sb, rc == 0 ? sbi->dm_root : NULL,
+ DM_RIGHT_NULL, 0, rc,
+ ((sbi->dm_evmask & (1<<DM_EVENT_UNMOUNT)) ?
+ 0 : DM_FLAGS_UNWANTED) |
+ ((sbi->mntflag & JFS_UNMOUNT_FORCE) ?
+ DM_UNMOUNT_FORCE : 0));
+}
+
+void
+jfs_dm_umount_begin(struct super_block *sb)
+{
+ /* just remember that this is a forced unmount */
+ if (JFS_SBI(sb)->flag & JFS_DMI) {
+ JFS_SBI(sb)->mntflag |= JFS_UNMOUNT_FORCE;
+
+ jfs_dm_preunmount(sb);
+ }
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_jfs.h linux-jfs-dmapi/fs/jfs/dmapi/dmapi_jfs.h
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_jfs.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_jfs.h 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2000-2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "sv.h"
+#include "spin.h"
+#include "kmem.h"
+#include <asm/uaccess.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+
+#ifndef __DMAPI_JFS_H__
+#define __DMAPI_JFS_H__
+
+typedef struct {
+ __u32 val[2]; /* file system id type */
+} jfs_fsid_t;
+
+#ifndef MAXFIDSZ
+#define MAXFIDSZ 46
+typedef struct fid {
+ __u16 fid_len; /* length of data in bytes */
+ unsigned char fid_data[MAXFIDSZ]; /* data (variable length) */
+} fid_t;
+#endif
+
+typedef struct jfs_fid {
+ __u16 fid_len; /* length of remainder */
+ __u16 fid_pad; /* padding, must be zero */
+ __u32 fid_gen; /* generation number, dm_igen_t */
+ __u64 fid_ino; /* inode number, dm_ino_t */
+} jfs_fid_t;
+
+typedef struct jfs_handle {
+ union {
+ __s64 align; /* force alignment of ha_fid */
+ jfs_fsid_t _ha_fsid; /* unique file system identifier */
+ } ha_u;
+ jfs_fid_t ha_fid; /* file system specific file ID */
+} jfs_handle_t;
+#define ha_fsid ha_u._ha_fsid
+
+#define JFS_NAME "jfs"
+
+/* __psint_t is the same size as a pointer */
+#if (BITS_PER_LONG == 32)
+typedef __s32 __psint_t;
+typedef __u32 __psunsigned_t;
+#elif (BITS_PER_LONG == 64)
+typedef __s64 __psint_t;
+typedef __u64 __psunsigned_t;
+#else
+#error BITS_PER_LONG must be 32 or 64
+#endif
+
+#define JFS_HSIZE(handle) (((char *) &(handle).ha_fid.fid_pad \
+ - (char *) &(handle)) \
+ + (handle).ha_fid.fid_len)
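+
+/* e.g. for a fully populated handle (fid_len == sizeof(jfs_fid_t) -
+   sizeof(__u16) == 14), JFS_HSIZE is the offset of fid_pad within the
+   handle (8-byte ha_u plus 2-byte fid_len == 10) plus those 14 bytes,
+   i.e. a 24-byte handle. */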
+
+#define JFS_HANDLE_CMP(h1, h2) memcmp(h1, h2, sizeof(jfs_handle_t))
+
+#define FSHSIZE sizeof(fsid_t)
+
+#define FINVIS 0x0100 /* don't update timestamps */
+
+#define IP_IS_JFS(ip) ((ip)->i_sb->s_magic == 0x3153464a /*JFS_SUPER_MAGIC*/)
+
+typedef struct dm_attrs_s {
+ __u32 da_dmevmask; /* DMIG event mask */
+ __u16 da_dmstate; /* DMIG state info */
+ __u16 da_pad; /* DMIG extra padding */
+} dm_attrs_t;
+
+int jfs_iget(
+ struct super_block *sbp,
+ struct inode **ipp,
+ fid_t *fidp);
+
+int jfs_dm_mount(struct super_block *sp);
+int jfs_dm_preunmount(struct super_block *sp);
+void jfs_dm_unmount(struct super_block *sp, int rc);
+#endif
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_kern.h linux-jfs-dmapi/fs/jfs/dmapi/dmapi_kern.h
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_kern.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_kern.h 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,671 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#ifndef __DMAPI_KERN_H__
+#define __DMAPI_KERN_H__
+
+
+union sys_dmapi_uarg {
+ void *p;
+ __u64 u;
+};
+typedef union sys_dmapi_uarg sys_dmapi_u;
+
+struct sys_dmapi_args {
+ sys_dmapi_u uarg1, uarg2, uarg3, uarg4, uarg5, uarg6, uarg7, uarg8,
+ uarg9, uarg10, uarg11;
+};
+typedef struct sys_dmapi_args sys_dmapi_args_t;
+
+#define DM_Uarg(uap,i) uap->uarg##i.u
+#define DM_Parg(uap,i) uap->uarg##i.p
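+
+/* e.g. DM_Uarg(uap, 3) expands to uap->uarg3.u and DM_Parg(uap, 3) to
+   uap->uarg3.p, via token pasting on the argument index. */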
+
+
+#ifdef __KERNEL__
+
+/* The first group of definitions and prototypes define the filesystem's
+ interface into the DMAPI code.
+*/
+
+
+/* Definitions used for the flags field on dm_send_data_event(),
+ dm_send_unmount_event(), and dm_send_namesp_event() calls.
+*/
+
+#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
+#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
+
+/* Possible code levels reported by dm_code_level(). */
+
+#define DM_CLVL_INIT 0 /* DMAPI prior to X/Open compliance */
+#define DM_CLVL_XOPEN 1 /* X/Open compliant DMAPI */
+
+
+/* Prototypes used outside of the DMI module/directory. */
+
+int dm_send_data_event(
+ dm_eventtype_t event,
+ struct inode *ip,
+ dm_right_t ip_right,
+ dm_off_t off,
+ size_t len,
+ int flags);
+
+int dm_send_destroy_event(
+ struct inode *ip,
+ dm_right_t ip_right);
+
+int dm_send_mount_event(
+ struct super_block *sbp,
+ dm_right_t sbp_right,
+ struct inode *ip,
+ dm_right_t ip_right,
+ struct inode *rootip,
+ dm_right_t rootip_right,
+ char *name1,
+ char *name2);
+
+int dm_send_namesp_event(
+ dm_eventtype_t event,
+ struct inode *ip1,
+ dm_right_t ip1_right,
+ struct inode *ip2,
+ dm_right_t ip2_right,
+ char *name1,
+ char *name2,
+ mode_t mode,
+ int retcode,
+ int flags);
+
+void dm_send_unmount_event(
+ struct super_block *sbp,
+ struct inode *ip,
+ dm_right_t sbp_right,
+ mode_t mode,
+ int retcode,
+ int flags);
+
+int dm_code_level(void);
+
+int dm_ip_to_handle (
+ struct inode *ip,
+ jfs_handle_t *handlep);
+
+/* The following prototypes and definitions are used by DMAPI as its
+ interface into the filesystem code. Communication between DMAPI and the
+ filesystem are established as follows:
+ 1. DMAPI uses the DMAPI_FSYS_VECTOR to ask for the addresses
+ of all the functions within the filesystem that it may need to call.
+ 2. The filesystem returns an array of function name/address pairs which
+ DMAPI builds into a function vector.
+ The DMAPI_FSYS_VECTOR call is only made one time for a particular
+ filesystem type. From then on, DMAPI uses its function vector to call the
+ filesystem functions directly. Functions in the array which DMAPI doesn't
+ recognize are ignored. A dummy function which returns ENOSYS is used for
+ any function that DMAPI needs but which was not provided by the filesystem.
+   If JFS doesn't recognize the DMAPI_FSYS_VECTOR, DMAPI assumes that it
+   doesn't have the X/Open support code; in this case DMAPI uses the JFS
+   code originally bundled within DMAPI.
+
+   The goal of this interface is to allow incremental changes to be made
+   to both the filesystem and to DMAPI while minimizing inter-patch
+   dependencies, and to eventually allow DMAPI to support multiple
+   filesystem types at the same time should that become necessary.
+*/
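+
+/* A minimal sketch of the handshake described above (illustrative; the
+   variable names are stand-ins, not the real DMAPI internals):
+
+	dm_fcntl_vector_t vecrq;
+	int i;
+
+	jfs_dm_get_fsys_vector(ip, (caddr_t)&vecrq);
+
+	for (i = 0; i < vecrq.count; i++) {
+		switch (vecrq.vecp[i].func_no) {
+		case DM_FSYS_GET_FILEATTR:
+			fsys_vector->get_fileattr =
+				vecrq.vecp[i].u_fc.get_fileattr;
+			break;
+		... one case per dm_fsys_switch_t value; unrecognized
+	 	    entries are ignored, and slots the filesystem never
+		    fills keep a dummy that returns ENOSYS ...
+		}
+	}
+*/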
+
+typedef enum {
+ DM_FSYS_CLEAR_INHERIT = 0,
+ DM_FSYS_CREATE_BY_HANDLE = 1,
+ DM_FSYS_DOWNGRADE_RIGHT = 2,
+ DM_FSYS_GET_ALLOCINFO_RVP = 3,
+ DM_FSYS_GET_BULKALL_RVP = 4,
+ DM_FSYS_GET_BULKATTR_RVP = 5,
+ DM_FSYS_GET_CONFIG = 6,
+ DM_FSYS_GET_CONFIG_EVENTS = 7,
+ DM_FSYS_GET_DESTROY_DMATTR = 8,
+ DM_FSYS_GET_DIRATTRS_RVP = 9,
+ DM_FSYS_GET_DMATTR = 10,
+ DM_FSYS_GET_EVENTLIST = 11,
+ DM_FSYS_GET_FILEATTR = 12,
+ DM_FSYS_GET_REGION = 13,
+ DM_FSYS_GETALL_DMATTR = 14,
+ DM_FSYS_GETALL_INHERIT = 15,
+ DM_FSYS_INIT_ATTRLOC = 16,
+ DM_FSYS_MKDIR_BY_HANDLE = 17,
+ DM_FSYS_PROBE_HOLE = 18,
+ DM_FSYS_PUNCH_HOLE = 19,
+ DM_FSYS_READ_INVIS_RVP = 20,
+ DM_FSYS_RELEASE_RIGHT = 21,
+ DM_FSYS_REMOVE_DMATTR = 22,
+ DM_FSYS_REQUEST_RIGHT = 23,
+ DM_FSYS_SET_DMATTR = 24,
+ DM_FSYS_SET_EVENTLIST = 25,
+ DM_FSYS_SET_FILEATTR = 26,
+ DM_FSYS_SET_INHERIT = 27,
+ DM_FSYS_SET_REGION = 28,
+ DM_FSYS_SYMLINK_BY_HANDLE = 29,
+ DM_FSYS_SYNC_BY_HANDLE = 30,
+ DM_FSYS_UPGRADE_RIGHT = 31,
+ DM_FSYS_WRITE_INVIS_RVP = 32,
+ DM_FSYS_OBJ_REF_HOLD = 33,
+ DM_FSYS_MAX = 34
+} dm_fsys_switch_t;
+
+
+#define DM_FSYS_OBJ 0x1 /* object refers to a fsys handle */
+
+
+/*
+ * Prototypes for filesystem-specific functions.
+ */
+
+typedef int (*dm_fsys_clear_inherit_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep);
+
+typedef int (*dm_fsys_create_by_handle_t)(
+ struct inode *ip,
+ dm_right_t right,
+ void *hanp,
+ size_t hlen,
+ char *cname);
+
+typedef int (*dm_fsys_downgrade_right_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type); /* DM_FSYS_OBJ or zero */
+
+typedef int (*dm_fsys_get_allocinfo_rvp_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t *offp,
+ u_int nelem,
+ dm_extent_t *extentp,
+ u_int *nelemp,
+ int *rvalp);
+
+typedef int (*dm_fsys_get_bulkall_rvp_t)(
+ struct inode *ip, /* root inode */
+ dm_right_t right,
+ u_int mask,
+ dm_attrname_t *attrnamep,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvalp);
+
+typedef int (*dm_fsys_get_bulkattr_rvp_t)(
+ struct inode *ip, /* root inode */
+ dm_right_t right,
+ u_int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvalp);
+
+typedef int (*dm_fsys_get_config_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_config_t flagname,
+ dm_size_t *retvalp);
+
+typedef int (*dm_fsys_get_config_events_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_eventset_t *eventsetp,
+ u_int *nelemp);
+
+typedef int (*dm_fsys_get_destroy_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ char **valuepp,
+ int *vlenp);
+
+typedef int (*dm_fsys_get_dirattrs_rvp_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvalp);
+
+typedef int (*dm_fsys_get_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+typedef int (*dm_fsys_get_eventlist_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type,
+ u_int nelem,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int *nelemp); /* in kernel space! */
+
+typedef int (*dm_fsys_get_fileattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_stat_t *statp);
+
+typedef int (*dm_fsys_get_region_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_region_t *regbufp,
+ u_int *nelemp);
+
+typedef int (*dm_fsys_getall_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+typedef int (*dm_fsys_getall_inherit_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_inherit_t *inheritbufp,
+ u_int *nelemp);
+
+typedef int (*dm_fsys_init_attrloc_t)(
+ struct inode *ip, /* sometimes root inode */
+ dm_right_t right,
+ dm_attrloc_t *locp);
+
+typedef int (*dm_fsys_mkdir_by_handle_t)(
+ struct inode *ip,
+ dm_right_t right,
+ void *hanp,
+ size_t hlen,
+ char *cname);
+
+typedef int (*dm_fsys_probe_hole_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+ dm_size_t len,
+ dm_off_t *roffp,
+ dm_size_t *rlenp);
+
+typedef int (*dm_fsys_punch_hole_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+ dm_size_t len);
+
+typedef int (*dm_fsys_read_invis_rvp_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp);
+
+typedef int (*dm_fsys_release_right_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type);
+
+typedef int (*dm_fsys_remove_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ int setdtime,
+ dm_attrname_t *attrnamep);
+
+typedef int (*dm_fsys_request_right_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type, /* DM_FSYS_OBJ or zero */
+ u_int flags,
+ dm_right_t newright);
+
+typedef int (*dm_fsys_set_dmattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ int setdtime,
+ size_t buflen,
+ void *bufp);
+
+typedef int (*dm_fsys_set_eventlist_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type,
+ dm_eventset_t *eventsetp, /* in kernel space! */
+ u_int maxevent);
+
+typedef int (*dm_fsys_set_fileattr_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int mask,
+ dm_fileattr_t *attrp);
+
+typedef int (*dm_fsys_set_inherit_t)(
+ struct inode *ip,
+ dm_right_t right,
+ dm_attrname_t *attrnamep,
+ mode_t mode);
+
+typedef int (*dm_fsys_set_region_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int nelem,
+ dm_region_t *regbufp,
+ dm_boolean_t *exactflagp);
+
+typedef int (*dm_fsys_symlink_by_handle_t)(
+ struct inode *ip,
+ dm_right_t right,
+ void *hanp,
+ size_t hlen,
+ char *cname,
+ char *path);
+
+typedef int (*dm_fsys_sync_by_handle_t)(
+ struct inode *ip,
+ dm_right_t right);
+
+typedef int (*dm_fsys_upgrade_right_t)(
+ struct inode *ip,
+ dm_right_t right,
+ u_int type); /* DM_FSYS_OBJ or zero */
+
+typedef int (*dm_fsys_write_invis_rvp_t)(
+ struct inode *ip,
+ dm_right_t right,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp);
+
+typedef void (*dm_fsys_obj_ref_hold_t)(
+ struct inode *ip);
+
+
+/* Structure definitions used by the DMAPI_FSYS_VECTOR call. */
+
+typedef struct {
+ dm_fsys_switch_t func_no; /* function number */
+ union {
+ dm_fsys_clear_inherit_t clear_inherit;
+ dm_fsys_create_by_handle_t create_by_handle;
+ dm_fsys_downgrade_right_t downgrade_right;
+ dm_fsys_get_allocinfo_rvp_t get_allocinfo_rvp;
+ dm_fsys_get_bulkall_rvp_t get_bulkall_rvp;
+ dm_fsys_get_bulkattr_rvp_t get_bulkattr_rvp;
+ dm_fsys_get_config_t get_config;
+ dm_fsys_get_config_events_t get_config_events;
+ dm_fsys_get_destroy_dmattr_t get_destroy_dmattr;
+ dm_fsys_get_dirattrs_rvp_t get_dirattrs_rvp;
+ dm_fsys_get_dmattr_t get_dmattr;
+ dm_fsys_get_eventlist_t get_eventlist;
+ dm_fsys_get_fileattr_t get_fileattr;
+ dm_fsys_get_region_t get_region;
+ dm_fsys_getall_dmattr_t getall_dmattr;
+ dm_fsys_getall_inherit_t getall_inherit;
+ dm_fsys_init_attrloc_t init_attrloc;
+ dm_fsys_mkdir_by_handle_t mkdir_by_handle;
+ dm_fsys_probe_hole_t probe_hole;
+ dm_fsys_punch_hole_t punch_hole;
+ dm_fsys_read_invis_rvp_t read_invis_rvp;
+ dm_fsys_release_right_t release_right;
+ dm_fsys_remove_dmattr_t remove_dmattr;
+ dm_fsys_request_right_t request_right;
+ dm_fsys_set_dmattr_t set_dmattr;
+ dm_fsys_set_eventlist_t set_eventlist;
+ dm_fsys_set_fileattr_t set_fileattr;
+ dm_fsys_set_inherit_t set_inherit;
+ dm_fsys_set_region_t set_region;
+ dm_fsys_symlink_by_handle_t symlink_by_handle;
+ dm_fsys_sync_by_handle_t sync_by_handle;
+ dm_fsys_upgrade_right_t upgrade_right;
+ dm_fsys_write_invis_rvp_t write_invis_rvp;
+ dm_fsys_obj_ref_hold_t obj_ref_hold;
+ } u_fc;
+} fsys_function_vector_t;
+
+struct dm_fcntl_vector {
+ int code_level;
+ int count; /* Number of functions in the vector */
+ fsys_function_vector_t *vecp;
+};
+typedef struct dm_fcntl_vector dm_fcntl_vector_t;
+
+struct dm_fcntl_mapevent {
+ size_t length; /* length of transfer */
+ dm_eventtype_t max_event; /* Maximum (WRITE or READ) event */
+ int error; /* returned error code */
+};
+typedef struct dm_fcntl_mapevent dm_fcntl_mapevent_t;
+
+#endif /* __KERNEL__ */
+
+
+/* The following definitions are needed both by the kernel and by the
+ library routines.
+*/
+
+#define DM_MAX_HANDLE_SIZE 56 /* maximum size for a file handle */
+typedef char dm_handle_t[DM_MAX_HANDLE_SIZE];
+
+/*
+ * Opcodes for dmapi ioctl.
+ */
+
+#define DM_CLEAR_INHERIT 1
+#define DM_CREATE_BY_HANDLE 2
+#define DM_CREATE_SESSION 3
+#define DM_CREATE_USEREVENT 4
+#define DM_DESTROY_SESSION 5
+#define DM_DOWNGRADE_RIGHT 6
+#define DM_FD_TO_HANDLE 7
+#define DM_FIND_EVENTMSG 8
+#define DM_GET_ALLOCINFO 9
+#define DM_GET_BULKALL 10
+#define DM_GET_BULKATTR 11
+#define DM_GET_CONFIG 12
+#define DM_GET_CONFIG_EVENTS 13
+#define DM_GET_DIRATTRS 14
+#define DM_GET_DMATTR 15
+#define DM_GET_EVENTLIST 16
+#define DM_GET_EVENTS 17
+#define DM_GET_FILEATTR 18
+#define DM_GET_MOUNTINFO 19
+#define DM_GET_REGION 20
+#define DM_GETALL_DISP 21
+#define DM_GETALL_DMATTR 22
+#define DM_GETALL_INHERIT 23
+#define DM_GETALL_SESSIONS 24
+#define DM_GETALL_TOKENS 25
+#define DM_INIT_ATTRLOC 26
+#define DM_MKDIR_BY_HANDLE 27
+#define DM_MOVE_EVENT 28
+#define DM_OBJ_REF_HOLD 29
+#define DM_OBJ_REF_QUERY 30
+#define DM_OBJ_REF_RELE 31
+#define DM_PATH_TO_FSHANDLE 32
+#define DM_PATH_TO_HANDLE 33
+#define DM_PENDING 34
+#define DM_PROBE_HOLE 35
+#define DM_PUNCH_HOLE 36
+#define DM_QUERY_RIGHT 37
+#define DM_QUERY_SESSION 38
+#define DM_READ_INVIS 39
+#define DM_RELEASE_RIGHT 40
+#define DM_REMOVE_DMATTR 41
+#define DM_REQUEST_RIGHT 42
+#define DM_RESPOND_EVENT 43
+#define DM_SEND_MSG 44
+#define DM_SET_DISP 45
+#define DM_SET_DMATTR 46
+#define DM_SET_EVENTLIST 47
+#define DM_SET_FILEATTR 48
+#define DM_SET_INHERIT 49
+#define DM_SET_REGION 50
+#define DM_SET_RETURN_ON_DESTROY 51
+#define DM_SYMLINK_BY_HANDLE 52
+#define DM_SYNC_BY_HANDLE 53
+#define DM_UPGRADE_RIGHT 54
+#define DM_WRITE_INVIS 55
+#define DM_OPEN_BY_HANDLE 56
+#define DM_HANDLE_TO_PATH 57 // XFS BUG #12
+
+#define JFS_DM_IOCTL_TYPE 0xDB
+
+#define JFS_DM_IOC_CLEAR_INHERIT \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_CLEAR_INHERIT, dm_attrname_t)
+#define JFS_DM_IOC_CREATE_BY_HANDLE \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_CREATE_BY_HANDLE, dm_handle_t)
+#define JFS_DM_IOC_CREATE_SESSION \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_CREATE_SESSION, dm_sessid_t)
+#define JFS_DM_IOC_CREATE_USEREVENT \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_CREATE_USEREVENT, dm_token_t)
+#define JFS_DM_IOC_DESTROY_SESSION \
+ _IO(JFS_DM_IOCTL_TYPE, DM_DESTROY_SESSION)
+#define JFS_DM_IOC_DOWNGRADE_RIGHT \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_DOWNGRADE_RIGHT, dm_handle_t)
+#define JFS_DM_IOC_FD_TO_HANDLE \
+ _IOR(JFS_DM_IOCTL_TYPE, DM_FD_TO_HANDLE, dm_handle_t)
+#define JFS_DM_IOC_FIND_EVENTMSG \
+ _IOR(JFS_DM_IOCTL_TYPE, DM_FIND_EVENTMSG, dm_eventmsg_t)
+#define JFS_DM_IOC_GET_ALLOCINFO \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_ALLOCINFO, dm_extent_t)
+#define JFS_DM_IOC_GET_BULKALL \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_BULKALL, dm_stat_t)
+#define JFS_DM_IOC_GET_BULKATTR \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_BULKATTR, dm_stat_t)
+#define JFS_DM_IOC_GET_CONFIG \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_CONFIG, dm_size_t)
+#define JFS_DM_IOC_GET_CONFIG_EVENTS \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_CONFIG_EVENTS, dm_eventset_t)
+#define JFS_DM_IOC_GET_DIRATTRS \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_DIRATTRS, dm_stat_t)
+#define JFS_DM_IOC_GET_DMATTR \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_DMATTR, size_t)
+#define JFS_DM_IOC_GET_EVENTLIST \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_EVENTLIST, dm_eventset_t)
+#define JFS_DM_IOC_GET_EVENTS \
+ _IOR(JFS_DM_IOCTL_TYPE, DM_GET_EVENTS, dm_eventmsg_t)
+#define JFS_DM_IOC_GET_FILEATTR \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_FILEATTR, dm_stat_t)
+#define JFS_DM_IOC_GET_MOUNTINFO \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_MOUNTINFO, dm_mount_event_t)
+#define JFS_DM_IOC_GET_REGION \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GET_REGION, dm_region_t)
+#define JFS_DM_IOC_GETALL_DISP \
+ _IOR(JFS_DM_IOCTL_TYPE, DM_GETALL_DISP, dm_dispinfo_t)
+#define JFS_DM_IOC_GETALL_DMATTR \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GETALL_DMATTR, dm_attrlist_t)
+#define JFS_DM_IOC_GETALL_INHERIT \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_GETALL_INHERIT, dm_inherit_t)
+#define JFS_DM_IOC_GETALL_SESSIONS \
+ _IOR(JFS_DM_IOCTL_TYPE, DM_GETALL_SESSIONS, dm_sessid_t)
+#define JFS_DM_IOC_GETALL_TOKENS \
+ _IOR(JFS_DM_IOCTL_TYPE, DM_GETALL_TOKENS, dm_token_t)
+#define JFS_DM_IOC_INIT_ATTRLOC \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_INIT_ATTRLOC, dm_attrloc_t)
+#define JFS_DM_IOC_MKDIR_BY_HANDLE \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_MKDIR_BY_HANDLE, dm_handle_t)
+#define JFS_DM_IOC_MOVE_EVENT \
+ _IOR(JFS_DM_IOCTL_TYPE, DM_MOVE_EVENT, dm_token_t)
+#define JFS_DM_IOC_OBJ_REF_HOLD \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_OBJ_REF_HOLD, dm_handle_t)
+#define JFS_DM_IOC_OBJ_REF_QUERY \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_OBJ_REF_QUERY, dm_handle_t)
+#define JFS_DM_IOC_OBJ_REF_RELE \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_OBJ_REF_RELE, dm_handle_t)
+#define JFS_DM_IOC_PATH_TO_FSHANDLE \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_PATH_TO_FSHANDLE, dm_handle_t)
+#define JFS_DM_IOC_PATH_TO_HANDLE \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_PATH_TO_HANDLE, dm_handle_t)
+#define JFS_DM_IOC_PENDING \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_PENDING, dm_timestruct_t)
+#define JFS_DM_IOC_PROBE_HOLE \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_PROBE_HOLE, dm_off_t)
+#define JFS_DM_IOC_PUNCH_HOLE \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_PUNCH_HOLE, dm_handle_t)
+#define JFS_DM_IOC_QUERY_RIGHT \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_QUERY_RIGHT, dm_right_t)
+#define JFS_DM_IOC_QUERY_SESSION \
+ _IOR(JFS_DM_IOCTL_TYPE, DM_QUERY_SESSION, size_t)
+#define JFS_DM_IOC_READ_INVIS \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_READ_INVIS, dm_handle_t)
+#define JFS_DM_IOC_RELEASE_RIGHT \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_RELEASE_RIGHT, dm_handle_t)
+#define JFS_DM_IOC_REMOVE_DMATTR \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_REMOVE_DMATTR, dm_attrname_t)
+#define JFS_DM_IOC_REQUEST_RIGHT \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_REQUEST_RIGHT, dm_handle_t)
+#define JFS_DM_IOC_RESPOND_EVENT \
+ _IO(JFS_DM_IOCTL_TYPE, DM_RESPOND_EVENT)
+#define JFS_DM_IOC_SEND_MSG \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SEND_MSG, char)
+#define JFS_DM_IOC_SET_DISP \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SET_DISP, dm_eventset_t)
+#define JFS_DM_IOC_SET_DMATTR \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SET_DMATTR, dm_attrname_t)
+#define JFS_DM_IOC_SET_EVENTLIST \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SET_EVENTLIST, dm_eventset_t)
+#define JFS_DM_IOC_SET_FILEATTR \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SET_FILEATTR, dm_fileattr_t)
+#define JFS_DM_IOC_SET_INHERIT \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SET_INHERIT, dm_attrname_t)
+#define JFS_DM_IOC_SET_REGION \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_SET_REGION, dm_boolean_t)
+#define JFS_DM_IOC_SET_RETURN_ON_DESTROY \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SET_RETURN_ON_DESTROY, dm_attrname_t)
+#define JFS_DM_IOC_SYMLINK_BY_HANDLE \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SYMLINK_BY_HANDLE, dm_handle_t)
+#define JFS_DM_IOC_SYNC_BY_HANDLE \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_SYNC_BY_HANDLE, dm_handle_t)
+#define JFS_DM_IOC_UPGRADE_RIGHT \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_UPGRADE_RIGHT, dm_handle_t)
+#define JFS_DM_IOC_WRITE_INVIS \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_WRITE_INVIS, dm_handle_t)
+#define JFS_DM_IOC_OPEN_BY_HANDLE \
+ _IOW(JFS_DM_IOCTL_TYPE, DM_OPEN_BY_HANDLE, dm_handle_t)
+#define JFS_DM_IOC_HANDLE_TO_PATH \
+ _IOWR(JFS_DM_IOCTL_TYPE, DM_HANDLE_TO_PATH, size_t)
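+
+/* Illustrative sketch (not part of the interface proper): user space is
+   expected to reach these entry points through ioctl(2), passing one of
+   the request codes above. 'dmapi_fd' and 'args' below are assumptions;
+   how the DMAPI device is opened and how the arguments are marshalled is
+   defined elsewhere:
+
+	if (ioctl(dmapi_fd, JFS_DM_IOC_GET_FILEATTR, &args) < 0)
+		perror("dm_get_fileattr");
+*/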
+#endif /* __DMAPI_KERN_H__ */
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_mountinfo.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_mountinfo.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_mountinfo.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_mountinfo.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+/* XXX */
+#define vfsmax 1
+
+typedef struct {
+ int support_type;
+ char name[16];
+ dm_fsys_vector_t *vptr;
+} dm_vector_map_t;
+
+/* Values for the support_type field. */
+
+#define DM_SUPPORT_UNKNOWN 0
+#define DM_SUPPORT_AVAIL 1
+
+
+dm_vector_map_t *dm_fsys_map = NULL;
+
+int jfs_dm_get_fsys_vector(struct inode *, caddr_t);
+
+
+int
+dm_code_level(void)
+{
+ return(DM_CLVL_XOPEN); /* initial X/Open compliant release */
+}
+
+
+/* Dummy routine which is stored in each function vector slot for which the
+ filesystem provides no function of its own. If an application calls the
+ function, it will just get ENOSYS.
+*/
+
+static int
+dm_enosys(void)
+{
+ return(-ENOSYS); /* function not supported by filesystem */
+}
+
+
+/* dm_query_fsys_for_vector() asks a filesystem for its list of supported
+ DMAPI functions, and builds a dm_vector_map_t structure based upon the
+ reply. We ignore functions supported by the filesystem which we do not
+ know about, and we substitute the subroutine 'dm_enosys' for each function
+ we know about but the filesystem does not support.
+*/
+
+static void
+dm_query_fsys_for_vector(
+ struct inode *ip)
+{
+ fsys_function_vector_t *vecp;
+ dm_fcntl_vector_t vecrq;
+ dm_fsys_vector_t *vptr;
+ dm_vector_map_t *map;
+ int fstype;
+ int error;
+ int i;
+
+ /* XXX fstype = vfsp->vfs_fstype */
+ fstype = 0;
+ map = &dm_fsys_map[fstype];
+
+ /* Clear out any information left from a previous filesystem that was
+ in this slot and initialize it for the new filesystem.
+ */
+
+ if (map->vptr) {
+ kfree(map->vptr);
+ map->vptr = NULL;
+ }
+
+ /* XXX strcpy(map->name, vfssw[fstype].vsw_name); */
+ strcpy(map->name, JFS_NAME);
+
+ /* Next allocate a function vector and initialize all fields with a
+    dummy function that returns ENOSYS. The slot is only marked
+    DM_SUPPORT_AVAIL once the allocation has succeeded, so a failed
+    allocation is retried on the next dm_fsys_vector() call.
+ */
+
+ vptr = map->vptr = kmalloc(sizeof(*map->vptr), GFP_KERNEL);
+ if (vptr == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return;
+ }
+
+ map->support_type = DM_SUPPORT_AVAIL;
+
+ strncpy(vptr->fsys_name, map->name, sizeof(vptr->fsys_name));
+ vptr->code_level = 0;
+ vptr->clear_inherit = (dm_fsys_clear_inherit_t)dm_enosys;
+ vptr->create_by_handle = (dm_fsys_create_by_handle_t)dm_enosys;
+ vptr->downgrade_right = (dm_fsys_downgrade_right_t)dm_enosys;
+ vptr->get_allocinfo_rvp = (dm_fsys_get_allocinfo_rvp_t)dm_enosys;
+ vptr->get_bulkall_rvp = (dm_fsys_get_bulkall_rvp_t)dm_enosys;
+ vptr->get_bulkattr_rvp = (dm_fsys_get_bulkattr_rvp_t)dm_enosys;
+ vptr->get_config = (dm_fsys_get_config_t)dm_enosys;
+ vptr->get_config_events = (dm_fsys_get_config_events_t)dm_enosys;
+ vptr->get_destroy_dmattr = (dm_fsys_get_destroy_dmattr_t)dm_enosys;
+ vptr->get_dirattrs_rvp = (dm_fsys_get_dirattrs_rvp_t)dm_enosys;
+ vptr->get_dmattr = (dm_fsys_get_dmattr_t)dm_enosys;
+ vptr->get_eventlist = (dm_fsys_get_eventlist_t)dm_enosys;
+ vptr->get_fileattr = (dm_fsys_get_fileattr_t)dm_enosys;
+ vptr->get_region = (dm_fsys_get_region_t)dm_enosys;
+ vptr->getall_dmattr = (dm_fsys_getall_dmattr_t)dm_enosys;
+ vptr->getall_inherit = (dm_fsys_getall_inherit_t)dm_enosys;
+ vptr->init_attrloc = (dm_fsys_init_attrloc_t)dm_enosys;
+ vptr->mkdir_by_handle = (dm_fsys_mkdir_by_handle_t)dm_enosys;
+ vptr->probe_hole = (dm_fsys_probe_hole_t)dm_enosys;
+ vptr->punch_hole = (dm_fsys_punch_hole_t)dm_enosys;
+ vptr->read_invis_rvp = (dm_fsys_read_invis_rvp_t)dm_enosys;
+ vptr->release_right = (dm_fsys_release_right_t)dm_enosys;
+ vptr->request_right = (dm_fsys_request_right_t)dm_enosys;
+ vptr->remove_dmattr = (dm_fsys_remove_dmattr_t)dm_enosys;
+ vptr->set_dmattr = (dm_fsys_set_dmattr_t)dm_enosys;
+ vptr->set_eventlist = (dm_fsys_set_eventlist_t)dm_enosys;
+ vptr->set_fileattr = (dm_fsys_set_fileattr_t)dm_enosys;
+ vptr->set_inherit = (dm_fsys_set_inherit_t)dm_enosys;
+ vptr->set_region = (dm_fsys_set_region_t)dm_enosys;
+ vptr->symlink_by_handle = (dm_fsys_symlink_by_handle_t)dm_enosys;
+ vptr->sync_by_handle = (dm_fsys_sync_by_handle_t)dm_enosys;
+ vptr->upgrade_right = (dm_fsys_upgrade_right_t)dm_enosys;
+ vptr->write_invis_rvp = (dm_fsys_write_invis_rvp_t)dm_enosys;
+ vptr->obj_ref_hold = (dm_fsys_obj_ref_hold_t)dm_enosys;
+
+ /* Issue a call to the filesystem in order to obtain
+ its vector of filesystem-specific DMAPI routines.
+ */
+
+ vecrq.count = 0;
+ vecrq.vecp = NULL;
+
+ error = jfs_dm_get_fsys_vector(ip, (caddr_t)&vecrq);
+
+ /* If we still have an error at this point, then the filesystem simply
+ does not support DMAPI, so we give up with all functions set to
+ ENOSYS.
+ */
+
+ if (error || vecrq.count == 0)
+ return;
+
+ /* The request succeeded and we were given a vector which we need to
+ map to our current level. Overlay the dummy function with every
+ filesystem function we understand.
+ */
+
+ vptr->code_level = vecrq.code_level;
+ vecp = vecrq.vecp;
+ for (i = 0; i < vecrq.count; i++) {
+ switch (vecp[i].func_no) {
+ case DM_FSYS_CLEAR_INHERIT:
+ vptr->clear_inherit = vecp[i].u_fc.clear_inherit;
+ break;
+ case DM_FSYS_CREATE_BY_HANDLE:
+ vptr->create_by_handle = vecp[i].u_fc.create_by_handle;
+ break;
+ case DM_FSYS_DOWNGRADE_RIGHT:
+ vptr->downgrade_right = vecp[i].u_fc.downgrade_right;
+ break;
+ case DM_FSYS_GET_ALLOCINFO_RVP:
+ vptr->get_allocinfo_rvp = vecp[i].u_fc.get_allocinfo_rvp;
+ break;
+ case DM_FSYS_GET_BULKALL_RVP:
+ vptr->get_bulkall_rvp = vecp[i].u_fc.get_bulkall_rvp;
+ break;
+ case DM_FSYS_GET_BULKATTR_RVP:
+ vptr->get_bulkattr_rvp = vecp[i].u_fc.get_bulkattr_rvp;
+ break;
+ case DM_FSYS_GET_CONFIG:
+ vptr->get_config = vecp[i].u_fc.get_config;
+ break;
+ case DM_FSYS_GET_CONFIG_EVENTS:
+ vptr->get_config_events = vecp[i].u_fc.get_config_events;
+ break;
+ case DM_FSYS_GET_DESTROY_DMATTR:
+ vptr->get_destroy_dmattr = vecp[i].u_fc.get_destroy_dmattr;
+ break;
+ case DM_FSYS_GET_DIRATTRS_RVP:
+ vptr->get_dirattrs_rvp = vecp[i].u_fc.get_dirattrs_rvp;
+ break;
+ case DM_FSYS_GET_DMATTR:
+ vptr->get_dmattr = vecp[i].u_fc.get_dmattr;
+ break;
+ case DM_FSYS_GET_EVENTLIST:
+ vptr->get_eventlist = vecp[i].u_fc.get_eventlist;
+ break;
+ case DM_FSYS_GET_FILEATTR:
+ vptr->get_fileattr = vecp[i].u_fc.get_fileattr;
+ break;
+ case DM_FSYS_GET_REGION:
+ vptr->get_region = vecp[i].u_fc.get_region;
+ break;
+ case DM_FSYS_GETALL_DMATTR:
+ vptr->getall_dmattr = vecp[i].u_fc.getall_dmattr;
+ break;
+ case DM_FSYS_GETALL_INHERIT:
+ vptr->getall_inherit = vecp[i].u_fc.getall_inherit;
+ break;
+ case DM_FSYS_INIT_ATTRLOC:
+ vptr->init_attrloc = vecp[i].u_fc.init_attrloc;
+ break;
+ case DM_FSYS_MKDIR_BY_HANDLE:
+ vptr->mkdir_by_handle = vecp[i].u_fc.mkdir_by_handle;
+ break;
+ case DM_FSYS_PROBE_HOLE:
+ vptr->probe_hole = vecp[i].u_fc.probe_hole;
+ break;
+ case DM_FSYS_PUNCH_HOLE:
+ vptr->punch_hole = vecp[i].u_fc.punch_hole;
+ break;
+ case DM_FSYS_READ_INVIS_RVP:
+ vptr->read_invis_rvp = vecp[i].u_fc.read_invis_rvp;
+ break;
+ case DM_FSYS_RELEASE_RIGHT:
+ vptr->release_right = vecp[i].u_fc.release_right;
+ break;
+ case DM_FSYS_REMOVE_DMATTR:
+ vptr->remove_dmattr = vecp[i].u_fc.remove_dmattr;
+ break;
+ case DM_FSYS_REQUEST_RIGHT:
+ vptr->request_right = vecp[i].u_fc.request_right;
+ break;
+ case DM_FSYS_SET_DMATTR:
+ vptr->set_dmattr = vecp[i].u_fc.set_dmattr;
+ break;
+ case DM_FSYS_SET_EVENTLIST:
+ vptr->set_eventlist = vecp[i].u_fc.set_eventlist;
+ break;
+ case DM_FSYS_SET_FILEATTR:
+ vptr->set_fileattr = vecp[i].u_fc.set_fileattr;
+ break;
+ case DM_FSYS_SET_INHERIT:
+ vptr->set_inherit = vecp[i].u_fc.set_inherit;
+ break;
+ case DM_FSYS_SET_REGION:
+ vptr->set_region = vecp[i].u_fc.set_region;
+ break;
+ case DM_FSYS_SYMLINK_BY_HANDLE:
+ vptr->symlink_by_handle = vecp[i].u_fc.symlink_by_handle;
+ break;
+ case DM_FSYS_SYNC_BY_HANDLE:
+ vptr->sync_by_handle = vecp[i].u_fc.sync_by_handle;
+ break;
+ case DM_FSYS_UPGRADE_RIGHT:
+ vptr->upgrade_right = vecp[i].u_fc.upgrade_right;
+ break;
+ case DM_FSYS_WRITE_INVIS_RVP:
+ vptr->write_invis_rvp = vecp[i].u_fc.write_invis_rvp;
+ break;
+ case DM_FSYS_OBJ_REF_HOLD:
+ vptr->obj_ref_hold = vecp[i].u_fc.obj_ref_hold;
+ break;
+ default: /* ignore ones we don't understand */
+ break;
+ }
+ }
+}
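+
+/* Illustrative sketch of the filesystem side of the query above (the real
+   code lives in jfs_dm_get_fsys_vector(), which is not in this file; the
+   handler name jfs_dm_get_fileattr below is hypothetical):
+
+	static fsys_function_vector_t jfs_vec[] = {
+		{ .func_no = DM_FSYS_GET_FILEATTR,
+		  .u_fc.get_fileattr = jfs_dm_get_fileattr },
+	};
+
+	vecrq->code_level = DM_CLVL_XOPEN;
+	vecrq->count = sizeof(jfs_vec) / sizeof(jfs_vec[0]);
+	vecrq->vecp = jfs_vec;
+*/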
+
+
+/* Given an inode pointer, dm_fsys_vector() returns a pointer to the DMAPI
+ function vector to be used for the corresponding inode. There is one possible
+ function vector for each filesystem type, although currently JFS is the
+ only filesystem that actually supports DMAPI with inodes (XFS supports
+ DMAPI using vnodes).
+*/
+
+dm_fsys_vector_t *
+dm_fsys_vector(
+ struct inode *ip)
+{
+ dm_vector_map_t *map;
+ int fstype;
+
+ /* XXX fstype = vp->v_vfsp->vfs_fstype */
+ fstype = 0;
+
+ /* If this is the first call, initialize the filesystem function
+ vector map.
+ */
+
+ if (dm_fsys_map == NULL) {
+ int size = vfsmax * sizeof(*dm_fsys_map);
+
+ dm_fsys_map = (dm_vector_map_t *)kmalloc(size, GFP_KERNEL);
+ if (dm_fsys_map == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return NULL;
+ }
+ memset(dm_fsys_map, 0, size);
+ }
+ map = &dm_fsys_map[fstype];
+
+ /* If a new filesystem has been dynamically loaded into a slot
+ previously held by another filesystem, then treat it as a
+ DM_SUPPORT_UNKNOWN.
+ */
+
+ /* XXX if (strcmp(map->name, vfssw[fstype].vsw_name)) */
+ if (strcmp(map->name, JFS_NAME))
+ map->support_type = DM_SUPPORT_UNKNOWN;
+
+ /* If we don't yet know what the filesystem supports, ask it. */
+
+ if (map->support_type == DM_SUPPORT_UNKNOWN)
+ dm_query_fsys_for_vector(ip);
+
+ /* Now return the function vector. */
+
+ return(map->vptr);
+}
+
+
+void
+dm_fsys_vector_free(void)
+{
+ dm_vector_map_t *map;
+ int i;
+
+ if (dm_fsys_map) {
+ for (i = 0; i < vfsmax; i++){
+ map = &dm_fsys_map[i];
+ if (map->vptr)
+ kfree(map->vptr);
+ }
+ kfree(dm_fsys_map);
+ }
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_private.h linux-jfs-dmapi/fs/jfs/dmapi/dmapi_private.h
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_private.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_private.h 2004-05-28 13:42:42.000000000 -0500
@@ -0,0 +1,707 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef _DMAPI_PRIVATE_H
+#define _DMAPI_PRIVATE_H
+
+#include <linux/statfs.h>
+#include "jfsdmapi.h"
+#include "dmapi_jfs.h"
+#include "dmapi_kern.h"
+
+#ifdef CONFIG_PROC_FS
+#define DMAPI_PROCFS "fs/jfs_dmapi_v1" /* DMAPI device in /proc. */
+#define DMAPI_DBG_PROCFS "fs/jfs_dmapi_d" /* DMAPI debugging dir */
+#endif
+
+#define DMATTR_PREFIXLEN 9
+#define DMATTR_PREFIXSTRING "user.dmi."
+
+#define DMATTR_PERS_REGIONS "system.dmi.persistent.regions"
+
+extern struct kmem_cache_s *dm_fsreg_cachep;
+extern struct kmem_cache_s *dm_tokdata_cachep;
+extern struct kmem_cache_s *dm_session_cachep;
+
+
+
+typedef struct dm_tokdata {
+ struct dm_tokdata *td_next;
+ struct dm_tokevent *td_tevp; /* pointer to owning tevp */
+ int td_app_ref; /* # app threads currently active */
+ dm_right_t td_orig_right; /* original right held when created */
+ dm_right_t td_right; /* current right held for this handle */
+ short td_flags;
+ short td_type; /* object type */
+ int td_icount; /* # of current application iget */
+ struct inode *td_ip; /* inode pointer */
+ jfs_handle_t td_handle; /* handle for ip or sbp */
+} dm_tokdata_t;
+
+/* values for td_type */
+
+#define DM_TDT_NONE 0x00 /* td_handle is empty */
+#define DM_TDT_FS 0x01 /* td_handle points to a file system */
+#define DM_TDT_REG 0x02 /* td_handle points to a file */
+#define DM_TDT_DIR 0x04 /* td_handle points to a directory */
+#define DM_TDT_LNK 0x08 /* td_handle points to a symlink */
+#define DM_TDT_OTH 0x10 /* some other object eg. pipe, socket */
+
+#define DM_TDT_INO (DM_TDT_REG|DM_TDT_DIR|DM_TDT_LNK|DM_TDT_OTH)
+#define DM_TDT_ANY (DM_TDT_FS|DM_TDT_REG|DM_TDT_DIR|DM_TDT_LNK|DM_TDT_OTH)
+
+/* values for td_flags */
+
+#define DM_TDF_ORIG 0x0001 /* part of the original event */
+#define DM_TDF_EVTREF 0x0002 /* event thread holds inode reference */
+#define DM_TDF_STHREAD 0x0004 /* only one app can use this handle */
+#define DM_TDF_RIGHT 0x0008 /* icount bumped for dm_request_right */
+#define DM_TDF_HOLD 0x0010 /* icount bumped for dm_obj_ref_hold */
+
+
+/* Because some events contain __u64 fields, we force te_msg and te_event
+ to always be 8-byte aligned. In order to send more than one message in
+ a single dm_get_events() call, we also ensure that each message is an
+ 8-byte multiple.
+*/
+
+typedef struct dm_tokevent {
+ struct dm_tokevent *te_next;
+ struct dm_tokevent *te_hashnext; /* hash chain */
+ lock_t te_lock; /* lock for all fields but te_*next.
+ * te_next and te_hashnext are
+ * protected by the session lock.
+ */
+ short te_flags;
+ short te_allocsize; /* alloc'ed size of this structure */
+ sv_t te_evt_queue; /* queue waiting for dm_respond_event */
+ sv_t te_app_queue; /* queue waiting for handle access */
+ int te_evt_ref; /* number of event procs using token */
+ int te_app_ref; /* number of app procs using token */
+ int te_app_slp; /* number of app procs sleeping */
+ int te_reply; /* return errno for sync messages */
+ dm_tokdata_t *te_tdp; /* list of handle/right pairs */
+ union {
+ __u64 align; /* force alignment of te_msg */
+ dm_eventmsg_t te_msg; /* user visible part */
+ } te_u;
+ __u64 te_event; /* start of dm_xxx_event_t message */
+} dm_tokevent_t;
+
+#define te_msg te_u.te_msg
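+
+/* A minimal sketch of the 8-byte rounding described above; the allocation
+   code presumably rounds each message length like so:
+
+	msgsize = (msgsize + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1);
+*/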
+
+/* values for te_flags */
+
+#define DM_TEF_LOCKED 0x0001 /* event "locked" by dm_get_events() */
+#define DM_TEF_INTERMED 0x0002 /* a dm_pending reply was received */
+#define DM_TEF_FINAL 0x0004 /* dm_respond_event has been received */
+#ifdef DM_USE_SHASH
+#define DM_TEF_HASHED 0x0010 /* event is on hash chain */
+#endif
+
+
+#ifdef DM_USE_SHASH
+#ifdef DEBUG
+#define DM_SHASH_DEBUG
+#endif
+
+typedef struct dm_sesshash {
+ dm_tokevent_t *h_next; /* ptr to chain of tokevents */
+#ifdef DM_SHASH_DEBUG
+ int maxlength;
+ int curlength;
+ int num_adds;
+ int num_dels;
+ int dup_hits;
+#endif
+} dm_sesshash_t;
+#endif
+
+
+typedef struct dm_eventq {
+ dm_tokevent_t *eq_head;
+ dm_tokevent_t *eq_tail;
+ int eq_count; /* size of queue */
+} dm_eventq_t;
+
+
+typedef struct dm_session {
+ struct dm_session *sn_next; /* sessions linkage */
+ dm_sessid_t sn_sessid; /* user-visible session number */
+ u_int sn_flags;
+ lock_t sn_qlock; /* lock for newq/delq related fields */
+ sv_t sn_readerq; /* waiting for message on sn_newq */
+ sv_t sn_writerq; /* waiting for room on sn_newq */
+ u_int sn_readercnt; /* count of waiting readers */
+ u_int sn_writercnt; /* count of waiting writers */
+ dm_eventq_t sn_newq; /* undelivered event queue */
+ dm_eventq_t sn_delq; /* delivered event queue */
+ dm_eventq_t sn_evt_writerq; /* events of thrds in sn_writerq */
+#ifdef DM_USE_SHASH
+ dm_sesshash_t *sn_sesshash; /* buckets for tokevent hash chains */
+#ifdef DM_SHASH_DEBUG
+ int sn_buckets_in_use;
+ int sn_max_buckets_in_use;
+#endif
+#endif
+ char sn_info[DM_SESSION_INFO_LEN]; /* user-supplied info */
+} dm_session_t;
+
+/* values for sn_flags */
+
+#define DM_SN_WANTMOUNT 0x0001 /* session wants to get mount events */
+
+
+typedef enum {
+ DM_STATE_MOUNTING,
+ DM_STATE_MOUNTED,
+ DM_STATE_UNMOUNTING,
+ DM_STATE_UNMOUNTED
+} dm_fsstate_t;
+
+
+typedef struct dm_fsreg {
+ struct dm_fsreg *fr_next;
+ struct super_block *fr_sb; /* filesystem pointer */
+ dm_tokevent_t *fr_tevp;
+ fsid_t fr_fsid; /* filesystem ID */
+ void *fr_msg; /* dm_mount_event_t for filesystem */
+ int fr_msgsize; /* size of dm_mount_event_t */
+ dm_fsstate_t fr_state;
+ sv_t fr_dispq;
+ int fr_dispcnt;
+ dm_eventq_t fr_evt_dispq; /* events of thrds in fr_dispq */
+ sv_t fr_queue; /* queue for hdlcnt/vfscnt/unmount */
+ lock_t fr_lock;
+ int fr_hdlcnt; /* threads blocked during unmount */
+ int fr_vfscnt; /* threads in VFS_VGET or VFS_ROOT */
+ int fr_unmount; /* if non-zero, umount is sleeping */
+ dm_attrname_t fr_rattr; /* dm_set_return_on_destroy attribute */
+ dm_session_t *fr_sessp [DM_EVENT_MAX];
+} dm_fsreg_t;
+
+
+
+
+/* events valid in dm_set_disp() when called with a filesystem handle. */
+
+#define DM_VALID_DISP_EVENTS ( \
+ (1 << DM_EVENT_PREUNMOUNT) | \
+ (1 << DM_EVENT_UNMOUNT) | \
+ (1 << DM_EVENT_NOSPACE) | \
+ (1 << DM_EVENT_CREATE) | \
+ (1 << DM_EVENT_POSTCREATE) | \
+ (1 << DM_EVENT_REMOVE) | \
+ (1 << DM_EVENT_POSTREMOVE) | \
+ (1 << DM_EVENT_RENAME) | \
+ (1 << DM_EVENT_POSTRENAME) | \
+ (1 << DM_EVENT_LINK) | \
+ (1 << DM_EVENT_POSTLINK) | \
+ (1 << DM_EVENT_SYMLINK) | \
+ (1 << DM_EVENT_POSTSYMLINK) | \
+ (1 << DM_EVENT_READ) | \
+ (1 << DM_EVENT_WRITE) | \
+ (1 << DM_EVENT_TRUNCATE) | \
+ (1 << DM_EVENT_ATTRIBUTE) | \
+ (1 << DM_EVENT_CLOSE) | \
+ (1 << DM_EVENT_DESTROY) )
+
+#define DM_JFS_VALID_FS_EVENTS ( \
+ (1 << DM_EVENT_PREUNMOUNT) | \
+ (1 << DM_EVENT_UNMOUNT) | \
+ (1 << DM_EVENT_NOSPACE) | \
+ (1 << DM_EVENT_CREATE) | \
+ (1 << DM_EVENT_POSTCREATE) | \
+ (1 << DM_EVENT_REMOVE) | \
+ (1 << DM_EVENT_POSTREMOVE) | \
+ (1 << DM_EVENT_RENAME) | \
+ (1 << DM_EVENT_POSTRENAME) | \
+ (1 << DM_EVENT_LINK) | \
+ (1 << DM_EVENT_POSTLINK) | \
+ (1 << DM_EVENT_SYMLINK) | \
+ (1 << DM_EVENT_POSTSYMLINK) | \
+ (1 << DM_EVENT_ATTRIBUTE) | \
+ (1 << DM_EVENT_CLOSE) | \
+ (1 << DM_EVENT_DESTROY) )
+
+/* Events valid in dm_set_eventlist() when called with a file handle for
+ a regular file or a symlink. These events are persistent.
+*/
+
+#define DM_JFS_VALID_FILE_EVENTS ( \
+ (1 << DM_EVENT_ATTRIBUTE) | \
+ (1 << DM_EVENT_CLOSE) | \
+ (1 << DM_EVENT_DESTROY) )
+
+/* Events valid in dm_set_region(). These events are persistent.
+*/
+
+#define DM_JFS_VALID_REGION_EVENTS ( \
+ (1 << DM_EVENT_READ) | \
+ (1 << DM_EVENT_WRITE) | \
+ (1 << DM_EVENT_TRUNCATE) )
+
+/* Events valid in dm_set_eventlist() when called with a file handle for
+ a directory. These events are persistent.
+*/
+
+#define DM_JFS_VALID_DIRECTORY_EVENTS ( \
+ (1 << DM_EVENT_CREATE) | \
+ (1 << DM_EVENT_POSTCREATE) | \
+ (1 << DM_EVENT_REMOVE) | \
+ (1 << DM_EVENT_POSTREMOVE) | \
+ (1 << DM_EVENT_RENAME) | \
+ (1 << DM_EVENT_POSTRENAME) | \
+ (1 << DM_EVENT_LINK) | \
+ (1 << DM_EVENT_POSTLINK) | \
+ (1 << DM_EVENT_SYMLINK) | \
+ (1 << DM_EVENT_POSTSYMLINK) | \
+ (1 << DM_EVENT_ATTRIBUTE) | \
+ (1 << DM_EVENT_CLOSE) | \
+ (1 << DM_EVENT_DESTROY) )
+
+/* Events supported by the JFS filesystem. */
+#define DM_JFS_SUPPORTED_EVENTS ( \
+ (1 << DM_EVENT_MOUNT) | \
+ (1 << DM_EVENT_PREUNMOUNT) | \
+ (1 << DM_EVENT_UNMOUNT) | \
+ (1 << DM_EVENT_NOSPACE) | \
+ (1 << DM_EVENT_CREATE) | \
+ (1 << DM_EVENT_POSTCREATE) | \
+ (1 << DM_EVENT_REMOVE) | \
+ (1 << DM_EVENT_POSTREMOVE) | \
+ (1 << DM_EVENT_RENAME) | \
+ (1 << DM_EVENT_POSTRENAME) | \
+ (1 << DM_EVENT_LINK) | \
+ (1 << DM_EVENT_POSTLINK) | \
+ (1 << DM_EVENT_SYMLINK) | \
+ (1 << DM_EVENT_POSTSYMLINK) | \
+ (1 << DM_EVENT_READ) | \
+ (1 << DM_EVENT_WRITE) | \
+ (1 << DM_EVENT_TRUNCATE) | \
+ (1 << DM_EVENT_ATTRIBUTE) | \
+ (1 << DM_EVENT_CLOSE) | \
+ (1 << DM_EVENT_DESTROY) | \
+ (1 << DM_EVENT_USER) )
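+
+/* Illustrative use of the masks above (a sketch only; the real checks live
+   in the dm_set_eventlist()/dm_set_disp() implementations):
+
+	if (*eventsetp & ~DM_JFS_VALID_FILE_EVENTS)
+		return(-EINVAL);
+*/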
+
+
+/* isolate the read/write/trunc events of a dm_tokevent_t */
+
+#define DM_EVENT_RDWRTRUNC(tevp) ( \
+ ((tevp)->te_msg.ev_type == DM_EVENT_READ) || \
+ ((tevp)->te_msg.ev_type == DM_EVENT_WRITE) || \
+ ((tevp)->te_msg.ev_type == DM_EVENT_TRUNCATE) )
+
+
+/*
+ * Global handle hack isolation.
+ */
+
+#define DM_GLOBALHAN(hanp, hlen) (((hanp) == DM_GLOBAL_HANP) && \
+ ((hlen) == DM_GLOBAL_HLEN))
+
+
+#define DM_MAX_MSG_DATA 3960
+
+
+
+/* Supported filesystem function vector functions. */
+
+
+typedef struct {
+ int code_level;
+ char fsys_name[16];
+ dm_fsys_clear_inherit_t clear_inherit;
+ dm_fsys_create_by_handle_t create_by_handle;
+ dm_fsys_downgrade_right_t downgrade_right;
+ dm_fsys_get_allocinfo_rvp_t get_allocinfo_rvp;
+ dm_fsys_get_bulkall_rvp_t get_bulkall_rvp;
+ dm_fsys_get_bulkattr_rvp_t get_bulkattr_rvp;
+ dm_fsys_get_config_t get_config;
+ dm_fsys_get_config_events_t get_config_events;
+ dm_fsys_get_destroy_dmattr_t get_destroy_dmattr;
+ dm_fsys_get_dirattrs_rvp_t get_dirattrs_rvp;
+ dm_fsys_get_dmattr_t get_dmattr;
+ dm_fsys_get_eventlist_t get_eventlist;
+ dm_fsys_get_fileattr_t get_fileattr;
+ dm_fsys_get_region_t get_region;
+ dm_fsys_getall_dmattr_t getall_dmattr;
+ dm_fsys_getall_inherit_t getall_inherit;
+ dm_fsys_init_attrloc_t init_attrloc;
+ dm_fsys_mkdir_by_handle_t mkdir_by_handle;
+ dm_fsys_probe_hole_t probe_hole;
+ dm_fsys_punch_hole_t punch_hole;
+ dm_fsys_read_invis_rvp_t read_invis_rvp;
+ dm_fsys_release_right_t release_right;
+ dm_fsys_remove_dmattr_t remove_dmattr;
+ dm_fsys_request_right_t request_right;
+ dm_fsys_set_dmattr_t set_dmattr;
+ dm_fsys_set_eventlist_t set_eventlist;
+ dm_fsys_set_fileattr_t set_fileattr;
+ dm_fsys_set_inherit_t set_inherit;
+ dm_fsys_set_region_t set_region;
+ dm_fsys_symlink_by_handle_t symlink_by_handle;
+ dm_fsys_sync_by_handle_t sync_by_handle;
+ dm_fsys_upgrade_right_t upgrade_right;
+ dm_fsys_write_invis_rvp_t write_invis_rvp;
+ dm_fsys_obj_ref_hold_t obj_ref_hold;
+} dm_fsys_vector_t;
+
+
+extern dm_session_t *dm_sessions; /* head of session list */
+extern dm_fsreg_t *dm_registers;
+extern lock_t dm_reg_lock; /* lock for registration list */
+
+/*
+ * Kernel only prototypes.
+ */
+
+int dm_find_session_and_lock(
+ dm_sessid_t sid,
+ dm_session_t **sessionpp,
+ unsigned long *lcp);
+
+int dm_find_msg_and_lock(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_tokevent_t **tevpp,
+ unsigned long *lcp);
+
+dm_tokevent_t * dm_evt_create_tevp(
+ dm_eventtype_t event,
+ int variable_size,
+ void **msgpp);
+
+int dm_app_get_tdp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ short types,
+ dm_right_t right,
+ dm_tokdata_t **tdpp);
+
+int dm_get_config_tdp(
+ void *hanp,
+ size_t hlen,
+ dm_tokdata_t **tdpp);
+
+void dm_app_put_tdp(
+ dm_tokdata_t *tdp);
+
+void dm_put_tevp(
+ dm_tokevent_t *tevp,
+ dm_tokdata_t *tdp);
+
+void dm_evt_rele_tevp(
+ dm_tokevent_t *tevp,
+ int droprights);
+
+int dm_enqueue_normal_event(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp,
+ int flags);
+
+int dm_enqueue_mount_event(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp);
+
+int dm_enqueue_sendmsg_event(
+ dm_sessid_t targetsid,
+ dm_tokevent_t *tevp,
+ int synch);
+
+int dm_enqueue_user_event(
+ dm_sessid_t sid,
+ dm_tokevent_t *tevp,
+ dm_token_t *tokenp);
+
+int dm_obj_ref_query_rvp(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen,
+ int *rvp);
+
+int dm_read_invis_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp);
+
+int dm_write_invis_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp,
+ int *rvp);
+
+int dm_get_bulkattr_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvp);
+
+int dm_get_bulkall_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrname_t *attrnamep,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvp);
+
+int dm_get_dirattrs_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp,
+ int *rvp);
+
+int dm_get_allocinfo_rvp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t *offp,
+ u_int nelem,
+ dm_extent_t *extentp,
+ u_int *nelemp,
+ int *rvp);
+
+int dm_waitfor_destroy_attrname(
+ struct super_block *sbp,
+ dm_attrname_t *attrnamep);
+
+void dm_clear_fsreg(
+ dm_session_t *s);
+
+int dm_add_fsys_entry(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp);
+
+void dm_change_fsys_entry(
+ struct super_block *sbp,
+ dm_fsstate_t newstate);
+
+void dm_remove_fsys_entry(
+ struct super_block *sbp);
+
+dm_fsys_vector_t *dm_fsys_vector(
+ struct inode *ip);
+
+void dm_fsys_vector_free(void);
+
+int dm_waitfor_disp_session(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp,
+ dm_session_t **sessionpp,
+ unsigned long *lcp);
+
+struct inode * dm_handle_to_ip (
+ jfs_handle_t *handlep,
+ short *typep);
+
+int dm_check_dmapi_ip(
+ struct inode *ip);
+
+dm_tokevent_t * dm_find_mount_tevp_and_lock(
+ fsid_t *fsidp,
+ unsigned long *lcp);
+
+int dm_path_to_hdl(
+ char *path,
+ void *hanp,
+ size_t *hlenp);
+
+int dm_path_to_fshdl(
+ char *path,
+ void *hanp,
+ size_t *hlenp);
+
+int dm_fd_to_hdl(
+ int fd,
+ void *hanp,
+ size_t *hlenp);
+
+int dm_upgrade_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+int dm_downgrade_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+int dm_request_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int flags,
+ dm_right_t right);
+
+int dm_release_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+int dm_query_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_right_t *rightp);
+
+
+int dm_set_eventlist(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t *eventsetp,
+ u_int maxevent);
+
+int dm_obj_ref_hold(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen);
+
+int dm_obj_ref_rele(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen);
+
+int dm_get_eventlist(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_eventset_t *eventsetp,
+ u_int *nelemp);
+
+
+int dm_set_disp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t *eventsetp,
+ u_int maxevent);
+
+
+int dm_set_return_on_destroy(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ dm_boolean_t enable);
+
+
+int dm_get_mountinfo(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+void dm_link_event(
+ dm_tokevent_t *tevp,
+ dm_eventq_t *queue);
+
+void dm_unlink_event(
+ dm_tokevent_t *tevp,
+ dm_eventq_t *queue);
+
+int dm_open_by_handle_rvp(
+ unsigned int fd,
+ void *hanp,
+ size_t hlen,
+ int mode,
+ int *rvp);
+
+int dm_copyin_handle(
+ void *hanp,
+ size_t hlen,
+ jfs_handle_t *handlep);
+
+// XFS BUG #11 BEGIN
+int dm_dequeue_user_event(
+ dm_sessid_t sid,
+ dm_tokevent_t *tevp,
+ dm_token_t token);
+// XFS BUG #11 END
+
+// XFS BUG #12 BEGIN
+int dm_hdl_to_path(
+ void *dirhanp,
+ size_t dirhlen,
+ void *targhanp,
+ size_t targhlen,
+ size_t buflen,
+ char *pathbufp,
+ size_t *rlenp);
+// XFS BUG #12 END
+
+#endif /* _DMAPI_PRIVATE_H */
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_region.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_region.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_region.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_region.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+
+
+int
+dm_get_region(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_region_t *regbufp,
+ u_int *nelemp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_region(tdp->td_ip, tdp->td_right,
+ nelem, regbufp, nelemp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+
+int
+dm_set_region(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_region_t *regbufp,
+ dm_boolean_t *exactflagp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_REG,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_region(tdp->td_ip, tdp->td_right,
+ nelem, regbufp, exactflagp);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
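+
+
+/* Application-side sketch (illustrative only; 'sid', 'hanp', 'hlen' and
+   'token' are assumed to have been obtained through the usual session and
+   handle calls): marking an entire file as a managed region for read,
+   write and truncate events:
+
+	dm_region_t region = { 0, 0,
+		DM_REGION_READ | DM_REGION_WRITE | DM_REGION_TRUNCATE };
+	dm_boolean_t exact;
+
+	error = dm_set_region(sid, hanp, hlen, token, 1, &region, &exact);
+*/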
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_register.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_register.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_register.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_register.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,1690 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+#include "jfs_incore.h"
+#include "jfs_debug.h"
+#include "jfs_filsys.h"
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+
+dm_fsreg_t *dm_registers; /* head of filesystem registration list */
+int dm_fsys_cnt; /* number of filesystems on dm_registers list */
+lock_t dm_reg_lock = SPIN_LOCK_UNLOCKED;/* lock for dm_registers */
+
+
+
+#ifdef CONFIG_PROC_FS
+static int
+fsreg_read_pfs(char *buffer, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len;
+ int i;
+ dm_fsreg_t *fsrp = (dm_fsreg_t*)data;
+ char statebuf[30];
+
+#define CHKFULL if(len >= count) break;
+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
+
+ switch (fsrp->fr_state) {
+ case DM_STATE_MOUNTING: sprintf(statebuf, "mounting"); break;
+ case DM_STATE_MOUNTED: sprintf(statebuf, "mounted"); break;
+ case DM_STATE_UNMOUNTING: sprintf(statebuf, "unmounting"); break;
+ case DM_STATE_UNMOUNTED: sprintf(statebuf, "unmounted"); break;
+ default:
+ sprintf(statebuf, "unknown:%d", (int)fsrp->fr_state);
+ break;
+ }
+
+ len = 0;
+ while (1) {
+ ADDBUF("fsrp=0x%p\n", fsrp);
+ ADDBUF("fr_next=0x%p\n", fsrp->fr_next);
+ /*ADDBUF("fr_vfsp=0x%p\n", fsrp->fr_vfsp);*/
+ ADDBUF("fr_tevp=0x%p\n", fsrp->fr_tevp);
+ ADDBUF("fr_fsid=%c\n", '?');
+ ADDBUF("fr_msg=0x%p\n", fsrp->fr_msg);
+ ADDBUF("fr_msgsize=%d\n", fsrp->fr_msgsize);
+ ADDBUF("fr_state=%s\n", statebuf);
+ ADDBUF("fr_dispq=%c\n", '?');
+ ADDBUF("fr_dispcnt=%d\n", fsrp->fr_dispcnt);
+
+ ADDBUF("fr_evt_dispq.eq_head=0x%p\n", fsrp->fr_evt_dispq.eq_head);
+ ADDBUF("fr_evt_dispq.eq_tail=0x%p\n", fsrp->fr_evt_dispq.eq_tail);
+ ADDBUF("fr_evt_dispq.eq_count=%d\n", fsrp->fr_evt_dispq.eq_count);
+
+ ADDBUF("fr_queue=%c\n", '?');
+ ADDBUF("fr_lock=%c\n", '?');
+ ADDBUF("fr_hdlcnt=%d\n", fsrp->fr_hdlcnt);
+ ADDBUF("fr_vfscnt=%d\n", fsrp->fr_vfscnt);
+ ADDBUF("fr_unmount=%d\n", fsrp->fr_unmount);
+
+ len += sprintf(buffer + len, "fr_rattr=");
+ CHKFULL;
+ for (i = 0; i < DM_ATTR_NAME_SIZE; i++) {
+ ADDBUF("%c", fsrp->fr_rattr.an_chars[i]);
+ }
+ CHKFULL;
+ len += sprintf(buffer + len, "\n");
+ CHKFULL;
+
+ for (i = 0; i < DM_EVENT_MAX; i++) {
+ if (fsrp->fr_sessp[i] != NULL) {
+ ADDBUF("fr_sessp[%d]=", i);
+ ADDBUF("0x%p\n", fsrp->fr_sessp[i]);
+ }
+ }
+ CHKFULL;
+
+ break;
+ }
+
+ if (offset >= len) {
+ *start = buffer;
+ *eof = 1;
+ return 0;
+ }
+ *start = buffer + offset;
+ if ((len -= offset) > count)
+ return count;
+ *eof = 1;
+
+ return len;
+}
+#endif
+
+
+/* Returns a pointer to the filesystem structure for the filesystem
+ referenced by fsidp. The caller is responsible for obtaining dm_reg_lock
+ before calling this routine.
+*/
+
+static dm_fsreg_t *
+dm_find_fsreg(
+ fsid_t *fsidp)
+{
+ dm_fsreg_t *fsrp;
+
+ for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) {
+ if (!memcmp(&fsrp->fr_fsid, fsidp, sizeof(*fsidp)))
+ break;
+ }
+ return(fsrp);
+}
+
+
+/* Given a fsid_t, dm_find_fsreg_and_lock() finds the dm_fsreg_t structure
+ for that filesystem if one exists, and returns a pointer to the structure
+ after obtaining its 'fr_lock' so that the caller can safely modify the
+ dm_fsreg_t. The caller is responsible for releasing 'fr_lock'.
+*/
+
+static dm_fsreg_t *
+dm_find_fsreg_and_lock(
+ fsid_t *fsidp,
+ unsigned long *lcp) /* address of returned lock cookie */
+{
+ dm_fsreg_t *fsrp;
+
+ for (;;) {
+ *lcp = mutex_spinlock(&dm_reg_lock);
+
+ if ((fsrp = dm_find_fsreg(fsidp)) == NULL) {
+ mutex_spinunlock(&dm_reg_lock, *lcp);
+ return(NULL);
+ }
+ if (spin_trylock(&fsrp->fr_lock)) {
+ nested_spinunlock(&dm_reg_lock);
+ return(fsrp); /* success */
+ }
+
+ /* If the second lock is not available, drop the first and
+ start over. This gives the CPU a chance to process any
+ interrupts, and also allows processes which want a fr_lock
+ for a different filesystem to proceed.
+ */
+
+ mutex_spinunlock(&dm_reg_lock, *lcp);
+ }
+}
+
+
+/* dm_add_fsys_entry() is called when a DM_EVENT_MOUNT event is about to be
+ sent. It creates a dm_fsreg_t structure for the filesystem and stores a
+ pointer to a copy of the mount event within that structure so that it is
+ available for subsequent dm_get_mountinfo() calls.
+*/
+
+int
+dm_add_fsys_entry(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp)
+{
+ dm_fsreg_t *fsrp;
+ int msgsize;
+ void *msg;
+ unsigned long lc; /* lock cookie */
+ struct jfs_sb_info *sbi = JFS_SBI(sbp);
+
+ /* Allocate and initialize a dm_fsreg_t structure for the filesystem. */
+
+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_event);
+ msg = kmalloc(msgsize, GFP_KERNEL);
+ if (msg == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+ memcpy(msg, &tevp->te_event, msgsize);
+
+ fsrp = kmem_cache_alloc(dm_fsreg_cachep, SLAB_KERNEL);
+ if (fsrp == NULL) {
+ kfree(msg);
+ printk("%s/%d: kmem_cache_alloc(dm_fsreg_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+ memset(fsrp, 0, sizeof(*fsrp));
+
+ fsrp->fr_sb = sbp;
+ fsrp->fr_tevp = tevp;
+ memcpy(&fsrp->fr_fsid, &sbi->dm_fsid, sizeof(fsid_t));
+ fsrp->fr_msg = msg;
+ fsrp->fr_msgsize = msgsize;
+ fsrp->fr_state = DM_STATE_MOUNTING;
+ sv_init(&fsrp->fr_dispq, SV_DEFAULT, "fr_dispq");
+ sv_init(&fsrp->fr_queue, SV_DEFAULT, "fr_queue");
+ spinlock_init(&fsrp->fr_lock, "fr_lock");
+
+ /* If no other mounted DMAPI filesystem already has this same
+ fsid_t, then add this filesystem to the list.
+ */
+
+ lc = mutex_spinlock(&dm_reg_lock);
+
+ if (!dm_find_fsreg((fsid_t *)&sbi->dm_fsid)) {
+ fsrp->fr_next = dm_registers;
+ dm_registers = fsrp;
+ dm_fsys_cnt++;
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ struct proc_dir_entry *entry;
+
+ sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
+ entry = create_proc_read_entry(buf, 0, 0, fsreg_read_pfs, fsrp);
+ if (entry)
+ entry->owner = THIS_MODULE;
+ }
+#endif
+ mutex_spinunlock(&dm_reg_lock, lc);
+ return(0);
+ }
+
+ /* A fsid_t collision occurred, so prevent this new filesystem from
+ mounting.
+ */
+
+ mutex_spinunlock(&dm_reg_lock, lc);
+
+ sv_destroy(&fsrp->fr_dispq);
+ sv_destroy(&fsrp->fr_queue);
+ spinlock_destroy(&fsrp->fr_lock);
+ kfree(msg);
+ kmem_cache_free(dm_fsreg_cachep, fsrp);
+ return(-EBUSY);
+}
+
+
+/* dm_change_fsys_entry() is called whenever a filesystem's mount state is
+ about to change. The state is changed to DM_STATE_MOUNTED after a
+ successful DM_EVENT_MOUNT event or after a failed unmount. It is changed
+ to DM_STATE_UNMOUNTING after a successful DM_EVENT_PREUNMOUNT event.
+ Finally, the state is changed to DM_STATE_UNMOUNTED after a successful
+ unmount. It stays in this state until the DM_EVENT_UNMOUNT event is
+ queued, at which point the filesystem entry is removed.
+*/
+
+void
+dm_change_fsys_entry(
+ struct super_block *sbp,
+ dm_fsstate_t newstate)
+{
+ dm_fsreg_t *fsrp;
+ int seq_error;
+ unsigned long lc; /* lock cookie */
+ struct jfs_sb_info *sbi = JFS_SBI(sbp);
+
+ /* Find the filesystem referenced by the sbp's fsid_t. This should
+ always succeed.
+ */
+
+ if ((fsrp = dm_find_fsreg_and_lock((fsid_t *)&sbi->dm_fsid, &lc)) == NULL) {
+ panic("dm_change_fsys_entry: can't find DMAPI fsrp for "
+ "sbp %p\n", sbp);
+ }
+
+ /* Make sure that the new state is acceptable given the current state
+ of the filesystem. Any error here is a major DMAPI/filesystem
+ screwup.
+ */
+
+ seq_error = 0;
+ switch (newstate) {
+ case DM_STATE_MOUNTED:
+ if (fsrp->fr_state != DM_STATE_MOUNTING &&
+ fsrp->fr_state != DM_STATE_UNMOUNTING) {
+ seq_error++;
+ }
+ break;
+ case DM_STATE_UNMOUNTING:
+ if (fsrp->fr_state != DM_STATE_MOUNTED)
+ seq_error++;
+ break;
+ case DM_STATE_UNMOUNTED:
+ if (fsrp->fr_state != DM_STATE_UNMOUNTING)
+ seq_error++;
+ break;
+ default:
+ seq_error++;
+ break;
+ }
+ if (seq_error) {
+ panic("dm_change_fsys_entry: DMAPI sequence error: old state "
+ "%d, new state %d, fsrp %p\n", fsrp->fr_state,
+ newstate, fsrp);
+ }
+
+ /* If the old state was DM_STATE_UNMOUNTING, then processes could be
+ sleeping in dm_handle_to_ip() waiting for their DM_NO_TOKEN handles
+ to be translated to inodes. Wake them up so that they either
+ continue (new state is DM_STATE_MOUNTED) or fail (new state is
+ DM_STATE_UNMOUNTED).
+ */
+
+ if (fsrp->fr_state == DM_STATE_UNMOUNTING) {
+ if (fsrp->fr_hdlcnt)
+ sv_broadcast(&fsrp->fr_queue);
+ }
+
+ /* Change the filesystem's mount state to its new value. */
+
+ fsrp->fr_state = newstate;
+ fsrp->fr_tevp = NULL; /* not valid after DM_STATE_MOUNTING */
+
+ /* If the new state is DM_STATE_UNMOUNTING, wait until any application
+ threads currently in the process of making VFS_VGET and VFS_ROOT
+ calls are done before we let this unmount thread continue the
+ unmount. (We want to make sure that the unmount will see these
+ inode references during its scan.)
+ */
+
+ if (newstate == DM_STATE_UNMOUNTING) {
+ while (fsrp->fr_vfscnt) {
+ fsrp->fr_unmount++;
+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
+ lc = mutex_spinlock(&fsrp->fr_lock);
+ fsrp->fr_unmount--;
+ }
+ }
+
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+}
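+
+/* Summary of the legal fr_state transitions enforced above:
+
+	DM_STATE_MOUNTING   -> DM_STATE_MOUNTED	    (mount event succeeded)
+	DM_STATE_MOUNTED    -> DM_STATE_UNMOUNTING  (preunmount event succeeded)
+	DM_STATE_UNMOUNTING -> DM_STATE_MOUNTED	    (the unmount itself failed)
+	DM_STATE_UNMOUNTING -> DM_STATE_UNMOUNTED   (the unmount succeeded)
+*/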
+
+
+/* dm_remove_fsys_entry() gets called after a failed mount or after an
+ DM_EVENT_UNMOUNT event has been queued. (The filesystem entry must stay
+ until the DM_EVENT_UNMOUNT reply is queued so that the event can use the
+ 'fr_sessp' list to see which session to send the event to.)
+*/
+
+void
+dm_remove_fsys_entry(
+ struct super_block *sbp)
+{
+ dm_fsreg_t **fsrpp;
+ dm_fsreg_t *fsrp;
+ unsigned long lc; /* lock cookie */
+ struct jfs_sb_info *sbi = JFS_SBI(sbp);
+
+ /* Find the filesystem referenced by the sbp's fsid_t and dequeue
+ it after verifying that the fr_state shows a filesystem that is
+ either mounting or unmounted.
+ */
+
+ lc = mutex_spinlock(&dm_reg_lock);
+
+ fsrpp = &dm_registers;
+ while ((fsrp = *fsrpp) != NULL) {
+ if (!memcmp(&fsrp->fr_fsid, &sbi->dm_fsid, sizeof(fsrp->fr_fsid)))
+ break;
+ fsrpp = &fsrp->fr_next;
+ }
+ if (fsrp == NULL) {
+ mutex_spinunlock(&dm_reg_lock, lc);
+ panic("dm_remove_fsys_entry: can't find DMAPI fsrp for "
+ "sbp %p\n", sbp);
+ }
+
+ nested_spinlock(&fsrp->fr_lock);
+
+ /* Verify that it makes sense to remove this entry. */
+
+ if (fsrp->fr_state != DM_STATE_MOUNTING &&
+ fsrp->fr_state != DM_STATE_UNMOUNTED) {
+ nested_spinunlock(&fsrp->fr_lock);
+ mutex_spinunlock(&dm_reg_lock, lc);
+ panic("dm_remove_fsys_entry: DMAPI sequence error: old state "
+ "%d, fsrp %p\n", fsrp->fr_state, fsrp);
+ }
+
+ *fsrpp = fsrp->fr_next;
+ dm_fsys_cnt--;
+
+ nested_spinunlock(&dm_reg_lock);
+
+ /* Since the filesystem is about to finish unmounting, we must be sure
+ that no inodes are being referenced within the filesystem before we
+ let this event thread continue. If the filesystem is currently in
+ state DM_STATE_MOUNTING, then we know by definition that there can't
+ be any references. If the filesystem is DM_STATE_UNMOUNTED, then
+ any application threads referencing handles with DM_NO_TOKEN should
+ have already been awakened by dm_change_fsys_entry and should be
+ long gone by now. Just in case they haven't yet left, sleep here
+ until they are really gone.
+ */
+
+ while (fsrp->fr_hdlcnt) {
+ fsrp->fr_unmount++;
+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
+ lc = mutex_spinlock(&fsrp->fr_lock);
+ fsrp->fr_unmount--;
+ }
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ /* Release all memory. */
+
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
+ remove_proc_entry(buf, NULL);
+ }
+#endif
+ sv_destroy(&fsrp->fr_dispq);
+ sv_destroy(&fsrp->fr_queue);
+ spinlock_destroy(&fsrp->fr_lock);
+ kfree(fsrp->fr_msg);
+ kmem_cache_free(dm_fsreg_cachep, fsrp);
+}
+
+
+/* Get an inode for the object referenced by handlep. We cannot use
+ altgetvfs() because it fails if the VFS_OFFLINE bit is set, which means
+ that any call to dm_handle_to_ip() while a umount is in progress would
+ return an error, even if the umount can't possibly succeed because users
+ are in the filesystem. The requests would start to fail as soon as the
+ umount begins, even before the application receives the DM_EVENT_PREUNMOUNT
+ event.
+
+ dm_handle_to_ip() emulates the behavior of lookup() while an unmount is
+ in progress. Any call to dm_handle_to_ip() while the filesystem is in the
+ DM_STATE_UNMOUNTING state will block. If the unmount eventually succeeds,
+ the requests will wake up and fail. If the unmount fails, the requests will
+ wake up and complete normally.
+
+ While a filesystem is in state DM_STATE_MOUNTING, dm_handle_to_ip() will
+ fail all requests. Per the DMAPI spec, the only handles in the filesystem
+ which are valid during a mount event are the handles within the event
+ itself.
+*/
+
+struct inode *
+dm_handle_to_ip(
+ jfs_handle_t *handlep,
+ short *typep)
+{
+ dm_fsreg_t *fsrp;
+ struct inode *ip;
+ short type;
+ unsigned long lc; /* lock cookie */
+ int error;
+ fid_t *fidp;
+ int filetype;
+
+ if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handlep->ha_fsid, &lc)) == NULL)
+ return(NULL);
+
+ fidp = (fid_t*)&handlep->ha_fid;
+ /* If mounting, and we are not asking for a filesystem handle,
+ * then fail the request. (fid_len==0 for fshandle)
+ */
+ if ((fsrp->fr_state == DM_STATE_MOUNTING) &&
+ (fidp->fid_len != 0)) {
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ return(NULL);
+ }
+
+ for (;;) {
+ if (fsrp->fr_state == DM_STATE_MOUNTING)
+ break;
+ if (fsrp->fr_state == DM_STATE_MOUNTED)
+ break;
+ if (fsrp->fr_state == DM_STATE_UNMOUNTED) {
+ if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0)
+ sv_broadcast(&fsrp->fr_queue);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ return(NULL);
+ }
+
+ /* Must be DM_STATE_UNMOUNTING. */
+
+ fsrp->fr_hdlcnt++;
+ sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
+ lc = mutex_spinlock(&fsrp->fr_lock);
+ fsrp->fr_hdlcnt--;
+ }
+
+ fsrp->fr_vfscnt++;
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ /* Now that the mutex is released, wait until we have access to the
+ inode.
+ */
+
+ error = jfs_iget(fsrp->fr_sb, &ip, fidp);
+
+ lc = mutex_spinlock(&fsrp->fr_lock);
+
+ fsrp->fr_vfscnt--;
+ if (fsrp->fr_unmount && fsrp->fr_vfscnt == 0)
+ sv_broadcast(&fsrp->fr_queue);
+
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ if (error || ip == NULL)
+ return(NULL);
+
+ filetype = ip->i_mode & S_IFMT;
+ if (fidp->fid_len == 0) {
+ type = DM_TDT_FS;
+ } else if (filetype == S_IFREG) {
+ type = DM_TDT_REG;
+ } else if (filetype == S_IFDIR) {
+ type = DM_TDT_DIR;
+ } else if (filetype == S_IFLNK) {
+ type = DM_TDT_LNK;
+ } else {
+ type = DM_TDT_OTH;
+ }
+ *typep = type;
+ return(ip);
+}
+
+
+int
+dm_ip_to_handle(
+ struct inode *ip,
+ jfs_handle_t *handlep)
+{
+ struct jfs_fid fid;
+ int hsize;
+ struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+ dm_ino_t ino;
+
+ if ((sbi == NULL) || (sbi->dm_fsid == 0))
+ return(-EINVAL);
+
+ fid.fid_len = sizeof(jfs_fid_t) - sizeof(fid.fid_len);
+ fid.fid_pad = 0;
+ fid.fid_gen = ip->i_generation;
+ ino = ip->i_ino;
+ memcpy(&fid.fid_ino, &ino, sizeof(fid.fid_ino));
+
+ memcpy(&handlep->ha_fsid, &sbi->dm_fsid, sizeof(fsid_t));
+ memcpy(&handlep->ha_fid, &fid, fid.fid_len + sizeof fid.fid_len);
+ hsize = JFS_HSIZE(*handlep);
+ memset((char *)handlep + hsize, 0, sizeof(*handlep) - hsize);
+ return(0);
+}
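+
+/* Illustrative round trip (a sketch): a handle built by dm_ip_to_handle()
+   can later be resolved back with dm_handle_to_ip(); the resolved inode
+   carries its own reference and must be released with iput():
+
+	jfs_handle_t handle;
+	short type;
+
+	if (dm_ip_to_handle(ip, &handle) == 0) {
+		struct inode *ip2 = dm_handle_to_ip(&handle, &type);
+		if (ip2 != NULL)
+			iput(ip2);
+	}
+*/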
+
+
+/* Given an inode, check whether that inode resides in a filesystem that
+ supports DMAPI. Returns zero if the inode is in a DMAPI filesystem,
+ otherwise returns an errno.
+*/
+
+int
+dm_check_dmapi_ip(
+ struct inode *ip)
+{
+ jfs_handle_t handle;
+ /* REFERENCED */
+ dm_fsreg_t *fsrp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if (!IP_IS_JFS(ip)) {
+ return -ENXIO;
+ }
+
+ if ((error = dm_ip_to_handle(ip, &handle)) != 0)
+ return(error);
+
+ if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL)
+ return(-EBADF);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+ return(0);
+}
+
+
+/* Return a pointer to the DM_EVENT_MOUNT event while a mount is still in
+ progress. This is only called by dm_get_config and dm_get_config_events
+ which need to access the filesystem during a mount but which don't have
+ a session and token to use.
+*/
+
+dm_tokevent_t *
+dm_find_mount_tevp_and_lock(
+ fsid_t *fsidp,
+ unsigned long *lcp) /* address of returned lock cookie */
+{
+ dm_fsreg_t *fsrp;
+
+ if ((fsrp = dm_find_fsreg_and_lock(fsidp, lcp)) == NULL)
+ return(NULL);
+
+ if (!fsrp->fr_tevp || fsrp->fr_state != DM_STATE_MOUNTING) {
+ mutex_spinunlock(&fsrp->fr_lock, *lcp);
+ return(NULL);
+ }
+ nested_spinlock(&fsrp->fr_tevp->te_lock);
+ nested_spinunlock(&fsrp->fr_lock);
+ return(fsrp->fr_tevp);
+}
+
+
+/* Wait interruptibly until a session registers disposition for 'event' in
+ filesystem 'sbp'. Upon successful exit, both the filesystem's dm_fsreg_t
+ structure and the session's dm_session_t structure are locked. The caller
+ is responsible for unlocking both structures using the returned cookies.
+
+ Warning: The locks can be dropped in any order, but the 'lc2p' cookie MUST
+ BE USED FOR THE FIRST UNLOCK, and the lc1p cookie must be used for the
+ second unlock. If this is not done, the CPU will be interruptible while
+ holding a mutex, which could deadlock the machine!
+*/
+
+static int
+dm_waitfor_disp(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp,
+ dm_fsreg_t **fsrpp,
+ unsigned long *lc1p, /* addr of first returned lock cookie */
+ dm_session_t **sessionpp,
+ unsigned long *lc2p) /* addr of 2nd returned lock cookie */
+{
+ dm_eventtype_t event = tevp->te_msg.ev_type;
+ dm_session_t *s;
+ dm_fsreg_t *fsrp;
+ struct jfs_sb_info *sbi = JFS_SBI(sbp);
+
+
+ if ((fsrp = dm_find_fsreg_and_lock((fsid_t *)&sbi->dm_fsid, lc1p)) == NULL)
+ return(-ENOENT);
+
+ /* If no session is registered for this event in the specified
+ filesystem, then sleep interruptibly until one does.
+ */
+
+ for (;;) {
+ int rc = 0;
+
+ /* The dm_find_session_and_lock() call is needed because a
+ session that is in the process of being removed might still
+ be in the dm_fsreg_t structure but won't be in the
+ dm_sessions list.
+ */
+
+ if ((s = fsrp->fr_sessp[event]) != NULL &&
+ dm_find_session_and_lock(s->sn_sessid, &s, lc2p) == 0) {
+ break;
+ }
+
+ /* No one is currently registered. DM_EVENT_UNMOUNT events
+ don't wait for anyone to register because the unmount is
+ already past the point of no return.
+ */
+
+ if (event == DM_EVENT_UNMOUNT) {
+ mutex_spinunlock(&fsrp->fr_lock, *lc1p);
+ return(-ENOENT);
+ }
+
+ /* Wait until a session registers for disposition of this
+ event.
+ */
+
+ fsrp->fr_dispcnt++;
+ dm_link_event(tevp, &fsrp->fr_evt_dispq);
+
+ sv_wait_sig(&fsrp->fr_dispq, 1, &fsrp->fr_lock, *lc1p);
+ rc = signal_pending(current);
+
+ *lc1p = mutex_spinlock(&fsrp->fr_lock);
+ fsrp->fr_dispcnt--;
+ dm_unlink_event(tevp, &fsrp->fr_evt_dispq);
+ if (rc) { /* if signal was received */
+ mutex_spinunlock(&fsrp->fr_lock, *lc1p);
+ return(-EINTR);
+ }
+ }
+ *sessionpp = s;
+ *fsrpp = fsrp;
+ return(0);
+}
+
+
+/* Returns the session pointer for the session registered for an event
+ in the given sbp. If successful, the session is locked upon return. The
+ caller is responsible for releasing the lock. If no session is currently
+ registered for the event, dm_waitfor_disp_session() will sleep interruptibly
+ until a registration occurs.
+*/
+
+int
+dm_waitfor_disp_session(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp,
+ dm_session_t **sessionpp,
+ unsigned long *lcp)
+{
+ dm_fsreg_t *fsrp;
+ unsigned long lc2;
+ int error;
+
+ if (tevp->te_msg.ev_type < 0 || tevp->te_msg.ev_type >= DM_EVENT_MAX)
+ return(-EIO);
+
+ error = dm_waitfor_disp(sbp, tevp, &fsrp, lcp, sessionpp, &lc2);
+ if (!error)
+ mutex_spinunlock(&fsrp->fr_lock, lc2); /* rev. cookie order*/
+ return(error);
+}
+
+
+/* Find the session registered for the DM_EVENT_DESTROY event on the specified
+ filesystem, sleeping if necessary until registration occurs. Once found,
+ copy the session's return-on-destroy attribute name, if any, back to the
+ caller.
+*/
+
+int
+dm_waitfor_destroy_attrname(
+ struct super_block *sbp,
+ dm_attrname_t *attrnamep)
+{
+ dm_tokevent_t *tevp;
+ dm_session_t *s;
+ dm_fsreg_t *fsrp;
+ int error;
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2; /* second lock cookie */
+ void *msgp;
+
+ tevp = dm_evt_create_tevp(DM_EVENT_DESTROY, 1, (void**)&msgp);
+ error = dm_waitfor_disp(sbp, tevp, &fsrp, &lc1, &s, &lc2);
+ if (!error) {
+ *attrnamep = fsrp->fr_rattr; /* attribute or zeros */
+ mutex_spinunlock(&s->sn_qlock, lc2); /* rev. cookie order */
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+ }
+ dm_evt_rele_tevp(tevp,0);
+ return(error);
+}
+
+
+/* Unregisters the session for the disposition of all events on all
+ filesystems. This routine is not called until the session has been
+ dequeued from the session list and its session lock has been dropped,
+ but before the actual structure is freed, so it is safe to grab the
+ 'dm_reg_lock' here. If dm_waitfor_disp_session() happens to be called
+ by another thread, it won't find this session on the session list and
+ will wait until a new session registers.
+*/
+
+void
+dm_clear_fsreg(
+ dm_session_t *s)
+{
+ dm_fsreg_t *fsrp;
+ int event;
+ unsigned long lc; /* lock cookie */
+
+ lc = mutex_spinlock(&dm_reg_lock);
+
+ for (fsrp = dm_registers; fsrp != NULL; fsrp = fsrp->fr_next) {
+ nested_spinlock(&fsrp->fr_lock);
+ for (event = 0; event < DM_EVENT_MAX; event++) {
+ if (fsrp->fr_sessp[event] != s)
+ continue;
+ fsrp->fr_sessp[event] = NULL;
+ if (event == DM_EVENT_DESTROY)
+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
+ }
+ nested_spinunlock(&fsrp->fr_lock);
+ }
+
+ mutex_spinunlock(&dm_reg_lock, lc);
+}
+
+
+/*
+ * Return the handle for the object named by path.
+ */
+
+int
+dm_path_to_hdl(
+ char *path, /* any path name */
+ void *hanp, /* user's data buffer */
+ size_t *hlenp) /* set to size of data copied */
+{
+ /* REFERENCED */
+ dm_fsreg_t *fsrp;
+ jfs_handle_t handle;
+ struct inode *ip;
+ size_t hlen;
+ int error;
+ unsigned long lc; /* lock cookie */
+ struct nameidata nd;
+ size_t len;
+ char *name;
+
+ /* XXX get things straightened out so getname() works here? */
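+	/* strnlen_user() returns the string length including the trailing
+	   NUL, or zero if the address is bad, so 'len' is the total number
+	   of bytes to copy in.
+	*/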
+ len = strnlen_user(path, 2000);
+ if (len == 0) // XFS BUG #3
+ return(-EFAULT); // XFS BUG #3
+ name = kmalloc(len, GFP_KERNEL);
+ if (name == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return(-ENOMEM);
+ }
+	if (copy_from_user(name, path, len)) {
+		kfree(name);
+		return(-EFAULT);
+	}
+	name[len - 1] = '\0';	/* in case the string changed underneath us */
+
+ error = path_lookup(name, 0, &nd);
+ kfree(name);
+ if (error)
+ return error;
+
+ ASSERT(nd.dentry);
+ ASSERT(nd.dentry->d_inode);
+	ip = igrab(nd.dentry->d_inode);
+	path_release(&nd);
+	if (ip == NULL)		/* inode was in the middle of being freed */
+		return(-ENOENT);
+
+ if (!IP_IS_JFS(ip)) {
+ /* we're not in JFS anymore, Toto */
+ iput(ip);
+ return -ENXIO; // XFS BUG #4
+ }
+
+ /* we need the inode */
+ error = dm_ip_to_handle(ip, &handle);
+ iput(ip);
+ if (error)
+ return(error);
+
+ if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL)
+ return(-EBADF);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ hlen = JFS_HSIZE(handle);
+
+ if (copy_to_user(hanp, &handle, (int)hlen))
+ return(-EFAULT);
+ if (put_user(hlen,hlenp))
+ return(-EFAULT);
+ return(0);
+}
+
+
+// XFS BUG #12 BEGIN
+/*
+ * Return the path for the object represented by handles.
+ */
+
+int
+dm_hdl_to_path(
+ void *dirhanp, /* directory handle */
+ size_t dirhlen, /* directory handle length */
+ void *targhanp, /* target handle */
+ size_t targhlen, /* target handle length */
+ size_t buflen, /* length of pathbufp */
+ char *pathbufp, /* buffer in which name is returned */
+ size_t *rlenp) /* length of name */
+{
+ /* REFERENCED */
+ jfs_handle_t dir_handle, targ_handle;
+ struct inode *dir_ip, *targ_ip;
+ struct dentry *dir_dentry, *targ_dentry, *d;
+ size_t pathlen = 0;
+ short td_type;
+ int error;
+	size_t		len;	/* size of each working path buffer */
+ char *name1, *name2, *totpath, *newpath, *temp;
+
+ /* Copy handles from user space */
+ if (((error = dm_copyin_handle(dirhanp, dirhlen, &dir_handle)) != 0) ||
+ ((error = dm_copyin_handle(targhanp, targhlen, &targ_handle)) != 0)) {
+ return(error);
+ }
+
+ /* Find directory inode */
+ if ((dir_ip = dm_handle_to_ip(&dir_handle, &td_type)) == NULL) {
+ return(-EBADF);
+ }
+
+ /* Make sure inode is directory on JFS */
+ if ((td_type != DM_TDT_DIR) || (!IP_IS_JFS(dir_ip))) {
+ iput(dir_ip);
+ return(-EBADF);
+ }
+
+ /* Find file inode */
+ if ((targ_ip = dm_handle_to_ip(&targ_handle, &td_type)) == NULL) {
+ iput(dir_ip);
+ return(-EBADF);
+ }
+
+ /* Make sure inode is file or link on JFS */
+ if ((td_type == DM_TDT_FS) || (td_type == DM_TDT_DIR) ||
+ (td_type == DM_TDT_OTH) || (!IP_IS_JFS(targ_ip))) {
+ iput(dir_ip);
+ iput(targ_ip);
+ return(-EBADF);
+ }
+
+ /* Now to find dentrys. If possible, get well-connected ones. */
+ if ((dir_dentry = d_alloc_anon(dir_ip)) == NULL) {
+ iput(dir_ip);
+ iput(targ_ip);
+ return(-ENOMEM);
+ }
+ if ((targ_dentry = d_alloc_anon(targ_ip)) == NULL) {
+ iput(targ_ip);
+ error = -ENOMEM;
+ goto dput_dir;
+ }
+
+ /* Make sure dentrys match inodes and file is in directory */
+ if (( dir_ip->i_ino != dir_dentry->d_inode->i_ino ) ||
+ ( targ_ip->i_ino != targ_dentry->d_inode->i_ino ) ||
+ ( targ_dentry->d_parent != dir_dentry)) {
+ error = -EINVAL;
+ goto dput_targ;
+ }
+
+	/* Allocate two path buffers, each large enough to hold the longest
+	   acceptable path plus one more name component, since the length
+	   check in the loop below is made only after the sprintf.
+	*/
+	len = buflen + JFS_NAME_MAX + 2;
+	name1 = kmalloc(len, GFP_KERNEL);
+ if (name1 == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ error = -ENOMEM;
+ goto dput_targ;
+ }
+ name2 = kmalloc(len, GFP_KERNEL);
+ if (name2 == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ error = -ENOMEM;
+ goto free_name1;
+ }
+
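+	/* The path is built back to front by ping-ponging between the two
+	   buffers: each pass prepends the parent's name to the partial path
+	   accumulated so far, stopping at the root (where a dentry is its
+	   own parent) or once the path exceeds the user's buffer.
+	*/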
+	/* Walk up the directory chain, adding name to path. Start from a
+	   private copy of the target's name so that the buffer swap below
+	   never leaves 'newpath' pointing into the dentry's own d_name
+	   storage.
+	*/
+	strlcpy(name1, targ_dentry->d_name.name, len);
+	totpath = name1;
+	pathlen = strlen(totpath);
+	newpath = name2;
+ while ((d != d->d_parent) && (pathlen <= buflen)) {
+ pathlen = sprintf(newpath, "%s/%s", d->d_name.name, totpath);
+ temp = totpath;
+ totpath = newpath;
+ newpath = temp;
+ d = d->d_parent;
+ }
+
+ /* Make sure entire path fits in user buffer */
+ if (pathlen > buflen) {
+ error = -E2BIG;
+ goto free_name2;
+ }
+
+ /* Copy information back to user space */
+ if ((copy_to_user(pathbufp, totpath, pathlen)) ||
+ (put_user(pathlen, rlenp))) {
+ error = -EFAULT;
+ goto free_name2;
+ }
+
+ error = 0;
+
+free_name2:
+ kfree(name2);
+free_name1:
+ kfree(name1);
+dput_targ:
+ dput(targ_dentry);
+dput_dir:
+ dput(dir_dentry);
+ return(error);
+
+}
+// XFS BUG #12 END
+
+
+/*
+ * Return the handle for the file system containing the object named by path.
+ */
+
+int
+dm_path_to_fshdl(
+ char *path, /* any path name */
+ void *hanp, /* user's data buffer */
+ size_t *hlenp) /* set to size of data copied */
+{
+ /* REFERENCED */
+ dm_fsreg_t *fsrp;
+ jfs_handle_t handle;
+ struct inode *ip;
+ size_t hlen;
+ int error;
+ unsigned long lc; /* lock cookie */
+ struct nameidata nd;
+ size_t len;
+ char *name;
+
+ /* XXX get things straightened out so getname() works here? */
+ len = strnlen_user(path, 2000);
+ if (len == 0) // XFS BUG #5
+ return(-EFAULT); // XFS BUG #5
+ name = kmalloc(len, GFP_KERNEL);
+ if (name == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return(-ENOMEM);
+ }
+	if (copy_from_user(name, path, len)) {
+		kfree(name);
+		return(-EFAULT);
+	}
+	name[len - 1] = '\0';	/* in case the string changed underneath us */
+
+ error = path_lookup(name, LOOKUP_FOLLOW, &nd);
+ kfree(name);
+ if (error)
+ return error;
+
+ ASSERT(nd.dentry);
+ ASSERT(nd.dentry->d_inode);
+
+	ip = igrab(nd.dentry->d_inode);
+	path_release(&nd);
+	if (ip == NULL)		/* inode was in the middle of being freed */
+		return(-ENOENT);
+
+ if (!IP_IS_JFS(ip)) {
+ /* we're not in JFS anymore, Toto */
+ iput(ip);
+ return -ENXIO; // XFS BUG #6
+ }
+
+ /* we need the inode */
+ error = dm_ip_to_handle(ip, &handle);
+ iput(ip);
+
+ if (error)
+ return(error);
+
+ if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL)
+ return(-EBADF);
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ hlen = FSHSIZE;
+	if (copy_to_user(hanp, &handle, (int)hlen))
+ return(-EFAULT);
+ if (put_user(hlen,hlenp))
+ return(-EFAULT);
+ return(0);
+}
+
+
+int
+dm_fd_to_hdl(
+ int fd, /* any file descriptor */
+ void *hanp, /* user's data buffer */
+ size_t *hlenp) /* set to size of data copied */
+{
+ /* REFERENCED */
+ dm_fsreg_t *fsrp;
+ jfs_handle_t handle;
+ size_t hlen;
+ int error;
+ unsigned long lc; /* lock cookie */
+ struct file *filep = fget(fd);
+ struct inode *ip;
+
+ if (!filep)
+ return(-EBADF);
+
+	// XFS BUG #27 START
+	ip = filep->f_dentry->d_inode;
+	if (!IP_IS_JFS(ip)) {
+		fput(filep);
+		return -ENXIO;
+	}
+	// XFS BUG #27 END
+
+	if ((error = dm_ip_to_handle(ip, &handle)) != 0) {
+		fput(filep);
+		return(error);
+	}
+
+	/* The file reference is only needed while the handle is built. */
+	fput(filep);
+
+	if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL)
+		return(-EBADF);
+	mutex_spinunlock(&fsrp->fr_lock, lc);
+
+	hlen = JFS_HSIZE(handle);
+	if (copy_to_user(hanp, &handle, (int)hlen))
+		return(-EFAULT);
+	if (put_user(hlen,hlenp))
+		return(-EFAULT);
+	return(0);
+}
+
+
+/* Enable events on an object. */
+
+int
+dm_set_eventlist(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t *eventsetp,
+ u_int maxevent)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_eventset_t eventset;
+ dm_tokdata_t *tdp;
+ int error;
+
+ if (copy_from_user(&eventset, eventsetp, sizeof(eventset)))
+ return(-EFAULT);
+
+ /* Do some minor sanity checking. */
+
+ if (maxevent == 0 || maxevent > DM_EVENT_MAX)
+ return(-EINVAL);
+
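+	/* The DMAPI global handle carries no event list of its own; mount
+	   event registration goes through dm_set_disp() instead.
+	*/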
+ if (hanp == DM_GLOBAL_HANP && hlen == DM_GLOBAL_HLEN) // XFS BUG #15
+ return(-EINVAL); // XFS BUG #15
+
+ /* Access the specified object. */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->set_eventlist(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_FS ? DM_FSYS_OBJ : 0),
+ &eventset, maxevent);
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+/* Return the list of enabled events for an object. */
+
+int
+dm_get_eventlist(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int nelem,
+ dm_eventset_t *eventsetp,
+ u_int *nelemp)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ dm_eventset_t eventset;
+ u_int elem;
+ int error;
+
+ //if (nelem == 0) // XFS BUG #17
+ // return(-EINVAL); // XFS BUG #17
+
+ /* Access the specified object. */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, (DM_TDT_FS|DM_TDT_REG), // XFS BUG #16
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* Get the object's event list. */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->get_eventlist(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_FS ? DM_FSYS_OBJ : 0),
+ nelem, &eventset, &elem);
+
+ dm_app_put_tdp(tdp);
+
+ if (error && error != -E2BIG) // XFS BUG #17
+ return(error);
+
+ if (copy_to_user(eventsetp, &eventset, sizeof(eventset)))
+ return(-EFAULT);
+ if (put_user(elem, nelemp)) // XFS BUG #17
+ return(-EFAULT);
+ return(error); // XFS BUG #17
+}
+
+
+/* Register for disposition of events. The handle must either be the
+ global handle or must be the handle of a file system. The list of events
+ is pointed to by eventsetp.
+*/
+
+int
+dm_set_disp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t *eventsetp,
+ u_int maxevent)
+{
+ dm_session_t *s;
+ dm_fsreg_t *fsrp;
+ dm_tokdata_t *tdp;
+ dm_eventset_t eventset;
+ int error;
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2; /* second lock cookie */
+ u_int i;
+
+ /* Copy in and validate the event mask. Only the lower maxevent bits
+ are meaningful, so clear any bits set above maxevent.
+ */
+
+ if (maxevent == 0 || maxevent > DM_EVENT_MAX)
+ return(-EINVAL);
+ if (copy_from_user(&eventset, eventsetp, sizeof(eventset)))
+ return(-EFAULT);
+	eventset &= ((dm_eventset_t)1 << maxevent) - 1;
+
+ /* If the caller specified the global handle, then the only valid token
+ is DM_NO_TOKEN, and the only valid event in the event mask is
+ DM_EVENT_MOUNT. If it is set, add the session to the list of
+ sessions that want to receive mount events. If it is clear, remove
+ the session from the list. Since DM_EVENT_MOUNT events never block
+	   waiting for a session to register, there is no one to wake up if we
+ do add the session to the list.
+ */
+
+ if (DM_GLOBALHAN(hanp, hlen)) {
+ if (token != DM_NO_TOKEN)
+ return(-EINVAL);
+ if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0)
+ return(error);
+ if (eventset == 0) {
+ s->sn_flags &= ~DM_SN_WANTMOUNT;
+ error = 0;
+ } else if (eventset == 1 << DM_EVENT_MOUNT) {
+ s->sn_flags |= DM_SN_WANTMOUNT;
+ error = 0;
+ } else {
+ error = -EINVAL;
+ }
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ return(error);
+ }
+
+ /* Since it's not the global handle, it had better be a filesystem
+ handle. Verify that the first 'maxevent' events in the event list
+ are all valid for a filesystem handle.
+ */
+
+ if (eventset & ~DM_VALID_DISP_EVENTS)
+ return(-EINVAL);
+
+ /* Verify that the session is valid, that the handle is a filesystem
+ handle, and that the filesystem is capable of sending events. (If
+ a dm_fsreg_t structure exists, then the filesystem can issue events.)
+ */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsrp = dm_find_fsreg_and_lock((fsid_t*)&tdp->td_handle.ha_fsid, &lc1);
+ if (fsrp == NULL) {
+ dm_app_put_tdp(tdp);
+ return(-EINVAL);
+ }
+
+ /* Now that we own 'fsrp->fr_lock', get the lock on the session so that
+ it can't disappear while we add it to the filesystem's event mask.
+ */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+ dm_app_put_tdp(tdp);
+ return(error);
+ }
+
+ /* Update the event disposition array for this filesystem, adding
+ and/or removing the session as appropriate. If this session is
+ dropping registration for DM_EVENT_DESTROY, or is overriding some
+	   other session's registration for DM_EVENT_DESTROY, then also clear
+	   any return-on-destroy attribute name.
+ */
+
+ for (i = 0; i < DM_EVENT_MAX; i++) {
+ if (DMEV_ISSET(i, eventset)) {
+ if (i == DM_EVENT_DESTROY && fsrp->fr_sessp[i] != s)
+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
+ fsrp->fr_sessp[i] = s;
+ } else if (fsrp->fr_sessp[i] == s) {
+ if (i == DM_EVENT_DESTROY)
+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
+ fsrp->fr_sessp[i] = NULL;
+ }
+ }
+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
+
+ /* Wake up all processes waiting for a disposition on this filesystem
+ in case any of them happen to be waiting for an event which we just
+ added.
+ */
+
+ if (fsrp->fr_dispcnt)
+ sv_broadcast(&fsrp->fr_dispq);
+
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+
+ dm_app_put_tdp(tdp);
+ return(0);
+}
+
+
+/*
+ * Register a specific attribute name with a filesystem. The value of
+ * the attribute is to be returned with an asynchronous destroy event.
+ */
+
+int
+dm_set_return_on_destroy(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ dm_boolean_t enable)
+{
+ dm_attrname_t attrname;
+ dm_tokdata_t *tdp;
+ dm_fsreg_t *fsrp;
+ dm_session_t *s;
+ int error;
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2; /* second lock cookie */
+
+ /* If a dm_attrname_t is provided, copy it in and validate it. */
+
+ if (enable && copy_from_user(&attrname, attrnamep, sizeof(attrname))) // XFS BUG #14
+ return(-EFAULT); // XFS BUG #14
+
+ /* Validate the filesystem handle and use it to get the filesystem's
+ disposition structure.
+ */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS,
+ DM_RIGHT_EXCL, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsrp = dm_find_fsreg_and_lock((fsid_t*)&tdp->td_handle.ha_fsid, &lc1);
+ if (fsrp == NULL) {
+ dm_app_put_tdp(tdp);
+ return(-EINVAL);
+ }
+
+ /* Now that we own 'fsrp->fr_lock', get the lock on the session so that
+ it can't disappear while we add it to the filesystem's event mask.
+ */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+ dm_app_put_tdp(tdp);
+ return(error);
+ }
+
+ /* A caller cannot disable return-on-destroy if he is not registered
+ for DM_EVENT_DESTROY. Enabling return-on-destroy is an implicit
+ dm_set_disp() for DM_EVENT_DESTROY; we wake up all processes
+ waiting for a disposition in case any was waiting for a
+ DM_EVENT_DESTROY event.
+ */
+
+ error = 0;
+ if (enable) {
+ fsrp->fr_sessp[DM_EVENT_DESTROY] = s;
+ fsrp->fr_rattr = attrname;
+ if (fsrp->fr_dispcnt)
+ sv_broadcast(&fsrp->fr_dispq);
+ } else if (fsrp->fr_sessp[DM_EVENT_DESTROY] != s) {
+ error = -EINVAL;
+ } else {
+ memset(&fsrp->fr_rattr, 0, sizeof(fsrp->fr_rattr));
+ }
+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
+ mutex_spinunlock(&fsrp->fr_lock, lc1);
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_get_mountinfo(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_fsreg_t *fsrp;
+ dm_tokdata_t *tdp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Make sure that the caller's buffer is 8-byte aligned. */
+
+ if (((__psint_t)bufp & (sizeof(u64) - 1)) != 0)
+ return(-EFAULT);
+
+ /* Verify that the handle is a filesystem handle, and that the
+ filesystem is capable of sending events. If not, return an error.
+ */
+
+ error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_FS,
+ DM_RIGHT_SHARED, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* Find the filesystem entry. This should always succeed as the
+ dm_app_get_tdp call created a filesystem reference. Once we find
+ the entry, drop the lock. The mountinfo message is never modified,
+ the filesystem entry can't disappear, and we don't want to hold a
+ spinlock while doing copyout calls.
+ */
+
+ fsrp = dm_find_fsreg_and_lock((fsid_t*)&tdp->td_handle.ha_fsid, &lc);
+ if (fsrp == NULL) {
+ dm_app_put_tdp(tdp);
+ return(-EINVAL);
+ }
+ mutex_spinunlock(&fsrp->fr_lock, lc);
+
+ /* Copy the message into the user's buffer and update his 'rlenp'. */
+
+ if (put_user(fsrp->fr_msgsize, rlenp)) {
+ error = -EFAULT;
+ } else if (fsrp->fr_msgsize > buflen) { /* user buffer not big enough */
+ error = -E2BIG;
+ } else if (copy_to_user(bufp, fsrp->fr_msg, fsrp->fr_msgsize)) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_getall_disp(
+ dm_sessid_t sid,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2; /* second lock cookie */
+ int totalsize;
+ int msgsize;
+ int fsyscnt;
+ dm_dispinfo_t *prevmsg;
+ dm_fsreg_t *fsrp;
+ int error;
+ char *kbuf;
+
+ /* Because the dm_getall_disp structure contains a u64 field,
+ make sure that the buffer provided by the caller is aligned so
+ that he can read such fields successfully.
+ */
+
+ if (((__psint_t)bufp & (sizeof(u64) - 1)) != 0)
+ return(-EFAULT);
+
+ /* Compute the size of a dm_dispinfo structure, rounding up to an
+ 8-byte boundary so that any subsequent structures will also be
+ aligned.
+ */
+
+	msgsize = (sizeof(dm_dispinfo_t) + FSHSIZE + sizeof(u64) - 1) &
+			~(sizeof(u64) - 1);
+
+ /* Loop until we can get the right amount of temp space, being careful
+ not to hold a mutex during the allocation. Usually only one trip.
+ */
+
+ for (;;) {
+ if ((fsyscnt = dm_fsys_cnt) == 0) {
+ /*if (dm_cpoutsizet(rlenp, 0))*/
+ if (put_user(0,rlenp))
+ return(-EFAULT);
+ return(0);
+ }
+ kbuf = kmalloc(fsyscnt * msgsize, GFP_KERNEL);
+ if (kbuf == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ lc1 = mutex_spinlock(&dm_reg_lock);
+ if (fsyscnt == dm_fsys_cnt)
+ break;
+
+ mutex_spinunlock(&dm_reg_lock, lc1);
+ kfree(kbuf);
+ }
+
+ /* Find the indicated session and lock it. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
+ mutex_spinunlock(&dm_reg_lock, lc1);
+ kfree(kbuf);
+ return(error);
+ }
+
+ /* Create a dm_dispinfo structure for each filesystem in which
+ this session has at least one event selected for disposition.
+ */
+
+ totalsize = 0; /* total bytes to transfer to the user */
+ prevmsg = NULL;
+
+ for (fsrp = dm_registers; fsrp; fsrp = fsrp->fr_next) {
+ dm_dispinfo_t *disp;
+ int event;
+ int found;
+
+ disp = (dm_dispinfo_t *)(kbuf + totalsize);
+
+ DMEV_ZERO(disp->di_eventset);
+
+ for (event = 0, found = 0; event < DM_EVENT_MAX; event++) {
+ if (fsrp->fr_sessp[event] != s)
+ continue;
+ DMEV_SET(event, disp->di_eventset);
+ found++;
+ }
+ if (!found)
+ continue;
+
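+		/* The filesystem handle is packed immediately after its
+		   dm_dispinfo structure; vd_offset and vd_length say where
+		   it lives relative to the start of the dispinfo record.
+		*/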
+ disp->_link = 0;
+ disp->di_fshandle.vd_offset = sizeof(dm_dispinfo_t);
+ disp->di_fshandle.vd_length = FSHSIZE;
+
+ memcpy((char *)disp + disp->di_fshandle.vd_offset,
+ &fsrp->fr_fsid, disp->di_fshandle.vd_length);
+
+ if (prevmsg)
+ prevmsg->_link = msgsize;
+
+ prevmsg = disp;
+ totalsize += msgsize;
+ }
+ mutex_spinunlock(&s->sn_qlock, lc2); /* reverse cookie order */
+ mutex_spinunlock(&dm_reg_lock, lc1);
+
+ if (put_user(totalsize, rlenp)) {
+ error = -EFAULT;
+ } else if (totalsize > buflen) { /* no more room */
+ error = -E2BIG;
+ } else if (totalsize && copy_to_user(bufp, kbuf, totalsize)) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+
+ kfree(kbuf);
+ return(error);
+}
+
+int
+dm_open_by_handle_rvp(
+ unsigned int fd,
+ void *hanp,
+ size_t hlen,
+ int flags,
+ int *rvp)
+{
+ jfs_handle_t handle;
+ int error;
+ struct inode *ip;
+ short td_type;
+ struct dentry *dentry;
+ int new_fd;
+ struct file *mfilp;
+ struct file *filp;
+
+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0) {
+ return(error);
+ }
+
+ if ((ip = dm_handle_to_ip(&handle, &td_type)) == NULL) {
+ return(-EBADF);
+ }
+ if ((td_type == DM_TDT_FS) || (td_type == DM_TDT_OTH)) {
+ iput(ip);
+ return(-EBADF);
+ }
+
+ if ((new_fd = get_unused_fd()) < 0) {
+ iput(ip);
+ return(-EMFILE);
+ }
+
+ /* Now to find a dentry. If possible, get a well-connected one. */
+ dentry = d_alloc_root(ip);
+ if (dentry == NULL) {
+ iput(ip);
+ put_unused_fd(new_fd);
+ return(-ENOMEM);
+ }
+
+	if (ip->i_ino != dentry->d_inode->i_ino) {
+ dput(dentry);
+ put_unused_fd(new_fd);
+ return(-EINVAL);
+ }
+
+ mfilp = fget(fd);
+ if (!mfilp) {
+ dput(dentry);
+ put_unused_fd(new_fd);
+ return(-EBADF);
+ }
+
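+	/* dentry_open() consumes one reference on both the dentry and the
+	   vfsmount, even on failure, so take an extra reference on the
+	   caller's mount before handing it over.
+	*/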
+ mntget(mfilp->f_vfsmnt);
+
+ /* Create file pointer */
+ filp = dentry_open(dentry, mfilp->f_vfsmnt, flags);
+ if (IS_ERR(filp)) {
+ put_unused_fd(new_fd);
+ fput(mfilp);
+		return PTR_ERR(filp);	/* already negative */
+ }
+
+ if (td_type == DM_TDT_REG)
+ filp->f_mode |= FINVIS;
+ fd_install(new_fd, filp);
+ fput(mfilp);
+ *rvp = new_fd;
+ return 0;
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_right.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_right.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_right.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_right.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "dmapi_private.h"
+#include "jfs_debug.h"
+
+
+#define DM_FG_STHREAD 0x001 /* keep other threads from using tdp */
+#define DM_FG_MUSTEXIST 0x002 /* handle must exist in the event */
+#define DM_FG_DONTADD 0x004 /* don't add handle if not in event */
+
+/* Get a handle of the form (void *, size_t) from user space and convert it to
+   a jfs_handle_t. Do as much validation of the result as possible; any error
+ other than a bad address should return EBADF per the DMAPI spec.
+*/
+
+int
+dm_copyin_handle(
+ void *hanp, /* input, handle data */
+ size_t hlen, /* input, size of handle data */
+ jfs_handle_t *handlep) /* output, copy of data */
+{
+ u_short len;
+ fid_t *fidp;
+
+ fidp = (fid_t*)&handlep->ha_fid;
+
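+	/* A handle arrives as an ha_fsid optionally followed by an ha_fid.
+	   A bare fsid is a filesystem handle; anything longer must be a
+	   full handle whose embedded fid_len matches the length that the
+	   caller passed in.
+	*/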
+ if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
+ return(-EBADF);
+
+ if (copy_from_user(handlep, hanp, hlen))
+ return(-EFAULT);
+
+ if (hlen < sizeof(*handlep))
+ memset((char *)handlep + hlen, 0, sizeof(*handlep) - hlen);
+
+ if (hlen == sizeof(handlep->ha_fsid))
+ return(0); /* FS handle, nothing more to check */
+
+ len = hlen - sizeof(handlep->ha_fsid) - sizeof(fidp->fid_len);
+
+ if (fidp->fid_len != len ||
+ *((short *) fidp->fid_data)) {
+ return(-EBADF);
+ }
+ return(0);
+}
+
+/* Allocate and initialize a tevp structure. Called from both application and
+ event threads.
+*/
+
+static dm_tokevent_t *
+dm_init_tevp(
+ int ev_size, /* size of event structure */
+ int var_size) /* size of variable-length data */
+{
+ dm_tokevent_t *tevp;
+ int msgsize;
+
+ /* Calculate the size of the event in bytes and allocate memory for it.
+ Zero all but the variable portion of the message, which will be
+ eventually overlaid by the caller with data.
+ */
+
+ msgsize = offsetof(dm_tokevent_t, te_event) + ev_size + var_size;
+ tevp = kmalloc(msgsize, GFP_KERNEL);
+ if (tevp == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return NULL;
+ }
+ memset(tevp, 0, msgsize - var_size);
+
+ /* Now initialize all the non-zero fields. */
+
+ spinlock_init(&tevp->te_lock, "te_lock");
+ sv_init(&tevp->te_evt_queue, SV_DEFAULT, "te_evt_queue");
+ sv_init(&tevp->te_app_queue, SV_DEFAULT, "te_app_queue");
+ tevp->te_allocsize = msgsize;
+ tevp->te_msg.ev_type = DM_EVENT_INVALID;
+ tevp->te_flags = 0;
+
+ return(tevp);
+}
+
+
+/* Given the event type and the number of bytes of variable length data that
+ will follow the event, dm_evt_create_tevp() creates a dm_tokevent_t
+ structure to hold the event and initializes all the common event fields.
+
+ No locking is required for this routine because the caller is an event
+ thread, and is therefore the only thread that can see the event.
+*/
+
+dm_tokevent_t *
+dm_evt_create_tevp(
+ dm_eventtype_t event,
+ int variable_size,
+ void **msgpp)
+{
+ dm_tokevent_t *tevp;
+ int evsize;
+
+ switch (event) {
+ case DM_EVENT_READ:
+ case DM_EVENT_WRITE:
+ case DM_EVENT_TRUNCATE:
+ evsize = sizeof(dm_data_event_t);
+ break;
+
+ case DM_EVENT_DESTROY:
+ evsize = sizeof(dm_destroy_event_t);
+ break;
+
+ case DM_EVENT_MOUNT:
+ evsize = sizeof(dm_mount_event_t);
+ break;
+
+ case DM_EVENT_PREUNMOUNT:
+ case DM_EVENT_UNMOUNT:
+ case DM_EVENT_NOSPACE:
+ case DM_EVENT_CREATE:
+ case DM_EVENT_REMOVE:
+ case DM_EVENT_RENAME:
+ case DM_EVENT_SYMLINK:
+ case DM_EVENT_LINK:
+ case DM_EVENT_POSTCREATE:
+ case DM_EVENT_POSTREMOVE:
+ case DM_EVENT_POSTRENAME:
+ case DM_EVENT_POSTSYMLINK:
+ case DM_EVENT_POSTLINK:
+ case DM_EVENT_ATTRIBUTE:
+ case DM_EVENT_DEBUT: /* currently not supported */
+ case DM_EVENT_CLOSE:
+ evsize = sizeof(dm_namesp_event_t);
+ break;
+
+ case DM_EVENT_CANCEL: /* currently not supported */
+ evsize = sizeof(dm_cancel_event_t);
+ break;
+
+ case DM_EVENT_USER:
+ evsize = 0;
+ break;
+
+ default:
+ panic("dm_create_tevp: called with unknown event type %d\n",
+ event);
+ }
+
+ /* Allocate and initialize an event structure of the correct size. */
+
+ tevp = dm_init_tevp(evsize, variable_size);
+ if (tevp == NULL)
+ return NULL;
+ tevp->te_evt_ref = 1;
+
+ /* Fields ev_token, ev_sequence, and _link are all filled in when the
+ event is queued onto a session. Initialize all other fields here.
+ */
+
+ tevp->te_msg.ev_type = event;
+ tevp->te_msg.ev_data.vd_offset = offsetof(dm_tokevent_t, te_event) -
+ offsetof(dm_tokevent_t, te_msg);
+ tevp->te_msg.ev_data.vd_length = evsize + variable_size;
+
+ /* Give the caller a pointer to the event-specific structure. */
+
+ *msgpp = ((char *)&tevp->te_msg + tevp->te_msg.ev_data.vd_offset);
+ return(tevp);
+}
+
+
+/* Given a pointer to an event (tevp) and a pointer to a jfs_handle_t, look for a
+ tdp structure within the event which contains the handle_t. Either verify
+ that the event contains the tdp, or optionally add the tdp to the
+ event. Called only from application threads.
+
+ On entry, tevp->te_lock is held; it is dropped prior to return.
+*/
+
+static int
+dm_app_lookup_tdp(
+ jfs_handle_t *handlep, /* the handle we are looking for */
+ dm_tokevent_t *tevp, /* the event to search for the handle */
+ unsigned long *lcp, /* address of active lock cookie */
+ short types, /* acceptable object types */
+ dm_right_t right, /* minimum right the object must have */
+ u_int flags,
+ dm_tokdata_t **tdpp) /* if ! NULL, pointer to matching tdp */
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ struct inode *ip;
+ int error;
+
+ /* Bump the tevp application reference counter so that the event
+ can't disappear in case we have to drop the lock for a while.
+ */
+
+ tevp->te_app_ref++;
+ *tdpp = NULL; /* assume failure */
+
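+	/* This is a retry loop: whenever te_lock has to be dropped in order
+	   to sleep or to allocate memory, another thread may have changed
+	   the tdp list, so the search restarts from the top.
+	*/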
+ for (;;) {
+ /* Look for a matching tdp in the tevp. */
+
+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
+ if (JFS_HANDLE_CMP(&tdp->td_handle, handlep) == 0)
+ break;
+ }
+
+ /* If the tdp exists, but either we need single-thread access
+ to the handle and can't get it, or some other thread already
+ has single-thread access, then sleep until we can try again.
+ */
+
+ if (tdp != NULL && tdp->td_app_ref &&
+ ((flags & DM_FG_STHREAD) ||
+ (tdp->td_flags & DM_TDF_STHREAD))) {
+ tevp->te_app_slp++;
+ sv_wait(&tevp->te_app_queue, 1,
+ &tevp->te_lock, *lcp);
+ *lcp = mutex_spinlock(&tevp->te_lock);
+ tevp->te_app_slp--;
+ continue;
+ }
+
+ if (tdp != NULL &&
+ (tdp->td_icount > 0 || tdp->td_flags & DM_TDF_EVTREF)) {
+ /* We have an existing tdp with a non-zero inode
+ reference count. If it's the wrong type, return
+ an appropriate errno.
+ */
+
+ if (!(tdp->td_type & types)) {
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ dm_put_tevp(tevp, NULL); /* no destroy events */
+ return(-EINVAL); // XFS BUG #7
+ }
+
+ /* If the current access right isn't high enough,
+ complain.
+ */
+
+ if (tdp->td_right < right) {
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ dm_put_tevp(tevp, NULL); /* no destroy events */
+ return(-EACCES);
+ }
+
+ /* The handle is acceptable. Increment the tdp
+ application and inode references and mark the tdp
+ as single-threaded if necessary.
+ */
+
+ tdp->td_app_ref++;
+ if (flags & DM_FG_STHREAD)
+ tdp->td_flags |= DM_TDF_STHREAD;
+ tdp->td_icount++;
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ (void)fsys_vector->obj_ref_hold(tdp->td_ip);
+
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ *tdpp = tdp;
+ return(0);
+ }
+
+		/* If the tdp is not in the tevp or does not have an inode
+ reference, check to make sure it is okay to add/update it.
+ */
+
+ if (flags & DM_FG_MUSTEXIST) {
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ dm_put_tevp(tevp, NULL); /* no destroy events */
+ return(-EACCES); /* i.e. an insufficient right */
+ }
+ if (flags & DM_FG_DONTADD) {
+ tevp->te_app_ref--;
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ return(0);
+ }
+
+ /* If a tdp structure doesn't yet exist, create one and link
+ it into the tevp. Drop the lock while we are doing this as
+ zallocs can go to sleep. Once we have the memory, make
+ sure that another thread didn't simultaneously add the same
+ handle to the same event. If so, toss ours and start over.
+ */
+
+ if (tdp == NULL) {
+ dm_tokdata_t *tmp;
+
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+
+ tdp = kmem_cache_alloc(dm_tokdata_cachep, SLAB_KERNEL);
+			if (tdp == NULL){
+				printk("%s/%d: kmem_cache_alloc(dm_tokdata_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+				dm_put_tevp(tevp, NULL); /* drop the app ref taken above */
+				return(-ENOMEM);
+			}
+ memset(tdp, 0, sizeof(*tdp));
+
+ *lcp = mutex_spinlock(&tevp->te_lock);
+
+ for (tmp = tevp->te_tdp; tmp; tmp = tmp->td_next) {
+ if (JFS_HANDLE_CMP(&tmp->td_handle, handlep) == 0)
+ break;
+ }
+ if (tmp) {
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ continue;
+ }
+
+ tdp->td_next = tevp->te_tdp;
+ tevp->te_tdp = tdp;
+ tdp->td_tevp = tevp;
+ tdp->td_handle = *handlep;
+ }
+
+ /* Temporarily single-thread access to the tdp so that other
+ threads don't touch it while we are filling the rest of the
+ fields in.
+ */
+
+ tdp->td_app_ref = 1;
+ tdp->td_flags |= DM_TDF_STHREAD;
+
+ /* Drop the spinlock while we access, validate, and obtain the
+ proper rights to the object. This can take a very long time
+ if the inode is not in memory, if the filesystem is
+ unmounting, or if the request_right() call should block
+ because some other tdp or kernel thread is holding a right.
+ */
+
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+
+ if ((ip = dm_handle_to_ip(handlep, &tdp->td_type)) == NULL) {
+ error = -EBADF;
+ } else {
+ tdp->td_icount = 1;
+ tdp->td_ip = ip;
+
+ /* The handle is usable. Check that the type of the
+ object matches one of the types that the caller
+ will accept.
+ */
+
+ if (!(types & tdp->td_type)) {
+ error = -EINVAL; // XFS BUG #7
+ } else if (right > DM_RIGHT_NULL) {
+ /* Attempt to get the rights required by the
+ caller. If rights can't be obtained, return
+ an error.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->request_right(tdp->td_ip,
+ DM_RIGHT_NULL,
+ (tdp->td_type == DM_TDT_FS ?
+ DM_FSYS_OBJ : 0),
+ DM_RR_WAIT, right);
+ if (!error) {
+ tdp->td_right = right;
+ }
+ } else {
+ error = 0;
+ }
+ }
+ if (error != 0) {
+ dm_put_tevp(tevp, tdp); /* destroy event risk, although tiny */
+ return(error);
+ }
+
+ *lcp = mutex_spinlock(&tevp->te_lock);
+
+ /* Wake up any threads which may have seen our tdp while we
+ were filling it in.
+ */
+
+ if (!(flags & DM_FG_STHREAD)) {
+ tdp->td_flags &= ~DM_TDF_STHREAD;
+ if (tevp->te_app_slp)
+ sv_broadcast(&tevp->te_app_queue);
+ }
+
+ mutex_spinunlock(&tevp->te_lock, *lcp);
+ *tdpp = tdp;
+ return(0);
+ }
+}
+
+
+/* dm_app_get_tdp_by_token() is called whenever the application request
+ contains a session ID and contains a token other than DM_NO_TOKEN.
+ Most of the callers provide a right that is either DM_RIGHT_SHARED or
+ DM_RIGHT_EXCL, but a few of the callers such as dm_obj_ref_hold() may
+ specify a right of DM_RIGHT_NULL.
+*/
+
+static int
+dm_app_get_tdp_by_token(
+ dm_sessid_t sid, /* an existing session ID */
+ void *hanp,
+ size_t hlen,
+ dm_token_t token, /* an existing token */
+ short types, /* acceptable object types */
+ dm_right_t right, /* minimum right the object must have */
+ u_int flags,
+ dm_tokdata_t **tdpp)
+{
+ dm_tokevent_t *tevp;
+ jfs_handle_t handle;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if (right < DM_RIGHT_NULL || right > DM_RIGHT_EXCL)
+ return(-EINVAL);
+
+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
+ return(error);
+
+ /* Find and lock the event which corresponds to the specified
+ session/token pair.
+ */
+
+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
+ return(error);
+
+ return(dm_app_lookup_tdp(&handle, tevp, &lc, types,
+ right, flags, tdpp));
+}
+
+
+/* Function dm_app_get_tdp() must ONLY be called from routines associated with
+ application calls, e.g. dm_read_invis, dm_set_disp, etc. It must not be
+ called by a thread responsible for generating an event such as
+ dm_send_data_event()!
+
+ dm_app_get_tdp() is the interface used by all application calls other than
+ dm_get_events, dm_respond_event, dm_get_config, dm_get_config_events, and by
+ the dm_obj_ref_* and dm_*_right families of requests.
+
+ dm_app_get_tdp() converts a sid/hanp/hlen/token quad into a tdp pointer,
+ increments the number of active application threads in the event, and
+ increments the number of active application threads using the tdp. The
+ 'right' parameter must be either DM_RIGHT_SHARED or DM_RIGHT_EXCL. The
+ token may either be DM_NO_TOKEN, or can be a token received in a synchronous
+ event.
+*/
+
+int
+dm_app_get_tdp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ short types,
+ dm_right_t right, /* minimum right */
+ dm_tokdata_t **tdpp)
+{
+ dm_session_t *s;
+ jfs_handle_t handle;
+ dm_tokevent_t *tevp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ ASSERT(right >= DM_RIGHT_SHARED);
+
+ /* If a token other than DM_NO_TOKEN is specified, find the event on
+ this session which owns the token and increment its reference count.
+ */
+
+ if (token != DM_NO_TOKEN) { /* look up existing tokevent struct */
+ return(dm_app_get_tdp_by_token(sid, hanp, hlen, token, types,
+ right, DM_FG_MUSTEXIST, tdpp));
+ }
+
+ /* The token is DM_NO_TOKEN. In this case we only want to verify that
+ the session ID is valid, and do not need to continue holding the
+ session lock after we know that to be true.
+ */
+
+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
+ return(error);
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ /* When DM_NO_TOKEN is used, we simply block until we can obtain the
+ right that we want (since the tevp contains no tdp structures).
+ The blocking when we eventually support it will occur within
+ fsys_vector->request_right().
+ */
+
+	tevp = dm_init_tevp(0, 0);
+	if (tevp == NULL)
+		return(-ENOMEM);
+	lc = mutex_spinlock(&tevp->te_lock);
+
+ return(dm_app_lookup_tdp(&handle, tevp, &lc, types, right, 0, tdpp));
+}
+
+
+/* dm_get_config_tdp() is only called by dm_get_config() and
+ dm_get_config_events(), which neither have a session ID nor a token.
+ Both of these calls are supposed to work even if the filesystem is in the
+ process of being mounted, as long as the caller only uses handles within
+ the mount event.
+*/
+
+int
+dm_get_config_tdp(
+ void *hanp,
+ size_t hlen,
+ dm_tokdata_t **tdpp)
+{
+ jfs_handle_t handle;
+ dm_tokevent_t *tevp;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if ((error = dm_copyin_handle(hanp, hlen, &handle)) != 0)
+ return(error);
+
+	tevp = dm_init_tevp(0, 0);
+	if (tevp == NULL)
+		return(-ENOMEM);
+	lc = mutex_spinlock(&tevp->te_lock);
+
+ /* Try to use the handle provided by the caller and assume DM_NO_TOKEN.
+ This will fail if the filesystem is in the process of being mounted.
+ */
+
+ error = dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY,
+ DM_RIGHT_NULL, 0, tdpp);
+
+ if (!error) {
+ return(0);
+ }
+
+ /* Perhaps the filesystem is still mounting, in which case we need to
+ see if this is one of the handles in the DM_EVENT_MOUNT tevp.
+ */
+
+ if ((tevp = dm_find_mount_tevp_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL)
+ return(-EBADF);
+
+ return(dm_app_lookup_tdp(&handle, tevp, &lc, DM_TDT_ANY,
+ DM_RIGHT_NULL, DM_FG_MUSTEXIST, tdpp));
+}
+
+
+/* dm_put_tdp() is called to release any right held on the inode, and to
+ iput() all references held on the inode. It is the caller's
+ responsibility to ensure that no other application threads are using the
+ tdp, and if necessary to unlink the tdp from the tevp before calling
+ this routine and to free the tdp afterwards.
+*/
+
+static void
+dm_put_tdp(
+ dm_tokdata_t *tdp)
+{
+ ASSERT(tdp->td_app_ref <= 1);
+
+ /* If the application thread is holding a right, or if the event
+ thread had a right but it has disappeared because of a dm_pending
+ or Cntl-C, then we need to release it here.
+ */
+
+ if (tdp->td_right != DM_RIGHT_NULL) {
+ dm_fsys_vector_t *fsys_vector;
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ (void)fsys_vector->release_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_FS ? DM_FSYS_OBJ : 0));
+ tdp->td_right = DM_RIGHT_NULL;
+ }
+
+ /* Given that we wouldn't be here if there was still an event thread,
+ this iput loop has the potential of generating a DM_EVENT_DESTROY
+ event if some other thread has unlinked the file.
+ */
+
+ while (tdp->td_icount > 0) {
+ iput(tdp->td_ip);
+ tdp->td_icount--;
+ }
+
+ tdp->td_flags &= ~(DM_TDF_HOLD|DM_TDF_RIGHT);
+ tdp->td_ip = NULL;
+}
+
+
+/* Function dm_put_tevp() must ONLY be called from routines associated with
+ application threads, e.g. dm_read_invis, dm_get_events, etc. It must not be
+ called by a thread responsible for generating an event, such as
+ dm_send_data_event.
+
+ PLEASE NOTE: It is possible for this routine to generate DM_EVENT_DESTROY
+ events, because its calls to dm_put_tdp drop inode references, and another
+ thread may have already unlinked a file whose inode we are de-referencing.
+ This sets the stage for various types of deadlock if the thread calling
+ dm_put_tevp is the same thread that calls dm_respond_event! In particular,
+ the dm_sent_destroy_event routine needs to obtain the dm_reg_lock,
+ dm_session_lock, and sn_qlock in order to queue the destroy event. No
+ caller of dm_put_tevp can hold any of these locks!
+
+ Other possible deadlocks are that dm_send_destroy_event could block waiting
+ for a thread to register for the event using dm_set_disp() and/or
+ dm_set_return_on_destroy, or it could block because the session's sn_newq
+ is at the dm_max_queued_msgs event limit. The only safe solution
+ (unimplemented) is to have a separate kernel thread for each filesystem
+ whose only job is to do the inode-dereferencing. That way dm_respond_event
+ will not block, so the application can keep calling dm_get_events to read
+ events even if the filesystem thread should block. (If the filesystem
+ thread blocks, so will all subsequent destroy events for the same
+ filesystem.)
+*/
+
+void
+dm_put_tevp(
+ dm_tokevent_t *tevp,
+ dm_tokdata_t *tdp)
+{
+ int free_tdp = 0;
+ unsigned long lc; /* lock cookie */
+
+ lc = mutex_spinlock(&tevp->te_lock);
+
+ if (tdp != NULL) {
+ if (tdp->td_icount > 1 || (tdp->td_flags & DM_TDF_EVTREF)) {
+ ASSERT(tdp->td_app_ref > 0);
+
+ iput(tdp->td_ip);
+ tdp->td_icount--;
+ } else {
+ ASSERT(tdp->td_app_ref == 1);
+
+ /* The inode reference count is either already at
+ zero (e.g. a failed dm_handle_to_ip() call in
+ dm_app_lookup_tdp()) or is going to zero. We can't
+ hold the lock while we decrement the count because
+ we could potentially end up being busy for a long
+ time in VOP_INACTIVATE. Use single-threading to
+ lock others out while we clean house.
+ */
+
+ tdp->td_flags |= DM_TDF_STHREAD;
+
+ /* WARNING - A destroy event is possible here if we are
+			   giving up the last reference on an inode which has
+ been previously unlinked by some other thread!
+ */
+
+ mutex_spinunlock(&tevp->te_lock, lc);
+ dm_put_tdp(tdp);
+ lc = mutex_spinlock(&tevp->te_lock);
+
+ /* If this tdp is not one of the original tdps in the
+ event, then remove it from the tevp.
+ */
+
+ if (!(tdp->td_flags & DM_TDF_ORIG)) {
+ dm_tokdata_t **tdpp = &tevp->te_tdp;
+
+ while (*tdpp && *tdpp != tdp) {
+ tdpp = &(*tdpp)->td_next;
+ }
+ if (*tdpp == NULL) {
+ panic("dm_remove_tdp_from_tevp: tdp "
+ "%p not in tevp %p\n", tdp,
+ tevp);
+ }
+ *tdpp = tdp->td_next;
+ free_tdp++;
+ }
+ }
+
+ /* If this is the last app thread actively using the tdp, clear
+ any single-threading and wake up any other app threads who
+ might be waiting to use this tdp, single-threaded or
+ otherwise.
+ */
+
+ if (--tdp->td_app_ref == 0) {
+ if (tdp->td_flags & DM_TDF_STHREAD) {
+ tdp->td_flags &= ~DM_TDF_STHREAD;
+ if (tevp->te_app_slp)
+ sv_broadcast(&tevp->te_app_queue);
+ }
+ }
+
+ if (free_tdp) {
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ }
+ }
+
+ /* If other application threads are using this token/event, they will
+ do the cleanup.
+ */
+
+ if (--tevp->te_app_ref > 0) {
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return;
+ }
+
+ /* If event generation threads are waiting for this thread to go away,
+ wake them up and let them do the cleanup.
+ */
+
+ if (tevp->te_evt_ref > 0) {
+ sv_broadcast(&tevp->te_evt_queue);
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return;
+ }
+
+ /* This thread is the last active thread using the token/event. No
+ lock can be held while we disassemble the tevp because we could
+ potentially end up being busy for a long time in VOP_INACTIVATE.
+ */
+
+ mutex_spinunlock(&tevp->te_lock, lc);
+
+ /* WARNING - One or more destroy events are possible here if we are
+ giving up references on inodes which have been previously unlinked
+ by other kernel threads!
+ */
+
+ while ((tdp = tevp->te_tdp) != NULL) {
+ tevp->te_tdp = tdp->td_next;
+ dm_put_tdp(tdp);
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ }
+ spinlock_destroy(&tevp->te_lock);
+ sv_destroy(&tevp->te_evt_queue);
+ sv_destroy(&tevp->te_app_queue);
+ kfree(tevp);
+}
+
+
+/* No caller of dm_app_put_tevp can hold either of the locks dm_reg_lock,
+ dm_session_lock, or any sn_qlock! (See dm_put_tevp for details.)
+*/
+
+void
+dm_app_put_tdp(
+ dm_tokdata_t *tdp)
+{
+ dm_put_tevp(tdp->td_tevp, tdp);
+}
+
+
+/* dm_change_right is only called if the event thread is the one doing the
+ cleanup on a completed event. It looks at the current rights of a tdp
+ and compares that with the rights it had on the tdp when the event was
+   created. If different, it reacquires the original rights, then transfers
+ the rights back to being thread-based.
+*/
+
+static void
+dm_change_right(
+ dm_tokdata_t *tdp)
+{
+#ifdef HAVE_DMAPI_RIGHTS
+ dm_fsys_vector_t *fsys_vector;
+ int error;
+ u_int type;
+#endif
+
+	/* If the event doesn't have an inode reference, if the original right
+ was DM_RIGHT_NULL, or if the rights were never switched from being
+ thread-based to tdp-based, then there is nothing to do.
+ */
+
+ if (!(tdp->td_flags & DM_TDF_EVTREF))
+ return;
+
+ if (tdp->td_orig_right == DM_RIGHT_NULL)
+ return;
+
+ /* DEBUG - Need a check here for event-based rights. */
+
+#ifdef HAVE_DMAPI_RIGHTS
+ /* The "rights" vectors are stubs now anyway. When they are
+ * implemented then bhv locking will have to be sorted out.
+ */
+
+ /* If the current right is not the same as it was when the event was
+ created, first get back the original right.
+ */
+
+	if (tdp->td_right != tdp->td_orig_right) {
+		fsys_vector = dm_fsys_vector(tdp->td_ip);
+		type = (tdp->td_type == DM_TDT_FS ? DM_FSYS_OBJ : 0);
+
+		switch (tdp->td_orig_right) {
+		case DM_RIGHT_SHARED:
+			if (tdp->td_right == DM_RIGHT_EXCL) {
+				error = fsys_vector->downgrade_right(
+					tdp->td_ip, tdp->td_right, type);
+				if (!error)
+					break;
+				(void)fsys_vector->release_right(tdp->td_ip,
+					tdp->td_right, type);
+			}
+			(void)fsys_vector->request_right(tdp->td_ip,
+				tdp->td_right, type, DM_RR_WAIT,
+				tdp->td_orig_right);
+			break;
+
+		case DM_RIGHT_EXCL:
+			if (tdp->td_right == DM_RIGHT_SHARED) {
+				error = fsys_vector->upgrade_right(tdp->td_ip,
+					tdp->td_right, type);
+				if (!error)
+					break;
+				(void)fsys_vector->release_right(tdp->td_ip,
+					tdp->td_right, type);
+			}
+			(void)fsys_vector->request_right(tdp->td_ip,
+				tdp->td_right, type, DM_RR_WAIT,
+				tdp->td_orig_right);
+			break;
+ case DM_RIGHT_NULL:
+ break;
+ }
+ }
+#endif
+
+ /* We now have back the same level of rights as we had when the event
+ was generated. Now transfer the rights from being tdp-based back
+ to thread-based.
+ */
+
+ /* DEBUG - Add a call here to transfer rights back to thread-based. */
+
+ /* Finally, update the tdp so that we don't mess with the rights when
+ we eventually call dm_put_tdp.
+ */
+
+ tdp->td_right = DM_RIGHT_NULL;
+}
+
+
+/* This routine is only called by event threads. The calls to dm_put_tdp
+ are not a deadlock risk here because this is an event thread, and it is
+ okay for such a thread to block on an induced destroy event. Okay, maybe
+ there is a slight risk; say that the event contains three inodes all of
+ which have DM_RIGHT_EXCL, and say that we are at the dm_max_queued_msgs
+ limit, and that the first inode is already unlinked. In that case the
+ destroy event will block waiting to be queued, and the application thread
+ could happen to reference one of the other locked inodes. Deadlock.
+*/
+
+void
+dm_evt_rele_tevp(
+ dm_tokevent_t *tevp,
+ int droprights) /* non-zero, evt thread loses rights */
+{
+ dm_tokdata_t *tdp;
+ unsigned long lc; /* lock cookie */
+
+ lc = mutex_spinlock(&tevp->te_lock);
+
+ /* If we are here without DM_TEF_FINAL set and with at least one
+ application reference still remaining, then one of several
+ possibilities is true:
+ 1. This is an asynchronous event which has been queued but has not
+ yet been delivered, or which is in the process of being delivered.
+ 2. This is an unmount event (pseudo-asynchronous) yet to be
+ delivered or in the process of being delivered.
+ 3. This event had DM_FLAGS_NDELAY specified, and the application
+ has sent a dm_pending() reply for the event.
+ 4. This is a DM_EVENT_READ, DM_EVENT_WRITE, or DM_EVENT_TRUNCATE
+ event and the user typed a Cntl-C.
+ In all of these cases, the correct behavior is to leave the
+ responsibility of releasing any rights to the application threads
+ when they are done.
+ */
+
+ if (tevp->te_app_ref > 0 && !(tevp->te_flags & DM_TEF_FINAL)) {
+ tevp->te_evt_ref--;
+ for (tdp = tevp->te_tdp; tdp; tdp = tdp->td_next) {
+ if (tdp->td_flags & DM_TDF_EVTREF) {
+ tdp->td_flags &= ~DM_TDF_EVTREF;
+ if (tdp->td_icount == 0) {
+ tdp->td_ip = NULL;
+ }
+ }
+ }
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return; /* not the last thread */
+ }
+
+ /* If the application reference count is non-zero here, that can only
+ mean that dm_respond_event() has been called, but the application
+ still has one or more threads in the kernel that haven't let go of
+ the tevp. In these cases, the event thread must wait until all
+ application threads have given up their references, and their
+ rights to handles within the event.
+ */
+
+ while (tevp->te_app_ref) {
+ sv_wait(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
+ lc = mutex_spinlock(&tevp->te_lock);
+ }
+
+ /* This thread is the last active thread using the token/event. Reset
+ the rights of any inode that was part of the original event back
+ to their initial values before returning to the filesystem. The
+ exception is if the event failed (droprights is non-zero), in which
+ case we chose to return to the filesystem with all rights released.
+ Release the rights on any inode that was not part of the original
+ event. Give up all remaining application vnode references
+ regardless of whether or not the inode was part of the original
+ event.
+ */
+
+ mutex_spinunlock(&tevp->te_lock, lc);
+
+ while ((tdp = tevp->te_tdp) != NULL) {
+ tevp->te_tdp = tdp->td_next;
+ if ((tdp->td_flags & DM_TDF_ORIG) &&
+ (tdp->td_flags & DM_TDF_EVTREF) &&
+ (!droprights)) {
+ dm_change_right(tdp);
+ }
+ dm_put_tdp(tdp);
+ kmem_cache_free(dm_tokdata_cachep, tdp);
+ }
+ spinlock_destroy(&tevp->te_lock);
+ sv_destroy(&tevp->te_evt_queue);
+ sv_destroy(&tevp->te_app_queue);
+ kfree(tevp);
+}
+
+
+/* dm_obj_ref_hold() is just a fancy way to get an inode reference on an object
+ to hold it in kernel memory.
+*/
+
+int
+dm_obj_ref_hold(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_NULL, DM_FG_STHREAD, &tdp);
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0) {
+ if (tdp->td_flags & DM_TDF_HOLD) { /* if already held */
+ error = -EBUSY;
+ } else {
+ tdp->td_flags |= DM_TDF_HOLD;
+ tdp->td_icount++;
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ (void)fsys_vector->obj_ref_hold(tdp->td_ip);
+ }
+ dm_app_put_tdp(tdp);
+ }
+ return(error);
+}
+
+
+int
+dm_obj_ref_rele(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen)
+{
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_NULL, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0) {
+ if (!(tdp->td_flags & DM_TDF_HOLD)) { /* if not held */
+ error = -EACCES; /* use the DM_FG_MUSTEXIST errno */
+ } else {
+ tdp->td_flags &= ~DM_TDF_HOLD;
+ iput(tdp->td_ip);
+ tdp->td_icount--;
+ }
+ dm_app_put_tdp(tdp);
+ }
+ return(error);
+}
+
+
+int
+dm_obj_ref_query_rvp(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen,
+ int *rvp)
+{
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_INO,
+ DM_RIGHT_NULL, DM_FG_DONTADD|DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* If the request is valid but the handle just isn't present in the
+ event or the hold flag isn't set, return zero, else return one.
+ */
+
+ if (tdp) {
+ if (tdp->td_flags & DM_TDF_HOLD) { /* if held */
+ *rvp = 1;
+ } else {
+ *rvp = 0;
+ }
+ dm_app_put_tdp(tdp);
+ } else {
+ *rvp = 0;
+ }
+ return(0);
+}
+
+
+int
+dm_downgrade_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_EXCL, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
+ if (error != 0) {
+ if (error == -EACCES) // XFS BUG #32
+ error = -EPERM; // XFS BUG #32
+ return(error);
+ }
+
+ /* Attempt the downgrade. Filesystems which support rights but not
+ the downgrading of rights will return ENOSYS.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->downgrade_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_FS ? DM_FSYS_OBJ : 0));
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0)
+ tdp->td_right = DM_RIGHT_SHARED;
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_query_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_right_t *rightp)
+{
+ dm_tokdata_t *tdp;
+ dm_right_t right;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_NULL, DM_FG_DONTADD|DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ /* Get the current right and copy it to the caller. The tdp is
+ single-threaded, so no mutex lock is needed. If the tdp is not in
+ the event we are supposed to return DM_RIGHT_NULL in order to be
+ compatible with Veritas.
+ */
+
+ if (tdp) {
+ right = tdp->td_right;
+ dm_app_put_tdp(tdp);
+ } else {
+ right = DM_RIGHT_NULL;
+ }
+ if (copy_to_user(rightp, &right, sizeof(right)))
+ return(-EFAULT);
+ return(0);
+}
+
+
+int
+dm_release_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_SHARED, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->release_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_FS ? DM_FSYS_OBJ : 0));
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0) {
+ tdp->td_right = DM_RIGHT_NULL;
+ if (tdp->td_flags & DM_TDF_RIGHT) {
+ tdp->td_flags &= ~DM_TDF_RIGHT;
+ iput(tdp->td_ip);
+ tdp->td_icount--;
+ }
+ }
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_request_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ u_int flags,
+ dm_right_t right)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ if (right != DM_RIGHT_SHARED && right != DM_RIGHT_EXCL) // XFS BUG #29
+ return(-EINVAL); // XFS BUG #29
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_NULL, DM_FG_STHREAD, &tdp);
+ if (error != 0)
+ return(error);
+
+ // XFS BUG #30 START
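+	/* Waiting to upgrade from a shared to an exclusive right would mean
+	   waiting for our own shared right to be released, so refuse the
+	   request rather than deadlock.
+	*/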
+ if ((tdp->td_right == DM_RIGHT_SHARED) && (right == DM_RIGHT_EXCL) &&
+ (flags & DM_RR_WAIT)) {
+ dm_app_put_tdp(tdp);
+ return(-EACCES);
+ }
+ // XFS BUG #30 END
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->request_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_FS ? DM_FSYS_OBJ : 0), flags, right);
+
+ /* The tdp is single-threaded, so no mutex lock is needed for update.
+
+ If this is the first dm_request_right call for this inode, then we
+ need to bump the inode reference count for two reasons. First of
+ all, it is supposed to be impossible for the file to disappear or
+ for the filesystem to be unmounted while a right is held on a file;
+ bumping the file's inode reference count ensures this. Second, if
+ rights are ever actually implemented, it will most likely be done
+ without changes to the on-disk inode, which means that we can't let
+ the inode become unreferenced while a right on it is held.
+ */
+
+ if (error == 0) {
+ if (!(tdp->td_flags & DM_TDF_RIGHT)) { /* if first call */
+ tdp->td_flags |= DM_TDF_RIGHT;
+ tdp->td_icount++;
+ (void)fsys_vector->obj_ref_hold(tdp->td_ip);
+ }
+ tdp->td_right = right;
+ }
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
+
+
+int
+dm_upgrade_right(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token)
+{
+ dm_fsys_vector_t *fsys_vector;
+ dm_tokdata_t *tdp;
+ int error;
+
+ error = dm_app_get_tdp_by_token(sid, hanp, hlen, token, DM_TDT_ANY,
+ DM_RIGHT_SHARED, DM_FG_MUSTEXIST|DM_FG_STHREAD, &tdp);
+ if (error != 0) {
+ if (error == -EACCES) // XFS BUG #31
+ error = -EPERM; // XFS BUG #31
+ return(error);
+ }
+
+ /* If the object already has the DM_RIGHT_EXCL right, no need to
+ attempt an upgrade.
+ */
+
+ if (tdp->td_right == DM_RIGHT_EXCL) {
+ dm_app_put_tdp(tdp);
+ return(0);
+ }
+
+ /* Attempt the upgrade. Filesystems which support rights but not
+ the upgrading of rights will return ENOSYS.
+ */
+
+ fsys_vector = dm_fsys_vector(tdp->td_ip);
+ error = fsys_vector->upgrade_right(tdp->td_ip, tdp->td_right,
+ (tdp->td_type == DM_TDT_FS ? DM_FSYS_OBJ : 0));
+
+ /* The tdp is single-threaded, so no mutex lock needed for update. */
+
+ if (error == 0)
+ tdp->td_right = DM_RIGHT_EXCL;
+
+ dm_app_put_tdp(tdp);
+ return(error);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_session.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_session.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_session.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_session.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,1616 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include "jfs_debug.h"
+#include "jfs_xattr.h"
+#include "dmapi_private.h"
+
+dm_session_t *dm_sessions = NULL; /* head of session list */
+u_int dm_sessions_active = 0; /* # sessions currently active */
+dm_sessid_t dm_next_sessid = 1; /* next session ID to use */
+lock_t dm_session_lock = SPIN_LOCK_UNLOCKED;/* lock for session list */
+
+dm_token_t dm_next_token = 1; /* next token ID to use */
+dm_sequence_t dm_next_sequence = 1; /* next sequence number to use */
+lock_t dm_token_lock = SPIN_LOCK_UNLOCKED;/* dm_next_token/dm_next_sequence lock */
+
+int dm_max_queued_msgs = 2048; /* max # undelivered msgs/session */
+
+#ifdef DM_USE_SHASH
+int dm_hash_buckets = 1009; /* prime -- number of buckets */
+
+/* XXX Integer modulo hash -- floating point is not allowed in the
+   Linux kernel. */
+#define DM_SHASH(sess,inodenum) ((sess)->sn_sesshash + \
+ ((int)(((unsigned long)(inodenum)) % \
+ dm_hash_buckets)))
+#endif
+
+
+#ifdef CONFIG_PROC_FS
+static int
+sessions_read_pfs(char *buffer, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len;
+ dm_session_t *sessp = (dm_session_t*)data;
+
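+/* Note: CHKFULL's break exits the while(1) loop below, so ADDBUF stops
+   emitting as soon as the output buffer is full. */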
+#define CHKFULL if(len >= count) break;
+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
+
+ len=0;
+ while(1){
+ ADDBUF("sessp=0x%p\n", sessp);
+ ADDBUF("sn_next=0x%p\n", sessp->sn_next);
+ ADDBUF("sn_sessid=%d\n", sessp->sn_sessid);
+ ADDBUF("sn_flags=%x\n", sessp->sn_flags);
+ ADDBUF("sn_qlock=%c\n", '?');
+ ADDBUF("sn_readerq=%c\n", '?');
+ ADDBUF("sn_writerq=%c\n", '?');
+ ADDBUF("sn_readercnt=%u\n", sessp->sn_readercnt);
+ ADDBUF("sn_writercnt=%u\n", sessp->sn_writercnt);
+
+ ADDBUF("sn_newq.eq_head=0x%p\n", sessp->sn_newq.eq_head);
+ ADDBUF("sn_newq.eq_tail=0x%p\n", sessp->sn_newq.eq_tail);
+ ADDBUF("sn_newq.eq_count=%d\n", sessp->sn_newq.eq_count);
+
+ ADDBUF("sn_delq.eq_head=0x%p\n", sessp->sn_delq.eq_head);
+ ADDBUF("sn_delq.eq_tail=0x%p\n", sessp->sn_delq.eq_tail);
+ ADDBUF("sn_delq.eq_count=%d\n", sessp->sn_delq.eq_count);
+
+ ADDBUF("sn_evt_writerq.eq_head=0x%p\n", sessp->sn_evt_writerq.eq_head);
+ ADDBUF("sn_evt_writerq.eq_tail=0x%p\n", sessp->sn_evt_writerq.eq_tail);
+ ADDBUF("sn_evt_writerq.eq_count=%d\n", sessp->sn_evt_writerq.eq_count);
+
+ ADDBUF("sn_info=\"%s\"\n", sessp->sn_info);
+
+ break;
+ }
+
+ if (offset >= len) {
+ *start = buffer;
+ *eof = 1;
+ return 0;
+ }
+ *start = buffer + offset;
+ if ((len -= offset) > count)
+ return count;
+ *eof = 1;
+
+ return len;
+}
+#endif
+
+
+/* Link a session to the end of the session list. New sessions are always
+ added at the end of the list so that dm_enqueue_mount_event() doesn't
+ miss a session. The caller must have obtained dm_session_lock before
+ calling this routine.
+*/
+
+static void
+link_session(
+ dm_session_t *s)
+{
+ dm_session_t *tmp;
+
+ if ((tmp = dm_sessions) == NULL) {
+ dm_sessions = s;
+ } else {
+ while (tmp->sn_next != NULL)
+ tmp = tmp->sn_next;
+ tmp->sn_next = s;
+ }
+ s->sn_next = NULL;
+ dm_sessions_active++;
+}
+
+
+/* Remove a session from the session list. The caller must have obtained
+ dm_session_lock before calling this routine. unlink_session() should only
+ be used in situations where the session is known to be on the dm_sessions
+ list; otherwise it panics.
+*/
+
+static void
+unlink_session(
+ dm_session_t *s)
+{
+ dm_session_t *tmp;
+
+ if (dm_sessions == s) {
+ dm_sessions = dm_sessions->sn_next;
+ } else {
+ for (tmp = dm_sessions; tmp; tmp = tmp->sn_next) {
+ if (tmp->sn_next == s)
+ break;
+ }
+ if (tmp == NULL) {
+ panic("unlink_session: corrupt DMAPI session list, "
+ "dm_sessions %p, session %p\n",
+ dm_sessions, s);
+ }
+ tmp->sn_next = s->sn_next;
+ }
+ s->sn_next = NULL;
+ dm_sessions_active--;
+}
+
+
+/* Link an event to the end of an event queue. The caller must have obtained
+ the session's sn_qlock before calling this routine.
+*/
+
+void
+dm_link_event(
+ dm_tokevent_t *tevp,
+ dm_eventq_t *queue)
+{
+
+ if (queue->eq_tail) {
+ queue->eq_tail->te_next = tevp;
+ queue->eq_tail = tevp;
+ } else {
+ queue->eq_head = queue->eq_tail = tevp;
+ }
+ tevp->te_next = NULL;
+ queue->eq_count++;
+}
+
+
+/* Remove an event from an event queue. The caller must have obtained the
+ session's sn_qlock before calling this routine. dm_unlink_event() should
+ only be used in situations where the event is known to be on the queue;
+ otherwise it panics.
+*/
+
+void
+dm_unlink_event(
+ dm_tokevent_t *tevp,
+ dm_eventq_t *queue)
+{
+ dm_tokevent_t *tmp;
+
+ if (queue->eq_head == tevp) {
+ queue->eq_head = tevp->te_next;
+ if (queue->eq_head == NULL)
+ queue->eq_tail = NULL;
+ } else {
+ tmp = queue->eq_head;
+ while (tmp && tmp->te_next != tevp)
+ tmp = tmp->te_next;
+ if (tmp == NULL) {
+ panic("dm_unlink_event: corrupt DMAPI queue %p, "
+ "tevp %p\n", queue, tevp);
+ }
+ tmp->te_next = tevp->te_next;
+ if (tmp->te_next == NULL)
+ queue->eq_tail = tmp;
+ }
+ tevp->te_next = NULL;
+ queue->eq_count--;
+
+}
+
+/* Link a regular file event to a hash bucket. The caller must have obtained
+ the session's sn_qlock before calling this routine.
+ The tokevent must be for a regular file object--DM_TDT_REG.
+*/
+
+#ifdef DM_USE_SHASH
+static void
+hash_event(
+ dm_session_t *s,
+ dm_tokevent_t *tevp)
+{
+ dm_sesshash_t *sh;
+ dm_ino_t ino;
+
+ if (s->sn_sesshash == NULL)
+ s->sn_sesshash = kmem_zalloc(dm_hash_buckets * sizeof(dm_sesshash_t), KM_SLEEP);
+
+ ino = ((jfs_fid_t*)&tevp->te_tdp->td_handle.ha_fid)->fid_ino;
+ sh = DM_SHASH(s, ino);
+
+#ifdef DM_SHASH_DEBUG
+ if (sh->h_next == NULL) {
+ s->sn_buckets_in_use++;
+ if (s->sn_buckets_in_use > s->sn_max_buckets_in_use)
+			s->sn_max_buckets_in_use = s->sn_buckets_in_use;
+ }
+ sh->maxlength++;
+ sh->curlength++;
+ sh->num_adds++;
+#endif
+
+ tevp->te_flags |= DM_TEF_HASHED;
+ tevp->te_hashnext = sh->h_next;
+ sh->h_next = tevp;
+}
+#endif
+
+
+/* Remove a regular file event from a hash bucket. The caller must have
+ obtained the session's sn_qlock before calling this routine.
+ The tokevent must be for a regular file object--DM_TDT_REG.
+*/
+
+#ifdef DM_USE_SHASH
+static void
+unhash_event(
+ dm_session_t *s,
+ dm_tokevent_t *tevp)
+{
+ dm_sesshash_t *sh;
+ dm_tokevent_t *tmp;
+ dm_ino_t ino;
+
+ if (s->sn_sesshash == NULL)
+ return;
+
+ ino = ((jfs_fid_t*)&tevp->te_tdp->td_handle.ha_fid)->fid_ino;
+ sh = DM_SHASH(s, ino);
+
+ if (sh->h_next == tevp) {
+ sh->h_next = tevp->te_hashnext; /* leap frog */
+ } else {
+ tmp = sh->h_next;
+ while (tmp->te_hashnext != tevp) {
+ tmp = tmp->te_hashnext;
+ }
+ tmp->te_hashnext = tevp->te_hashnext; /* leap frog */
+ }
+ tevp->te_hashnext = NULL;
+ tevp->te_flags &= ~DM_TEF_HASHED;
+
+#ifdef DM_SHASH_DEBUG
+ if (sh->h_next == NULL)
+ s->sn_buckets_in_use--;
+ sh->curlength--;
+ sh->num_dels++;
+#endif
+}
+#endif
+
+
+/* Determine if this is a repeat event. The caller MUST be holding
+ the session lock.
+ The tokevent must be for a regular file object--DM_TDT_REG.
+ Returns:
+ 0 == match not found
+ 1 == match found
+*/
+
+#ifdef DM_USE_SHASH
+static int
+repeated_event(
+ dm_session_t *s,
+ dm_tokevent_t *tevp)
+{
+ dm_sesshash_t *sh;
+ dm_data_event_t *d_event1;
+ dm_data_event_t *d_event2;
+ dm_tokevent_t *tevph;
+ dm_ino_t ino1;
+ dm_ino_t ino2;
+
+ if ((!s->sn_newq.eq_tail) && (!s->sn_delq.eq_tail)) {
+ return(0);
+ }
+ if (s->sn_sesshash == NULL) {
+ return(0);
+ }
+
+ ino1 = ((jfs_fid_t*)&tevp->te_tdp->td_handle.ha_fid)->fid_ino;
+ sh = DM_SHASH(s, ino1);
+
+ if (sh->h_next == NULL) {
+ /* bucket is empty, no match here */
+ return(0);
+ }
+
+ d_event1 = (dm_data_event_t *)((char *)&tevp->te_msg + tevp->te_msg.ev_data.vd_offset);
+ tevph = sh->h_next;
+ while (tevph) {
+ /* find something with the same event type and handle type */
+ if ((tevph->te_msg.ev_type == tevp->te_msg.ev_type) &&
+ (tevph->te_tdp->td_type == tevp->te_tdp->td_type)) {
+
+			ino2 = ((jfs_fid_t*)&tevph->te_tdp->td_handle.ha_fid)->fid_ino;
+ d_event2 = (dm_data_event_t *)((char *)&tevph->te_msg + tevph->te_msg.ev_data.vd_offset);
+
+ /* If the two events are operating on the same file,
+ and the same part of that file, then we have a
+ match.
+ */
+ if ((ino1 == ino2) &&
+ (d_event2->de_offset == d_event1->de_offset) &&
+ (d_event2->de_length == d_event1->de_length)) {
+ /* found a match */
+#ifdef DM_SHASH_DEBUG
+ sh->dup_hits++;
+#endif
+ return(1);
+ }
+ }
+ tevph = tevph->te_hashnext;
+ }
+
+ /* No match found */
+ return(0);
+}
+#endif
+
+
+/* Return a pointer to a session given its session ID, or EINVAL if no session
+ has the session ID (per the DMAPI spec). The caller must have obtained
+ dm_session_lock before calling this routine.
+*/
+
+static int
+dm_find_session(
+ dm_sessid_t sid,
+ dm_session_t **sessionpp)
+{
+ dm_session_t *s;
+
+ for (s = dm_sessions; s; s = s->sn_next) {
+ if (s->sn_sessid == sid) {
+ *sessionpp = s;
+ return(0);
+ }
+ }
+ return(-EINVAL);
+}
+
+
+/* Return a pointer to a locked session given its session ID. '*lcp' is
+ used to obtain the session's sn_qlock. Caller is responsible for eventually
+ unlocking it.
+*/
+
+int
+dm_find_session_and_lock(
+ dm_sessid_t sid,
+ dm_session_t **sessionpp,
+ unsigned long *lcp) /* addr of returned lock cookie */
+{
+ int error;
+
+ for (;;) {
+ *lcp = mutex_spinlock(&dm_session_lock);
+
+ if ((error = dm_find_session(sid, sessionpp)) != 0) {
+ mutex_spinunlock(&dm_session_lock, *lcp);
+ return(error);
+ }
+ if (spin_trylock(&(*sessionpp)->sn_qlock)) {
+ nested_spinunlock(&dm_session_lock);
+ return(0); /* success */
+ }
+
+ /* If the second lock is not available, drop the first and
+ start over. This gives the CPU a chance to process any
+ interrupts, and also allows processes which want a sn_qlock
+ for a different session to proceed.
+ */
+
+ mutex_spinunlock(&dm_session_lock, *lcp);
+ }
+}
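+
+/* Usage sketch (illustrative only): callers pair this routine with an
+   explicit unlock of the returned sn_qlock, as the other entry points in
+   this file do:
+
+	unsigned long lc;
+	dm_session_t *s;
+	int error;
+
+	if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+		return(error);
+	... examine or update session state ...
+	mutex_spinunlock(&s->sn_qlock, lc);
+*/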
+
+
+/* Return a pointer to the event on the specified session's sn_delq which
+ contains the given token. The caller must have obtained the session's
+ sn_qlock before calling this routine.
+*/
+
+static int
+dm_find_msg(
+ dm_session_t *s,
+ dm_token_t token,
+ dm_tokevent_t **tevpp)
+{
+ dm_tokevent_t *tevp;
+
+ if (token <= DM_INVALID_TOKEN)
+ return(-EINVAL);
+
+ for (tevp = s->sn_delq.eq_head; tevp; tevp = tevp->te_next) {
+ if (tevp->te_msg.ev_token == token) {
+ *tevpp = tevp;
+ return(0);
+ }
+ }
+ return(-ESRCH);
+}
+
+
+/* Given a session ID and token, find the tevp on the specified session's
+ sn_delq which corresponds to that session ID/token pair. If a match is
+ found, lock the tevp's te_lock and return a pointer to the tevp.
+ '*lcp' is used to obtain the tevp's te_lock. The caller is responsible
+ for eventually unlocking it.
+*/
+
+int
+dm_find_msg_and_lock(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_tokevent_t **tevpp,
+ unsigned long *lcp) /* address of returned lock cookie */
+{
+ dm_session_t *s;
+ int error;
+
+ if ((error = dm_find_session_and_lock(sid, &s, lcp)) != 0)
+ return(error);
+
+ if ((error = dm_find_msg(s, token, tevpp)) != 0) {
+ mutex_spinunlock(&s->sn_qlock, *lcp);
+ return(error);
+ }
+ nested_spinlock(&(*tevpp)->te_lock);
+ nested_spinunlock(&s->sn_qlock);
+ return(0);
+}
+
+
+/* Create a new session, or resume an old session if one is given. */
+
+int
+dm_create_session(
+ dm_sessid_t old,
+ char *info,
+ dm_sessid_t *new)
+{
+ dm_session_t *s;
+ dm_sessid_t sid;
+ char sessinfo[DM_SESSION_INFO_LEN];
+ size_t len;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ len = strnlen_user(info, DM_SESSION_INFO_LEN); // XFS BUG #1
+ if (len == 0) // XFS BUG #2
+ return(-EFAULT); // XFS BUG #2
+ if (len > DM_SESSION_INFO_LEN) // XFS BUG #1
+ return(-E2BIG); // XFS BUG #1
+ if (copy_from_user(sessinfo, info, len))
+ return(-EFAULT);
+ lc = mutex_spinlock(&dm_session_lock);
+ sid = dm_next_sessid++;
+ mutex_spinunlock(&dm_session_lock, lc);
+ if (copy_to_user(new, &sid, sizeof(sid)))
+ return(-EFAULT);
+
+ if (old == DM_NO_SESSION) {
+ s = kmem_cache_alloc(dm_session_cachep, SLAB_KERNEL);
+ if (s == NULL) {
+ printk("%s/%d: kmem_cache_alloc(dm_session_cachep) returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+ memset(s, 0, sizeof(*s));
+
+ sv_init(&s->sn_readerq, SV_DEFAULT, "dmreadq");
+ sv_init(&s->sn_writerq, SV_DEFAULT, "dmwritq");
+ spinlock_init(&s->sn_qlock, "sn_qlock");
+ lc = mutex_spinlock(&dm_session_lock);
+ } else {
+ lc = mutex_spinlock(&dm_session_lock);
+ if ((error = dm_find_session(old, &s)) != 0) {
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(error);
+ }
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
+ remove_proc_entry(buf, NULL);
+ }
+#endif
+ unlink_session(s);
+ }
+ memcpy(s->sn_info, sessinfo, len);
+	s->sn_info[len-1] = 0;	/* force NUL termination */
+ s->sn_sessid = sid;
+ link_session(s);
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ struct proc_dir_entry *entry;
+
+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
+ entry = create_proc_read_entry(buf, 0, 0, sessions_read_pfs, s);
+ /*entry->owner = THIS_MODULE;*/
+ }
+#endif
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(0);
+}
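+
+/* Userspace sketch (illustrative; assumes the standard XDSM libdm wrapper,
+   which is not part of this patch). A daemon typically creates a fresh
+   session like so ("jfs-hsm" is a made-up info string):
+
+	dm_sessid_t sid;
+
+	if (dm_create_session(DM_NO_SESSION, "jfs-hsm", &sid) != 0)
+		... handle the error ...
+*/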
+
+
+int
+dm_destroy_session(
+ dm_sessid_t sid)
+{
+ dm_session_t *s;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* The dm_session_lock must be held until the session is unlinked. */
+
+ lc = mutex_spinlock(&dm_session_lock);
+
+ if ((error = dm_find_session(sid, &s)) != 0) {
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(error);
+ }
+ nested_spinlock(&s->sn_qlock);
+
+ /* The session exists. Check to see if it is still in use. If any
+ messages still exist on the sn_newq or sn_delq, or if any processes
+ are waiting for messages to arrive on the session, then the session
+ must not be destroyed.
+ */
+
+ if (s->sn_newq.eq_head || s->sn_readercnt || s->sn_delq.eq_head) {
+ nested_spinunlock(&s->sn_qlock);
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(-EBUSY);
+ }
+
+#ifdef CONFIG_PROC_FS
+ {
+ char buf[100];
+ sprintf(buf, DMAPI_DBG_PROCFS "/sessions/0x%p", s);
+ remove_proc_entry(buf, NULL);
+ }
+#endif
+
+ /* The session is not in use. Dequeue it from the session chain. */
+
+ unlink_session(s);
+ nested_spinunlock(&s->sn_qlock);
+ mutex_spinunlock(&dm_session_lock, lc);
+
+	/* Now clear the session's disposition registration, and then destroy
+ the session structure.
+ */
+
+ dm_clear_fsreg(s);
+
+ spinlock_destroy(&s->sn_qlock);
+ sv_destroy(&s->sn_readerq);
+ sv_destroy(&s->sn_writerq);
+#ifdef DM_USE_SHASH
+ if (s->sn_sesshash)
+ kmem_free(s->sn_sesshash, dm_hash_buckets * sizeof(dm_sesshash_t));
+#endif
+ kmem_cache_free(dm_session_cachep, s);
+ return(0);
+}
+
+
+/*
+ * Return a list of all active sessions.
+ */
+
+int
+dm_getall_sessions(
+ u_int nelem,
+ dm_sessid_t *sidp,
+ u_int *nelemp)
+{
+ dm_session_t *s;
+ u_int sesscnt;
+ dm_sessid_t *sesslist;
+ unsigned long lc; /* lock cookie */
+ int error;
+ int i;
+
+ /* Loop until we can get the right amount of temp space, being careful
+ not to hold a mutex during the allocation. Usually only one trip.
+ */
+
+ for (;;) {
+ if ((sesscnt = dm_sessions_active) == 0) {
+ /*if (suword(nelemp, 0))*/
+ if (put_user(0, nelemp))
+ return(-EFAULT);
+ return(0);
+ }
+ sesslist = kmalloc(sesscnt * sizeof(*sidp), GFP_KERNEL);
+ if (sesslist == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ lc = mutex_spinlock(&dm_session_lock);
+ if (sesscnt == dm_sessions_active)
+ break;
+
+ mutex_spinunlock(&dm_session_lock, lc);
+ kfree(sesslist);
+ }
+
+ /* Make a temp copy of the data, then release the mutex. */
+
+ for (i = 0, s = dm_sessions; i < sesscnt; i++, s = s->sn_next)
+ sesslist[i] = s->sn_sessid;
+
+ mutex_spinunlock(&dm_session_lock, lc);
+
+ /* Now copy the data to the user. */
+
+ if(put_user(sesscnt, nelemp)) {
+ error = -EFAULT;
+ } else if (sesscnt > nelem) {
+ error = -E2BIG;
+ } else if (copy_to_user(sidp, sesslist, sesscnt * sizeof(*sidp))) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ kfree(sesslist);
+ return(error);
+}
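+
+/* Caller-side sketch (illustrative): *nelemp is updated even when -E2BIG
+   is returned, so a caller can size its buffer in a retry loop (shown with
+   the kernel-style negative-errno convention used here; error checks on
+   the allocations are omitted):
+
+	u_int n = 16, needed;
+	dm_sessid_t *buf = malloc(n * sizeof(*buf));
+
+	while (dm_getall_sessions(n, buf, &needed) == -E2BIG) {
+		n = needed;
+		buf = realloc(buf, n * sizeof(*buf));
+	}
+*/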
+
+
+/*
+ * Return the descriptive string associated with a session.
+ */
+
+int
+dm_query_session(
+ dm_sessid_t sid,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ int len; /* length of session info string */
+ int error;
+ char sessinfo[DM_SESSION_INFO_LEN];
+ unsigned long lc; /* lock cookie */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+
+	len = strlen(s->sn_info) + 1;	/* NUL-terminated when created */
+ memcpy(sessinfo, s->sn_info, len);
+
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ /* Now that the mutex is released, copy the sessinfo to the user. */
+
+ if (put_user(len, rlenp)) {
+ error = -EFAULT;
+ } else if (len > buflen) {
+ error = -E2BIG;
+ } else if (copy_to_user(bufp, sessinfo, len)) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ return(error);
+}
+
+
+/*
+ * Return all of the previously delivered tokens (that is, their IDs)
+ * for the given session.
+ */
+
+int
+dm_getall_tokens(
+ dm_sessid_t sid, /* session obtaining tokens from */
+ u_int nelem, /* size of tokenbufp */
+ dm_token_t *tokenbufp, /* buffer to copy token IDs to */
+ u_int *nelemp) /* return number copied to tokenbufp */
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ dm_tokevent_t *tevp; /* event message queue traversal */
+ unsigned long lc; /* lock cookie */
+ int tokcnt;
+ dm_token_t *toklist;
+ int error;
+ int i;
+
+ /* Loop until we can get the right amount of temp space, being careful
+ not to hold a mutex during the allocation. Usually only one trip.
+ */
+
+ for (;;) {
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+ tokcnt = s->sn_delq.eq_count;
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ if (tokcnt == 0) {
+ /*if (suword(nelemp, 0))*/
+ if (put_user(0, nelemp))
+ return(-EFAULT);
+ return(0);
+ }
+ toklist = kmalloc(tokcnt * sizeof(*tokenbufp), GFP_KERNEL);
+ if (toklist == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0) {
+ kfree(toklist);
+ return(error);
+ }
+
+ if (tokcnt == s->sn_delq.eq_count)
+ break;
+
+ mutex_spinunlock(&s->sn_qlock, lc);
+ kfree(toklist);
+ }
+
+ /* Make a temp copy of the data, then release the mutex. */
+
+ tevp = s->sn_delq.eq_head;
+ for (i = 0; i < tokcnt; i++, tevp = tevp->te_next)
+ toklist[i] = tevp->te_msg.ev_token;
+
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ /* Now copy the data to the user. */
+
+ if (put_user(tokcnt, nelemp)) {
+ error = -EFAULT;
+ } else if (tokcnt > nelem) {
+ error = -E2BIG;
+ } else if (copy_to_user(tokenbufp,toklist,tokcnt*sizeof(*tokenbufp))) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ kfree(toklist);
+ return(error);
+}
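+
+/* Recovery sketch (illustrative): together with dm_find_eventmsg() below,
+   this lets a restarted daemon rediscover its outstanding synchronous
+   events:
+
+	dm_getall_tokens(sid, nelem, tokens, &ntokens);
+	for (i = 0; i < ntokens; i++) {
+		dm_find_eventmsg(sid, tokens[i], buflen, buf, &rlen);
+		... decide whether to complete or abort the event ...
+	}
+*/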
+
+
+/*
+ * Return the message identified by token.
+ */
+
+int
+dm_find_eventmsg(
+ dm_sessid_t sid,
+ dm_token_t token,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_tokevent_t *tevp; /* message identified by token */
+ int msgsize; /* size of message to copy out */
+ void *msg;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Because some of the events (dm_data_event_t in particular) contain
+ u64 fields, we need to make sure that the buffer provided by the
+ caller is aligned such that he can read those fields successfully.
+ */
+
+ if (((__psint_t)bufp & (sizeof(u64) - 1)) != 0)
+ return(-EFAULT);
+
+ /* Allocate the right amount of temp space, being careful not to hold
+ a mutex during the allocation.
+ */
+
+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
+ return(error == -ESRCH ? -EINVAL : error); // XFS BUG #35
+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_msg);
+ mutex_spinunlock(&tevp->te_lock, lc);
+
+ msg = kmalloc(msgsize, GFP_KERNEL);
+ if (msg == NULL) {
+ printk("%s/%d: kmalloc returned NULL\n", __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0) {
+ kfree(msg);
+ return(error);
+ }
+
+ /* Make a temp copy of the data, then release the mutex. */
+
+ memcpy(msg, &tevp->te_msg, msgsize);
+ mutex_spinunlock(&tevp->te_lock, lc);
+
+ /* Now copy the data to the user. */
+
+ if (put_user(msgsize,rlenp)) {
+ error = -EFAULT;
+ } else if (msgsize > buflen) { /* user buffer not big enough */
+ error = -E2BIG;
+ } else if (copy_to_user( bufp, msg, msgsize )) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+ kfree(msg);
+ return(error);
+}
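+
+/* Alignment sketch (illustrative): since an unaligned bufp is rejected
+   with -EFAULT above, a userspace caller should allocate the buffer with
+   8-byte (u64) alignment, e.g.:
+
+	void *buf;
+
+	if (posix_memalign(&buf, 8, buflen) != 0)
+		... handle the allocation failure ...
+*/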
+
+
+int
+dm_move_event(
+ dm_sessid_t srcsid,
+ dm_token_t token,
+ dm_sessid_t targetsid,
+ dm_token_t *rtokenp)
+{
+ dm_session_t *s1;
+ dm_session_t *s2;
+ dm_tokevent_t *tevp;
+ int error;
+ unsigned long lc; /* lock cookie */
+#ifdef DM_USE_SHASH
+ int hash_it = 0;
+#endif
+
+ lc = mutex_spinlock(&dm_session_lock);
+
+ if ((error = dm_find_session(srcsid, &s1)) != 0 ||
+ (error = dm_find_session(targetsid, &s2)) != 0 ||
+ (error = dm_find_msg(s1, token, &tevp)) != 0) {
+ mutex_spinunlock(&dm_session_lock, lc);
+ return(error == -ESRCH ? -ENOENT : error); // XFS BUG #34
+ }
+ dm_unlink_event(tevp, &s1->sn_delq);
+#ifdef DM_USE_SHASH
+ if (tevp->te_flags & DM_TEF_HASHED) {
+ unhash_event(s1, tevp);
+ hash_it = 1;
+ }
+#endif
+ dm_link_event(tevp, &s2->sn_delq);
+#ifdef DM_USE_SHASH
+ if (hash_it)
+ hash_event(s2, tevp);
+#endif
+ mutex_spinunlock(&dm_session_lock, lc);
+
+ if (copy_to_user(rtokenp, &token, sizeof(token))) {
+ // XFS BUG #36 START
+ /* Need to put things back where they belong */
+ lc = mutex_spinlock(&dm_session_lock);
+ dm_unlink_event(tevp, &s2->sn_delq);
+#ifdef DM_USE_SHASH
+ if (hash_it)
+ unhash_event(s2, tevp);
+#endif
+ dm_link_event(tevp, &s1->sn_delq);
+#ifdef DM_USE_SHASH
+ if (hash_it)
+ hash_event(s1, tevp);
+#endif
+ mutex_spinunlock(&dm_session_lock, lc);
+ // XFS BUG #36 END
+ return(-EFAULT);
+ }
+ return(0);
+}
+
+
+/* ARGSUSED */
+int
+dm_pending(
+ dm_sessid_t sid,
+ dm_token_t token,
+	dm_timestruct_t *delay)		/* validated, otherwise unused */
+{
+ dm_tokevent_t *tevp;
+ int error;
+ unsigned long lc; /* lock cookie */
+ dm_timestruct_t localdelay; // XFS BUG #38
+
+ if ((error = dm_find_msg_and_lock(sid, token, &tevp, &lc)) != 0)
+ return(error);
+
+ // XFS BUG #38 START
+ if (copy_from_user(&localdelay, delay, sizeof(dm_timestruct_t))) {
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return(-EFAULT);
+ }
+ // XFS BUG #38 END
+
+ tevp->te_flags |= DM_TEF_INTERMED;
+ if (tevp->te_evt_ref > 0) /* if event generation threads exist */
+ sv_broadcast(&tevp->te_evt_queue);
+
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return(0);
+}
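+
+/* Usage sketch (illustrative): a daemon that needs more time before
+   responding marks the event as "in progress"; requesters that passed
+   DM_FLAGS_NDELAY then see -EAGAIN instead of blocking (see dm_enqueue).
+   The delay argument is validated but its value is ignored:
+
+	dm_timestruct_t delay = { 0, 0 };	/* fields zeroed */
+
+	dm_pending(sid, token, &delay);
+*/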
+
+
+int
+dm_get_events(
+ dm_sessid_t sid,
+ u_int maxmsgs,
+ u_int flags,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp)
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ dm_tokevent_t *tevp; /* next event message on queue */
+ int error;
+ unsigned long lc1; /* first lock cookie */
+ unsigned long lc2 = 0; /* second lock cookie */
+ int totalsize;
+ int msgsize;
+ dm_eventmsg_t *prevmsg;
+ int prev_msgsize = 0;
+ u_int msgcnt;
+
+ /* Because some of the events (dm_data_event_t in particular) contain
+ u64 fields, we need to make sure that the buffer provided by the
+ caller is aligned such that he can read those fields successfully.
+ */
+
+ if (((__psint_t)bufp & (sizeof(u64) - 1)) != 0)
+ return(-EFAULT);
+
+ /* Find the indicated session and lock it. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0)
+ return(error);
+
+ /* Check for messages on sn_newq. If there aren't any that haven't
+	   already been grabbed by another process, and if we are supposed
+	   to wait until one shows up, then go to sleep interruptibly on the
+	   sn_readerq semaphore. The session can't disappear out from under
+	   us as long as sn_readercnt is non-zero.
+ */
+
+ for (;;) {
+ int rc;
+
+ for (tevp = s->sn_newq.eq_head; tevp; tevp = tevp->te_next) {
+ lc2 = mutex_spinlock(&tevp->te_lock);
+ if (!(tevp->te_flags & DM_TEF_LOCKED))
+ break;
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ }
+ if (tevp)
+ break; /* got one! */
+
+ if (!(flags & DM_EV_WAIT)) {
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ return(-EAGAIN);
+ }
+ s->sn_readercnt++;
+
+ sv_wait_sig(&s->sn_readerq, 1, &s->sn_qlock, lc1);
+ rc = signal_pending(current);
+
+ lc1 = mutex_spinlock(&s->sn_qlock);
+ s->sn_readercnt--;
+ if (rc) { /* if signal was received */
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ return(-EINTR);
+ }
+ }
+
+ /* At least one message is available for delivery, and we have both the
+ session lock and event lock. Mark the event so that it is not
+	   grabbed by other daemons, then drop both locks prior to copying the
+ data to the caller's buffer. Leaving the event on the queue in a
+ marked state prevents both the session and the event from
+ disappearing out from under us while we don't have the locks.
+ */
+
+ tevp->te_flags |= DM_TEF_LOCKED;
+ mutex_spinunlock(&tevp->te_lock, lc2); /* reverse cookie order */
+ mutex_spinunlock(&s->sn_qlock, lc1);
+
+ /* Continue to deliver messages until there are no more, the
+ user's buffer becomes full, or we hit his maxmsgs limit.
+ */
+
+ totalsize = 0; /* total bytes transferred to the user */
+ prevmsg = NULL;
+ msgcnt = 0;
+
+ while (tevp) {
+ /* Compute the number of bytes to be moved, rounding up to an
+ 8-byte boundary so that any subsequent messages will also be
+ aligned.
+ */
+
+ msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_msg);
+ msgsize = (msgsize + sizeof(u64) - 1) & ~(sizeof(u64) - 1);
+ totalsize += msgsize;
+
+ /* If it fits, copy the message into the user's buffer and
+ update his 'rlenp'. Update the _link pointer for any
+ previous message.
+ */
+
+ if (totalsize > buflen) { /* no more room */
+ error = -E2BIG;
+ } else if (put_user(totalsize, rlenp)) {
+ error = -EFAULT;
+ } else if (copy_to_user(bufp, &tevp->te_msg, msgsize)) {
+ error = -EFAULT;
+ } else if (prevmsg && put_user(prev_msgsize, &prevmsg->_link)) {
+ error = -EFAULT;
+ } else {
+ error = 0;
+ }
+
+ /* If an error occurred, just unmark the event and leave it on
+ the queue for someone else. Note that other daemons may
+ have gone to sleep because this event was marked, so wake
+ them up. Also, if at least one message has already been
+ delivered, then an error here is not really an error.
+ */
+
+ lc1 = mutex_spinlock(&s->sn_qlock);
+ lc2 = mutex_spinlock(&tevp->te_lock);
+ tevp->te_flags &= ~DM_TEF_LOCKED; /* drop the mark */
+
+ if (error) {
+ if (s->sn_readercnt)
+ sv_signal(&s->sn_readerq);
+
+ mutex_spinunlock(&tevp->te_lock, lc2); /* rev. order */
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ if (prevmsg)
+ return(0);
+ if (error == -E2BIG && put_user(totalsize,rlenp))
+ error = -EFAULT;
+ return(error);
+ }
+
+ /* The message was successfully delivered. Unqueue it. */
+
+ dm_unlink_event(tevp, &s->sn_newq);
+
+ /* Wake up the first of any processes waiting for room on the
+ sn_newq.
+ */
+
+ if (s->sn_writercnt)
+ sv_signal(&s->sn_writerq);
+
+ /* If the message is synchronous, add it to the sn_delq while
+ still holding the lock. If it is asynchronous, free it.
+ */
+
+ if (tevp->te_msg.ev_token != DM_INVALID_TOKEN) { /* synch */
+ dm_link_event(tevp, &s->sn_delq);
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ } else {
+ tevp->te_flags |= DM_TEF_FINAL;
+#ifdef DM_USE_SHASH
+ if (tevp->te_flags & DM_TEF_HASHED)
+ unhash_event(s, tevp);
+#endif
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ dm_put_tevp(tevp, NULL);/* can't cause destroy events */
+ }
+
+ /* Update our notion of where we are in the user's buffer. If
+ he doesn't want any more messages, then stop.
+ */
+
+ prevmsg = (dm_eventmsg_t *)bufp;
+ prev_msgsize = msgsize;
+ bufp = (char *)bufp + msgsize;
+
+ msgcnt++;
+ if (maxmsgs && msgcnt >= maxmsgs) {
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ break;
+ }
+
+ /* While still holding the sn_qlock, see if any additional
+ messages are available for delivery.
+ */
+
+ for (tevp = s->sn_newq.eq_head; tevp; tevp = tevp->te_next) {
+ lc2 = mutex_spinlock(&tevp->te_lock);
+ if (!(tevp->te_flags & DM_TEF_LOCKED)) {
+ tevp->te_flags |= DM_TEF_LOCKED;
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ break;
+ }
+ mutex_spinunlock(&tevp->te_lock, lc2);
+ }
+ mutex_spinunlock(&s->sn_qlock, lc1);
+ }
+ return(0);
+}
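+
+/* Consumption sketch (illustrative): each delivered message's _link field
+   holds the byte offset from that message to the next one (and, assuming
+   _link is zero in a freshly built message, 0 terminates the list), so a
+   daemon can walk the returned buffer like this:
+
+	dm_eventmsg_t *msg = (dm_eventmsg_t *)buf;
+
+	for (;;) {
+		... dispatch on msg->ev_type ...
+		if (msg->_link == 0)
+			break;
+		msg = (dm_eventmsg_t *)((char *)msg + msg->_link);
+	}
+*/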
+
+
+/*
+ * Remove an event message from the delivered queue, set the returned
+ * error where the event generator wants it, and wake up the generator.
+ * Also currently have the user side release any locks it holds...
+ */
+
+/* ARGSUSED */
+int
+dm_respond_event(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_response_t response,
+ int reterror,
+ size_t buflen, /* unused */
+ void *respbufp) /* unused */
+{
+ dm_session_t *s; /* pointer to session given by sid */
+ dm_tokevent_t *tevp; /* event message queue traversal */
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Sanity check the input parameters. */
+
+ if (buflen > MAXEASIZE) // XFS BUG #37
+ return(-E2BIG); // XFS BUG #37
+
+ switch (response) {
+ case DM_RESP_CONTINUE: /* continue must have reterror == 0 */
+ if (reterror != 0)
+ return(-EINVAL);
+ break;
+ case DM_RESP_ABORT: /* abort must have errno set */
+ if (reterror <= 0)
+ return(-EINVAL);
+ break;
+ case DM_RESP_DONTCARE:
+ if (reterror > 0)
+ return(-EINVAL);
+ reterror = -1; /* to distinguish DM_RESP_DONTCARE */
+ break;
+ default:
+ return(-EINVAL);
+ }
+
+ /* Hold session lock until the event is unqueued. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+
+ if ((error = dm_find_msg(s, token, &tevp)) != 0) {
+ mutex_spinunlock(&s->sn_qlock, lc);
+ return(error);
+ }
+ nested_spinlock(&tevp->te_lock);
+
+ if (reterror == -1 && tevp->te_msg.ev_type != DM_EVENT_MOUNT) {
+ error = -EINVAL;
+ nested_spinunlock(&tevp->te_lock);
+ mutex_spinunlock(&s->sn_qlock, lc);
+ } else {
+ dm_unlink_event(tevp, &s->sn_delq);
+#ifdef DM_USE_SHASH
+ if (tevp->te_flags & DM_TEF_HASHED)
+ unhash_event(s, tevp);
+#endif
+ tevp->te_reply = reterror;
+ tevp->te_flags |= DM_TEF_FINAL;
+ if (tevp->te_evt_ref)
+ sv_broadcast(&tevp->te_evt_queue);
+ nested_spinunlock(&tevp->te_lock);
+ mutex_spinunlock(&s->sn_qlock, lc);
+ error = 0;
+
+ /* Absolutely no locks can be held when calling dm_put_tevp! */
+
+ dm_put_tevp(tevp, NULL); /* this can generate destroy events */
+ }
+ return(error);
+}
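+
+/* Usage sketch (illustrative): the three response forms mirror the
+   parameter checks above:
+
+	dm_respond_event(sid, token, DM_RESP_CONTINUE, 0, 0, NULL);
+	dm_respond_event(sid, token, DM_RESP_ABORT, EIO, 0, NULL);
+
+   DM_RESP_DONTCARE (with reterror 0) is accepted only for mount events.
+*/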
+
+
+/* Queue the filled in event message pointed to by tevp on the session s, and
+ (if a synchronous event) wait for the reply from the DMAPI application.
+ The caller MUST be holding the session lock before calling this routine!
+ The session lock is always released upon exit.
+ Returns:
+	 -1 == don't care (DM_RESP_DONTCARE)
+	  0 == success (or async event)
+	> 0 == errno supplied by the daemon via DM_RESP_ABORT
+	< 0 == negative errno for local failures (e.g. -EINTR, -EAGAIN)
+*/
+
+static int
+dm_enqueue(
+ dm_session_t *s,
+ unsigned long lc, /* input lock cookie */
+ dm_tokevent_t *tevp, /* in/out parameter */
+ int sync,
+ int flags,
+ int interruptable)
+{
+ int is_unmount = 0;
+#ifdef DM_USE_SHASH
+ int is_hashable = 0;
+#endif
+ int reply;
+
+#ifdef DM_USE_SHASH
+ /* If the caller isn't planning to stick around for the result
+ and this request is identical to one that is already on the
+ queues then just give the caller an EAGAIN. Release the
+ session lock before returning.
+
+ We look only at NDELAY requests with an event type of READ,
+ WRITE, or TRUNCATE on objects that are regular files.
+ */
+
+ if ((flags & DM_FLAGS_NDELAY) && DM_EVENT_RDWRTRUNC(tevp) &&
+ (tevp->te_tdp->td_type == DM_TDT_REG)) {
+ if (repeated_event(s, tevp)) {
+ mutex_spinunlock(&s->sn_qlock, lc);
+ return(-EAGAIN);
+ }
+ is_hashable = 1;
+ }
+#endif
+
+ if (tevp->te_msg.ev_type == DM_EVENT_UNMOUNT)
+ is_unmount = 1;
+
+ /* Check for room on sn_newq. If there is no room for new messages,
+ then go to sleep on the sn_writerq semaphore. The
+ session cannot disappear out from under us as long as sn_writercnt
+ is non-zero.
+ */
+
+ while (s->sn_newq.eq_count >= dm_max_queued_msgs) { /* no room */
+ s->sn_writercnt++;
+ dm_link_event(tevp, &s->sn_evt_writerq);
+ if (interruptable) {
+ sv_wait_sig(&s->sn_writerq, 1, &s->sn_qlock, lc);
+			if (signal_pending(current)) {
+				/* retake sn_qlock to fix up the queue */
+				lc = mutex_spinlock(&s->sn_qlock);
+				s->sn_writercnt--;
+				dm_unlink_event(tevp, &s->sn_evt_writerq);
+				mutex_spinunlock(&s->sn_qlock, lc);
+				return(-EINTR);
+			}
+ } else {
+ sv_wait(&s->sn_writerq, 1, &s->sn_qlock, lc);
+ }
+ lc = mutex_spinlock(&s->sn_qlock);
+ s->sn_writercnt--;
+ dm_unlink_event(tevp, &s->sn_evt_writerq);
+ }
+
+ /* Assign a sequence number and token to the event and bump the
+ application reference count by one. We don't need 'te_lock' here
+ because this thread is still the only thread that can see the event.
+ */
+
+ nested_spinlock(&dm_token_lock);
+ tevp->te_msg.ev_sequence = dm_next_sequence++;
+ if (sync) {
+ tevp->te_msg.ev_token = dm_next_token++;
+ } else {
+ tevp->te_msg.ev_token = DM_INVALID_TOKEN;
+ }
+ nested_spinunlock(&dm_token_lock);
+
+ tevp->te_app_ref++;
+
+ /* Room exists on the sn_newq queue, so add this request. If the
+ queue was previously empty, wake up the first of any processes
+ that are waiting for an event.
+ */
+
+ dm_link_event(tevp, &s->sn_newq);
+#ifdef DM_USE_SHASH
+ if (is_hashable)
+ hash_event(s, tevp);
+#endif
+
+ if (s->sn_readercnt)
+ sv_signal(&s->sn_readerq);
+
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ /* Now that the message is queued, processes issuing asynchronous
+ events or DM_EVENT_UNMOUNT events are ready to continue.
+ */
+
+ if (!sync || is_unmount)
+ return(0);
+
+ /* Synchronous requests wait until a final reply is received. If the
+ caller supplied the DM_FLAGS_NDELAY flag, the process will return
+ EAGAIN if dm_pending() sets DM_TEF_INTERMED. We also let users
+	   Ctrl-C out of read, write, and truncate requests.
+ */
+
+ lc = mutex_spinlock(&tevp->te_lock);
+
+ while (!(tevp->te_flags & DM_TEF_FINAL)) {
+ if ((tevp->te_flags & DM_TEF_INTERMED) &&
+ (flags & DM_FLAGS_NDELAY)) {
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return(-EAGAIN);
+ }
+ if (tevp->te_msg.ev_type == DM_EVENT_READ ||
+ tevp->te_msg.ev_type == DM_EVENT_WRITE ||
+ tevp->te_msg.ev_type == DM_EVENT_TRUNCATE) {
+ sv_wait_sig(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
+ if (signal_pending(current)){
+ return(-EINTR);
+ }
+ } else {
+ sv_wait(&tevp->te_evt_queue, 1, &tevp->te_lock, lc);
+ }
+ lc = mutex_spinlock(&tevp->te_lock);
+ }
+
+ /* Return both the tevp and the reply which was stored in the tevp by
+ dm_respond_event. The tevp structure has already been removed from
+ the reply queue by this point in dm_respond_event().
+ */
+
+ reply = tevp->te_reply;
+ mutex_spinunlock(&tevp->te_lock, lc);
+ return(reply);
+}
+
+
+/* The filesystem is guaranteed to stay mounted while this event is
+ outstanding.
+*/
+
+int
+dm_enqueue_normal_event(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp,
+ int flags)
+{
+ dm_session_t *s;
+ int error;
+ int sync;
+ unsigned long lc; /* lock cookie */
+
+ switch (tevp->te_msg.ev_type) {
+ case DM_EVENT_READ:
+ case DM_EVENT_WRITE:
+ case DM_EVENT_TRUNCATE:
+ case DM_EVENT_PREUNMOUNT:
+ case DM_EVENT_UNMOUNT:
+ case DM_EVENT_NOSPACE:
+ case DM_EVENT_CREATE:
+ case DM_EVENT_REMOVE:
+ case DM_EVENT_RENAME:
+ case DM_EVENT_SYMLINK:
+ case DM_EVENT_LINK:
+ case DM_EVENT_DEBUT: /* not currently supported */
+ sync = 1;
+ break;
+
+ case DM_EVENT_DESTROY:
+ case DM_EVENT_POSTCREATE:
+ case DM_EVENT_POSTREMOVE:
+ case DM_EVENT_POSTRENAME:
+ case DM_EVENT_POSTSYMLINK:
+ case DM_EVENT_POSTLINK:
+ case DM_EVENT_ATTRIBUTE:
+ case DM_EVENT_CANCEL: /* not currently supported */
+ case DM_EVENT_CLOSE:
+ sync = 0;
+ break;
+
+ default:
+ return(-EIO); /* garbage event number */
+ }
+
+ /* Wait until a session selects disposition for the event. The session
+ is locked upon return from dm_waitfor_disp_session().
+ */
+
+ if ((error = dm_waitfor_disp_session(sbp, tevp, &s, &lc)) != 0)
+ return(error);
+
+ return(dm_enqueue(s, lc, tevp, sync, flags, 0));
+}
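+
+/* Caller-side sketch (illustrative): filesystem code that must not block
+   indefinitely passes DM_FLAGS_NDELAY and retries later when the daemon
+   has only signalled intermediate progress via dm_pending():
+
+	error = dm_enqueue_normal_event(sb, tevp, DM_FLAGS_NDELAY);
+	if (error == -EAGAIN)
+		... retry the operation later ...
+*/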
+
+
+/* Traverse the session list checking for sessions with the WANTMOUNT flag
+ set. When one is found, send it the message. Possible responses to the
+ message are one of DONTCARE, CONTINUE, or ABORT. The action taken in each
+ case is:
+ DONTCARE (-1) - Send the event to the next session with WANTMOUNT set
+ CONTINUE ( 0) - Proceed with the mount, errno zero.
+ ABORT (>0) - Fail the mount, return the returned errno.
+
+ The mount request is sent to sessions in ascending session ID order.
+ Since the session list can change dramatically while this process is
+ sleeping in dm_enqueue(), this routine must use session IDs rather than
+ session pointers when keeping track of where it is in the list. Since
+ new sessions are always added at the end of the queue, and have increasing
+ session ID values, we don't have to worry about missing any session.
+*/
+
+int
+dm_enqueue_mount_event(
+ struct super_block *sbp,
+ dm_tokevent_t *tevp)
+{
+ dm_session_t *s;
+ dm_sessid_t sid;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Make the mounting filesystem visible to other DMAPI calls. */
+
+ if ((error = dm_add_fsys_entry(sbp, tevp)) != 0){
+ return(error);
+ }
+
+ /* Walk through the session list presenting the mount event to each
+ session that is interested until a session accepts or rejects it,
+ or until all sessions ignore it.
+ */
+
+ for (sid = DM_NO_SESSION, error = -1; error < 0; sid = s->sn_sessid) {
+
+ lc = mutex_spinlock(&dm_session_lock);
+ for (s = dm_sessions; s; s = s->sn_next) {
+ if (s->sn_sessid > sid && s->sn_flags & DM_SN_WANTMOUNT) {
+ nested_spinlock(&s->sn_qlock);
+ nested_spinunlock(&dm_session_lock);
+ break;
+ }
+ }
+ if (s == NULL) {
+ mutex_spinunlock(&dm_session_lock, lc);
+			break;		/* no one wants it; proceed with mount */
+ }
+ error = dm_enqueue(s, lc, tevp, 1, 0, 0);
+ }
+
+ /* If the mount will be allowed to complete, then update the fsrp entry
+ accordingly. If the mount is to be aborted, remove the fsrp entry.
+ */
+
+ if (error <= 0) {
+ dm_change_fsys_entry(sbp, DM_STATE_MOUNTED);
+ error = 0;
+ } else {
+ dm_remove_fsys_entry(sbp);
+ }
+ return(error);
+}
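+
+/* Disposition sketch (illustrative; DM_GLOBAL_HANP/DM_GLOBAL_HLEN and the
+   DMEV_* macros are assumed from the standard dmapi.h): a session gets
+   DM_SN_WANTMOUNT set by registering for mount events against the global
+   handle:
+
+	dm_eventset_t eventset;
+
+	DMEV_ZERO(eventset);
+	DMEV_SET(DM_EVENT_MOUNT, eventset);
+	dm_set_disp(sid, DM_GLOBAL_HANP, DM_GLOBAL_HLEN, DM_NO_TOKEN,
+		    &eventset, DM_EVENT_MAX);
+*/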
+
+int
+dm_enqueue_sendmsg_event(
+ dm_sessid_t targetsid,
+ dm_tokevent_t *tevp,
+ int sync)
+{
+ dm_session_t *s;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ if ((error = dm_find_session_and_lock(targetsid, &s, &lc)) != 0)
+ return(error);
+
+ return(dm_enqueue(s, lc, tevp, sync, 0, 1));
+}
+
+
+int
+dm_enqueue_user_event(
+ dm_sessid_t sid,
+ dm_tokevent_t *tevp,
+ dm_token_t *tokenp)
+{
+ dm_session_t *s;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Atomically find and lock the session whose session id is 'sid'. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+
+ /* Assign a sequence number and token to the event, bump the
+ application reference count by one, and decrement the event
+ count because the caller gives up all ownership of the event.
+ We don't need 'te_lock' here because this thread is still the
+ only thread that can see the event.
+ */
+
+ nested_spinlock(&dm_token_lock);
+ tevp->te_msg.ev_sequence = dm_next_sequence++;
+ *tokenp = tevp->te_msg.ev_token = dm_next_token++;
+ nested_spinunlock(&dm_token_lock);
+
+ tevp->te_flags &= ~(DM_TEF_INTERMED|DM_TEF_FINAL);
+ tevp->te_app_ref++;
+ tevp->te_evt_ref--;
+
+ /* Add the request to the tail of the sn_delq. Now it's visible. */
+
+ dm_link_event(tevp, &s->sn_delq);
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ return(0);
+}
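+
+/* Usage sketch (illustrative): this is the kernel half of
+   dm_create_userevent() (see the DM_CREATE_USEREVENT case in dmapi_ioctl);
+   a userspace caller receives the token and later completes the event
+   through dm_respond_event():
+
+	dm_token_t token;
+
+	dm_create_userevent(sid, msglen, msgdata, &token);
+*/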
+
+
+// XFS BUG #11 START
+int
+dm_dequeue_user_event(
+ dm_sessid_t sid,
+ dm_tokevent_t *tevp,
+ dm_token_t token)
+{
+ dm_session_t *s;
+ int error;
+ unsigned long lc; /* lock cookie */
+
+ /* Atomically find and lock the session whose session id is 'sid'. */
+
+ if ((error = dm_find_session_and_lock(sid, &s, &lc)) != 0)
+ return(error);
+
+ if ((error = dm_find_msg(s, token, &tevp)) != 0) {
+ mutex_spinunlock(&s->sn_qlock, lc);
+ return(error);
+ }
+ nested_spinlock(&tevp->te_lock);
+
+ dm_unlink_event(tevp, &s->sn_delq);
+#ifdef DM_USE_SHASH
+ if (tevp->te_flags & DM_TEF_HASHED)
+ unhash_event(s, tevp);
+#endif
+
+ tevp->te_flags |= DM_TEF_FINAL;
+ tevp->te_app_ref--;
+ tevp->te_evt_ref++;
+ if (tevp->te_evt_ref)
+ sv_broadcast(&tevp->te_evt_queue);
+ nested_spinunlock(&tevp->te_lock);
+ mutex_spinunlock(&s->sn_qlock, lc);
+
+ return(0);
+}
+// XFS BUG #11 END
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_sysent.c linux-jfs-dmapi/fs/jfs/dmapi/dmapi_sysent.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/dmapi_sysent.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/dmapi_sysent.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,768 @@
+/*
+ * Copyright (c) 2000 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/* Data Migration API (DMAPI)
+ */
+
+
+/* We're using MISC_MAJOR / MISC_DYNAMIC_MINOR. */
+
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/major.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+
+#include "dmapi_private.h"
+
+kmem_cache_t *dm_fsreg_cachep = NULL;
+kmem_cache_t *dm_tokdata_cachep = NULL;
+kmem_cache_t *dm_session_cachep = NULL;
+
+extern void __init jfs_dm_init(void);
+extern void __exit jfs_dm_exit(void);
+
+static int
+dmapi_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ sys_dmapi_args_t kargs;
+ sys_dmapi_args_t *uap = &kargs;
+ int error = 0;
+ int rvp = -ENOSYS;
+ int use_rvp = 0;
+
+ if (!capable(CAP_MKNOD))
+ return(-EPERM);
+
+ if( copy_from_user( &kargs, (sys_dmapi_args_t*)arg,
+ sizeof(sys_dmapi_args_t) ) )
+ return -EFAULT;
+
+ switch (_IOC_NR(cmd)) {
+ case DM_CLEAR_INHERIT:
+ error = dm_clear_inherit(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t *) DM_Parg(uap,5));/* attrnamep */
+ break;
+ case DM_CREATE_BY_HANDLE:
+ error = dm_create_by_handle(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* dirhanp */
+ (size_t) DM_Uarg(uap,3), /* dirhlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (void *) DM_Parg(uap,5), /* hanp */
+ (size_t) DM_Uarg(uap,6), /* hlen */
+ (char *) DM_Parg(uap,7));/* cname */
+ break;
+ case DM_CREATE_SESSION:
+ error = dm_create_session(
+ (dm_sessid_t) DM_Uarg(uap,1), /* oldsid */
+ (char *) DM_Parg(uap,2), /* sessinfop */
+ (dm_sessid_t *) DM_Parg(uap,3));/* newsidp */
+ break;
+ case DM_CREATE_USEREVENT:
+ error = dm_create_userevent(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (size_t) DM_Uarg(uap,2), /* msglen */
+ (void *) DM_Parg(uap,3), /* msgdatap */
+ (dm_token_t *) DM_Parg(uap,4));/* tokenp */
+ break;
+ case DM_DESTROY_SESSION:
+ error = dm_destroy_session(
+ (dm_sessid_t) DM_Uarg(uap,1));/* sid */
+ break;
+ case DM_DOWNGRADE_RIGHT:
+ error = dm_downgrade_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4));/* token */
+ break;
+ case DM_FD_TO_HANDLE:
+ error = dm_fd_to_hdl(
+ (int) DM_Uarg(uap,1), /* fd */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t *) DM_Parg(uap,3));/* hlenp */
+ break;
+ case DM_FIND_EVENTMSG:
+ error = dm_find_eventmsg(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (size_t) DM_Uarg(uap,3), /* buflen */
+ (void *) DM_Parg(uap,4), /* bufp */
+ (size_t *) DM_Parg(uap,5));/* rlenp */
+ break;
+ case DM_GET_ALLOCINFO:
+ use_rvp = 1;
+ error = dm_get_allocinfo_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_off_t *) DM_Parg(uap,5), /* offp */
+ (u_int) DM_Uarg(uap,6), /* nelem */
+ (dm_extent_t *) DM_Parg(uap,7), /* extentp */
+ (u_int *) DM_Parg(uap,8), /* nelemp */
+ &rvp);
+ break;
+ case DM_GET_BULKALL:
+ use_rvp = 1;
+ error = dm_get_bulkall_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_attrname_t *) DM_Parg(uap,6),/* attrnamep */
+ (dm_attrloc_t *) DM_Parg(uap,7),/* locp */
+ (size_t) DM_Uarg(uap,8), /* buflen */
+ (void *) DM_Parg(uap,9), /* bufp */
+ (size_t *) DM_Parg(uap,10),/* rlenp */
+ &rvp);
+ break;
+ case DM_GET_BULKATTR:
+ use_rvp = 1;
+ error = dm_get_bulkattr_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_attrloc_t *)DM_Parg(uap,6), /* locp */
+ (size_t) DM_Uarg(uap,7), /* buflen */
+ (void *) DM_Parg(uap,8), /* bufp */
+ (size_t *) DM_Parg(uap,9), /* rlenp */
+ &rvp);
+ break;
+ case DM_GET_CONFIG:
+ error = dm_get_config(
+ (void *) DM_Parg(uap,1), /* hanp */
+ (size_t) DM_Uarg(uap,2), /* hlen */
+ (dm_config_t) DM_Uarg(uap,3), /* flagname */
+ (dm_size_t *) DM_Parg(uap,4));/* retvalp */
+ break;
+ case DM_GET_CONFIG_EVENTS:
+ error = dm_get_config_events(
+ (void *) DM_Parg(uap,1), /* hanp */
+ (size_t) DM_Uarg(uap,2), /* hlen */
+ (u_int) DM_Uarg(uap,3), /* nelem */
+ (dm_eventset_t *) DM_Parg(uap,4),/* eventsetp */
+ (u_int *) DM_Parg(uap,5));/* nelemp */
+ break;
+ case DM_GET_DIRATTRS:
+ use_rvp = 1;
+ error = dm_get_dirattrs_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_attrloc_t *)DM_Parg(uap,6), /* locp */
+ (size_t) DM_Uarg(uap,7), /* buflen */
+ (void *) DM_Parg(uap,8), /* bufp */
+ (size_t *) DM_Parg(uap,9), /* rlenp */
+ &rvp);
+ break;
+ case DM_GET_DMATTR:
+ error = dm_get_dmattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t *) DM_Parg(uap,5),/* attrnamep */
+ (size_t) DM_Uarg(uap,6), /* buflen */
+ (void *) DM_Parg(uap,7), /* bufp */
+ (size_t *) DM_Parg(uap,8));/* rlenp */
+
+ break;
+ case DM_GET_EVENTLIST:
+ error = dm_get_eventlist(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* nelem */
+ (dm_eventset_t *) DM_Parg(uap,6),/* eventsetp */
+ (u_int *) DM_Parg(uap,7));/* nelemp */
+ break;
+ case DM_GET_EVENTS:
+ error = dm_get_events(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (u_int) DM_Uarg(uap,2), /* maxmsgs */
+ (u_int) DM_Uarg(uap,3), /* flags */
+ (size_t) DM_Uarg(uap,4), /* buflen */
+ (void *) DM_Parg(uap,5), /* bufp */
+ (size_t *) DM_Parg(uap,6));/* rlenp */
+ break;
+ case DM_GET_FILEATTR:
+ error = dm_get_fileattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_stat_t *) DM_Parg(uap,6));/* statp */
+ break;
+ case DM_GET_MOUNTINFO:
+ error = dm_get_mountinfo(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (size_t) DM_Uarg(uap,5), /* buflen */
+ (void *) DM_Parg(uap,6), /* bufp */
+ (size_t *) DM_Parg(uap,7));/* rlenp */
+ break;
+ case DM_GET_REGION:
+ error = dm_get_region(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* nelem */
+ (dm_region_t *) DM_Parg(uap,6), /* regbufp */
+ (u_int *) DM_Parg(uap,7));/* nelemp */
+ break;
+ case DM_GETALL_DISP:
+ error = dm_getall_disp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (size_t) DM_Uarg(uap,2), /* buflen */
+ (void *) DM_Parg(uap,3), /* bufp */
+ (size_t *) DM_Parg(uap,4));/* rlenp */
+ break;
+ case DM_GETALL_DMATTR:
+ error = dm_getall_dmattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (size_t) DM_Uarg(uap,5), /* buflen */
+ (void *) DM_Parg(uap,6), /* bufp */
+ (size_t *) DM_Parg(uap,7));/* rlenp */
+ break;
+ case DM_GETALL_INHERIT:
+ error = dm_getall_inherit(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* nelem */
+ (dm_inherit_t *)DM_Parg(uap,6), /* inheritbufp*/
+ (u_int *) DM_Parg(uap,7));/* nelemp */
+ break;
+ case DM_GETALL_SESSIONS:
+ error = dm_getall_sessions(
+ (u_int) DM_Uarg(uap,1), /* nelem */
+ (dm_sessid_t *) DM_Parg(uap,2), /* sidbufp */
+ (u_int *) DM_Parg(uap,3));/* nelemp */
+ break;
+ case DM_GETALL_TOKENS:
+ error = dm_getall_tokens(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (u_int) DM_Uarg(uap,2), /* nelem */
+ (dm_token_t *) DM_Parg(uap,3), /* tokenbufp */
+ (u_int *) DM_Parg(uap,4));/* nelemp */
+ break;
+ case DM_INIT_ATTRLOC:
+ error = dm_init_attrloc(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrloc_t *) DM_Parg(uap,5));/* locp */
+ break;
+ case DM_MKDIR_BY_HANDLE:
+ error = dm_mkdir_by_handle(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* dirhanp */
+ (size_t) DM_Uarg(uap,3), /* dirhlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (void *) DM_Parg(uap,5), /* hanp */
+ (size_t) DM_Uarg(uap,6), /* hlen */
+ (char *) DM_Parg(uap,7));/* cname */
+ break;
+ case DM_MOVE_EVENT:
+ error = dm_move_event(
+ (dm_sessid_t) DM_Uarg(uap,1), /* srcsid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (dm_sessid_t) DM_Uarg(uap,3), /* targetsid */
+ (dm_token_t *) DM_Parg(uap,4));/* rtokenp */
+ break;
+ case DM_OBJ_REF_HOLD:
+ error = dm_obj_ref_hold(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (void *) DM_Parg(uap,3), /* hanp */
+ (size_t) DM_Uarg(uap,4));/* hlen */
+ break;
+ case DM_OBJ_REF_QUERY:
+ use_rvp = 1;
+ error = dm_obj_ref_query_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (void *) DM_Parg(uap,3), /* hanp */
+ (size_t) DM_Uarg(uap,4), /* hlen */
+ &rvp);
+ break;
+ case DM_OBJ_REF_RELE:
+ error = dm_obj_ref_rele(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (void *) DM_Parg(uap,3), /* hanp */
+ (size_t) DM_Uarg(uap,4));/* hlen */
+ break;
+ case DM_PATH_TO_FSHANDLE:
+ error = dm_path_to_fshdl(
+ (char *) DM_Parg(uap,1), /* path */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t *) DM_Parg(uap,3));/* hlenp */
+ break;
+ case DM_PATH_TO_HANDLE:
+ error = dm_path_to_hdl(
+ (char *) DM_Parg(uap,1), /* path */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t *) DM_Parg(uap,3));/* hlenp */
+ break;
+ case DM_PENDING:
+ error = dm_pending(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (dm_timestruct_t *) DM_Parg(uap,3));/* delay */
+ break;
+ case DM_PROBE_HOLE:
+ error = dm_probe_hole(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_off_t) DM_Uarg(uap,5), /* off */
+ (dm_size_t) DM_Uarg(uap,6), /* len */
+ (dm_off_t *) DM_Parg(uap,7), /* roffp */
+ (dm_size_t *) DM_Parg(uap,8));/* rlenp */
+ break;
+ case DM_PUNCH_HOLE:
+ error = dm_punch_hole(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_off_t) DM_Uarg(uap,5), /* off */
+ (dm_size_t) DM_Uarg(uap,6));/* len */
+ break;
+ case DM_QUERY_RIGHT:
+ error = dm_query_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_right_t *) DM_Parg(uap,5));/* rightp */
+ break;
+ case DM_QUERY_SESSION:
+ error = dm_query_session(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (size_t) DM_Uarg(uap,2), /* buflen */
+ (void *) DM_Parg(uap,3), /* bufp */
+ (size_t *) DM_Parg(uap,4));/* rlenp */
+ break;
+ case DM_READ_INVIS:
+ use_rvp = 1;
+ error = dm_read_invis_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_off_t) DM_Uarg(uap,5), /* off */
+ (dm_size_t) DM_Uarg(uap,6), /* len */
+ (void *) DM_Parg(uap,7), /* bufp */
+ &rvp);
+ break;
+ case DM_RELEASE_RIGHT:
+ error = dm_release_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4));/* token */
+ break;
+ case DM_REMOVE_DMATTR:
+ error = dm_remove_dmattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (int) DM_Uarg(uap,5), /* setdtime */
+ (dm_attrname_t *) DM_Parg(uap,6));/* attrnamep */
+ break;
+ case DM_REQUEST_RIGHT:
+ error = dm_request_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* flags */
+ (dm_right_t) DM_Uarg(uap,6));/* right */
+ break;
+ case DM_RESPOND_EVENT:
+ error = dm_respond_event(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (dm_token_t) DM_Uarg(uap,2), /* token */
+ (dm_response_t) DM_Uarg(uap,3), /* response */
+ (int) DM_Uarg(uap,4), /* reterror */
+ (size_t) DM_Uarg(uap,5), /* buflen */
+ (void *) DM_Parg(uap,6));/* respbufp */
+ break;
+ case DM_SEND_MSG:
+ error = dm_send_msg(
+ (dm_sessid_t) DM_Uarg(uap,1), /* targetsid */
+ (dm_msgtype_t) DM_Uarg(uap,2), /* msgtype */
+ (size_t) DM_Uarg(uap,3), /* buflen */
+ (void *) DM_Parg(uap,4));/* bufp */
+ break;
+ case DM_SET_DISP:
+ error = dm_set_disp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_eventset_t *) DM_Parg(uap,5),/* eventsetp */
+ (u_int) DM_Uarg(uap,6));/* maxevent */
+ break;
+ case DM_SET_DMATTR:
+ error = dm_set_dmattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t *) DM_Parg(uap,5),/* attrnamep */
+ (int) DM_Uarg(uap,6), /* setdtime */
+ (size_t) DM_Uarg(uap,7), /* buflen */
+ (void *) DM_Parg(uap,8));/* bufp */
+ break;
+ case DM_SET_EVENTLIST:
+ error = dm_set_eventlist(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_eventset_t *) DM_Parg(uap,5),/* eventsetp */
+ (u_int) DM_Uarg(uap,6));/* maxevent */
+ break;
+ case DM_SET_FILEATTR:
+ error = dm_set_fileattr(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* mask */
+ (dm_fileattr_t *)DM_Parg(uap,6));/* attrp */
+ break;
+ case DM_SET_INHERIT:
+ error = dm_set_inherit(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t *)DM_Parg(uap,5),/* attrnamep */
+ (mode_t) DM_Uarg(uap,6));/* mode */
+ break;
+ case DM_SET_REGION:
+ error = dm_set_region(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (u_int) DM_Uarg(uap,5), /* nelem */
+ (dm_region_t *) DM_Parg(uap,6), /* regbufp */
+ (dm_boolean_t *) DM_Parg(uap,7));/* exactflagp */
+ break;
+ case DM_SET_RETURN_ON_DESTROY:
+ error = dm_set_return_on_destroy(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (dm_attrname_t *) DM_Parg(uap,5),/* attrnamep */
+ (dm_boolean_t) DM_Uarg(uap,6));/* enable */
+ break;
+ case DM_SYMLINK_BY_HANDLE:
+ error = dm_symlink_by_handle(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* dirhanp */
+ (size_t) DM_Uarg(uap,3), /* dirhlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (void *) DM_Parg(uap,5), /* hanp */
+ (size_t) DM_Uarg(uap,6), /* hlen */
+ (char *) DM_Parg(uap,7), /* cname */
+ (char *) DM_Parg(uap,8));/* path */
+ break;
+ case DM_SYNC_BY_HANDLE:
+ error = dm_sync_by_handle(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4));/* token */
+ break;
+ case DM_UPGRADE_RIGHT:
+ error = dm_upgrade_right(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4));/* token */
+ break;
+ case DM_WRITE_INVIS:
+ use_rvp = 1;
+ error = dm_write_invis_rvp(
+ (dm_sessid_t) DM_Uarg(uap,1), /* sid */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (dm_token_t) DM_Uarg(uap,4), /* token */
+ (int) DM_Uarg(uap,5), /* flags */
+ (dm_off_t) DM_Uarg(uap,6), /* off */
+ (dm_size_t) DM_Uarg(uap,7), /* len */
+ (void *) DM_Parg(uap,8), /* bufp */
+ &rvp);
+ break;
+ case DM_OPEN_BY_HANDLE:
+ use_rvp = 1;
+ error = dm_open_by_handle_rvp(
+ (unsigned int) DM_Uarg(uap,1), /* fd */
+ (void *) DM_Parg(uap,2), /* hanp */
+ (size_t) DM_Uarg(uap,3), /* hlen */
+ (int) DM_Uarg(uap,4), /* flags */
+ &rvp);
+ break;
+/* XFS BUG #12 BEGIN */
+ case DM_HANDLE_TO_PATH:
+ error = dm_hdl_to_path(
+ (void *) DM_Parg(uap,1), /* dirhanp */
+ (size_t) DM_Uarg(uap,2), /* dirhlen */
+ (void *) DM_Parg(uap,3), /* targhanp */
+ (size_t) DM_Uarg(uap,4), /* targhlen */
+ (size_t) DM_Uarg(uap,5), /* buflen */
+ (char *) DM_Parg(uap,6), /* pathbufp */
+ (size_t *) DM_Parg(uap,7));/* rlenp */
+ break;
+/* XFS BUG #12 END */
+ default:
+ error = -ENOSYS;
+ break;
+ }
+ /* The *_rvp() variants return their result through rvp; hand that
+  * back to the caller on success, otherwise return the error. */
+ if( use_rvp && (error == 0) )
+ return rvp;
+ else
+ return error;
+}
+
+
+
+static int
+dmapi_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+
+static int
+dmapi_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+
+/* Say hello: a read returns the DMAPI version string once (*ppos is
+   used as a "done" flag), confirming the device is hooked up. */
+static ssize_t
+dmapi_dump(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+ char tmp[50];
+ int len;
+ if( *ppos == 0 ){
+ len = sprintf( tmp, "# " DM_VER_STR_CONTENTS "\n" );
+ if( copy_to_user(buf, tmp, len) )
+ return -EFAULT;
+ *ppos += 1;
+ return len;
+ }
+ return 0;
+}
+
+static struct file_operations dmapi_fops = {
+ .open = dmapi_open,
+ .ioctl = dmapi_ioctl,
+ .read = dmapi_dump,
+ .release = dmapi_release,
+};
+
+static struct miscdevice dmapi_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "jfs_dmapi",
+ .fops = &dmapi_fops,
+};
+
+
+
+#ifdef CONFIG_PROC_FS
+static int
+dmapi_summary(char *buffer, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len;
+
+ extern u_int dm_sessions_active;
+ extern dm_sessid_t dm_next_sessid;
+ extern dm_token_t dm_next_token;
+ extern dm_sequence_t dm_next_sequence;
+ extern int dm_fsys_cnt;
+
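+/* ADDBUF() appends one formatted line to the proc buffer; CHKFULL
+   breaks out of the loop below once the buffer is full. */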
+#define CHKFULL if(len >= count) break;
+#define ADDBUF(a,b) len += sprintf(buffer + len, a, b); CHKFULL;
+
+ len=0;
+ while(1){
+ ADDBUF("dm_sessions_active=%u\n", dm_sessions_active);
+ ADDBUF("dm_next_sessid=%d\n", (int)dm_next_sessid);
+ ADDBUF("dm_next_token=%d\n", (int)dm_next_token);
+ ADDBUF("dm_next_sequence=%u\n", (u_int)dm_next_sequence);
+ ADDBUF("dm_fsys_cnt=%d\n", dm_fsys_cnt);
+
+ break;
+ }
+
+ if (offset >= len) {
+ *start = buffer;
+ *eof = 1;
+ return 0;
+ }
+ *start = buffer + offset;
+ if ((len -= offset) > count)
+ return count;
+ *eof = 1;
+
+ return len;
+}
+#endif
+
+
+static void __init
+dmapi_init_procfs(int dmapi_minor)
+{
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *entry;
+
+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS, 0)) == NULL )
+ return;
+ entry->owner = THIS_MODULE;
+ entry->mode = S_IFDIR | S_IRUSR | S_IXUSR;
+
+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS "/fsreg", 0)) == NULL )
+ return;
+ entry->owner = THIS_MODULE;
+
+ if ((entry = proc_mkdir( DMAPI_DBG_PROCFS "/sessions", 0)) == NULL )
+ return;
+ entry->owner = THIS_MODULE;
+
+ entry = create_proc_read_entry( DMAPI_DBG_PROCFS "/summary", 0, 0, dmapi_summary, NULL);
+ if (entry == NULL)
+ return;
+ entry->owner = THIS_MODULE;
+
+ entry = create_proc_entry( DMAPI_PROCFS, S_IFCHR | S_IRUSR | S_IWUSR, NULL);
+ if( entry == NULL || entry->proc_fops != NULL)
+ return;
+ entry->proc_fops = &dmapi_fops;
+ entry->owner = THIS_MODULE;
+#endif
+}
+
+static void __exit
+dmapi_cleanup_procfs(void)
+{
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry( DMAPI_PROCFS, NULL);
+ remove_proc_entry( DMAPI_DBG_PROCFS "/summary", NULL);
+ remove_proc_entry( DMAPI_DBG_PROCFS "/fsreg", NULL);
+ remove_proc_entry( DMAPI_DBG_PROCFS "/sessions", NULL);
+ remove_proc_entry( DMAPI_DBG_PROCFS, NULL);
+#endif
+}
+
+
+int __init dmapi_init(void)
+{
+ int ret;
+
+ dm_tokdata_cachep = kmem_cache_create("jfs_dm_tokdata",
+ sizeof(struct dm_tokdata), 0, 0, NULL, NULL);
+ if (dm_tokdata_cachep == NULL)
+ return -ENOMEM;
+
+ dm_fsreg_cachep = kmem_cache_create("jfs_dm_fsreg",
+ sizeof(struct dm_fsreg), 0, 0, NULL, NULL);
+ if (dm_fsreg_cachep == NULL) {
+ kmem_cache_destroy(dm_tokdata_cachep);
+ return -ENOMEM;
+ }
+
+ dm_session_cachep = kmem_cache_create("jfs_dm_session",
+ sizeof(struct dm_session), 0, 0, NULL, NULL);
+ if (dm_session_cachep == NULL) {
+ kmem_cache_destroy(dm_tokdata_cachep);
+ kmem_cache_destroy(dm_fsreg_cachep);
+ return -ENOMEM;
+ }
+
+ ret = misc_register(&dmapi_dev);
+ if( ret != 0 )
+ printk(KERN_ERR "dmapi_init: misc_register returned %d\n", ret);
+ dmapi_init_procfs(dmapi_dev.minor);
+ jfs_dm_init();
+ return(0);
+}
+
+void __exit dmapi_uninit(void)
+{
+ jfs_dm_exit();
+ misc_deregister(&dmapi_dev);
+ dmapi_cleanup_procfs();
+ kmem_cache_destroy(dm_tokdata_cachep);
+ kmem_cache_destroy(dm_fsreg_cachep);
+ kmem_cache_destroy(dm_session_cachep);
+ dm_fsys_vector_free();
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/jfsdmapi.h linux-jfs-dmapi/fs/jfs/dmapi/jfsdmapi.h
--- linux-2.6.7-rc1/fs/jfs/dmapi/jfsdmapi.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/jfsdmapi.h 2004-05-28 13:43:14.000000000 -0500
@@ -0,0 +1,1022 @@
+/*
+ * Copyright (c) 1995-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#ifndef __JFSDMAPI_H__
+#define __JFSDMAPI_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __KERNEL__
+#include <sys/types.h>
+#endif
+#include <linux/types.h>
+
+/**************************************************************************
+ * *
+ * This implementation of DMAPI is based upon the X/Open document *
+ * Systems Management: Data Storage Management (XDSM) API *
+ * dated February 1997. Not all DMAPI functions and structure fields *
+ * have been implemented. Most importantly, the DMAPI functions *
+ * dm_request_right, dm_release_right, dm_query_right, dm_upgrade_right *
+ * and dm_downgrade_right do not work as described in the specification. *
+ * *
+ * The JFS filesystem currently does not allow its locking mechanisms to *
+ * be externally accessed from user space. While the above-mentioned *
+ * dm_xxx_right functions exist and can be called by applications, they *
+ * always return successfully without actually obtaining any locks *
+ * within the filesystem. *
+ * *
+ * Applications which do not need full rights support and which only *
+ * make dm_xxx_right calls in order to satisfy the input requirements of *
+ * other DMAPI calls should be able to use these routines to avoid *
+ * having to implement special-case code for this platform. Applications *
+ * which truly need the capabilities of a full implementation of rights *
+ * will unfortunately have to come up with alternate software solutions *
+ * until such time as rights can be completely implemented. *
+ * *
+ * Functions and structure fields defined within this file which are not *
+ * supported in this implementation of DMAPI are indicated by comments *
+ * following their definitions such as "not supported", or "not *
+ * completely supported". Any function or field not so marked may be *
+ * assumed to work exactly according to the spec. *
+ * *
+ **************************************************************************/
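+
+/* Illustrative sketch only, not part of the API: a caller that needs
+ * rights merely to satisfy the input requirements of other DMAPI calls
+ * can rely on the always-successful stubs (statbuf here is an assumed
+ * local dm_stat_t):
+ *
+ *	if (dm_request_right(sid, hanp, hlen, token, DM_RR_WAIT,
+ *			DM_RIGHT_SHARED) == 0) {
+ *		dm_get_fileattr(sid, hanp, hlen, token, DM_AT_STAT,
+ *				&statbuf);
+ *		dm_release_right(sid, hanp, hlen, token);
+ *	}
+ */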
+
+
+
+/* The first portion of this file contains defines and typedefs that are
+ DMAPI implementation-dependent, and could be different on other platforms.
+*/
+
+typedef __s64 dm_attrloc_t;
+typedef unsigned int dm_boolean_t;
+typedef __u64 dm_eventset_t;
+typedef __u64 dm_fsid_t;
+typedef __u64 dm_ino_t;
+typedef __u32 dm_igen_t;
+typedef __s64 dm_off_t;
+typedef unsigned int dm_sequence_t;
+typedef int dm_sessid_t;
+typedef __u64 dm_size_t;
+typedef __s64 dm_ssize_t;
+typedef int dm_token_t;
+
+/* XXX dev_t, mode_t, and nlink_t are not the same size in kernel space
+ and user space. This affects the field offsets for dm_stat_t.
+ The following solution is temporary.
+
+ user space sizes: dev_t=8 mode_t=4 nlink_t=4
+ kernel space : dev_t=2 mode_t=2 nlink_t=2
+
+*/
+typedef __s64 dm_dev_t;
+typedef int dm_mode_t;
+typedef int dm_nlink_t;
+
+
+#define DM_REGION_NOEVENT 0x0
+#define DM_REGION_READ 0x1
+#define DM_REGION_WRITE 0x2
+#define DM_REGION_TRUNCATE 0x4
+
+/* Values for the mask argument used with dm_get_fileattr, dm_get_bulkattr,
+ dm_get_dirattrs, and dm_set_fileattr.
+*/
+
+#define DM_AT_MODE 0x0001
+#define DM_AT_UID 0x0002
+#define DM_AT_GID 0x0004
+#define DM_AT_ATIME 0x0008
+#define DM_AT_MTIME 0x0010
+#define DM_AT_CTIME 0x0020
+#define DM_AT_SIZE 0x0040
+#define DM_AT_DTIME 0x0080
+#define DM_AT_HANDLE 0x0100
+#define DM_AT_EMASK 0x0200
+#define DM_AT_PMANR 0x0400
+#define DM_AT_PATTR 0x0800
+#define DM_AT_STAT 0x1000
+#define DM_AT_CFLAG 0x2000
+
+#define DM_EV_WAIT 0x1 /* used in dm_get_events() */
+
+#define DM_MOUNT_RDONLY 0x1 /* me_mode field in dm_mount_event_t */
+
+#define DM_RR_WAIT 0x1
+
+#define DM_UNMOUNT_FORCE 0x1 /* ne_mode field in dm_namesp_event_t */
+
+#define DM_WRITE_SYNC 0x1 /* used in dm_write_invis() */
+
+#define DM_SESSION_INFO_LEN 256
+#define DM_NO_SESSION 0
+#define DM_TRUE 1
+#define DM_FALSE 0
+#define DM_INVALID_TOKEN 0
+#define DM_NO_TOKEN (-1)
+#define DM_INVALID_HANP NULL
+#define DM_INVALID_HLEN 0
+#define DM_GLOBAL_HANP ((void *)(1LL))
+#define DM_GLOBAL_HLEN ((size_t)(1))
+#define DM_VER_STR_CONTENTS "IBM JFS DMAPI (XDSM) API, Release 1.0"
+
+
+#define DMEV_SET(event_type, event_list) \
+ ((event_list) |= (1 << (event_type)))
+#define DMEV_CLR(event_type, event_list) \
+ ((event_list) &= ~(1 << (event_type)))
+#define DMEV_ISSET(event_type, event_list) \
+ (int)(((event_list) & (1 << (event_type))) != 0)
+#define DMEV_ZERO(event_list) \
+ (event_list) = 0
+
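+/* Typical usage (sketch): build an event set containing just the read
+ * and write events:
+ *
+ *	dm_eventset_t eventset;
+ *
+ *	DMEV_ZERO(eventset);
+ *	DMEV_SET(DM_EVENT_READ, eventset);
+ *	DMEV_SET(DM_EVENT_WRITE, eventset);
+ *	if (DMEV_ISSET(DM_EVENT_READ, eventset))
+ *		...
+ */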
+
+typedef struct {
+ int vd_offset; /* offset from start of containing struct */
+ unsigned int vd_length; /* length of data starting at vd_offset */
+} dm_vardata_t;
+
+#define DM_GET_VALUE(p, field, type) \
+ ((type) ((char *)(p) + (p)->field.vd_offset))
+
+#define DM_GET_LEN(p, field) \
+ ((p)->field.vd_length)
+
+#define DM_STEP_TO_NEXT(p, type) \
+ ((type) ((p)->_link ? (char *)(p) + (p)->_link : NULL))
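+
+/* Example (sketch): walking the dm_stat_t records that dm_get_dirattrs()
+ * has placed in bufp:
+ *
+ *	dm_stat_t *statp = (dm_stat_t *)bufp;
+ *
+ *	while (statp != NULL) {
+ *		char *name = DM_GET_VALUE(statp, dt_compname, char *);
+ *		...
+ *		statp = DM_STEP_TO_NEXT(statp, dm_stat_t *);
+ *	}
+ */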
+
+
+
+
+/* The remainder of this include file contains defines, typedefs, and
+ structures which are strictly defined by the DMAPI 2.3 specification.
+
+ (The _link field which appears in several structures is an
+ implementation-specific way to implement DM_STEP_TO_NEXT, and
+ should not be referenced directly by application code.)
+*/
+
+
+#define DM_ATTR_NAME_SIZE 8
+
+
+struct dm_attrname {
+ unsigned char an_chars[DM_ATTR_NAME_SIZE];
+};
+typedef struct dm_attrname dm_attrname_t;
+
+
+struct dm_attrlist {
+ int _link;
+ dm_attrname_t al_name;
+ dm_vardata_t al_data;
+};
+typedef struct dm_attrlist dm_attrlist_t;
+
+
+typedef enum {
+ DM_CONFIG_INVALID,
+ DM_CONFIG_BULKALL,
+ DM_CONFIG_CREATE_BY_HANDLE,
+ DM_CONFIG_DTIME_OVERLOAD,
+ DM_CONFIG_LEGACY,
+ DM_CONFIG_LOCK_UPGRADE,
+ DM_CONFIG_MAX_ATTR_ON_DESTROY,
+ DM_CONFIG_MAX_ATTRIBUTE_SIZE,
+ DM_CONFIG_MAX_HANDLE_SIZE,
+ DM_CONFIG_MAX_MANAGED_REGIONS,
+ DM_CONFIG_MAX_MESSAGE_DATA,
+ DM_CONFIG_OBJ_REF,
+ DM_CONFIG_PENDING,
+ DM_CONFIG_PERS_ATTRIBUTES,
+ DM_CONFIG_PERS_EVENTS,
+ DM_CONFIG_PERS_INHERIT_ATTRIBS,
+ DM_CONFIG_PERS_MANAGED_REGIONS,
+ DM_CONFIG_PUNCH_HOLE,
+ DM_CONFIG_TOTAL_ATTRIBUTE_SPACE,
+ DM_CONFIG_WILL_RETRY
+} dm_config_t;
+
+
+struct dm_dispinfo {
+ int _link;
+ unsigned int di_pad1; /* reserved; do not reference */
+ dm_vardata_t di_fshandle;
+ dm_eventset_t di_eventset;
+};
+typedef struct dm_dispinfo dm_dispinfo_t;
+
+
+#ifndef HAVE_DM_EVENTTYPE_T
+#define HAVE_DM_EVENTTYPE_T
+typedef enum {
+ DM_EVENT_INVALID = -1,
+ DM_EVENT_CANCEL = 0, /* not supported */
+ DM_EVENT_MOUNT = 1,
+ DM_EVENT_PREUNMOUNT = 2,
+ DM_EVENT_UNMOUNT = 3,
+ DM_EVENT_DEBUT = 4, /* not supported */
+ DM_EVENT_CREATE = 5,
+ DM_EVENT_CLOSE = 6,
+ DM_EVENT_POSTCREATE = 7,
+ DM_EVENT_REMOVE = 8,
+ DM_EVENT_POSTREMOVE = 9,
+ DM_EVENT_RENAME = 10,
+ DM_EVENT_POSTRENAME = 11,
+ DM_EVENT_LINK = 12,
+ DM_EVENT_POSTLINK = 13,
+ DM_EVENT_SYMLINK = 14,
+ DM_EVENT_POSTSYMLINK = 15,
+ DM_EVENT_READ = 16,
+ DM_EVENT_WRITE = 17,
+ DM_EVENT_TRUNCATE = 18,
+ DM_EVENT_ATTRIBUTE = 19,
+ DM_EVENT_DESTROY = 20,
+ DM_EVENT_NOSPACE = 21,
+ DM_EVENT_USER = 22,
+ DM_EVENT_MAX = 23
+} dm_eventtype_t;
+#endif
+
+
+struct dm_eventmsg {
+ int _link;
+ dm_eventtype_t ev_type;
+ dm_token_t ev_token;
+ dm_sequence_t ev_sequence;
+ dm_vardata_t ev_data;
+};
+typedef struct dm_eventmsg dm_eventmsg_t;
+
+
+struct dm_cancel_event { /* not supported */
+ dm_sequence_t ce_sequence;
+ dm_token_t ce_token;
+};
+typedef struct dm_cancel_event dm_cancel_event_t;
+
+
+struct dm_data_event {
+ dm_vardata_t de_handle;
+ dm_off_t de_offset;
+ dm_size_t de_length;
+};
+typedef struct dm_data_event dm_data_event_t;
+
+struct dm_destroy_event {
+ dm_vardata_t ds_handle;
+ dm_attrname_t ds_attrname;
+ dm_vardata_t ds_attrcopy;
+};
+typedef struct dm_destroy_event dm_destroy_event_t;
+
+struct dm_mount_event {
+ dm_mode_t me_mode;
+ dm_vardata_t me_handle1;
+ dm_vardata_t me_handle2;
+ dm_vardata_t me_name1;
+ dm_vardata_t me_name2;
+ dm_vardata_t me_roothandle;
+};
+typedef struct dm_mount_event dm_mount_event_t;
+
+struct dm_namesp_event {
+ dm_mode_t ne_mode;
+ dm_vardata_t ne_handle1;
+ dm_vardata_t ne_handle2;
+ dm_vardata_t ne_name1;
+ dm_vardata_t ne_name2;
+ int ne_retcode;
+};
+typedef struct dm_namesp_event dm_namesp_event_t;
+
+
+typedef enum {
+ DM_EXTENT_INVALID,
+ DM_EXTENT_RES,
+ DM_EXTENT_HOLE
+} dm_extenttype_t;
+
+
+struct dm_extent {
+ dm_extenttype_t ex_type;
+ unsigned int ex_pad1; /* reserved; do not reference */
+ dm_off_t ex_offset;
+ dm_size_t ex_length;
+};
+typedef struct dm_extent dm_extent_t;
+
+struct dm_fileattr {
+ dm_mode_t fa_mode;
+ uid_t fa_uid;
+ gid_t fa_gid;
+ time_t fa_atime;
+ time_t fa_mtime;
+ time_t fa_ctime;
+ time_t fa_dtime;
+ unsigned int fa_pad1; /* reserved; do not reference */
+ dm_off_t fa_size;
+};
+typedef struct dm_fileattr dm_fileattr_t;
+
+
+struct dm_inherit { /* not supported */
+ dm_attrname_t ih_name;
+ dm_mode_t ih_filetype;
+};
+typedef struct dm_inherit dm_inherit_t;
+
+
+typedef enum {
+ DM_MSGTYPE_INVALID,
+ DM_MSGTYPE_SYNC,
+ DM_MSGTYPE_ASYNC
+} dm_msgtype_t;
+
+
+struct dm_region {
+ dm_off_t rg_offset;
+ dm_size_t rg_size;
+ unsigned int rg_flags;
+ unsigned int rg_pad1; /* reserved; do not reference */
+};
+typedef struct dm_region dm_region_t;
+
+
+typedef enum {
+ DM_RESP_INVALID,
+ DM_RESP_CONTINUE,
+ DM_RESP_ABORT,
+ DM_RESP_DONTCARE
+} dm_response_t;
+
+
+#ifndef HAVE_DM_RIGHT_T
+#define HAVE_DM_RIGHT_T
+typedef enum {
+ DM_RIGHT_NULL,
+ DM_RIGHT_SHARED,
+ DM_RIGHT_EXCL
+} dm_right_t;
+#endif
+
+
+struct dm_stat {
+ int _link;
+ dm_vardata_t dt_handle;
+ dm_vardata_t dt_compname;
+ int dt_nevents;
+ dm_eventset_t dt_emask;
+ int dt_pers;
+ int dt_pmanreg;
+ time_t dt_dtime;
+ unsigned int dt_change;
+ unsigned int dt_pad1; /* reserved; do not reference */
+ dm_dev_t dt_dev;
+ dm_ino_t dt_ino;
+ dm_mode_t dt_mode;
+ dm_nlink_t dt_nlink;
+ uid_t dt_uid;
+ gid_t dt_gid;
+ dm_dev_t dt_rdev;
+ unsigned int dt_pad2; /* reserved; do not reference */
+ dm_off_t dt_size;
+ time_t dt_atime;
+ time_t dt_mtime;
+ time_t dt_ctime;
+ unsigned int dt_blksize;
+ dm_size_t dt_blocks;
+
+ /* Non-standard filesystem-specific fields.
+ */
+
+ __u64 dt_pad3; /* reserved; do not reference */
+ int dt_fstype; /* filesystem index; see sysfs(2) */
+ union {
+ struct {
+ dm_igen_t igen;
+ unsigned int xflags;
+ unsigned int extsize;
+ unsigned int extents;
+ unsigned short aextents;
+ unsigned short dmstate;
+ } sgi_xfs;
+ } fsys_dep;
+};
+typedef struct dm_stat dm_stat_t;
+
+#define dt_xfs_igen fsys_dep.sgi_xfs.igen
+#define dt_xfs_xflags fsys_dep.sgi_xfs.xflags
+#define dt_xfs_extsize fsys_dep.sgi_xfs.extsize
+#define dt_xfs_extents fsys_dep.sgi_xfs.extents
+#define dt_xfs_aextents fsys_dep.sgi_xfs.aextents
+#define dt_xfs_dmstate fsys_dep.sgi_xfs.dmstate
+
+/* Flags for the non-standard dt_xfs_xflags field. */
+
+#define DM_XFLAG_REALTIME 0x1
+#define DM_XFLAG_PREALLOC 0x2
+#define DM_XFLAG_HASATTR 0x80000000
+
+
+struct dm_timestruct {
+ time_t dm_tv_sec;
+ int dm_tv_nsec;
+};
+typedef struct dm_timestruct dm_timestruct_t;
+
+
+struct dm_xstat { /* not supported */
+ dm_stat_t dx_statinfo;
+ dm_vardata_t dx_attrdata;
+};
+typedef struct dm_xstat dm_xstat_t;
+
+
+
+/* The following list provides the prototypes for all functions defined in
+ the DMAPI interface.
+*/
+
+extern int
+dm_clear_inherit( /* not supported */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep);
+
+extern int
+dm_create_by_handle( /* not supported */
+ dm_sessid_t sid,
+ void *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen,
+ char *cname);
+
+extern int
+dm_create_session(
+ dm_sessid_t oldsid,
+ char *sessinfop,
+ dm_sessid_t *newsidp);
+
+extern int
+dm_create_userevent(
+ dm_sessid_t sid,
+ size_t msglen,
+ void *msgdatap,
+ dm_token_t *tokenp);
+
+extern int
+dm_destroy_session(
+ dm_sessid_t sid);
+
+extern int
+dm_downgrade_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+extern int
+dm_fd_to_handle(
+ int fd,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_find_eventmsg(
+ dm_sessid_t sid,
+ dm_token_t token,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_allocinfo(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t *offp,
+ unsigned int nelem,
+ dm_extent_t *extentp,
+ unsigned int *nelemp);
+
+extern int
+dm_get_bulkall( /* not supported */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_attrname_t *attrnamep,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_bulkattr( /* not supported */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_config(
+ void *hanp,
+ size_t hlen,
+ dm_config_t flagname,
+ dm_size_t *retvalp);
+
+extern int
+dm_get_config_events(
+ void *hanp,
+ size_t hlen,
+ unsigned int nelem,
+ dm_eventset_t *eventsetp,
+ unsigned int *nelemp);
+
+extern int
+dm_get_dirattrs(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_attrloc_t *locp,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_dmattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_eventlist(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int nelem,
+ dm_eventset_t *eventsetp,
+ unsigned int *nelemp);
+
+extern int
+dm_get_events(
+ dm_sessid_t sid,
+ unsigned int maxmsgs,
+ unsigned int flags,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_fileattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_stat_t *statp);
+
+extern int
+dm_get_mountinfo(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_get_region(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int nelem,
+ dm_region_t *regbufp,
+ unsigned int *nelemp);
+
+extern int
+dm_getall_disp(
+ dm_sessid_t sid,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_getall_dmattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern int
+dm_getall_inherit( /* not supported */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int nelem,
+ dm_inherit_t *inheritbufp,
+ unsigned int *nelemp);
+
+extern int
+dm_getall_sessions(
+ unsigned int nelem,
+ dm_sessid_t *sidbufp,
+ unsigned int *nelemp);
+
+extern int
+dm_getall_tokens(
+ dm_sessid_t sid,
+ unsigned int nelem,
+ dm_token_t *tokenbufp,
+ unsigned int *nelemp);
+
+extern int
+dm_handle_cmp(
+ void *hanp1,
+ size_t hlen1,
+ void *hanp2,
+ size_t hlen2);
+
+extern void
+dm_handle_free(
+ void *hanp,
+ size_t hlen);
+
+extern u_int
+dm_handle_hash(
+ void *hanp,
+ size_t hlen);
+
+extern dm_boolean_t
+dm_handle_is_valid(
+ void *hanp,
+ size_t hlen);
+
+extern int
+dm_handle_to_fshandle(
+ void *hanp,
+ size_t hlen,
+ void **fshanpp,
+ size_t *fshlenp);
+
+extern int
+dm_handle_to_fsid(
+ void *hanp,
+ size_t hlen,
+ dm_fsid_t *fsidp);
+
+extern int
+dm_handle_to_igen(
+ void *hanp,
+ size_t hlen,
+ dm_igen_t *igenp);
+
+extern int
+dm_handle_to_ino(
+ void *hanp,
+ size_t hlen,
+ dm_ino_t *inop);
+
+extern int
+dm_handle_to_path(
+ void *dirhanp,
+ size_t dirhlen,
+ void *targhanp,
+ size_t targhlen,
+ size_t buflen,
+ char *pathbufp,
+ size_t *rlenp);
+
+extern int
+dm_init_attrloc(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrloc_t *locp);
+
+extern int
+dm_init_service(
+ char **versionstrpp);
+
+extern int
+dm_make_handle(
+ dm_fsid_t *fsidp,
+ dm_ino_t *inop,
+ dm_igen_t *igenp,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_make_fshandle(
+ dm_fsid_t *fsidp,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_mkdir_by_handle( /* not supported */
+ dm_sessid_t sid,
+ void *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen,
+ char *cname);
+
+extern int
+dm_move_event(
+ dm_sessid_t srcsid,
+ dm_token_t token,
+ dm_sessid_t targetsid,
+ dm_token_t *rtokenp);
+
+extern int
+dm_obj_ref_hold(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen);
+
+extern int
+dm_obj_ref_query(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen);
+
+extern int
+dm_obj_ref_rele(
+ dm_sessid_t sid,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen);
+
+extern int
+dm_path_to_fshandle(
+ char *path,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_path_to_handle(
+ char *path,
+ void **hanpp,
+ size_t *hlenp);
+
+extern int
+dm_pending(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_timestruct_t *delay);
+
+extern int
+dm_probe_hole(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ dm_off_t *roffp,
+ dm_size_t *rlenp);
+
+extern int
+dm_punch_hole(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len);
+
+extern int
+dm_query_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_right_t *rightp);
+
+extern int
+dm_query_session(
+ dm_sessid_t sid,
+ size_t buflen,
+ void *bufp,
+ size_t *rlenp);
+
+extern dm_ssize_t
+dm_read_invis(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp);
+
+extern int
+dm_release_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+extern int
+dm_remove_dmattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int setdtime,
+ dm_attrname_t *attrnamep);
+
+extern int
+dm_request_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int flags,
+ dm_right_t right);
+
+extern int
+dm_respond_event(
+ dm_sessid_t sid,
+ dm_token_t token,
+ dm_response_t response,
+ int reterror,
+ size_t buflen,
+ void *respbufp);
+
+extern int
+dm_send_msg(
+ dm_sessid_t targetsid,
+ dm_msgtype_t msgtype,
+ size_t buflen,
+ void *bufp);
+
+extern int
+dm_set_disp(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t *eventsetp,
+ unsigned int maxevent);
+
+extern int
+dm_set_dmattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ int setdtime,
+ size_t buflen,
+ void *bufp);
+
+extern int
+dm_set_eventlist(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_eventset_t *eventsetp,
+ unsigned int maxevent);
+
+extern int
+dm_set_fileattr(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int mask,
+ dm_fileattr_t *attrp);
+
+extern int
+dm_set_inherit( /* not supported */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ mode_t mode);
+
+extern int
+dm_set_region(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ unsigned int nelem,
+ dm_region_t *regbufp,
+ dm_boolean_t *exactflagp);
+
+extern int
+dm_set_return_on_destroy(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ dm_attrname_t *attrnamep,
+ dm_boolean_t enable);
+
+extern int
+dm_symlink_by_handle( /* not supported */
+ dm_sessid_t sid,
+ void *dirhanp,
+ size_t dirhlen,
+ dm_token_t token,
+ void *hanp,
+ size_t hlen,
+ char *cname,
+ char *path);
+
+extern int
+dm_sync_by_handle(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+extern int
+dm_upgrade_right( /* not completely supported; see caveat above */
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token);
+
+extern dm_ssize_t
+dm_write_invis(
+ dm_sessid_t sid,
+ void *hanp,
+ size_t hlen,
+ dm_token_t token,
+ int flags,
+ dm_off_t off,
+ dm_size_t len,
+ void *bufp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __JFSDMAPI_H__ */
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/kmem.c linux-jfs-dmapi/fs/jfs/dmapi/kmem.c
--- linux-2.6.7-rc1/fs/jfs/dmapi/kmem.c 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/kmem.c 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+
+#include "time.h"
+#include "kmem.h"
+
+#define DEF_PRIORITY_SHRINK (6)
+#define MAX_SLAB_SIZE 0x10000
+
+static inline unsigned int flag_convert(int flags)
+{
+#if DEBUG
+ if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS))) {
+ printk(KERN_WARNING
+ "JFS: memory allocation with wrong flags (%x)\n", flags);
+ BUG();
+ }
+#endif
+
+ if (flags & KM_NOSLEEP)
+ return GFP_ATOMIC;
+ /* If we're in a transaction, FS activity is not ok */
+ else if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
+ return GFP_NOFS;
+ else
+ return GFP_KERNEL;
+}
+
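+/*
+ * Memory "shake" callbacks: subsystems register a function that tries
+ * to release cached memory.  kmem_shake() runs every registered callback
+ * and then sleeps briefly; the allocators below call it before retrying
+ * an allocation that is not allowed to fail (KM_SLEEP).
+ */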
+#define MAX_SHAKE 8
+
+static kmem_shake_func_t shake_list[MAX_SHAKE];
+static DECLARE_MUTEX(shake_sem);
+
+void kmem_shake_register(kmem_shake_func_t sfunc)
+{
+ int i;
+
+ down(&shake_sem);
+ for (i = 0; i < MAX_SHAKE; i++) {
+ if (shake_list[i] == NULL) {
+ shake_list[i] = sfunc;
+ break;
+ }
+ }
+ if (i == MAX_SHAKE)
+ BUG();
+ up(&shake_sem);
+}
+
+void kmem_shake_deregister(kmem_shake_func_t sfunc)
+{
+ int i;
+
+ down(&shake_sem);
+ for (i = 0; i < MAX_SHAKE; i++) {
+ if (shake_list[i] == sfunc)
+ break;
+ }
+ if (i == MAX_SHAKE)
+ BUG();
+ for (; i < MAX_SHAKE - 1; i++) {
+ shake_list[i] = shake_list[i+1];
+ }
+ shake_list[i] = NULL;
+ up(&shake_sem);
+}
+
+static __inline__ void kmem_shake(void)
+{
+ int i;
+
+ down(&shake_sem);
+ for (i = 0; i < MAX_SHAKE && shake_list[i]; i++)
+ (*shake_list[i])();
+ up(&shake_sem);
+ delay(10);
+}
+
+void *
+kmem_alloc(size_t size, int flags)
+{
+ int shrink = DEF_PRIORITY_SHRINK; /* # times to try to shrink cache */
+ void *rval;
+
+repeat:
+ if (MAX_SLAB_SIZE < size) {
+ /* Avoid doing filesystem sensitive stuff to get this */
+ rval = __vmalloc(size, flag_convert(flags), PAGE_KERNEL);
+ } else {
+ rval = kmalloc(size, flag_convert(flags));
+ }
+
+ if (rval || (flags & KM_NOSLEEP))
+ return rval;
+
+ /*
+ * KM_SLEEP callers don't expect a failure
+ */
+ if (shrink) {
+ kmem_shake();
+
+ shrink--;
+ goto repeat;
+ }
+
+ rval = __vmalloc(size, flag_convert(flags), PAGE_KERNEL);
+ if (!rval && (flags & KM_SLEEP))
+ panic("kmem_alloc: NULL memory on KM_SLEEP request!");
+
+ return rval;
+}
+
+void *
+kmem_zalloc(size_t size, int flags)
+{
+ void *ptr;
+
+ ptr = kmem_alloc(size, flags);
+
+ if (ptr)
+ memset(ptr, 0, size);
+
+ return ptr;
+}
+
+void
+kmem_free(void *ptr, size_t size)
+{
+ if (((unsigned long)ptr < VMALLOC_START) ||
+ ((unsigned long)ptr >= VMALLOC_END)) {
+ kfree(ptr);
+ } else {
+ vfree(ptr);
+ }
+}
+
+void *
+kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
+{
+ void *new;
+
+ new = kmem_alloc(newsize, flags);
+ if (ptr) {
+ if (new)
+ memcpy(new, ptr,
+ ((oldsize < newsize) ? oldsize : newsize));
+ kmem_free(ptr, oldsize);
+ }
+
+ return new;
+}
+
+kmem_zone_t *
+kmem_zone_init(int size, char *zone_name)
+{
+ return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+}
+
+void *
+kmem_zone_alloc(kmem_zone_t *zone, int flags)
+{
+ int shrink = DEF_PRIORITY_SHRINK; /* # times to try to shrink cache */
+ void *ptr = NULL;
+
+repeat:
+ ptr = kmem_cache_alloc(zone, flag_convert(flags));
+
+ if (ptr || (flags & KM_NOSLEEP))
+ return ptr;
+
+ /*
+ * KM_SLEEP callers don't expect a failure
+ */
+ if (shrink) {
+ kmem_shake();
+
+ shrink--;
+ goto repeat;
+ }
+
+ if (flags & KM_SLEEP)
+ panic("kmem_zone_alloc: NULL memory on KM_SLEEP request!");
+
+ return NULL;
+}
+
+void *
+kmem_zone_zalloc(kmem_zone_t *zone, int flags)
+{
+ int shrink = DEF_PRIORITY_SHRINK; /* # times to try to shrink cache */
+ void *ptr = NULL;
+
+repeat:
+ ptr = kmem_cache_alloc(zone, flag_convert(flags));
+
+ if (ptr) {
+ memset(ptr, 0, kmem_cache_size(zone));
+ return ptr;
+ }
+
+ if (flags & KM_NOSLEEP)
+ return ptr;
+
+ /*
+ * KM_SLEEP callers don't expect a failure
+ */
+ if (shrink) {
+ kmem_shake();
+
+ shrink--;
+ goto repeat;
+ }
+
+ if (flags & KM_SLEEP)
+ panic("kmem_zone_zalloc: NULL memory on KM_SLEEP request!");
+
+ return NULL;
+}
+
+void
+kmem_zone_free(kmem_zone_t *zone, void *ptr)
+{
+ kmem_cache_free(zone, ptr);
+}
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/kmem.h linux-jfs-dmapi/fs/jfs/dmapi/kmem.h
--- linux-2.6.7-rc1/fs/jfs/dmapi/kmem.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/kmem.h 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __JFS_SUPPORT_KMEM_H__
+#define __JFS_SUPPORT_KMEM_H__
+
+#include <linux/slab.h>
+
+/*
+ * memory management routines
+ */
+#define KM_SLEEP 0x0001
+#define KM_NOSLEEP 0x0002
+#define KM_NOFS 0x0004
+
+#define kmem_zone kmem_cache_s
+#define kmem_zone_t kmem_cache_t
+
+extern kmem_zone_t *kmem_zone_init(int, char *);
+extern void *kmem_zone_zalloc(kmem_zone_t *, int);
+extern void *kmem_zone_alloc(kmem_zone_t *, int);
+extern void kmem_zone_free(kmem_zone_t *, void *);
+
+extern void *kmem_alloc(size_t, int);
+extern void *kmem_realloc(void *, size_t, size_t, int);
+extern void *kmem_zalloc(size_t, int);
+extern void kmem_free(void *, size_t);
+
+typedef void (*kmem_shake_func_t)(void);
+
+extern void kmem_shake_register(kmem_shake_func_t);
+extern void kmem_shake_deregister(kmem_shake_func_t);
+
+#endif /* __JFS_SUPPORT_KMEM_H__ */
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/spin.h linux-jfs-dmapi/fs/jfs/dmapi/spin.h
--- linux-2.6.7-rc1/fs/jfs/dmapi/spin.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/spin.h 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __JFS_SUPPORT_SPIN_H__
+#define __JFS_SUPPORT_SPIN_H__
+
+#include <linux/sched.h> /* preempt needs this */
+#include <linux/spinlock.h>
+
+/*
+ * Map lock_t from IRIX to Linux spinlocks.
+ *
+ * Note that linux turns on/off spinlocks depending on CONFIG_SMP.
+ * We don't need to worry about SMP or not here.
+ */
+
+typedef spinlock_t lock_t;
+
+#define spinlock_init(lock, name) spin_lock_init(lock)
+#define spinlock_destroy(lock)
+
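+/* On Linux there is no interrupt level to save, so mutex_spinlock()
+ * returns a placeholder value that mutex_spinunlock() ignores. */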
+static inline unsigned long mutex_spinlock(lock_t *lock)
+{
+ spin_lock(lock);
+ return 0;
+}
+
+/*ARGSUSED*/
+static inline void mutex_spinunlock(lock_t *lock, unsigned long s)
+{
+ spin_unlock(lock);
+}
+
+static inline void nested_spinlock(lock_t *lock)
+{
+ spin_lock(lock);
+}
+
+static inline void nested_spinunlock(lock_t *lock)
+{
+ spin_unlock(lock);
+}
+
+#endif /* __JFS_SUPPORT_SPIN_H__ */
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/Status linux-jfs-dmapi/fs/jfs/dmapi/Status
--- linux-2.6.7-rc1/fs/jfs/dmapi/Status 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/Status 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,114 @@
+for linux:
+
+
+67 external interfaces in libdm
+
+ 57 of those interfaces go through to dmi(), the kernel side of DMAPI
+
+
+
+Functions known to work (at FVT level)
+----------------------------------------------
+
+dm_create_session
+dm_create_userevent
+dm_destroy_session
+dm_fd_to_handle
+dm_find_eventmsg
+dm_getall_disp
+dm_getall_dmattr
+dm_getall_sessions
+dm_getall_tokens
+dm_get_allocinfo
+dm_get_config
+dm_get_config_events
+dm_get_dirattrs
+dm_get_dmattr
+dm_get_eventlist
+dm_get_events
+dm_get_fileattr
+dm_get_mountinfo
+dm_get_region
+dm_handle_cmp
+dm_handle_free
+dm_handle_hash
+dm_handle_is_valid
+dm_handle_to_fshandle
+dm_handle_to_fsid
+dm_handle_to_igen
+dm_handle_to_ino
+dm_handle_to_path
+dm_init_attrloc
+dm_init_service
+dm_make_fshandle
+dm_make_handle
+dm_move_event
+dm_obj_ref_hold
+dm_obj_ref_query
+dm_obj_ref_rele
+dm_path_to_fshandle
+dm_path_to_handle
+dm_pending
+dm_probe_hole
+dm_punch_hole
+dm_query_session
+dm_read_invis
+dm_remove_dmattr
+dm_respond_event
+dm_send_msg
+dm_set_disp
+dm_set_dmattr
+dm_set_eventlist
+dm_set_fileattr
+dm_set_region
+dm_set_return_on_destroy
+dm_sync_by_handle
+dm_write_invis
+54
+
+Functions that work syntactically (rights are not implemented on JFS)
+------------------------------------------
+
+dm_downgrade_right
+dm_query_right
+dm_release_right
+dm_request_right
+dm_upgrade_right
+5
+
+Functions untested but probably work
+----------------------------------------------
+0
+
+Functions that do not work
+-----------------------------------------
+0
+
+Functions not supported in JFS DMAPI
+-------------------------------------------------------------
+
+dm_clear_inherit
+dm_create_by_handle
+dm_getall_inherit
+dm_get_bulkall
+dm_get_bulkattr
+dm_mkdir_by_handle
+dm_set_inherit
+dm_symlink_by_handle
+8
+
+Functions that seem to work (would like more rigorous test case)
+----------------------------------------------------------------
+0
+
+Other things not working
+----------------------------------
+DM_EVENT_NOSPACE (very intrusive to JFS kernel code, putting off to last)
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/sv.h linux-jfs-dmapi/fs/jfs/dmapi/sv.h
--- linux-2.6.7-rc1/fs/jfs/dmapi/sv.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/sv.h 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Portions Copyright (c) 2002 Christoph Hellwig. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __JFS_SUPPORT_SV_H__
+#define __JFS_SUPPORT_SV_H__
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+/*
+ * Synchronisation variables.
+ *
+ * (Parameters "pri", "svf" and "rts" are not implemented)
+ */
+
+typedef struct sv_s {
+ wait_queue_head_t waiters;
+} sv_t;
+
+#define SV_FIFO 0x0 /* sv_t is FIFO type */
+#define SV_LIFO 0x2 /* sv_t is LIFO type */
+#define SV_PRIO 0x4 /* sv_t is PRIO type */
+#define SV_KEYED 0x6 /* sv_t is KEYED type */
+#define SV_DEFAULT SV_FIFO
+
+
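+/* Note that _sv_wait() drops "lock" before sleeping and does NOT
+ * re-acquire it; callers must retake the lock themselves if needed. */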
+static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
+ unsigned long timeout)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue_exclusive(&sv->waiters, &wait);
+ __set_current_state(state);
+ spin_unlock(lock);
+
+ schedule_timeout(timeout);
+
+ remove_wait_queue(&sv->waiters, &wait);
+}
+
+#define init_sv(sv,type,name,flag) \
+ init_waitqueue_head(&(sv)->waiters)
+#define sv_init(sv,flag,name) \
+ init_waitqueue_head(&(sv)->waiters)
+#define sv_destroy(sv) \
+ /*NOTHING*/
+#define sv_wait(sv, pri, lock, s) \
+ _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
+#define sv_wait_sig(sv, pri, lock, s) \
+ _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
+#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \
+ _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts))
+#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \
+ _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts))
+#define sv_signal(sv) \
+ wake_up(&(sv)->waiters)
+#define sv_broadcast(sv) \
+ wake_up_all(&(sv)->waiters)
+
+#endif /* __JFS_SUPPORT_SV_H__ */
diff -Nurp linux-2.6.7-rc1/fs/jfs/dmapi/time.h linux-jfs-dmapi/fs/jfs/dmapi/time.h
--- linux-2.6.7-rc1/fs/jfs/dmapi/time.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/dmapi/time.h 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __JFS_SUPPORT_TIME_H__
+#define __JFS_SUPPORT_TIME_H__
+
+#include <linux/sched.h>
+#include <linux/time.h>
+
+typedef struct timespec timespec_t;
+
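+/* Sleep uninterruptibly for the given number of jiffies. */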
+static inline void delay(long ticks)
+{
+ current->state = TASK_UNINTERRUPTIBLE;
+ schedule_timeout(ticks);
+}
+
+static inline void nanotime(struct timespec *tvp)
+{
+ struct timeval tv;
+
+ do_gettimeofday(&tv);
+ tvp->tv_sec = tv.tv_sec;
+ tvp->tv_nsec = tv.tv_usec * 1000;
+}
+
+#endif /* __JFS_SUPPORT_TIME_H__ */
diff -Nurp linux-2.6.7-rc1/fs/jfs/file.c linux-jfs-dmapi/fs/jfs/file.c
--- linux-2.6.7-rc1/fs/jfs/file.c 2004-05-27 12:56:43.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/file.c 2004-05-27 18:30:11.000000000 -0500
@@ -24,10 +24,15 @@
#include "jfs_xattr.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
+#ifdef CONFIG_JFS_DMAPI
+#include <linux/uio.h>
+#include "jfs_dmapi.h"
+#endif
extern int jfs_commit_inode(struct inode *, int);
extern void jfs_truncate(struct inode *);
+extern int jfs_acl_chmod(struct inode *);
int jfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
@@ -74,7 +79,8 @@ static int jfs_open(struct inode *inode,
return 0;
}
-static int jfs_release(struct inode *inode, struct file *file)
+
+int jfs_release(struct inode *inode, struct file *file)
{
struct jfs_inode_info *ji = JFS_IP(inode);
@@ -84,9 +90,209 @@ static int jfs_release(struct inode *ino
ji->active_ag = -1;
}
+#ifdef CONFIG_JFS_DMAPI
+ if ((atomic_read(&file->f_dentry->d_count) == 1) &&
+ (DM_EVENT_ENABLED(inode, DM_EVENT_CLOSE)))
+ JFS_SEND_NAMESP(DM_EVENT_CLOSE, inode, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, NULL, NULL, 0, 0, 0);
+#endif
+
return 0;
}
+#if defined(CONFIG_JFS_DMAPI) || defined(CONFIG_JFS_POSIX_ACL)
+int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ struct inode *inode = dentry->d_inode;
+ int rc;
+
+ rc = inode_change_ok(inode, iattr);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_JFS_DMAPI
+ if ((iattr->ia_valid & ATTR_SIZE) &&
+ DM_EVENT_ENABLED(inode, DM_EVENT_TRUNCATE)) {
+ rc = JFS_SEND_DATA(DM_EVENT_TRUNCATE, inode, iattr->ia_size, 0,
+ 0, NULL);
+
+ if (rc)
+ return rc;
+ }
+#endif
+
+ inode_setattr(inode, iattr);
+
+#ifdef CONFIG_JFS_POSIX_ACL
+ if (iattr->ia_valid & ATTR_MODE)
+ rc = jfs_acl_chmod(inode);
+#endif
+
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(inode, DM_EVENT_ATTRIBUTE))
+ JFS_SEND_NAMESP(DM_EVENT_ATTRIBUTE, inode, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, NULL, NULL, 0, 0, 0);
+
+ if (JFS_SBI(inode->i_sb)->flag & JFS_DMI) {
+ /* Metadata change */
+ if (rc >= 0) {
+ inode->i_version++;
+ mark_inode_dirty(inode);
+ }
+ }
+#endif
+
+ return rc;
+}
+#endif
+
+#ifdef CONFIG_JFS_DMAPI
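+/* Wrappers around the generic file methods: deliver the DMAPI data
+ * event (DM_EVENT_READ/DM_EVENT_WRITE) first, so a DM application can
+ * stage data in, or veto the I/O, before the file is touched. */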
+static ssize_t jfs_read(struct file *file, char __user *bufp,
+ size_t count, loff_t *ppos)
+{
+ struct inode *ip = file->f_dentry->d_inode;
+ int error;
+
+ if (DM_EVENT_ENABLED(ip, DM_EVENT_READ)) {
+ error = JFS_SEND_DATA(DM_EVENT_READ, ip, *ppos, count,
+ FILP_DELAY_FLAG(file), NULL /*locktype*/);
+ if (error)
+ return error;
+ }
+
+ return generic_file_read(file, bufp, count, ppos);
+}
+
+static ssize_t jfs_write(struct file *file, const char __user *bufp,
+ size_t count, loff_t *ppos)
+{
+ struct inode *ip = file->f_dentry->d_inode;
+ int error;
+
+ if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
+ error = JFS_SEND_DATA(DM_EVENT_WRITE, ip, *ppos, count,
+ FILP_DELAY_FLAG(file), NULL /*locktype*/);
+ if (error)
+ return error;
+ }
+
+ error = generic_file_write(file, bufp, count, ppos);
+
+ if (JFS_SBI(ip->i_sb)->flag & JFS_DMI) {
+ /* Data change */
+ if (error > 0) {
+ ip->i_version++;
+ mark_inode_dirty(ip);
+ }
+ }
+
+ return error;
+}
+
+static ssize_t jfs_aio_read(struct kiocb *iocb, char __user *bufp,
+ size_t count, loff_t pos)
+{
+ struct inode *ip = iocb->ki_filp->f_dentry->d_inode;
+ int error;
+
+ if (DM_EVENT_ENABLED(ip, DM_EVENT_READ)) {
+ error = JFS_SEND_DATA(DM_EVENT_READ, ip, pos, count,
+ FILP_DELAY_FLAG(iocb->ki_filp),
+ NULL /*locktype*/);
+ if (error)
+ return error;
+ }
+
+ return generic_file_aio_read(iocb, bufp, count, pos);
+}
+
+static ssize_t jfs_aio_write(struct kiocb *iocb, const char __user *bufp,
+ size_t count, loff_t pos)
+{
+ struct inode *ip = iocb->ki_filp->f_dentry->d_inode;
+ int error;
+
+ if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
+ error = JFS_SEND_DATA(DM_EVENT_WRITE, ip, pos, count,
+ FILP_DELAY_FLAG(iocb->ki_filp),
+ NULL /*locktype*/);
+ if (error)
+ return error;
+ }
+
+ error = generic_file_aio_write(iocb, bufp, count, pos);
+
+ if (JFS_SBI(ip->i_sb)->flag & JFS_DMI) {
+ /* Data change */
+ if (error > 0) {
+ ip->i_version++;
+ mark_inode_dirty(ip);
+ }
+ }
+
+ return error;
+}
+
+static ssize_t jfs_readv(struct file *file, const struct iovec *invecs,
+ unsigned long count, loff_t *ppos)
+{
+ struct inode *ip = file->f_dentry->d_inode;
+ int error;
+
+ if (DM_EVENT_ENABLED(ip, DM_EVENT_READ)) {
+ error = JFS_SEND_DATA(DM_EVENT_READ, ip, *ppos,
+ iov_length(invecs, count),
+ FILP_DELAY_FLAG(file), NULL /*locktype*/);
+ if (error)
+ return error;
+ }
+
+ return generic_file_readv(file, invecs, count, ppos);
+}
+
+static ssize_t jfs_writev(struct file *file, const struct iovec *outvecs,
+ unsigned long count, loff_t *ppos)
+{
+ struct inode *ip = file->f_dentry->d_inode;
+ int error;
+
+ if (DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
+ error = JFS_SEND_DATA(DM_EVENT_WRITE, ip, *ppos,
+ iov_length(outvecs, count),
+ FILP_DELAY_FLAG(file), NULL /*locktype*/);
+ if (error)
+ return error;
+ }
+
+ error = generic_file_writev(file, outvecs, count, ppos);
+
+ if (JFS_SBI(ip->i_sb)->flag & JFS_DMI) {
+ /* Data change */
+ if (error > 0) {
+ ip->i_version++;
+ mark_inode_dirty(ip);
+ }
+ }
+
+ return error;
+}
+
+static int jfs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct inode *ip = filp->f_dentry->d_inode;
+ int error;
+
+ if (S_ISREG(ip->i_mode) && (JFS_SBI(ip->i_sb)->flag & JFS_DMI)) {
+ error = JFS_SEND_MMAP(vma, 0);
+ if (error) {
+ return error;
+ }
+ }
+
+ return generic_file_mmap(filp, vma);
+}
+#endif
+
struct inode_operations jfs_file_inode_operations = {
.truncate = jfs_truncate,
.setxattr = jfs_setxattr,
@@ -96,7 +302,9 @@ struct inode_operations jfs_file_inode_o
#ifdef CONFIG_JFS_POSIX_ACL
.setattr = jfs_setattr,
.permission = jfs_permission,
-#endif
+#elif defined(CONFIG_JFS_DMAPI)
+ .setattr = jfs_setattr,
+#endif
};
struct file_operations jfs_file_operations = {
@@ -112,4 +320,13 @@ struct file_operations jfs_file_operatio
.sendfile = generic_file_sendfile,
.fsync = jfs_fsync,
.release = jfs_release,
+#ifdef CONFIG_JFS_DMAPI
+ .write = jfs_write,
+ .read = jfs_read,
+ .aio_read = jfs_aio_read,
+ .aio_write = jfs_aio_write,
+ .mmap = jfs_mmap,
+ .readv = jfs_readv,
+ .writev = jfs_writev,
+#endif
};
diff -Nurp linux-2.6.7-rc1/fs/jfs/inode.c linux-jfs-dmapi/fs/jfs/inode.c
--- linux-2.6.7-rc1/fs/jfs/inode.c 2004-05-27 12:54:42.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/inode.c 2004-05-27 18:30:11.000000000 -0500
@@ -27,6 +27,9 @@
#include "jfs_extent.h"
#include "jfs_unicode.h"
#include "jfs_debug.h"
+#ifdef CONFIG_JFS_DMAPI
+#include "jfs_dmapi.h"
+#endif
extern struct inode_operations jfs_dir_inode_operations;
@@ -129,6 +132,11 @@ void jfs_delete_inode(struct inode *inod
{
jfs_info("In jfs_delete_inode, inode = 0x%p", inode);
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(inode, DM_EVENT_DESTROY))
+ JFS_SEND_DESTROY(inode, DM_RIGHT_NULL);
+#endif
+
if (test_cflag(COMMIT_Freewmap, inode))
freeZeroLink(inode);
@@ -156,8 +164,7 @@ void jfs_dirty_inode(struct inode *inode
set_cflag(COMMIT_Dirty, inode);
}
-static int
-jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
+int jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks,
struct buffer_head *bh_result, int create)
{
s64 lblock64 = lblock;
diff -Nurp linux-2.6.7-rc1/fs/jfs/jfs_dmapi.h linux-jfs-dmapi/fs/jfs/jfs_dmapi.h
--- linux-2.6.7-rc1/fs/jfs/jfs_dmapi.h 1969-12-31 18:00:00.000000000 -0600
+++ linux-jfs-dmapi/fs/jfs/jfs_dmapi.h 2004-05-27 18:30:11.000000000 -0500
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __JFS_DMAPI_H__
+#define __JFS_DMAPI_H__
+
+/* Values used to define the on-disk version of dm_attrname_t. All
+ * on-disk attribute names start with the 8-byte string "app.dmi.".
+ *
+ * In the on-disk inode, DMAPI attribute names consist of the user-provided
+ * name with the DMATTR_PREFIXSTRING pre-pended. This string must NEVER be
+ * changed.
+ */
+
+
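+/* True when the filesystem is mounted with DMAPI enabled and the event
+ * bit is set in either the inode's or the filesystem's DMAPI event mask.
+ */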
+#define DM_EVENT_ENABLED(inode, event) ( \
+ unlikely (JFS_SBI(inode->i_sb)->flag & JFS_DMI) && \
+ ((JFS_IP(inode)->dmattrs.da_dmevmask & (1 << event)) || \
+ (JFS_SBI(inode->i_sb)->dm_evmask & (1 << event))))
+
+
+/*
+ * Definitions used for the flags field on dm_send_*_event().
+ */
+
+#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
+#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
+
+/*
+ * Macro to turn caller specified delay/block flags into
+ * dm_send_xxxx_event flag DM_FLAGS_NDELAY.
+ */
+
+#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
+ DM_FLAGS_NDELAY : 0)
+
+
+extern int dmapi_init(void);
+extern void dmapi_uninit(void);
+
+int jfs_dm_send_data_event(int, struct inode *,
+ dm_off_t, size_t, int, int /*vrwlock_t*/ *);
+int jfs_dm_send_mmap_event(struct vm_area_struct *, unsigned int);
+int dm_send_destroy_event(struct inode *, dm_right_t);
+int dm_send_namesp_event(dm_eventtype_t, struct inode *,
+ dm_right_t, struct inode *, dm_right_t,
+ char *, char *, mode_t, int, int);
+void dm_send_unmount_event(struct super_block *, struct inode *,
+ dm_right_t, mode_t, int, int);
+
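+/* Call-site wrappers for the dmapi entry points declared above */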
+#define JFS_SEND_DATA(ev,ip,off,len,fl,lock) \
+ jfs_dm_send_data_event(ev,ip,off,len,fl,lock)
+#define JFS_SEND_MMAP(vma,fl) \
+ jfs_dm_send_mmap_event(vma,fl)
+#define JFS_SEND_DESTROY(ip,right) \
+ dm_send_destroy_event(ip,right)
+#define JFS_SEND_NAMESP(ev,i1,r1,i2,r2,n1,n2,mode,rval,fl) \
+ dm_send_namesp_event(ev,i1,r1,i2,r2,n1,n2,mode,rval,fl)
+#define JFS_SEND_UNMOUNT(sbp,ip,right,mode,rval,fl) \
+ dm_send_unmount_event(sbp,ip,right,mode,rval,fl)
+
+void jfs_dm_umount_begin(struct super_block *);
+int jfs_dm_read_pers_data(struct jfs_inode_info *jfs_ip);
+
+#define DMATTR_PERS_REGIONS "system.dmi.persistent.regions"
+#endif /* __JFS_DMAPI_H__ */
diff -Nurp linux-2.6.7-rc1/fs/jfs/jfs_filsys.h linux-jfs-dmapi/fs/jfs/jfs_filsys.h
--- linux-2.6.7-rc1/fs/jfs/jfs_filsys.h 2004-05-27 12:55:48.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/jfs_filsys.h 2004-05-27 18:30:11.000000000 -0500
@@ -81,6 +81,10 @@
#define JFS_DIR_INDEX 0x00200000 /* Persistant index for */
/* directory entries */
+/* DMAPI enablement */
+#define JFS_DMI 0x01000000 /* FS has DMI enabled */
+#define JFS_UNMOUNT_FORCE 0x02000000	/* FS being forcibly unmounted */
+
/*
* buffer cache configuration
diff -Nurp linux-2.6.7-rc1/fs/jfs/jfs_imap.c linux-jfs-dmapi/fs/jfs/jfs_imap.c
--- linux-2.6.7-rc1/fs/jfs/jfs_imap.c 2004-05-27 12:55:10.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/jfs_imap.c 2004-05-28 11:09:34.000000000 -0500
@@ -53,6 +53,9 @@
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
+#ifdef CONFIG_JFS_DMAPI
+#include "jfs_dmapi.h"
+#endif
/*
* imap locks
@@ -3145,6 +3148,15 @@ static int copy_from_dinode(struct dinod
jfs_ip->atlhead = 0;
jfs_ip->atltail = 0;
jfs_ip->xtlid = 0;
+
+#ifdef CONFIG_JFS_DMAPI
+ memset(&jfs_ip->dmattrs, 0, sizeof(dm_attrs_t));
+ /* Restore any DMAPI persistent data if DMAPI enabled */
+ if (JFS_SBI(ip->i_sb)->flag & JFS_DMI)
+ return jfs_dm_read_pers_data(jfs_ip);
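+	/* Non-DMI mount: nothing persistent to restore; just reset i_version.
+	 * (On DMI mounts, jfs_dm_read_pers_data() is assumed to handle it.)
+	 */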
+ ip->i_version = 0;
+#endif
+
return (0);
}
diff -Nurp linux-2.6.7-rc1/fs/jfs/jfs_incore.h linux-jfs-dmapi/fs/jfs/jfs_incore.h
--- linux-2.6.7-rc1/fs/jfs/jfs_incore.h 2004-05-27 12:54:52.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/jfs_incore.h 2004-05-28 13:42:06.000000000 -0500
@@ -25,6 +25,11 @@
#include "jfs_types.h"
#include "jfs_xtree.h"
#include "jfs_dtree.h"
+#ifdef CONFIG_JFS_DMAPI
+#include "jfs_filsys.h"
+#include "dmapi/jfsdmapi.h" /* for dm_region_t */
+#include "dmapi/dmapi_jfs.h" /* for dm_attrs_t */
+#endif
/*
* JFS magic number
@@ -93,6 +98,12 @@ struct jfs_inode_info {
unchar _inline_ea[128]; /* 128: inline extended attr */
} link;
} u;
+#ifdef CONFIG_JFS_DMAPI
+ /* DMAPI necessities */
+ dm_attrs_t dmattrs; /* DMAPI attributes (da_dmevmask persistent) */
+ int dmnumrgns; /* DMAPI number regions */
+ dm_region_t *dmrgns; /* DMAPI regions (persistent) */
+#endif
u32 dev; /* will die when we get wide dev_t */
struct inode vfs_inode;
};
@@ -167,6 +178,13 @@ struct jfs_sb_info {
uint state; /* mount/recovery state */
unsigned long flag; /* mount time flags */
uint p_state; /* state prior to going no integrity */
+
+#ifdef CONFIG_JFS_DMAPI
+ u64 dm_fsid; /* 8: FS ID = hash of original uuid */
+ uint dm_evmask; /* 4: DMAPI event mask */
+ struct inode *dm_root; /* 4: root inode */
+ char dm_mtpt[JFS_NAME_MAX+1]; /* 256: mount point */
+#endif
};
/* jfs_sb_info commit_state */
diff -Nurp linux-2.6.7-rc1/fs/jfs/jfs_inode.c linux-jfs-dmapi/fs/jfs/jfs_inode.c
--- linux-2.6.7-rc1/fs/jfs/jfs_inode.c 2004-05-27 12:54:57.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/jfs_inode.c 2004-05-27 18:30:11.000000000 -0500
@@ -88,5 +88,13 @@ struct inode *ialloc(struct inode *paren
jfs_info("ialloc returns inode = 0x%p\n", inode);
+#ifdef CONFIG_JFS_DMAPI
+ /* Zero DMAPI fields */
+ memset(&jfs_inode->dmattrs, 0, sizeof(dm_attrs_t));
+ jfs_inode->dmnumrgns = 0;
+ jfs_inode->dmrgns = NULL;
+ inode->i_version = 0;
+#endif
+
return inode;
}
diff -Nurp linux-2.6.7-rc1/fs/jfs/jfs_mount.c linux-jfs-dmapi/fs/jfs/jfs_mount.c
--- linux-2.6.7-rc1/fs/jfs/jfs_mount.c 2004-05-27 12:55:18.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/jfs_mount.c 2004-05-27 18:30:11.000000000 -0500
@@ -296,6 +296,13 @@ int jfs_mount_rw(struct super_block *sb,
return rc;
}
+#ifdef CONFIG_JFS_DMAPI
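+/* Fold the 16-byte on-disk uuid into the 64-bit DMAPI filesystem ID */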
+u64 uuid_hash(u64 *uuid)
+{
+ return uuid[0] + uuid[1];
+}
+#endif
+
/*
* chkSuper()
*
@@ -403,6 +410,11 @@ static int chkSuper(struct super_block *
sbi->fsckpxd = j_sb->s_fsckpxd;
sbi->ait2 = j_sb->s_ait2;
+#ifdef CONFIG_JFS_DMAPI
+ /* Initialize DMAPI field */
+ sbi->dm_fsid = uuid_hash((u64 *)j_sb->s_uuid);
+#endif
+
out:
brelse(bh);
return rc;
diff -Nurp linux-2.6.7-rc1/fs/jfs/jfs_xtree.c linux-jfs-dmapi/fs/jfs/jfs_xtree.c
--- linux-2.6.7-rc1/fs/jfs/jfs_xtree.c 2004-05-27 12:55:48.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/jfs_xtree.c 2004-05-27 18:30:11.000000000 -0500
@@ -123,15 +123,17 @@ static int xtSplitPage(tid_t tid, struct
static int xtSplitRoot(tid_t tid, struct inode *ip,
struct xtsplit * split, struct metapage ** rmpp);
-#ifdef _STILL_TO_PORT
+#ifdef CONFIG_JFS_DMAPI
static int xtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
xtpage_t * fp, struct btstack * btstack);
+static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
+#endif
+
+#ifdef _STILL_TO_PORT
static int xtSearchNode(struct inode *ip,
xad_t * xad,
int *cmpp, struct btstack * btstack, int flag);
-
-static int xtRelink(tid_t tid, struct inode *ip, xtpage_t * fp);
#endif /* _STILL_TO_PORT */
/* External references */
@@ -2557,8 +2559,8 @@ int xtAppend(tid_t tid, /* transaction
return rc;
}
-#ifdef _STILL_TO_PORT
+#ifdef CONFIG_JFS_DMAPI
/* - TBD for defragmentaion/reorganization -
*
* xtDelete()
@@ -2622,7 +2624,7 @@ int xtDelete(tid_t tid, struct inode *ip
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset =
- (xtlck->lwm.offset) ? min(index, xtlck->lwm.offset) : index;
+ (xtlck->lwm.offset) ? min((u8)index, xtlck->lwm.offset) : index;
/* if delete from middle, shift left/compact the remaining entries */
if (index < nextindex - 1)
@@ -2756,7 +2758,7 @@ xtDeleteUp(tid_t tid, struct inode *ip,
tlck = txLock(tid, ip, mp, tlckXTREE);
xtlck = (struct xtlock *) & tlck->lock;
xtlck->lwm.offset =
- (xtlck->lwm.offset) ? min(index,
+ (xtlck->lwm.offset) ? min((u8)index,
xtlck->lwm.
offset) : index;
@@ -2784,8 +2786,9 @@ xtDeleteUp(tid_t tid, struct inode *ip,
return 0;
}
+#endif /* CONFIG_JFS_DMAPI */
-
+#ifdef _STILL_TO_PORT
/*
* NAME: xtRelocate()
*
@@ -3237,8 +3240,9 @@ static int xtSearchNode(struct inode *ip
XT_PUTPAGE(mp);
}
}
+#endif /* _STILL_TO_PORT */
-
+#ifdef CONFIG_JFS_DMAPI
/*
* xtRelink()
*
@@ -3306,7 +3310,7 @@ static int xtRelink(tid_t tid, struct in
return 0;
}
-#endif /* _STILL_TO_PORT */
+#endif /* CONFIG_JFS_DMAPI */
/*
@@ -4463,3 +4467,191 @@ int jfs_xtstat_read(char *buffer, char *
return len;
}
#endif
+
+#ifdef CONFIG_JFS_DMAPI
+/*
+ *
+ * xtPunchHole()
+ *
+ * function:
+ *	punch a hole in a file: unmap the byte range [xoff, xoff+xlen)
+ *	from the file's xtree, trimming or splitting any extents that
+ *	overlap it. An xlen of zero truncates the file at xoff instead.
+ *
+ * parameter:
+ *	tid	- transaction id;
+ *	ip	- file object;
+ *	xoff	- hole offset (in bytes);
+ *	xlen	- hole length (in bytes), or 0 to truncate at xoff;
+ *	flag	- currently unused;
+ *
+ * return:
+ *	0 for success; negative errno on failure.
+ */
+int xtPunchHole(tid_t tid, struct inode *ip, s64 xoff, s32 xlen, int flag)
+{
+ int error = 0;
+ struct lxdlist lxdlist;
+ lxd_t lxd;
+ struct xadlist xadlist;
+ xad_t *pxad_array = NULL;
+ xad_t xad;
+ int elem = 0;
+ int alloc_size = 0;
+ int i;
+ dm_off_t xad_off;
+ dm_size_t xad_len;
+ dm_off_t xad_end;
+ u64 xend;
+
+	/* Handle truncation first; it doesn't require twiddling xads */
+ if (xlen == 0) {
+ loff_t old_size = ip->i_size;
+ s64 new_size;
+
+ new_size = xtTruncate(tid, ip, xoff, 0);
+
+ if (new_size < 0) {
+ ip->i_size = old_size;
+ return new_size;
+ } else {
+ return 0;
+ }
+ }
+
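+	/* Convert the hole's byte range to filesystem blocks */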
+ xoff >>= ip->i_blkbits;
+ xlen >>= ip->i_blkbits;
+ xend = xoff + xlen;
+
+ /* Obtain single array of xads that covers entire file */
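+	/* Retry with a larger array while the lookup fills it without
+	 * reaching the end of the hole. */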
+ do {
+ /* Free prior xad array if one exists */
+ if (pxad_array != NULL) {
+ kmem_free(pxad_array, alloc_size);
+ }
+
+ elem += 16; /* 256-byte chunk */
+ alloc_size = elem * sizeof(xad_t);
+ pxad_array = kmem_alloc(alloc_size, KM_SLEEP);
+
+ if (pxad_array == NULL)
+ return -ENOMEM;
+
+ lxdlist.maxnlxd = lxdlist.nlxd = 1;
+ LXDlength(&lxd, (ip->i_size >> ip->i_blkbits)+1);
+ LXDoffset(&lxd, 0);
+ lxdlist.lxd = &lxd;
+
+ xadlist.maxnxad = xadlist.nxad = elem;
+ xadlist.xad = pxad_array;
+
+ error = xtLookupList(ip, &lxdlist, &xadlist, 0);
+
+ if (error) {
+ if (pxad_array != NULL)
+ kmem_free(pxad_array, alloc_size);
+ return error;
+ }
+	} while ((xadlist.nxad == elem) &&
+		 ((offsetXAD(&xadlist.xad[elem-1]) +
+		   lengthXAD(&xadlist.xad[elem-1])) < xend));
+
+ for (i = 0, error = 0; (i < xadlist.nxad) && (error == 0); i++) {
+ /* nothing to do if xad is already hole */
+ if (xadlist.xad[i].flag & XAD_NOTRECORDED)
+ continue;
+
+ xad_off = offsetXAD(&xadlist.xad[i]);
+ xad_len = lengthXAD(&xadlist.xad[i]);
+ xad_end = xad_off + xad_len;
+
+ /* xad completely within hole
+ * |------XAD------|
+ * |----------------hole--------------------|
+ */
+ if ((xad_off >= xoff) && (xad_end <= xend)) {
+ error = xtDelete(tid, ip, xad_off, xad_len, 0);
+ }
+
+ /* xad overlaps beginning of hole, eliminate part of xad
+ * |--------XAD--------|---->
+ * |------hole------|
+ */
+ else if ((xad_off < xoff) && (xad_end <= xend) && (xad_end > xoff)) {
+ memcpy(&xad, &xadlist.xad[i], sizeof(xad_t));
+ XADlength(&xad, xoff - xad_off);
+
+ error = xtUpdate(tid, ip, &xad);
+ if (!error) {
+ /* should now have following, so delete rXAD:
+ * |--lXAD--|----rXAD---|---->
+ * |------hole------|
+ */
+
+				error = xtDelete(tid, ip, xoff, xad_end - xoff, 0);
+ }
+ }
+
+ /* xad overlaps end of hole, eliminate part of xad
+ * >-------|--------XAD--------|
+ * |------hole------|
+ */
+ else if ((xad_off >= xoff) && (xad_end > xend) && (xad_off < xend)) {
+ memcpy(&xad, &xadlist.xad[i], sizeof(xad_t));
+ XADlength(&xad, xad_len - (xad_end - xend));
+
+ error = xtUpdate(tid, ip, &xad);
+ if (!error) {
+ /* should now have following, so delete lXAD:
+ * >-------|--lXAD--|----rXAD---|
+ * |------hole------|
+ */
+
+ error = xtDelete(tid, ip, xad_off,
+ xad_len - (xad_end - xend), 0);
+ }
+ }
+
+ /* xad completely contains hole, need to do some twiddling
+ * |----------------XAD--------------------|
+ * |------hole------|
+ */
+ else if ((xad_off < xoff) && (xad_end > xend)) {
+ dm_off_t xad_addr = addressXAD(&xadlist.xad[i]);
+ dm_off_t new_off;
+ dm_size_t new_len;
+
+ memcpy(&xad, &xadlist.xad[i], sizeof(xad_t));
+ new_len = xoff - xad_off;
+ XADlength(&xad, new_len);
+
+ error = xtUpdate(tid, ip, &xad);
+ if (!error) {
+ error = xtDelete(tid, ip, xoff,
+ (xad_end - xoff), 0);
+ }
+
+ /* should now have following, so create rXAD:
+ * |--lXAD--|----------DELETED XAD---------|
+ * |------hole------|
+ */
+
+ if (!error) {
+ xad_addr += new_len + xlen;
+ new_off = xoff + xlen;
+				error = xtInsert(tid, ip, 0, new_off,
+						 xad_end - new_off,
+						 &xad_addr, 0);
+ }
+
+ /* should now have following:
+ * |--lXAD--|----DELETED-----|-----rXAD----|
+ * |------hole------|
+ */
+ }
+ }
+
+ if (pxad_array != NULL)
+ kmem_free(pxad_array, alloc_size);
+ return error;
+}
+#endif /* CONFIG_JFS_DMAPI */
diff -Nurp linux-2.6.7-rc1/fs/jfs/Makefile linux-jfs-dmapi/fs/jfs/Makefile
--- linux-2.6.7-rc1/fs/jfs/Makefile 2004-05-27 12:56:56.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/Makefile 2004-05-27 18:30:11.000000000 -0500
@@ -2,6 +2,10 @@
# Makefile for the Linux JFS filesystem routines.
#
+ifeq ($(CONFIG_JFS_DMAPI),y)
+ EXTRA_CFLAGS += -I$(TOPDIR)/fs/jfs -DDM_USE_SHASH
+endif
+
obj-$(CONFIG_JFS_FS) += jfs.o
jfs-y := super.o file.o inode.o namei.o jfs_mount.o jfs_umount.o \
@@ -12,4 +16,22 @@ jfs-y := super.o file.o inode.o namei
jfs-$(CONFIG_JFS_POSIX_ACL) += acl.o
+jfs-$(CONFIG_JFS_DMAPI) += $(addprefix dmapi/, \
+ dmapi_sysent.o \
+ dmapi_attr.o \
+ dmapi_config.o \
+ dmapi_bulkattr.o \
+ dmapi_dmattr.o \
+ dmapi_event.o \
+ dmapi_handle.o \
+ dmapi_hole.o \
+ dmapi_io.o \
+ dmapi_mountinfo.o \
+ dmapi_region.o \
+ dmapi_register.o \
+ dmapi_right.o \
+ dmapi_session.o \
+ dmapi_jfs.o \
+ kmem.o)
+
EXTRA_CFLAGS += -D_JFS_4K
diff -Nurp linux-2.6.7-rc1/fs/jfs/namei.c linux-jfs-dmapi/fs/jfs/namei.c
--- linux-2.6.7-rc1/fs/jfs/namei.c 2004-05-27 12:56:32.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/namei.c 2004-05-28 12:11:59.000000000 -0500
@@ -28,6 +28,10 @@
#include "jfs_xattr.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
+#ifdef CONFIG_JFS_DMAPI
+#include "jfs_dmapi.h"
+int jfs_setattr(struct dentry *, struct iattr *);
+#endif
extern struct inode_operations jfs_file_inode_operations;
extern struct inode_operations jfs_symlink_inode_operations;
@@ -74,6 +78,16 @@ static int jfs_create(struct inode *dip,
jfs_info("jfs_create: dip:0x%p name:%s", dip, dentry->d_name.name);
+#ifdef CONFIG_JFS_DMAPI
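+	/* DMAPI pre-event: a DM application may veto the create before any
+	 * on-disk state changes; the matching post-event is sent at out1. */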
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_CREATE)) {
+ rc = JFS_SEND_NAMESP(DM_EVENT_CREATE, dip, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, mode, 0, 0);
+ if (rc)
+ goto out_dm;
+ }
+#endif
+
/*
* search parent directory for entry/freespace
* (dtSearch() returns parent directory page pinned)
@@ -162,6 +176,14 @@ static int jfs_create(struct inode *dip,
out1:
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_POSTCREATE))
+ JFS_SEND_NAMESP(DM_EVENT_POSTCREATE, dip, DM_RIGHT_NULL, ip,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, ip->i_mode, rc, 0);
+ out_dm:
+#endif
+
jfs_info("jfs_create: rc:%d", rc);
return rc;
}
@@ -195,6 +217,16 @@ static int jfs_mkdir(struct inode *dip,
jfs_info("jfs_mkdir: dip:0x%p name:%s", dip, dentry->d_name.name);
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_CREATE)) {
+ rc = JFS_SEND_NAMESP(DM_EVENT_CREATE, dip, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, mode, 0, 0);
+ if (rc)
+ goto out_dm;
+ }
+#endif
+
/* link count overflow on parent directory ? */
if (dip->i_nlink == JFS_LINK_MAX) {
rc = -EMLINK;
@@ -293,6 +325,14 @@ static int jfs_mkdir(struct inode *dip,
out1:
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_POSTCREATE))
+ JFS_SEND_NAMESP(DM_EVENT_POSTCREATE, dip, DM_RIGHT_NULL,
+ ip, DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, ip->i_mode, rc, 0);
+ out_dm:
+#endif
+
jfs_info("jfs_mkdir: rc:%d", rc);
return rc;
}
@@ -328,6 +368,16 @@ static int jfs_rmdir(struct inode *dip,
jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_REMOVE)) {
+ rc = JFS_SEND_NAMESP(DM_EVENT_REMOVE, dip, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, 0, 0, 0);
+ if (rc)
+ goto out_dm;
+ }
+#endif
+
/* directory must be empty to be removed */
if (!dtEmpty(ip)) {
rc = -ENOTEMPTY;
@@ -415,6 +465,14 @@ static int jfs_rmdir(struct inode *dip,
free_UCSname(&dname);
out:
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_POSTREMOVE))
+ JFS_SEND_NAMESP(DM_EVENT_POSTREMOVE, dip, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, ip->i_mode, rc, 0);
+ out_dm:
+#endif
+
jfs_info("jfs_rmdir: rc:%d", rc);
return rc;
}
@@ -452,6 +510,17 @@ static int jfs_unlink(struct inode *dip,
int commit_flag;
jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name);
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_REMOVE)) {
+ rc = JFS_SEND_NAMESP(DM_EVENT_REMOVE, dip, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, 0, 0, 0);
+ if (rc)
+ goto out_dm;
+ }
+#endif
+
if ((rc = get_UCSname(&dname, dentry)))
goto out;
@@ -564,6 +633,15 @@ static int jfs_unlink(struct inode *dip,
out1:
free_UCSname(&dname);
out:
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_POSTREMOVE))
+ JFS_SEND_NAMESP(DM_EVENT_POSTREMOVE, dip, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, ip->i_mode, rc, 0);
+
+ out_dm:
+#endif
+
jfs_info("jfs_unlink: rc:%d", rc);
return rc;
}
@@ -774,6 +852,16 @@ static int jfs_link(struct dentry *old_d
jfs_info("jfs_link: %s %s", old_dentry->d_name.name,
dentry->d_name.name);
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dir, DM_EVENT_LINK)) {
+ rc = JFS_SEND_NAMESP(DM_EVENT_LINK, dir, DM_RIGHT_NULL, ip,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, 0, 0, 0);
+ if (rc)
+ goto out_dm;
+ }
+#endif
+
if (ip->i_nlink == JFS_LINK_MAX)
return -EMLINK;
@@ -823,6 +911,13 @@ static int jfs_link(struct dentry *old_d
up(&JFS_IP(dir)->commit_sem);
up(&JFS_IP(ip)->commit_sem);
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dir, DM_EVENT_POSTLINK))
+ JFS_SEND_NAMESP(DM_EVENT_POSTLINK, dir, DM_RIGHT_NULL,
+ ip, DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ NULL, 0, rc, 0);
+ out_dm:
+#endif
jfs_info("jfs_link: rc:%d", rc);
return rc;
}
@@ -867,6 +962,16 @@ static int jfs_symlink(struct inode *dip
jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name);
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_SYMLINK)) {
+ rc = JFS_SEND_NAMESP(DM_EVENT_SYMLINK, dip, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ (char *)name, 0, 0, 0);
+ if (rc)
+ goto out_dm;
+ }
+#endif
+
ssize = strlen(name) + 1;
/*
@@ -1041,6 +1146,13 @@ static int jfs_symlink(struct inode *dip
#endif
out1:
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(dip, DM_EVENT_POSTSYMLINK))
+ JFS_SEND_NAMESP(DM_EVENT_POSTSYMLINK, dip, DM_RIGHT_NULL, ip,
+ DM_RIGHT_NULL, (char *)dentry->d_name.name,
+ (char *)name, 0, rc, 0);
+ out_dm:
+#endif
jfs_info("jfs_symlink: rc:%d", rc);
return rc;
}
@@ -1077,6 +1189,18 @@ static int jfs_rename(struct inode *old_
old_ip = old_dentry->d_inode;
new_ip = new_dentry->d_inode;
+
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(old_dir, DM_EVENT_RENAME) ||
+ DM_EVENT_ENABLED(new_dir, DM_EVENT_RENAME)) {
+ rc = JFS_SEND_NAMESP(DM_EVENT_RENAME, old_dir, DM_RIGHT_NULL,
+ new_dir, DM_RIGHT_NULL,
+ (char *)old_dentry->d_name.name,
+ (char *)new_dentry->d_name.name, 0, 0, 0);
+ if (rc)
+ goto out_dm;
+ }
+#endif
if ((rc = get_UCSname(&old_dname, old_dentry)))
goto out1;
@@ -1309,6 +1433,16 @@ static int jfs_rename(struct inode *old_
clear_cflag(COMMIT_Stale, old_dir);
}
+#ifdef CONFIG_JFS_DMAPI
+ if (DM_EVENT_ENABLED(old_dir, DM_EVENT_POSTRENAME) ||
+ DM_EVENT_ENABLED(new_dir, DM_EVENT_POSTRENAME))
+ JFS_SEND_NAMESP(DM_EVENT_POSTRENAME, old_dir, DM_RIGHT_NULL,
+ new_dir, DM_RIGHT_NULL,
+ (char *)old_dentry->d_name.name,
+ (char *)new_dentry->d_name.name, 0, rc, 0);
+ out_dm:
+#endif
+
jfs_info("jfs_rename: returning %d", rc);
return rc;
}
@@ -1470,6 +1604,18 @@ struct dentry *jfs_get_parent(struct den
return parent;
}
+#ifdef CONFIG_JFS_DMAPI
+static int jfs_releasedir(struct inode *inode, struct file *file)
+{
+
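+	/* Send DM_EVENT_CLOSE only when the last reference to this
+	 * directory's dentry is going away. */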
+ if ((atomic_read(&file->f_dentry->d_count) == 1) &&
+ (DM_EVENT_ENABLED(inode, DM_EVENT_CLOSE)))
+ JFS_SEND_NAMESP(DM_EVENT_CLOSE, inode, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, NULL, NULL, 0, 0, 0);
+ return 0;
+}
+#endif
+
struct inode_operations jfs_dir_inode_operations = {
.create = jfs_create,
.lookup = jfs_lookup,
@@ -1487,6 +1633,8 @@ struct inode_operations jfs_dir_inode_op
#ifdef CONFIG_JFS_POSIX_ACL
.setattr = jfs_setattr,
.permission = jfs_permission,
+#elif defined(CONFIG_JFS_DMAPI)
+ .setattr = jfs_setattr,
#endif
};
@@ -1494,4 +1642,7 @@ struct file_operations jfs_dir_operation
.read = generic_read_dir,
.readdir = jfs_readdir,
.fsync = jfs_fsync,
+#ifdef CONFIG_JFS_DMAPI
+ .release = jfs_releasedir,
+#endif
};
diff -Nurp linux-2.6.7-rc1/fs/jfs/super.c linux-jfs-dmapi/fs/jfs/super.c
--- linux-2.6.7-rc1/fs/jfs/super.c 2004-05-27 12:55:43.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/super.c 2004-05-27 18:30:11.000000000 -0500
@@ -34,6 +34,9 @@
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
+#ifdef CONFIG_JFS_DMAPI
+#include "jfs_dmapi.h"
+#endif
MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
@@ -157,6 +160,14 @@ static void jfs_destroy_inode(struct ino
}
#endif
+#ifdef CONFIG_JFS_DMAPI
+ /* Clean up any DMAPI managed regions */
+ if (ji->dmrgns) {
+ kfree(ji->dmrgns);
+ ji->dmrgns = NULL;
+ }
+#endif
+
kmem_cache_free(jfs_inode_cachep, ji);
}
@@ -198,6 +209,19 @@ static void jfs_put_super(struct super_b
int rc;
jfs_info("In jfs_put_super");
+
+#ifdef CONFIG_JFS_DMAPI
+ /* if being forcibly unmounted, preunmount event already sent */
+ if ((sbi->flag & JFS_DMI) && !(sbi->mntflag & JFS_UNMOUNT_FORCE)) {
+ rc = jfs_dm_preunmount(sb);
+ if (rc) {
+ jfs_err("jfs_dm_preunmount aborted with return code %d",
+ rc);
+ return;
+ }
+ }
+#endif
+
rc = jfs_umount(sb);
if (rc)
jfs_err("jfs_umount failed with return code %d", rc);
@@ -205,12 +229,20 @@ static void jfs_put_super(struct super_b
unload_nls(sbi->nls_tab);
sbi->nls_tab = NULL;
+#ifdef CONFIG_JFS_DMAPI
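+	/* Send the DMAPI unmount event; rc carries the jfs_umount result */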
+ if (sbi->flag & JFS_DMI)
+ jfs_dm_unmount(sb, rc);
+#endif
+
kfree(sbi);
}
enum {
Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err,
+#ifdef CONFIG_JFS_DMAPI
+ Opt_dmapi, Opt_mtpt,
+#endif
};
static match_table_t tokens = {
@@ -220,6 +252,11 @@ static match_table_t tokens = {
{Opt_resize, "resize=%u"},
{Opt_resize_nosize, "resize"},
{Opt_errors, "errors=%s"},
+#ifdef CONFIG_JFS_DMAPI
+ {Opt_dmapi, "dmapi"},
+ {Opt_dmapi, "xdsm"},
+ {Opt_mtpt, "mtpt=%s"},
+#endif
{Opt_ignore, "noquota"},
{Opt_ignore, "quota"},
{Opt_ignore, "usrquota"},
@@ -238,6 +275,10 @@ static int parse_options(char *options,
if (!options)
return 1;
+
+#ifdef CONFIG_JFS_DMAPI
+ *sbi->dm_mtpt = '\0';
+#endif
while ((p = strsep(&options, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
@@ -306,6 +347,14 @@ static int parse_options(char *options,
}
break;
}
+#ifdef CONFIG_JFS_DMAPI
+ case Opt_dmapi:
+ *flag |= JFS_DMI;
+ break;
+ case Opt_mtpt:
+			strncpy(sbi->dm_mtpt, args[0].from, JFS_NAME_MAX);
+			sbi->dm_mtpt[JFS_NAME_MAX] = '\0';
+ break;
+#endif
default:
printk("jfs: Unrecognized mount option \"%s\" "
" or missing value\n", p);
@@ -313,6 +362,14 @@ static int parse_options(char *options,
}
}
+#ifdef CONFIG_JFS_DMAPI
+ if ((*flag & JFS_DMI) && (*sbi->dm_mtpt == '\0')) {
+ printk(KERN_ERR
+		       "JFS: the dmapi option also requires the mtpt= option\n");
+ goto cleanup;
+ }
+#endif
+
if (nls_map) {
/* Discard old (if remount) */
if (sbi->nls_tab)
@@ -348,12 +405,62 @@ static int jfs_remount(struct super_bloc
}
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
+#ifdef CONFIG_JFS_DMAPI
+ if (flag & JFS_DMI) {
+			/* simulate unmount/mount so the DM application sees the change to R/W */
+ rc = jfs_dm_preunmount(sb);
+ if (rc) {
+				jfs_err("jfs_dm_preunmount aborted with return code %d",
+ rc);
+ return rc;
+ }
+ jfs_dm_unmount(sb, rc);
+ }
+#endif
+
JFS_SBI(sb)->flag = flag;
- return jfs_mount_rw(sb, 1);
+ rc = jfs_mount_rw(sb, 1);
+
+#ifdef CONFIG_JFS_DMAPI
+ if ((flag & JFS_DMI) && !rc) {
+ rc = jfs_dm_mount(sb);
+ if (rc) {
+ jfs_err("dm app aborts mount w/return code = %d",
+ rc);
+ return rc;
+ }
+ }
+#endif
+		return rc;
}
if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
+#ifdef CONFIG_JFS_DMAPI
+ if (flag & JFS_DMI) {
+ /*
+			 * simulate unmount/mount so the DM application sees the change to R/O
+ */
+ rc = jfs_dm_preunmount(sb);
+ if (rc) {
+				jfs_err("jfs_dm_preunmount aborted with return code %d",
+ rc);
+ return rc;
+ }
+ jfs_dm_unmount(sb, rc);
+ }
+#endif
+
rc = jfs_umount_rw(sb);
JFS_SBI(sb)->flag = flag;
+
+#ifdef CONFIG_JFS_DMAPI
+ if ((flag & JFS_DMI) && !rc) {
+ rc = jfs_dm_mount(sb);
+ if (rc) {
+ jfs_err("dm app aborts mount w/return code = %d",
+ rc);
+ return rc;
+ }
+ }
+#endif
return rc;
}
if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
@@ -453,6 +560,16 @@ static int jfs_fill_super(struct super_b
sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
#endif
+#ifdef CONFIG_JFS_DMAPI
+ if (sbi->flag & JFS_DMI) {
+ rc = jfs_dm_mount(sb);
+ if (rc) {
+ jfs_err("dm app aborts mount w/return code = %d", rc);
+ goto out_no_root;
+ }
+ }
+#endif
+
return 0;
out_no_root:
@@ -529,6 +646,9 @@ static struct super_operations jfs_super
.unlockfs = jfs_unlockfs,
.statfs = jfs_statfs,
.remount_fs = jfs_remount,
+#ifdef CONFIG_JFS_DMAPI
+ .umount_begin = jfs_dm_umount_begin,
+#endif
};
static struct export_operations jfs_export_operations = {
@@ -636,6 +756,14 @@ static int __init init_jfs_fs(void)
jfs_proc_init();
#endif
+#ifdef CONFIG_JFS_DMAPI
+ rc = dmapi_init();
+ if (rc) {
+ jfs_err("init_jfs_fs: dmapi_init failed w/rc = %d", rc);
+ goto kill_committask;
+ }
+#endif
+
return register_filesystem(&jfs_fs_type);
kill_committask:
@@ -674,6 +802,9 @@ static void __exit exit_jfs_fs(void)
#ifdef PROC_FS_JFS
jfs_proc_clean();
#endif
+#ifdef CONFIG_JFS_DMAPI
+ dmapi_uninit();
+#endif
unregister_filesystem(&jfs_fs_type);
kmem_cache_destroy(jfs_inode_cachep);
}
diff -Nurp linux-2.6.7-rc1/fs/jfs/xattr.c linux-jfs-dmapi/fs/jfs/xattr.c
--- linux-2.6.7-rc1/fs/jfs/xattr.c 2004-05-27 12:55:33.000000000 -0500
+++ linux-jfs-dmapi/fs/jfs/xattr.c 2004-05-27 18:30:11.000000000 -0500
@@ -28,6 +28,9 @@
#include "jfs_metapage.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"
+#ifdef CONFIG_JFS_DMAPI
+#include "jfs_dmapi.h"
+#endif
/*
* jfs_xattr.c: extended attribute service
@@ -143,7 +146,7 @@ static inline int copy_name(char *buffer
}
/* Forward references */
-static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
+void jfs_ea_release(struct inode *inode, struct ea_buffer *ea_buf);
/*
* NAME: ea_write_inline
@@ -423,14 +426,14 @@ static int ea_read(struct inode *ip, str
}
/*
- * NAME: ea_get
+ * NAME: jfs_ea_get
*
* FUNCTION: Returns buffer containing existing extended attributes.
* The size of the buffer will be the larger of the existing
* attributes size, or min_size.
*
* The buffer, which may be inlined in the inode or in the
- * page cache must be release by calling ea_release or ea_put
+ * page cache, must be released by calling jfs_ea_release or ea_put
*
* PARAMETERS:
* inode - Inode pointer
@@ -439,7 +442,7 @@ static int ea_read(struct inode *ip, str
*
* RETURNS: 0 for success; Other indicates failure
*/
-static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
+int jfs_ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
{
struct jfs_inode_info *ji = JFS_IP(inode);
struct super_block *sb = inode->i_sb;
@@ -482,7 +485,7 @@ static int ea_get(struct inode *inode, s
current_blocks = 0;
} else {
if (!(ji->ea.flag & DXD_EXTENT)) {
- jfs_error(sb, "ea_get: invalid ea.flag)");
+ jfs_error(sb, "jfs_ea_get: invalid ea.flag)");
return -EIO;
}
current_blocks = (ea_size + sb->s_blocksize - 1) >>
@@ -560,16 +563,16 @@ static int ea_get(struct inode *inode, s
size_check:
if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
- printk(KERN_ERR "ea_get: invalid extended attribute\n");
+ printk(KERN_ERR "jfs_ea_get: invalid extended attribute\n");
dump_mem("xattr", ea_buf->xattr, ea_size);
- ea_release(inode, ea_buf);
+ jfs_ea_release(inode, ea_buf);
return -EIO;
}
return ea_size;
}
-static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
+void jfs_ea_release(struct inode *inode, struct ea_buffer *ea_buf)
{
if (ea_buf->flag & EA_MALLOC)
kfree(ea_buf->xattr);
@@ -591,7 +594,7 @@ static int ea_put(struct inode *inode, s
tid_t tid;
if (new_size == 0) {
- ea_release(inode, ea_buf);
+ jfs_ea_release(inode, ea_buf);
ea_buf = 0;
} else if (ea_buf->flag & EA_INLINE) {
assert(new_size <= sizeof (ji->i_inline_ea));
@@ -719,6 +722,11 @@ static int can_set_system_xattr(struct i
return 0;
}
#endif /* CONFIG_JFS_POSIX_ACL */
+#ifdef CONFIG_JFS_DMAPI
+ /* Look for DMAPI xattr */
+ if (strcmp(name, DMATTR_PERS_REGIONS) == 0)
+ return 0;
+#endif
return -EOPNOTSUPP;
}
@@ -778,7 +786,7 @@ int __jfs_setxattr(struct inode *inode,
down_write(&JFS_IP(inode)->xattr_sem);
- xattr_size = ea_get(inode, &ea_buf, 0);
+ xattr_size = jfs_ea_get(inode, &ea_buf, 0);
if (xattr_size < 0) {
rc = xattr_size;
goto out;
@@ -824,8 +832,8 @@ int __jfs_setxattr(struct inode *inode,
* We need to allocate more space for merged ea list.
* We should only have loop to again: once.
*/
- ea_release(inode, &ea_buf);
- xattr_size = ea_get(inode, &ea_buf, new_size);
+ jfs_ea_release(inode, &ea_buf);
+ xattr_size = jfs_ea_get(inode, &ea_buf, new_size);
if (xattr_size < 0) {
rc = xattr_size;
goto out;
@@ -881,7 +889,7 @@ int __jfs_setxattr(struct inode *inode,
goto out;
release:
- ea_release(inode, &ea_buf);
+ jfs_ea_release(inode, &ea_buf);
out:
up_write(&JFS_IP(inode)->xattr_sem);
@@ -894,12 +902,19 @@ int __jfs_setxattr(struct inode *inode,
int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t value_len, int flags)
{
+ int rc;
+
if (value == NULL) { /* empty EA, do not remove */
value = "";
value_len = 0;
}
- return __jfs_setxattr(dentry->d_inode, name, value, value_len, flags);
+ rc = __jfs_setxattr(dentry->d_inode, name, value, value_len, flags);
+#ifdef CONFIG_JFS_DMAPI
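+	/* A successful EA change is a DMAPI-visible metadata update */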
+ if (!rc)
+ dentry->d_inode->i_version++;
+#endif
+ return rc;
}
static int can_get_xattr(struct inode *inode, const char *name)
@@ -937,7 +952,7 @@ ssize_t __jfs_getxattr(struct inode *ino
down_read(&JFS_IP(inode)->xattr_sem);
- xattr_size = ea_get(inode, &ea_buf, 0);
+ xattr_size = jfs_ea_get(inode, &ea_buf, 0);
if (xattr_size < 0) {
size = xattr_size;
@@ -968,7 +983,7 @@ ssize_t __jfs_getxattr(struct inode *ino
not_found:
size = -ENODATA;
release:
- ea_release(inode, &ea_buf);
+ jfs_ea_release(inode, &ea_buf);
out:
up_read(&JFS_IP(inode)->xattr_sem);
@@ -1000,7 +1015,7 @@ ssize_t jfs_listxattr(struct dentry * de
down_read(&JFS_IP(inode)->xattr_sem);
- xattr_size = ea_get(inode, &ea_buf, 0);
+ xattr_size = jfs_ea_get(inode, &ea_buf, 0);
if (xattr_size < 0) {
size = xattr_size;
goto out;
@@ -1031,7 +1046,7 @@ ssize_t jfs_listxattr(struct dentry * de
}
release:
- ea_release(inode, &ea_buf);
+ jfs_ea_release(inode, &ea_buf);
out:
up_read(&JFS_IP(inode)->xattr_sem);
return size;
diff -Nurp linux-2.6.7-rc1/fs/Kconfig linux-jfs-dmapi/fs/Kconfig
--- linux-2.6.7-rc1/fs/Kconfig 2004-05-27 12:56:02.000000000 -0500
+++ linux-jfs-dmapi/fs/Kconfig 2004-05-27 18:30:58.000000000 -0500
@@ -299,6 +299,20 @@ config JFS_POSIX_ACL
If you don't know what Access Control Lists are, say N
+config JFS_DMAPI
+ bool "JFS DMAPI (XDSM) Support"
+ depends on JFS_FS
+ help
+ The Data Storage Management (XDSM) API is a CAE Specification. It
+ defines APIs which use events to notify Data Management (DM)
+ applications about operations on files, enable DM applications to
+ store arbitrary attribute information with a file, support managed
+ regions within a file, and use DMAPI access rights to control
+ access to a file object. DMAPI refers to the interface defined by
+ the XDSM Specification.
+
+ If you don't know what DMAPI is, say N.
+
config JFS_DEBUG
bool "JFS debugging"
depends on JFS_FS