xfs
[Top] [All Lists]

Re: snapshot regression test try 2

To: Greg Freemyer <freemyer@xxxxxxxxxxxxxxxxx>
Subject: Re: snapshot regression test try 2
From: Stephen Lord <lord@xxxxxxx>
Date: 29 Aug 2002 20:54:26 -0500
Cc: xfs mailing list <linux-xfs@xxxxxxxxxxx>
In-reply-to: <20020829232620.LRFR28682.imf05bis.bellsouth.net@TAZ2>
References: <20020829232620.LRFR28682.imf05bis.bellsouth.net@TAZ2>
Sender: linux-xfs-bounce@xxxxxxxxxxx
On Thu, 2002-08-29 at 18:23, Greg Freemyer wrote:
> 
> Nathan,
> 
> I have made a second attempt at the script.
> 
> I think it addresses your concerns.
> 
> Someone still needs to do the TODO items, but they don't come up in my 
> environment because $SCRATCH_DEV is a LV for me.
> 
> Steve Lord has run my previous version to some extent, but I don't know if he 
> did it via check, or he just ran the script.
> 

I ran it by hand, I think check needs to know about the specific test
numbers, it also wants a good output file to compare against.

One thing I did discover was the partition type needs to be set
correctly. I presume that was what the dd from /dev/zero was for,
I only had a free partition on a disk, not a free volume.

I have subsequently been chasing oopses from running freeze/thaw
on a filesystem under heavy load, but that is a different problem
than you saw I think.

Steve

> Greg Freemyer
> Internet Engineer
> Deployment and Integration Specialist
> Compaq ASE - Tru64 v4, v5
> Compaq Master ASE - SAN Architect
> The Norcross Group
> www.NorcrossGroup.com
> 
> ========  068.out
> QA output created by 068
> SUCCESS, COMPLETED ALL ITERATIONS WITH NO TIME OUTS!!!!!!!!!!!!
> Cleanup beginning
> ======== 068
> #! /bin/sh
> # XFS QA Test No. 068
> # $Id: 1.1 $
> #
> # Test LVM snapshot creation
> #
> # The timing and placement of kills and waits is particularly sensitive.
> #  Don't change them unless you want to spend some time getting it right 
> again.
> #
> #-----------------------------------------------------------------------
> # Copyright (c) 2000, 2002 Silicon Graphics, Inc.  All Rights Reserved.
> # 
> # This program is free software; you can redistribute it and/or modify it
> # under the terms of version 2 of the GNU General Public License as
> # published by the Free Software Foundation.
> # 
> # This program is distributed in the hope that it would be useful, but
> # WITHOUT ANY WARRANTY; without even the implied warranty of
> # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
> # 
> # Further, this software is distributed without any warranty that it is
> # free of the rightful claim of any third person regarding infringement
> # or the like.  Any license provided herein, whether implied or
> # otherwise, applies only to this software file.  Patent licenses, if
> # any, provided herein do not apply to combinations of this program with
> # other software, or any other product whatsoever.
> # 
> # You should have received a copy of the GNU General Public License along
> # with this program; if not, write the Free Software Foundation, Inc., 59
> # Temple Place - Suite 330, Boston MA 02111-1307, USA.
> # 
> # Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
> # Mountain View, CA  94043, or:
> # 
> # http://www.sgi.com 
> # 
> # For further information regarding this notice, see: 
> # 
> # http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
> #-----------------------------------------------------------------------
> #
> # creator
> owner=freemyer@xxxxxxxxxxxxxxxxx
> 
> seq=`basename $0`
> echo "QA output created by $seq"
> 
> here=`pwd`
> tmp=/tmp/$$
> status=1        # failure is the default!
> 
> GENERATE_IO_LOAD=TRUE           # If "FALSE", the dd load loop is skipped
> DELAY_BETWEEN_ITERATIONS=10
> ITERATIONS=20
> VG=/dev/VGscratch
> #SCRATCH_DEV=/dev/xxxx          # Only needed if running by hand,  ie.  check 
> sets these
> #SCRATCH_MNT=/scratch           # Only needed if running by hand,  ie.  check 
> sets these
> SCRATCH_SNAP_MNT=$tmp.scratch_snap
> 
> 
> _cleanup()
> {
> 
>         echo Cleanup beginning
> 
>         rm $tmp.running > /dev/null 2>&1
>         xfs_freeze -u $SCRATCH_MNT
> 
>         sleep 10      # Give the dd loop time to finish
> 
>         # Comment out unless needed.  If needed, wrap with logic to ensure 
> the FS is mounted
>         #Kill off any other possible stray stragglers that may be out there.  
> There should not be any.
>         # fuser -k -m $SCRATCH_SNAP_MNT/dummy     >/dev/null 2>&1
>         # fuser -k -m $SCRATCH_MNT/dummy          >/dev/null 2>&1
> 
>         wait
> 
>         umount $SCRATCH_SNAP_MNT > /dev/null 2>&1
>         rmdir $SCRATCH_SNAP_MNT > /dev/null 2>&1
>         umount $SCRATCH_MNT > /dev/null 2>&1
> 
>         lvremove -f $VG/scratch_snap > /dev/null 2>&1
>         lvremove -f $VG/scratch > /dev/null 2>&1
> 
> #TODO   vgremove $VG
> 
>         rm -f $tmp.*        # if we ever use tmp files
>         trap 0 1 2 3 15
>         exit $status
> }
> 
> trap "_cleanup" 0 1 2 3 15
> 
> 
> # get standard environment, filters and checks
> . ./common.rc
> . ./common.filter
> 
> if [ -e $SCRATCH_SNAP_MNT ]; then rm -rf $SCRATCH_SNAP_MNT; fi
> 
> mkdir $SCRATCH_SNAP_MNT
> 
> #Verify we have the lvm user tools
> [ -x /sbin/lvcreate ] || _notrun "LVM lvcreate utility is not installed in 
> /sbin"
> [ -x /sbin/lvremove ] || _notrun "LVM lvremove utility is not installed in 
> /sbin"
> 
> # if the above fails for LVM 2, the below may be useful notes
> #    try "lvm vgdisplay --version" and look for a > 1.9x version number, eg,
> # 
> #     LVM version:     1.95.10-cvs (2002-05-31)
> #     Library version: 0.96.03-ioctl-cvs (2002-06-27)
> #     Driver version:  1.0.3
> 
> 
> #Verify we have an LVM enabled kernel
> 
> LVM=false
> 
> #Check if LVM 1 is in the kernel
> if grep lvm /proc/devices > /dev/null 2>&1; then LVM=true; fi
> 
> #Check if LVM 2 is in the kernel
> if grep device-mapper /proc/devices > /dev/null 2>&1; then LVM=true; fi
> 
> #Check if EVMS is in the kernel
> # TODO   # I don't know how to do this one.
> 
> if [ $LVM = false ]; then _notrun "This test requires the kernel have LVM or 
> EVMS present.  (The EVMS test is still TBD)."; fi
> 
> 
> # real QA test starts here
> 
> # Create a PV set from the scratch partition
> # TODO # (I don't know if this is needed, and it is dangerous because it 
> intentionally deletes the partition table!!!)
> # TODO # dd if=/dev/zero of=$SCRATCH_DEV bs=512 count=1
> # TODO # pvcreate $SCRATCH_DEV
> 
> # Create a VG from the PV
> # TODO # vgcreate $VG $SCRATCH_DEV
> 
> # Create a LV in the VG to snapshot
> # TODO # lvcreate -L 2G -n scratch $VG
> 
> # Mount the LV
> mkdir $SCRATCH_MNT > /dev/null 2>&1
> 
> mount $VG/scratch $SCRATCH_MNT
> 
> if [ $GENERATE_IO_LOAD != FALSE ];
> then
>         # Create a large 64 Meg zero filled file on the LV
>         dd if=/dev/zero of=$SCRATCH_MNT/dummy bs=64k count=1000  > /dev/null 
> 2>&1
> 
>         #setup an infinite loop to copy the large file, thus generating heavy 
> i/o
> 
>         touch $tmp.running
> 
>         while [ -f $tmp.running ]
>         do
>             dd if=$SCRATCH_MNT/dummy of=$SCRATCH_MNT/junk bs=64k > /dev/null 
> 2>&1
>             rm $SCRATCH_MNT/junk        # This forces metadata updates the 
> next time around
>             sync
>         done &
> fi
> 
> ii=1
> 
> while [ $ii -le $ITERATIONS ]
> do
> 
>         # echo $ii      Useful if you are running interactive, but not from 
> the xfs test scripts
> 
>         #if the VFS lock patch is present, the calls to xfs_freeze are 
> redundant, but should cause no problems
>         #       OPTIONAL
>         xfs_freeze -f $SCRATCH_MNT
>         if [ $? != 0 ] ; then
>                 echo xfs_freeze -f $SCRATCH_MNT failed
>         fi
>         (
>                 lvcreate --snapshot --size 1G --name scratch_snap $VG/scratch 
> > /dev/null 2>&1
>                 ret=$?
>                 if [ $ret != 0 ] ; then
>                         echo snapshot creation for $SCRATCH_MNT failed with 
> return code $ret
>                 fi
>         ) &
>         SNAPSHOT_shell_pid=$!
> 
>         #if the Snapshot has not completed in ten minutes, kill it
>         (
>                         # I have NOT figured out how to kill the sleep 600 
> before it exits naturally.
>                         # This does not cause a problem, but it clutters the 
> ps table.
>                 sleep 600
>                         # The kill $TIMEOUT_shell_pid keeps the below from 
> occurring
>                 echo Snapshot Lockup Occurred on loop $ii
>                 xfs_freeze -u $SCRATCH_MNT
>                 kill $$
>         ) &
>         TIMEOUT_shell_pid=$!
> 
>         wait $SNAPSHOT_shell_pid
> 
>         exec 2> /dev/null               # Send the shells stderr to /dev/null
>         kill $TIMEOUT_shell_pid    #Cancel the timeout
>         wait $TIMEOUT_shell_pid    # This causes consistent shell 
> notification for some unknown reason
>         exec 2>&1                       # Put it back to the same as stdout
> 
>         #if the VFS lock patch is present, the calls to xfs_freeze are 
> redundant, but should cause no problems
>         #       OPTIONAL
>         xfs_freeze -u $SCRATCH_MNT
>         if [ $? != 0 ] ; then
>                 echo xfs_freeze -u $SCRATCH_MNT failed
>         fi
>         #          MANDATORY   (end)
> 
>         mount -t xfs -o ro,nouuid $VG/scratch_snap $SCRATCH_SNAP_MNT
>         if [ $? != 0 ] ; then
>                 echo mount for $SCRATCH_SNAP_MNT failed
>         fi
>         umount $SCRATCH_SNAP_MNT
>         if [ $? != 0 ] ; then
>                 echo umount for $SCRATCH_SNAP_MNT failed
>         fi
>         lvremove -f $VG/scratch_snap > /dev/null 2>&1
>         if [ $? != 0 ] ; then
>                echo lvremove for $VG/scratch_snap failed
>         fi
> 
> ii=`expr $ii + 1`
>         sleep $DELAY_BETWEEN_ITERATIONS # The VG seems to need time to 
> stabilize between snapshots
>                         # With LVM 1.0.3 and XFS 1.1, I have tried this at 
> 3600 seconds and still had failures
> 
> done
> 
> # success, all done
> echo SUCCESS, COMPLETED ALL ITERATIONS WITH NO TIME OUTS!!!!!!!!!!!!
> status=0
> _cleanup
> exit 1   # _cleanup should exit, so we should never get here.
> 



[Prev in Thread] Current Thread [Next in Thread]