File: xfs-cmds / xfstests / 008

Revision 1.15, Thu May 15 16:36:40 2008 UTC by dgc.longdrop.melbourne.sgi.com
Branch: MAIN
CVS Tags: HEAD
Changes since 1.14: +16 -6 lines

With the recent change made to test 008 for reliability with 64k
page sizes, the file sizes got much larger. It appears that
randholes actually reads the entire file, so this slowed the test
down by a factor of ten (all file sizes were increased by 10x).
The test now takes about 18 minutes to run in a UML session, and
all the time is spent reading the files.

Instead, scale the file size based on the page size. We know how
many holes we are trying to produce and the I/O size being used to
produce them, so the size of the files can be finely tuned. Assuming
a decent random distribution, if the number of blocks in the file is
4x the number of I/Os and the I/O size is page sized, every I/O
should generate a new hole and we'll only get a small number of
adjacent extents. This has passed over 10 times on ia64 with a 64k
page and another 15 times on UML with a 4k page.
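
A sketch of the sizing rule in shell terms (variable names here are
illustrative only, not part of the test):

  pgsize=`src/feature -s`                # probe the system page size
  nholes=50                              # holes we want in the file
  fsize=`expr 4 \* $nholes \* $pgsize`   # 4x headroom for the random spread
  src/randholes -l $fsize -c $nholes -b $pgsize $testfile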

UML runtime is down from ~1000s to 5s, ia64 runtime is down from
~30s to 7s.
Merge of master-melb:xfs-cmds:31168a by kenmcd.

  Greatly reduce runtime by reducing file sizes down to a sane minimum.

#! /bin/sh
# FS QA Test No. 008
#
# randholes test
#
#-----------------------------------------------------------------------
# Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
#-----------------------------------------------------------------------
#
# creator
owner=dxm@sgi.com

seq=`basename $0`
echo "QA output created by $seq"

here=`pwd`
tmp=/tmp/$$
status=0	# success is the default!
pgsize=`$here/src/feature -s`	# system page size in bytes
trap "_cleanup; exit \$status" 0 1 2 3 15

_cleanup()
{
    rm -f $tmp.*
    rm -rf $testdir/randholes.$$.*
    _cleanup_testdir
}

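# normalise the page-size-dependent randholes arguments echoed by
# each test, so the golden output matches regardless of page size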
_filter()
{
    sed -e "s/-b $pgsize/-b PGSIZE/g" \
	-e "s/-l .* -c/-l FSIZE -c/g"
}

# get standard environment, filters and checks
. ./common.rc
. ./common.filter

_do_test()
{
    _n="$1"            # test number, used to name the output file
    _holes="$2"        # expected number of holes
    _param="$3"        # arguments passed through to src/randholes

    out=$testdir/randholes.$$.$_n
    echo ""
    echo "randholes.$_n : $_param" | _filter
    echo "------------------------------------------"
    if $here/src/randholes $_param $out >$tmp.out
    then
	# we can't reliably count holes when allocating in huge chunks
	# (extsize flag) or on the realtime device, so only check the
	# hole count in the normal case
	if _test_inode_flag extsize $out || _test_inode_flag realtime $out
	then
		echo "holes is in range"
	else
		# quick check - how many holes did we get?
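		# xfs_bmap reports each hole as a line like (illustrative):
		#   1: [8..15]: hole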
	        count=`xfs_bmap $out | egrep -c ': hole'`
		# blocks can end up adjacent, therefore number of holes varies
		_within_tolerance "holes" $count $_holes 10% -v
	fi
    else
        echo "    randholes returned $? - see $seq.out.full"
        echo "--------------------------------------"       >>$here/$seq.out.full
        echo "$_n - output from randholes:"                 >>$here/$seq.out.full
        echo "--------------------------------------"       >>$here/$seq.out.full
        cat $tmp.out                                        >>$here/$seq.out.full
        echo "--------------------------------------"       >>$here/$seq.out.full
        echo "$_n - output from bmap:"                      >>$here/$seq.out.full
        echo "--------------------------------------"       >>$here/$seq.out.full
        xfs_bmap -vvv $out                                  >>$here/$seq.out.full
        status=1
    fi
}

# real QA test starts here
_supported_fs xfs
_supported_os IRIX Linux

_setup_testdir

rm -f $here/$seq.out.full

# Note on special numbers here.
#
# We are trying to create roughly 50 or 100 holes in a file
# using random writes. Assuming a good distribution of 50 writes
# in a file, the file only needs to be 3-4x the write size
# multiplied by the number of writes. Hence we use 200 * pgsize
# for files we want 50 holes in and 400 * pgsize for files we want
# 100 holes in. This keeps the runtime down as low as possible.
#
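# Worked example, assuming a 4k page: test 1 becomes
#   randholes -l 819200 -c 50 -b 4096
# i.e. a 200-block file taking 50 page-sized random writes, giving
# each write 4x headroom to land in its own block.
#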
_do_test 1 50 "-l `expr 200 \* $pgsize` -c 50 -b $pgsize"
_do_test 2 100 "-l `expr 400 \* $pgsize` -c 100 -b $pgsize"
_do_test 3 100 "-l `expr 400 \* $pgsize` -c 100 -b 512"   # test partial pages

# rinse, lather, repeat for direct IO
_do_test 4 50 "-d -l `expr 200 \* $pgsize` -c 50 -b $pgsize"
_do_test 5 100 "-d -l `expr 400 \* $pgsize` -c 100 -b $pgsize"
# note: direct IO requires page aligned IO
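# (hence no sub-page -b 512 variant here, unlike buffered test 3)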

# todo: realtime.

# success, all done
exit