ktest: New TEST_START instead of using [], and use real SHA1s

Change the config to use TEST_START: the options that follow a
TEST_START automatically get the [] index as the config is read,
so the index no longer needs to appear in the config file:

TEST_START
MIN_CONFIG = myconfig

is the same as

MIN_CONFIG[1] = myconfig

The benefit is that you no longer need to keep track of test numbers
when adding or removing tests.
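
For example, a config along these lines (the ITERATE label is
described in sample.conf):

TEST_START ITERATE 3
TEST_TYPE = boot

TEST_START
TEST_TYPE = build

ends up roughly equivalent to the old numbered form

NUM_TESTS = 4
TEST_TYPE[1] = boot	(tests 2 and 3 reuse the [1] options)
TEST_TYPE[4] = build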

Also process the commit ids that are passed to the options
to get the actual SHA1, so the reference is no longer relative
to the branch. I.e., saying HEAD will resolve to the current SHA1,
and that SHA1 will be used from then on, even if another branch
is later checked out.
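
For example, with an option such as

BISECT_GOOD = HEAD~3

the commit is resolved once up front with
"git rev-list --max-count=1 HEAD~3", and the resulting SHA1 is
what the rest of the test uses.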

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index ef97817..a7e86e3 100644
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -11,21 +11,23 @@
 use File::Copy qw(cp);
 use FileHandle;
 
-$#ARGV >= 0 || die "usage: autotest.pl config-file\n";
+$#ARGV >= 0 || die "usage: ktest.pl config-file\n";
 
 $| = 1;
 
 my %opt;
+my %repeat_tests;
+my %repeats;
 my %default;
 
 #default opts
-$default{"NUM_TESTS"}		= 5;
+$default{"NUM_TESTS"}		= 1;
 $default{"REBOOT_TYPE"}		= "grub";
 $default{"TEST_TYPE"}		= "test";
 $default{"BUILD_TYPE"}		= "randconfig";
 $default{"MAKE_CMD"}		= "make";
 $default{"TIMEOUT"}		= 120;
-$default{"TMP_DIR"}		= "/tmp/autotest";
+$default{"TMP_DIR"}		= "/tmp/ktest";
 $default{"SLEEP_TIME"}		= 60;	# sleep time between tests
 $default{"BUILD_NOCLEAN"}	= 0;
 $default{"REBOOT_ON_ERROR"}	= 0;
@@ -87,29 +89,138 @@
 my $localversion;
 my $iteration = 0;
 
+sub set_value {
+    my ($lvalue, $rvalue) = @_;
+
+    if (defined($opt{$lvalue})) {
+	die "Error: Option $lvalue defined more than once!\n";
+    }
+    $opt{$lvalue} = $rvalue;
+}
+
 sub read_config {
     my ($config) = @_;
 
     open(IN, $config) || die "can't read file $config";
 
+    my $name = $config;
+    $name =~ s,.*/(.*),$1,;
+
+    my $test_num = 0;
+    my $default = 1;
+    my $repeat = 1;
+    my $num_tests_set = 0;
+    my $skip = 0;
+    my $rest;
+
     while (<IN>) {
 
 	# ignore blank lines and comments
 	next if (/^\s*$/ || /\s*\#/);
 
-	if (/^\s*(\S+)\s*=\s*(.*?)\s*$/) {
+	if (/^\s*TEST_START(.*)/) {
+
+	    $rest = $1;
+
+	    if ($num_tests_set) {
+		die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+	    }
+
+	    my $old_test_num = $test_num;
+
+	    $test_num += $repeat;
+	    $default = 0;
+	    $repeat = 1;
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    } else {
+		$skip = 0;
+	    }
+
+	    if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) {
+		$repeat = $1;
+		$rest = $2;
+		$repeat_tests{"$test_num"} = $repeat;
+	    }
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    }
+
+	    if ($rest !~ /^\s*$/) {
+		die "$name: $.: Garbage found after TEST_START\n$_";
+	    }
+
+	    if ($skip) {
+		$test_num = $old_test_num;
+		$repeat = 1;
+	    }
+
+	} elsif (/^\s*DEFAULTS(.*)$/) {
+	    $default = 1;
+
+	    $rest = $1;
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    } else {
+		$skip = 0;
+	    }
+
+	    if ($rest !~ /^\s*$/) {
+		die "$name: $.: Garbage found after DEFAULTS\n$_";
+	    }
+
+	} elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
+
+	    next if ($skip);
+
 	    my $lvalue = $1;
 	    my $rvalue = $2;
 
-	    if (defined($opt{$lvalue})) {
-		die "Error: Option $lvalue defined more than once!\n";
+	    if (!$default &&
+		($lvalue eq "NUM_TESTS" ||
+		 $lvalue eq "LOG_FILE" ||
+		 $lvalue eq "CLEAR_LOG")) {
+		die "$name: $.: $lvalue must be set in DEFAULTS section\n";
 	    }
-	    $opt{$lvalue} = $rvalue;
+
+	    if ($lvalue eq "NUM_TESTS") {
+		if ($test_num) {
+		    die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+		}
+		if (!$default) {
+		    die "$name: $.: NUM_TESTS must be set in default section\n";
+		}
+		$num_tests_set = 1;
+	    }
+
+	    if ($default || $lvalue =~ /\[\d+\]$/) {
+		set_value($lvalue, $rvalue);
+	    } else {
+		my $val = "$lvalue\[$test_num\]";
+		set_value($val, $rvalue);
+
+		if ($repeat > 1) {
+		    $repeats{$val} = $repeat;
+		}
+	    }
+	} else {
+	    die "$name: $.: Garbage found in config\n$_";
 	}
     }
 
     close(IN);
 
+    if ($test_num) {
+	$test_num += $repeat - 1;
+	$opt{"NUM_TESTS"} = $test_num;
+    }
+
     # set any defaults
 
     foreach my $default (keys %default) {
@@ -398,6 +509,27 @@
     run_command "$reboot_script";
 }
 
+sub get_sha1 {
+    my ($commit) = @_;
+
+    doprint "git rev-list --max-count=1 $commit ... ";
+    my $sha1 = `git rev-list --max-count=1 $commit`;
+    my $ret = $?;
+
+    logit $sha1;
+
+    if ($ret) {
+	doprint "FAILED\n";
+	dodie "Failed to get git $commit";
+    }
+
+    print "SUCCESS\n";
+
+    chomp $sha1;
+
+    return $sha1;
+}
+
 sub monitor {
     my $booted = 0;
     my $bug = 0;
@@ -497,7 +629,7 @@
 	dodie "Failed to install modules";
 
     my $modlib = "/lib/modules/$version";
-    my $modtar = "autotest-mods.tar.bz2";
+    my $modtar = "ktest-mods.tar.bz2";
 
     run_command "ssh $target rm -rf $modlib" or
 	dodie "failed to remove old mods: $modlib";
@@ -840,6 +972,10 @@
     my $start = $opt{"BISECT_START[$i]"};
     my $replay = $opt{"BISECT_REPLAY[$i]"};
 
+    # convert to true sha1's
+    $good = get_sha1($good);
+    $bad = get_sha1($bad);
+
     if (defined($opt{"BISECT_REVERSE[$i]"}) &&
 	$opt{"BISECT_REVERSE[$i]"} == 1) {
 	doprint "Performing a reverse bisect (bad is good, good is bad!)\n";
@@ -859,20 +995,7 @@
     if (defined($check) && $check ne "0") {
 
 	# get current HEAD
-	doprint "git rev-list HEAD --max-count=1 ... ";
-	my $head = `git rev-list HEAD --max-count=1`;
-	my $ret = $?;
-
-	logit $head;
-
-	if ($ret) {
-	    doprint "FAILED\n";
-	    dodie "Failed to get git HEAD";
-	}
-
-	print "SUCCESS\n";
-
-	chomp $head;
+	my $head = get_sha1("HEAD");
 
 	if ($check ne "good") {
 	    doprint "TESTING BISECT BAD [$bad]\n";
@@ -956,6 +1079,10 @@
 	$end = $opt{"PATCHCHECK_END[$i]"};
     }
 
+    # Get the true sha1's since we can use things like HEAD~3
+    $start = get_sha1($start);
+    $end = get_sha1($end);
+
     my $type = $opt{"PATCHCHECK_TYPE[$i]"};
 
     # Can't have a test without having a test to run
@@ -1054,8 +1181,29 @@
 
 doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
 
-foreach my $option (sort keys %opt) {
-    doprint "$option = $opt{$option}\n";
+for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
+
+    if (!$i) {
+	doprint "DEFAULT OPTIONS:\n";
+    } else {
+	doprint "\nTEST $i OPTIONS";
+	if (defined($repeat_tests{$i})) {
+	    $repeat = $repeat_tests{$i};
+	    doprint " ITERATE $repeat";
+	}
+	doprint "\n";
+    }
+
+    foreach my $option (sort keys %opt) {
+
+	if ($option =~ /\[(\d+)\]$/) {
+	    next if ($i != $1);
+	} else {
+	    next if ($i);
+	}
+
+	doprint "$option = $opt{$option}\n";
+    }
 }
 
 sub set_test_option {
@@ -1067,6 +1215,16 @@
 	return $opt{$option};
     }
 
+    foreach my $test (keys %repeat_tests) {
+	if ($i >= $test &&
+	    $i < $test + $repeat_tests{$test}) {
+	    $option = "$name\[$test\]";
+	    if (defined($opt{$option})) {
+		return $opt{$option};
+	    }
+	}
+    }
+
     if (defined($opt{$name})) {
 	return $opt{$name};
     }
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 546014a..9236fe9 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -1,25 +1,83 @@
 #
-# Config file for autotest.pl
+# Config file for ktest.pl
 #
 # Note, all paths must be absolute
 #
 
-# Almost all options may be overwritten per test run, by appending
-# a [x] to the config. For example, to change the test type for
-# the third iteration of tests, you can specify:
-#  (1 is for the first test, 2 for the second, and so on)
+# Options set in the beginning of the file are considered to be
+# default options. These options can be overridden by test-specific
+# options, with the following exceptions:
 #
-#  TEST_TYPE[3] = build
-#
-# The options that can not be changed like this are:
-#  NUM_TESTS
 #  LOG_FILE
 #  CLEAR_LOG
 #  POWEROFF_ON_SUCCESS
 #  REBOOT_ON_SUCCESS
 #
+# Test-specific options are set after the label:
+#
+# TEST_START
+#
+# The options after a TEST_START label are specific to that test.
+# Each TEST_START label will set up a new test. If you want to
+# perform a test more than once, you can add the ITERATE label
+# to it followed by the number of times you want that test
+# to iterate. If the ITERATE is left off, the test will only
+# be performed once.
+#
+# TEST_START ITERATE 10
+#
+# You can skip a test by adding SKIP (before or after the ITERATE
+# and number)
+#
+# TEST_START SKIP
+#
+# TEST_START SKIP ITERATE 10
+#
+# TEST_START ITERATE 10 SKIP
+#
+# The SKIP label causes the options and the test itself to be ignored.
+# This is useful for setting up several different tests in one config file,
+# and only enabling the ones you want to use for the current test run.
+#
+# You can add default options anywhere in the file as well
+# with the DEFAULTS tag. This allows you to have default options
+# after the test options to keep the test options at the top
+# of the file. You can even place the DEFAULTS tag between
+# test cases (but not in the middle of a single test case)
+#
+# TEST_START
+# MIN_CONFIG = /home/test/config-test1
+#
+# DEFAULTS
+# MIN_CONFIG = /home/test/config-default
+#
+# TEST_START ITERATE 10
+#
+# The above will run the first test with MIN_CONFIG set to
+# /home/test/config-test1. Then 10 tests will be executed
+# with MIN_CONFIG set to /home/test/config-default.
+#
+# You can also disable defaults with the SKIP option
+#
+# DEFAULTS SKIP
+# MIN_CONFIG = /home/test/config-use-sometimes
+#
+# DEFAULTS
+# MIN_CONFIG = /home/test/config-most-times
+#
+# The above will ignore the first MIN_CONFIG. If you want to
+# use the first MIN_CONFIG, remove the SKIP from the first
+# DEFAULTS tag and add it to the second. Be careful, options
+# may only be declared once per test or per defaults section. If the
+# same option name appears twice in the same test or defaults section,
+# ktest will fail to execute, and no tests will run.
+#
 
-#### Mandatory Config Options ####
+
+#### Mandatory Default Options ####
+
+# These options must be in the default section, although most
+# may be overridden by test options.
 
 # The machine hostname that you will test
 #MACHINE = target
@@ -43,17 +101,21 @@
 #TARGET_IMAGE = /boot/vmlinuz-test
 
 # A script or command to reboot the box
+#
 # Here is a digital loggers power switch example
 #POWER_CYCLE = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL'
+#
 # Here is an example to reboot a virtual box on the current host
 # with the name "Guest".
-#POWER_CYCLE = virsh list | grep '\<Guest\>' | awk '{printf ("%d", $1)}' | xargs virsh destroy; sleep 5; virsh start Guest
+#POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
 
 # The script or command that reads the console
+#
 #  If you use ttywatch server, something like the following would work.
 #CONSOLE = nc -d localhost 3001
+#
 # For a virtual machine with guest name "Guest".
-#CONSOLE =  virsh console `virsh list | grep '\<Guest\>' | awk '{printf ("%d", $1)}'`
+#CONSOLE =  virsh console Guest
 
 # Required version ending to differentiate the test
 # from other linux builds on the system.
@@ -62,8 +124,14 @@
 # The grub title name for the test kernel to boot
 # (Only mandatory if REBOOT_TYPE = grub)
 #
+# Note, ktest.pl will not update the grub menu.lst; you need to
+# manually add an entry for the test kernel. ktest.pl will search
+# the grub menu.lst for this entry to find what kernel to
+# reboot into.
+#
 # For example, if in the /boot/grub/menu.lst the test kernel title has:
 # title Test Kernel
+# kernel vmlinuz-test
 #GRUB_MENU = Test Kernel
 
 # A script to reboot the target into the test kernel
@@ -72,21 +140,37 @@
 
 #### Optional Config Options (all have defaults) ####
 
-# The number of tests to run (default 5)
-#NUM_TESTS = 5
+# Start a test setup. If you leave this off, all options
+# will be default and the test will run once.
+# This is a label and not really an option (it takes no value).
+# You can append ITERATE and a number after it to iterate the
+# test a number of times, or SKIP to ignore this test.
+#
+#TEST_START
+#TEST_START ITERATE 5
+#TEST_START SKIP
 
 # The default test type (default test)
 # The test types may be:
 #   build - only build the kernel, do nothing else
 #   boot - build and boot the kernel
 #   test - build, boot and if TEST is set, run the test script
+#          (If TEST is not set, it defaults back to boot)
 #   bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
 #   patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
 #TEST_TYPE = test
 
-# The build type is any make config type or a command.
+# Test to run if there is a successful boot and TEST_TYPE is test.
+# Must exit with 0 on success and non-zero on error.
+# (default undefined)
+#TEST = ssh user@machine /root/run_test
+
+# The build type is any make config type or special command
 #  (default randconfig)
 #   nobuild - skip the clean and build step
+#   useconfig:/path/to/config - use the given config and run
+#              oldconfig on it.
+# This option is ignored if TEST_TYPE is patchcheck or bisect
 #BUILD_TYPE = randconfig
 
 # The make command (default make)
@@ -95,8 +179,14 @@
 
 # If you need an initrd, you can add a script or code here to install
 # it. The environment variable KERNEL_VERSION will be set to the
-# kernel version that is used.
+# kernel version that is used. Remember to add the initrd line
+# to your grub menu.lst file.
+#
+# Here are a couple of examples to use:
 #POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION
+#
+# or on some systems:
+#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
 
 # Way to reboot the box to the test kernel.
 # Only valid options so far are "grub" and "script"
@@ -106,12 +196,19 @@
 # and select that target to reboot to the kernel. If this is not
 # your setup, then specify "script" and have a command or script
 # specified in REBOOT_SCRIPT to boot to the target.
+#
+# The entry in /boot/grub/menu.lst must be added manually.
+# The test will not modify that file.
 #REBOOT_TYPE = grub
 
-# Line to define success in output. (default "login:")
+# Line to define a successful boot up in console output.
 # This is what the line contains, not the entire line. If you need
-# the entire line to match, then use regural expression syntax like
-#  ^MyBox Login:$
+# the entire line to match, then use regular expression syntax like:
+#  (do not add any quotes around it)
+#
+#  SUCCESS_LINE = ^MyBox Login:$
+#
+# (default "login:")
 #SUCCESS_LINE = login:
 
 # As the test reads the console, after it hits the SUCCESS_LINE
@@ -121,24 +218,33 @@
 #BOOTED_TIMEOUT = 1
 
 # The timeout in seconds when we consider the box hung after
-# the console stop producing output.
+# the console stops producing output. Be sure to leave enough
+# time here to get past a reboot. Some machines may not produce
+# any console output for a long time during a reboot. You do
+# not want the test to fail just because the system was in
+# the process of rebooting to the test kernel.
 # (default 120)
 #TIMEOUT = 120
 
 # The location on the host where to write temp files
-# (default /tmp/autotest)
-#TMP_DIR = /tmp/autotest
+# (default /tmp/ktest)
+#TMP_DIR = /tmp/ktest
 
 # In between tests, a reboot of the box may occur, and this
 # is the time to wait for the console after it stops producing
 # output. Some machines may not produce a large lag on reboot
 # so this should accommodate it.
+# The difference between this and TIMEOUT is that TIMEOUT happens
+# when rebooting to the test kernel. This sleep time happens
+# after a test has completed and we are about to start running
+# another test. If a reboot to the reliable kernel happens,
+# we wait SLEEP_TIME for the console to stop producing output
+# before starting the next test.
 # (default 60)
 #SLEEP_TIME = 60
 
 # The time in between bisects to sleep (in seconds)
-# Can be less than SLEEP_TIME since bisects do more work
-# in between boots. (default 60)
+# (default 60)
 #BISECT_SLEEP_TIME = 60
 
 # Build without doing a make mrproper, or removing .config
@@ -149,10 +255,12 @@
 #REBOOT_ON_ERROR = 0
 
 # Power off the target on error (ignored if REBOOT_ON_ERROR is set)
+#  Note, this is a DEFAULTS section only option.
 # (default 0)
 #POWEROFF_ON_ERROR = 0
 
 # Power off the target after all tests have completed successfully
+#  Note, this is a DEFAULTS section only option.
 # (default 0)
 #POWEROFF_ON_SUCCESS = 0
 
@@ -160,7 +268,7 @@
 # (ignored if POWEROFF_ON_SUCCESS is set)
 #REBOOT_ON_SUCCESS = 1
 
-# In case there's isses with rebooting, you can specify this
+# In case there are issues with rebooting, you can specify this
 # to always powercycle after this amount of time after calling
 # reboot.
 # Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
@@ -190,43 +298,68 @@
 
 # Directory to store failure directories on failure. If this is not
 # set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and
-# bootlog.
+# bootlog. This option is ignored if DIE_ON_FAILURE is not set.
+# (default undefined)
 #STORE_FAILURES = /home/test/failures
 
-# A script or command to power off the box (default undef)
+# A script or command to power off the box (default undefined)
 # Needed for POWEROFF_ON_ERROR and SUCCESS
+#
 # Example for digital loggers power switch:
 #POWER_OFF = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF'
+#
 # Example for a virtual guest call "Guest".
-#POWER_OFF = virsh list | grep '\<GuestF12\>' | awk '{printf ("%d", $1)}' | xargs virsh destroy
+#POWER_OFF = virsh destroy Guest
 
-# Any build options for the make (default "")
+# Any build options for the make of the kernel (not for other makes, like configs)
+# (default "")
 #BUILD_OPTIONS = -j20
 
 # Optional log file to write the status (recommended)
-# (default undef)
+#  Note, this is a DEFAULTS section only option.
+# (default undefined)
 #LOG_FILE = /home/test/logfiles/target.log
 
 # Remove old logfile if it exists before starting all tests.
+#  Note, this is a DEFAULTS section only option.
 # (default 0)
 #CLEAR_LOG = 0
 
-# Test to run if there is a successful boot and TEST_TYPE is test.
-# Must exit with 0 on success and non zero on error
-# default (undef)
-#TEST = ssh user@machine /root/run_test
-#TEST[1] = ssh root@mxtest /root/run_test
-
 # The min config that is needed to build for the machine
-# A nice way to get this to work, is to do a "lsmod > mymods" on the target
-# copy it to the build server, and then run "make LSMOD=mymods localyesconfig".
-# Then copy all the options that are set: "grep '^CONFIG' > /home/test/config-min"
+# A nice way to create this is with the following:
 #
-#  You might want to set:
+#   $ ssh target
+#   $ lsmod > mymods
+#   $ scp mymods host:/tmp
+#   $ exit
+#   $ cd linux.git
+#   $ rm .config
+#   $ make LSMOD=mymods localyesconfig
+#   $ grep '^CONFIG' .config > /home/test/config-min
+#
+# If you want even fewer configs:
+#
+#   log in directly to target (do not ssh)
+#
+#   $ su
+#   # lsmod | cut -d' ' -f1 | xargs rmmod
+#
+#   repeat the above several times
+#
+#   # lsmod > mymods
+#   # reboot
+#
+# You may need to reboot to get your network back in order to copy
+# the mymods file to the host; then remove the previous .config and
+# run the localyesconfig again. The MIN_CONFIG generated like this
+# will not guarantee network access to the box, so a TEST_TYPE of
+# test may fail.
+#
+# You might also want to set:
 #   CONFIG_CMDLINE="<your options here>"
 #  randconfig may set the above and override your real command
 #  line options.
-# (default undef)
+# (default undefined)
 #MIN_CONFIG = /home/test/config-min
 
 # Sometimes there's options that just break the boot and
@@ -239,34 +372,47 @@
 #  KGDB may cause oops waiting for a connection that's not there.
 # This option points to the file containing config options that will be prepended
 # to the MIN_CONFIG (or be the MIN_CONFIG if it is not set)
-# before running it through randconfig
-# (default undef)
+#
+# Note, config options in MIN_CONFIG will override these options.
+#
+# (default undefined)
 #ADD_CONFIG = /home/test/config-broken
 
 #### Per test run options ####
-# These are options are per build only. The only exist with the [x]
-# syntax, and there is no general option.
+# The following options are only allowed in TEST_START sections.
+# They are ignored in the DEFAULTS sections.
 #
-# All are optional and undef by default
+# All of these are optional and undefined by default, although
+#  some of these options are required for TEST_TYPE of patchcheck
+#  and bisect.
 #
-# CHECKOUT[x] = branch
+#
+# CHECKOUT = branch
 #
 #  If the BUILD_DIR is a git repository, then you can set this option
 #  to checkout the given branch before running the TEST. If you
 #  specify this for the first run, that branch will be used for
-#  all preceding tests until a new CHECKOUT[x] is set.
+#  all subsequent tests until a new CHECKOUT is set.
 #
-# For TEST_TYPE[x] = patchcheck
+#
+#
+# For TEST_TYPE = patchcheck
 #
 #  This expects the BUILD_DIR to be a git repository, and
-#  will checkout the PATCHCHECK_START[x].
+#  will checkout the PATCHCHECK_START commit.
 #
-#  PATCHCHECK_START[x] is required and is the first patch to
-#   test (the SHA1 of the commit).
+#  The option BUILD_TYPE will be ignored.
 #
-#  PATCHCHECK_END[x] is the last patch to check (default HEAD)
+#  The MIN_CONFIG will be used for all builds of the patchcheck. The build type
+#  used for patchcheck is oldconfig.
 #
-#  PATCHCHECK_TYPE[x] is required and is the type of test to run:
+#  PATCHCHECK_START is required and is the first patch to
+#   test (the SHA1 of the commit). You may also specify anything
+#   that git checkout allows (branch name, tag, HEAD~3).
+#
+#  PATCHCHECK_END is the last patch to check (default HEAD)
+#
+#  PATCHCHECK_TYPE is required and is the type of test to run:
 #      build, boot, test.
 #
 #   Note, the build test will look for warnings, if a warning occurred
@@ -279,75 +425,86 @@
 #   make mrproper. This helps speed up the test.
 #
 # Example:
-#   TEST_TYPE[1] = patchcheck
-#   CHECKOUT[1] = mybranch
-#   PATCHCHECK_TYPE[1] = boot
-#   PATCHCHECK_START[1] = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
-#   PATCHCHEKC_END[1] = b8b2663bd7c9da04ac804659b9f617c199d0252c
+#   TEST_START
+#   TEST_TYPE = patchcheck
+#   CHECKOUT = mybranch
+#   PATCHCHECK_TYPE = boot
+#   PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
+#   PATCHCHECK_END = HEAD~2
 #
 #
-# For TEST_TYPE[x] = bisect
 #
-# You can specify a git bisect if the BUILD_DIR is a git repository.
-# The MIN_CONFIG will be used for all builds of the bisect. The build type
-# used for bisecting is oldconfig.
+# For TEST_TYPE = bisect
 #
-# BISECT_TYPE[x] is the type of test to perform:
+#  You can specify a git bisect if the BUILD_DIR is a git repository.
+#  The MIN_CONFIG will be used for all builds of the bisect. The build type
+#  used for bisecting is oldconfig.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  BISECT_TYPE is the type of test to perform:
 #	build	- bad fails to build
 #	boot	- bad builds but fails to boot
 #	test	- bad boots but fails a test
 #
-# BISECT_GOOD[x] is the commit (SHA1) to label as good
-# BISECT_BAD[x] is the commit to label as bad
+# BISECT_GOOD is the commit (SHA1) to label as good (accepts any commit reference git understands)
+# BISECT_BAD is the commit to label as bad (accepts any commit reference git understands)
 #
 # The above three options are required for a bisect operation.
 #
-# BISECT_REPLAY[x] = /path/to/replay/file (optional, default undefined)
+# BISECT_REPLAY = /path/to/replay/file (optional, default undefined)
 #
 #   If an operation failed in the bisect that was not expected to
 #   fail. Then the test ends. The state of the BUILD_DIR will be
-#   left off at where the failur occurred. You can examine the
+#   left off at where the failure occurred. You can examine the
 #   reason for the failure, and perhaps even find a git commit
 #   that would work to continue with. You can run:
 #
 #   git bisect log > /path/to/replay/file
 #
-#   and if BISECT_REPLAY[x] is set, the test will run git bisect replay
-#   before continuing with the bisect.
+#   Then add:
 #
-# BISECT_START[x] = commit (optional, default undefined)
+#    BISECT_REPLAY = /path/to/replay/file
 #
-#   As with BISECT_REPLAY[x], if the test failed on a commit that
+#   and run the test again. The test will perform the initial
+#    git bisect start, git bisect good, and git bisect bad, and
+#    then it will run git bisect replay on this file, before
+#    continuing with the bisect.
+#
+# BISECT_START = commit (optional, default undefined)
+#
+#   As with BISECT_REPLAY, if the test failed on a commit that
 #   just happen to have a bad commit in the middle of the bisect,
-#   and you need to skip it. If BISECT_START[x] is defined, it
-#   will checkout that commit before continuing with the bisect.
+#   and you need to skip it. If BISECT_START is defined, it
+#   will checkout that commit after doing the initial git bisect start,
+#   git bisect good, git bisect bad, and running the git bisect replay
+#   if the BISECT_REPLAY is set.
 #
-#   Note, BISECT_REPLAY[x] is executed before BISECT_START[x].
-#
-# BISECT_REVERSE[x] = 1 (optional, default 0)
+# BISECT_REVERSE = 1 (optional, default 0)
 #
 #   In those strange instances where it was broken forever
 #   and you are trying to find where it started to work!
-#   Set BISECT_GOOD[x] to the commit that was last known to fail
-#   Set BISECT_BAD[x] to the commit that is known where it started
-#   to work. With BISECT_REVERSE[x] = 1, The test will consider
-#   failures as good, and success as bad.
+#   Set BISECT_GOOD to the commit that was last known to fail
+#   Set BISECT_BAD to the commit that is known to start working.
+#   With BISECT_REVERSE = 1, the test will consider failures as
+#   good, and success as bad.
 #
-# BISECT_CHECK[x] = 1 (optional, default 0)
+# BISECT_CHECK = 1 (optional, default 0)
 #
 #   Just to be sure the good is good and bad is bad, setting
-#   BISECT_CHECK[x] to 1 will start the bisect by first checking
-#   out BISECT_BAD[x] and makes sure it fails, then it will check
-#   out BISECT_GOOD[x] and makes sure it succeeds before starting
-#   the bisect (it works for BISECT_REVERSE[x] too).
+#   BISECT_CHECK to 1 will start the bisect by first checking
+#   out BISECT_BAD and making sure it fails, then it will check
+#   out BISECT_GOOD and make sure it succeeds before starting
+#   the bisect (it works for BISECT_REVERSE too).
 #
-#   You can limit the test to just check BISECT_GOOD[x] or
-#   BISECT_BAD[x] with BISECT_CHECK[x] = good or
-#   BISECT_CHECK[x] = bad, respectively.
+#   You can limit the test to just check BISECT_GOOD or
+#   BISECT_BAD with BISECT_CHECK = good or
+#   BISECT_CHECK = bad, respectively.
 #
 # Example:
-#   TEST_TYPE[1] = bisect
-#   BISECT_GOOD[1] = v2.6.36
-#   BISECT_BAD[1] = b5153163ed580e00c67bdfecb02b2e3843817b3e
-#   BISECT_TYPE[1] = build
-#   MIN_CONFIG[1] = /home/test/config-bisect
+#   TEST_START
+#   TEST_TYPE = bisect
+#   BISECT_GOOD = v2.6.36
+#   BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
+#   BISECT_TYPE = build
+#   MIN_CONFIG = /home/test/config-bisect