This is a library of test tools ported from an older project, released under the Apache License, version 2.0.
kona \
documentation huffware hypermedia \
production \
- experiments
+ testkit \
+ experiments
LAST_TARGETS += end_make
include rules.def
##############
# set the SHUNIT_PATH so our shunit tests can find the codebase.
- define_yeti_variable SHUNIT_PATH="$FEISTY_MEOW_SCRIPTS/shunit"
+ define_yeti_variable SHUNIT_PATH="$FEISTY_MEOW_SCRIPTS/testkit/shunit"
##############
+++ /dev/null
-#!/bin/bash
-
-# An example of using shunit2.
-#
-# Author: Chris Koeritz
-# license gnu gpl v3
-
-export THISDIR="$( \cd "$(\dirname "$0")" && /bin/pwd )" # obtain the script's working directory.
-if [[ ! "$0" =~ ^/.* ]]; then
- # re-run the script with an absolute path if it didn't start out that way; otherwise,
- # shunit is not happy with finding the script.
- exec "$THISDIR/$(basename $0)" $*
-fi
-cd $THISDIR
-
-oneTimeSetUp()
-{
- echo "into oneTimeSetUp."
-}
-
-testOneThing()
-{
- echo "got to primary test case."
- zero=0
- assertEquals "zero should be equal to 0" 0 $zero
- echo "passed tautological test."
- sleep_time=83
- echo "$(date): now sleeping for $sleep_time seconds."
- sleep $sleep_time
- echo "$(date): woke up."
-}
-
-oneTimeTearDown()
-{
- echo "into oneTimeTearDown."
-}
-
-# load and run shUnit2
-source $SHUNIT_PATH/shunit2
-
+++ /dev/null
-#! /bin/sh
-# $Id: shunit2 335 2011-05-01 20:10:33Z kate.ward@forestent.com $
-# vim:et:ft=sh:sts=2:sw=2
-#
-# Copyright 2008 Kate Ward. All Rights Reserved.
-# Released under the LGPL (GNU Lesser General Public License)
-#
-# shUnit2 -- Unit testing framework for Unix shell scripts.
-# http://code.google.com/p/shunit2/
-#
-# Author: kate.ward@forestent.com (Kate Ward)
-#
-# shUnit2 is a xUnit based unit test framework for Bourne shell scripts. It is
-# based on the popular JUnit unit testing framework for Java.
-
-# return if shunit already loaded
-[ -n "${SHUNIT_VERSION:-}" ] && exit 0
-
-SHUNIT_VERSION='2.1.6'
-
-SHUNIT_TRUE=0
-SHUNIT_FALSE=1
-SHUNIT_ERROR=2
-
-# enable strict mode by default
-SHUNIT_STRICT=${SHUNIT_STRICT:-${SHUNIT_TRUE}}
-
-_shunit_warn() { echo "shunit2:WARN $@" >&2; }
-_shunit_error() { echo "shunit2:ERROR $@" >&2; }
-_shunit_fatal() { echo "shunit2:FATAL $@" >&2; exit ${SHUNIT_ERROR}; }
-
-# specific shell checks
-if [ -n "${ZSH_VERSION:-}" ]; then
- setopt |grep "^shwordsplit$" >/dev/null
- if [ $? -ne ${SHUNIT_TRUE} ]; then
- _shunit_fatal 'zsh shwordsplit option is required for proper operation'
- fi
- if [ -z "${SHUNIT_PARENT:-}" ]; then
- _shunit_fatal "zsh does not pass \$0 through properly. please declare \
-\"SHUNIT_PARENT=\$0\" before calling shUnit2"
- fi
-fi
-
-#
-# constants
-#
-
-__SHUNIT_ASSERT_MSG_PREFIX='ASSERT:'
-__SHUNIT_MODE_SOURCED='sourced'
-__SHUNIT_MODE_STANDALONE='standalone'
-__SHUNIT_PARENT=${SHUNIT_PARENT:-$0}
-
-# set the constants readonly
-shunit_constants_=`set |grep '^__SHUNIT_' |cut -d= -f1`
-echo "${shunit_constants_}" |grep '^Binary file' >/dev/null && \
- shunit_constants_=`set |grep -a '^__SHUNIT_' |cut -d= -f1`
-for shunit_constant_ in ${shunit_constants_}; do
- shunit_ro_opts_=''
- case ${ZSH_VERSION:-} in
- '') ;; # this isn't zsh
- [123].*) ;; # early versions (1.x, 2.x, 3.x)
- *) shunit_ro_opts_='-g' ;; # all later versions. declare readonly globally
- esac
- readonly ${shunit_ro_opts_} ${shunit_constant_}
-done
-unset shunit_constant_ shunit_constants_ shunit_ro_opts_
-
-# variables
-__shunit_lineno='' # line number of executed test
-__shunit_mode=${__SHUNIT_MODE_SOURCED} # operating mode
-__shunit_reportGenerated=${SHUNIT_FALSE} # is report generated
-__shunit_script='' # filename of unittest script (standalone mode)
-__shunit_skip=${SHUNIT_FALSE} # is skipping enabled
-__shunit_suite='' # suite of tests to execute
-
-# counts of tests
-__shunit_testSuccess=${SHUNIT_TRUE}
-__shunit_testsTotal=0
-__shunit_testsPassed=0
-__shunit_testsFailed=0
-
-# counts of asserts
-__shunit_assertsTotal=0
-__shunit_assertsPassed=0
-__shunit_assertsFailed=0
-__shunit_assertsSkipped=0
-
-# macros
-_SHUNIT_LINENO_='eval __shunit_lineno=""; if [ "${1:-}" = "--lineno" ]; then [ -n "$2" ] && __shunit_lineno="[$2] "; shift 2; fi'
-
-#-----------------------------------------------------------------------------
-# assert functions
-#
-
-# Assert that two values are equal to one another.
-#
-# Args:
-# message: string: failure message [optional]
-# expected: string: expected value
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-assertEquals()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 2 -o $# -gt 3 ]; then
- _shunit_error "assertEquals() requires two or three arguments; $# given"
- _shunit_error "1: ${1:+$1} 2: ${2:+$2} 3: ${3:+$3}${4:+ 4: $4}"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- local assertion_name=""
- if [ $# -eq 3 ]; then
- assertion_name="$1"
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- shunit_expected_=$1
- shunit_actual_=$2
-
- shunit_return=${SHUNIT_TRUE}
- if [ "${shunit_expected_}" = "${shunit_actual_}" ]; then
- if [ ! -z "$DEBUGGING" -a ! -z "$assertion_name" ]; then
- echo " OKAY: $assertion_name"
- fi
- _shunit_assertPass
- else
- failNotEquals "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}"
- shunit_return=${SHUNIT_FALSE}
- fi
-
- unset shunit_message_ shunit_expected_ shunit_actual_
- return ${shunit_return}
-}
-_ASSERT_EQUALS_='eval assertEquals --lineno "${LINENO:-}"'
-
-# Assert that two values are not equal to one another.
-#
-# Args:
-# message: string: failure message [optional]
-# expected: string: expected value
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-assertNotEquals()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 2 -o $# -gt 3 ]; then
- _shunit_error "assertNotEquals() requires two or three arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- local assertion_name=""
- if [ $# -eq 3 ]; then
- assertion_name="$1"
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- shunit_expected_=$1
- shunit_actual_=$2
-
- shunit_return=${SHUNIT_TRUE}
- if [ "${shunit_expected_}" != "${shunit_actual_}" ]; then
- if [ ! -z "$DEBUGGING" -a ! -z "$assertion_name" ]; then
- echo " OKAY: $assertion_name"
- fi
- _shunit_assertPass
- else
- failSame "${shunit_message_}" "$@"
- shunit_return=${SHUNIT_FALSE}
- fi
-
- unset shunit_message_ shunit_expected_ shunit_actual_
- return ${shunit_return}
-}
-_ASSERT_NOT_EQUALS_='eval assertNotEquals --lineno "${LINENO:-}"'
-
-# Assert that a value is null (i.e. an empty string)
-#
-# Args:
-# message: string: failure message [optional]
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-assertNull()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 1 -o $# -gt 2 ]; then
- _shunit_error "assertNull() requires one or two arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- if [ $# -eq 2 ]; then
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- assertTrue "${shunit_message_}" "[ -z '$1' ]"
- shunit_return=$?
-
- unset shunit_message_
- return ${shunit_return}
-}
-_ASSERT_NULL_='eval assertNull --lineno "${LINENO:-}"'
-
-# Assert that a value is not null (i.e. a non-empty string)
-#
-# Args:
-# message: string: failure message [optional]
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-assertNotNull()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -gt 2 ]; then # allowing 0 arguments as $1 might actually be null
- _shunit_error "assertNotNull() requires one or two arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- if [ $# -eq 2 ]; then
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- shunit_actual_=`_shunit_escapeCharactersInString "${1:-}"`
- test -n "${shunit_actual_}"
- assertTrue "${shunit_message_}" $?
- shunit_return=$?
-
- unset shunit_actual_ shunit_message_
- return ${shunit_return}
-}
-_ASSERT_NOT_NULL_='eval assertNotNull --lineno "${LINENO:-}"'
-
-# Assert that two values are the same (i.e. equal to one another).
-#
-# Args:
-# message: string: failure message [optional]
-# expected: string: expected value
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-assertSame()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 2 -o $# -gt 3 ]; then
- _shunit_error "assertSame() requires two or three arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- if [ $# -eq 3 ]; then
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- assertEquals "${shunit_message_}" "$1" "$2"
- shunit_return=$?
-
- unset shunit_message_
- return ${shunit_return}
-}
-_ASSERT_SAME_='eval assertSame --lineno "${LINENO:-}"'
-
-# Assert that two values are not the same (i.e. not equal to one another).
-#
-# Args:
-# message: string: failure message [optional]
-# expected: string: expected value
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-assertNotSame()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 2 -o $# -gt 3 ]; then
- _shunit_error "assertNotSame() requires two or three arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- if [ $# -eq 3 ]; then
- shunit_message_="${shunit_message_:-}$1"
- shift
- fi
- assertNotEquals "${shunit_message_}" "$1" "$2"
- shunit_return=$?
-
- unset shunit_message_
- return ${shunit_return}
-}
-_ASSERT_NOT_SAME_='eval assertNotSame --lineno "${LINENO:-}"'
-
-# Assert that a value or shell test condition is true.
-#
-# In shell, a value of 0 is true and a non-zero value is false. Any integer
-# value passed can thereby be tested.
-#
-# Shell supports much more complicated tests though, and a means to support
-# them was needed. As such, this function tests that conditions are true or
-# false through evaluation rather than just looking for a true or false.
-#
-# The following test will succeed:
-# assertTrue 0
-# assertTrue "[ 34 -gt 23 ]"
-# The following tests will fail with a message:
-# assertTrue 123
-# assertTrue "test failed" "[ -r '/non/existant/file' ]"
-#
-# Args:
-# message: string: failure message [optional]
-# condition: string: integer value or shell conditional statement
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-assertTrue()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -gt 2 ]; then
- _shunit_error "assertTrue() takes one two arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- local assertion_name=""
- if [ $# -eq 2 ]; then
- assertion_name="$1"
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- shunit_condition_=$1
-
- # see if condition is an integer, i.e. a return value
- shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'`
- shunit_return=${SHUNIT_TRUE}
- if [ -z "${shunit_condition_}" ]; then
- # null condition
- shunit_return=${SHUNIT_FALSE}
- elif [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ]
- then
- # possible return value. treating 0 as true, and non-zero as false.
- [ ${shunit_condition_} -ne 0 ] && shunit_return=${SHUNIT_FALSE}
- else
- # (hopefully) a condition
- ( eval ${shunit_condition_} ) >/dev/null 2>&1
- [ $? -ne 0 ] && shunit_return=${SHUNIT_FALSE}
- fi
-
- # record the test
- if [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then
- if [ ! -z "$DEBUGGING" -a ! -z "$assertion_name" ]; then
- echo " OKAY: $assertion_name"
- fi
- _shunit_assertPass
- else
- _shunit_assertFail "${shunit_message_}"
- fi
-
- unset shunit_message_ shunit_condition_ shunit_match_
- return ${shunit_return}
-}
-_ASSERT_TRUE_='eval assertTrue --lineno "${LINENO:-}"'
-
-# Assert that a value or shell test condition is false.
-#
-# In shell, a value of 0 is true and a non-zero value is false. Any integer
-# value passed can thereby be tested.
-#
-# Shell supports much more complicated tests though, and a means to support
-# them was needed. As such, this function tests that conditions are true or
-# false through evaluation rather than just looking for a true or false.
-#
-# The following test will succeed:
-# assertFalse 1
-# assertFalse "[ 'apples' = 'oranges' ]"
-# The following tests will fail with a message:
-# assertFalse 0
-# assertFalse "test failed" "[ 1 -eq 1 -a 2 -eq 2 ]"
-#
-# Args:
-# message: string: failure message [optional]
-# condition: string: integer value or shell conditional statement
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-assertFalse()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 1 -o $# -gt 2 ]; then
- _shunit_error "assertFalse() quires one or two arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- local assertion_name=""
- if [ $# -eq 2 ]; then
- assertion_name="$1"
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- shunit_condition_=$1
-
- # see if condition is an integer, i.e. a return value
- shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'`
- shunit_return=${SHUNIT_TRUE}
- if [ -z "${shunit_condition_}" ]; then
- # null condition
- shunit_return=${SHUNIT_FALSE}
- elif [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ]
- then
- # possible return value. treating 0 as true, and non-zero as false.
- [ ${shunit_condition_} -eq 0 ] && shunit_return=${SHUNIT_FALSE}
- else
- # (hopefully) a condition
- ( eval ${shunit_condition_} ) >/dev/null 2>&1
- [ $? -eq 0 ] && shunit_return=${SHUNIT_FALSE}
- fi
-
- # record the test
- if [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then
- if [ ! -z "$DEBUGGING" -a ! -z "$assertion_name" ]; then
- echo " OKAY: $assertion_name"
- fi
- _shunit_assertPass
- else
- _shunit_assertFail "${shunit_message_}"
- fi
-
- unset shunit_message_ shunit_condition_ shunit_match_
- return ${shunit_return}
-}
-_ASSERT_FALSE_='eval assertFalse --lineno "${LINENO:-}"'
-
-#-----------------------------------------------------------------------------
-# failure functions
-#
-
-# Records a test failure.
-#
-# Args:
-# message: string: failure message [optional]
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-fail()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -gt 1 ]; then
- _shunit_error "fail() requires zero or one arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- if [ $# -eq 1 ]; then
- shunit_message_="${shunit_message_}$1"
- shift
- fi
-
- _shunit_assertFail "${shunit_message_}"
-
- unset shunit_message_
- return ${SHUNIT_FALSE}
-}
-_FAIL_='eval fail --lineno "${LINENO:-}"'
-
-# Records a test failure, stating two values were not equal.
-#
-# Args:
-# message: string: failure message [optional]
-# expected: string: expected value
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-failNotEquals()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 2 -o $# -gt 3 ]; then
- _shunit_error "failNotEquals() requires one or two arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- if [ $# -eq 3 ]; then
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- shunit_expected_=$1
- shunit_actual_=$2
-
- _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected:<${shunit_expected_}> but was:<${shunit_actual_}>"
-
- unset shunit_message_ shunit_expected_ shunit_actual_
- return ${SHUNIT_FALSE}
-}
-_FAIL_NOT_EQUALS_='eval failNotEquals --lineno "${LINENO:-}"'
-
-# Records a test failure, stating two values should have been the same.
-#
-# Args:
-# message: string: failure message [optional]
-# expected: string: expected value
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-failSame()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 2 -o $# -gt 3 ]; then
- _shunit_error "failSame() requires two or three arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- if [ $# -eq 3 ]; then
- shunit_message_="${shunit_message_}$1"
- shift
- fi
-
- _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected not same"
-
- unset shunit_message_
- return ${SHUNIT_FALSE}
-}
-_FAIL_SAME_='eval failSame --lineno "${LINENO:-}"'
-
-# Records a test failure, stating two values were not equal.
-#
-# This is functionally equivalent to calling failNotEquals().
-#
-# Args:
-# message: string: failure message [optional]
-# expected: string: expected value
-# actual: string: actual value
-# Returns:
-# integer: success (TRUE/FALSE/ERROR constant)
-failNotSame()
-{
- ${_SHUNIT_LINENO_}
- if [ $# -lt 2 -o $# -gt 3 ]; then
- _shunit_error "failNotEquals() requires one or two arguments; $# given"
- return ${SHUNIT_ERROR}
- fi
- _shunit_shouldSkip && return ${SHUNIT_TRUE}
-
- shunit_message_=${__shunit_lineno}
- if [ $# -eq 3 ]; then
- shunit_message_="${shunit_message_}$1"
- shift
- fi
- failNotEquals "${shunit_message_}" "$1" "$2"
- shunit_return=$?
-
- unset shunit_message_
- return ${shunit_return}
-}
-_FAIL_NOT_SAME_='eval failNotSame --lineno "${LINENO:-}"'
-
-#-----------------------------------------------------------------------------
-# skipping functions
-#
-
-# Force remaining assert and fail functions to be "skipped".
-#
-# This function forces the remaining assert and fail functions to be "skipped",
-# i.e. they will have no effect. Each function skipped will be recorded so that
-# the total of asserts and fails will not be altered.
-#
-# Args:
-# None
-startSkipping()
-{
- __shunit_skip=${SHUNIT_TRUE}
-}
-
-# Resume the normal recording behavior of assert and fail calls.
-#
-# Args:
-# None
-endSkipping()
-{
- __shunit_skip=${SHUNIT_FALSE}
-}
-
-# Returns the state of assert and fail call skipping.
-#
-# Args:
-# None
-# Returns:
-# boolean: (TRUE/FALSE constant)
-isSkipping()
-{
- return ${__shunit_skip}
-}
-
-#-----------------------------------------------------------------------------
-# suite functions
-#
-
-# Stub. This function should contain all unit test calls to be made.
-#
-# DEPRECATED (as of 2.1.0)
-#
-# This function can be optionally overridden by the user in their test suite.
-#
-# If this function exists, it will be called when shunit2 is sourced. If it
-# does not exist, shunit2 will search the parent script for all functions
-# beginning with the word 'test', and they will be added dynamically to the
-# test suite.
-#
-# This function should be overridden by the user in their unit test suite.
-# Note: see _shunit_mktempFunc() for actual implementation
-#
-# Args:
-# None
-#suite() { :; } # DO NOT UNCOMMENT THIS FUNCTION
-
-# Adds a function name to the list of tests schedule for execution.
-#
-# This function should only be called from within the suite() function.
-#
-# Args:
-# function: string: name of a function to add to current unit test suite
-suite_addTest()
-{
- shunit_func_=${1:-}
-
- __shunit_suite="${__shunit_suite:+${__shunit_suite} }${shunit_func_}"
- __shunit_testsTotal=`expr ${__shunit_testsTotal} + 1`
-
- unset shunit_func_
-}
-
-# Stub. This function will be called once before any tests are run.
-#
-# Common one-time environment preparation tasks shared by all tests can be
-# defined here.
-#
-# This function should be overridden by the user in their unit test suite.
-# Note: see _shunit_mktempFunc() for actual implementation
-#
-# Args:
-# None
-#oneTimeSetUp() { :; } # DO NOT UNCOMMENT THIS FUNCTION
-
-# Stub. This function will be called once after all tests are finished.
-#
-# Common one-time environment cleanup tasks shared by all tests can be defined
-# here.
-#
-# This function should be overridden by the user in their unit test suite.
-# Note: see _shunit_mktempFunc() for actual implementation
-#
-# Args:
-# None
-#oneTimeTearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION
-
-# Stub. This function will be called before each test is run.
-#
-# Common environment preparation tasks shared by all tests can be defined here.
-#
-# This function should be overridden by the user in their unit test suite.
-# Note: see _shunit_mktempFunc() for actual implementation
-#
-# Args:
-# None
-#setUp() { :; }
-
-# Note: see _shunit_mktempFunc() for actual implementation
-# Stub. This function will be called after each test is run.
-#
-# Common environment cleanup tasks shared by all tests can be defined here.
-#
-# This function should be overridden by the user in their unit test suite.
-# Note: see _shunit_mktempFunc() for actual implementation
-#
-# Args:
-# None
-#tearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION
-
-#------------------------------------------------------------------------------
-# internal shUnit2 functions
-#
-
-# Create a temporary directory to store various run-time files in.
-#
-# This function is a cross-platform temporary directory creation tool. Not all
-# OSes have the mktemp function, so one is included here.
-#
-# Args:
-# None
-# Outputs:
-# string: the temporary directory that was created
-_shunit_mktempDir()
-{
- # try the standard mktemp function
- ( exec mktemp -dqt shunit.XXXXXX 2>/dev/null ) && return
-
- # the standard mktemp didn't work. doing our own.
- if [ -r '/dev/urandom' -a -x '/usr/bin/od' ]; then
- _shunit_random_=`/usr/bin/od -vAn -N4 -tx4 </dev/urandom \
- |sed 's/^[^0-9a-f]*//'`
- elif [ -n "${RANDOM:-}" ]; then
- # $RANDOM works
- _shunit_random_=${RANDOM}${RANDOM}${RANDOM}$$
- else
- # $RANDOM doesn't work
- _shunit_date_=`date '+%Y%m%d%H%M%S'`
- _shunit_random_=`expr ${_shunit_date_} / $$`
- fi
-
- _shunit_tmpDir_="${TMPDIR:-/tmp}/shunit.${_shunit_random_}"
- ( umask 077 && mkdir "${_shunit_tmpDir_}" ) || \
- _shunit_fatal 'could not create temporary directory! exiting'
-
- echo ${_shunit_tmpDir_}
- unset _shunit_date_ _shunit_random_ _shunit_tmpDir_
-}
-
-# This function is here to work around issues in Cygwin.
-#
-# Args:
-# None
-_shunit_mktempFunc()
-{
- for _shunit_func_ in oneTimeSetUp oneTimeTearDown setUp tearDown suite noexec
- do
- _shunit_file_="${__shunit_tmpDir}/${_shunit_func_}"
- cat <<EOF >"${_shunit_file_}"
-#! /bin/sh
-exit ${SHUNIT_TRUE}
-EOF
- chmod +x "${_shunit_file_}"
- done
-
- unset _shunit_file_
-}
-
-# Final cleanup function to leave things as we found them.
-#
-# Besides removing the temporary directory, this function is in charge of the
-# final exit code of the unit test. The exit code is based on how the script
-# was ended (e.g. normal exit, or via Ctrl-C).
-#
-# Args:
-# name: string: name of the trap called (specified when trap defined)
-_shunit_cleanup()
-{
- _shunit_name_=$1
-
- case ${_shunit_name_} in
- EXIT) _shunit_signal_=0 ;;
- INT) _shunit_signal_=2 ;;
- TERM) _shunit_signal_=15 ;;
- *)
- _shunit_warn "unrecognized trap value (${_shunit_name_})"
- _shunit_signal_=0
- ;;
- esac
-
- # do our work
- rm -fr "${__shunit_tmpDir}"
-
- # exit for all non-EXIT signals
- if [ ${_shunit_name_} != 'EXIT' ]; then
- _shunit_warn "trapped and now handling the (${_shunit_name_}) signal"
- # disable EXIT trap
- trap 0
- # add 128 to signal and exit
- exit `expr ${_shunit_signal_} + 128`
- elif [ ${__shunit_reportGenerated} -eq ${SHUNIT_FALSE} ] ; then
- _shunit_assertFail 'Unknown failure encountered running a test'
- _shunit_generateReport
- exit ${SHUNIT_ERROR}
- fi
-
- unset _shunit_name_ _shunit_signal_
-}
-
-# The actual running of the tests happens here.
-#
-# Args:
-# None
-_shunit_execSuite()
-{
- for _shunit_test_ in ${__shunit_suite}; do
- __shunit_testSuccess=${SHUNIT_TRUE}
-
- # disable skipping
- endSkipping
-
- # execute the per-test setup function
- setUp
-
- # execute the test
- echo
- echo
- echo "-------------------------------------------------------"
- echo "$(date): [${_shunit_test_}]"
- eval ${_shunit_test_}
-
- # execute the per-test tear-down function
- tearDown
-
- # update stats
- if [ ${__shunit_testSuccess} -eq ${SHUNIT_TRUE} ]; then
- __shunit_testsPassed=`expr ${__shunit_testsPassed} + 1`
- else
- __shunit_testsFailed=`expr ${__shunit_testsFailed} + 1`
- fi
- done
-
- unset _shunit_test_
-}
-
-# Generates the user friendly report with appropriate OKAY/FAILED message.
-#
-# Args:
-# None
-# Output:
-# string: the report of successful and failed tests, as well as totals.
-_shunit_generateReport()
-{
- _shunit_ok_=${SHUNIT_TRUE}
-
- # if no exit code was provided one, determine an appropriate one
- [ ${__shunit_testsFailed} -gt 0 \
- -o ${__shunit_testSuccess} -eq ${SHUNIT_FALSE} ] \
- && _shunit_ok_=${SHUNIT_FALSE}
-
- echo
- if [ ${__shunit_testsTotal} -eq 1 ]; then
- echo "$(date): Ran ${__shunit_testsTotal} test."
- else
- echo "$(date): Ran ${__shunit_testsTotal} tests."
- fi
-
- _shunit_failures_=''
- _shunit_skipped_=''
- [ ${__shunit_assertsFailed} -gt 0 ] \
- && _shunit_failures_="failures=${__shunit_assertsFailed}"
- [ ${__shunit_assertsSkipped} -gt 0 ] \
- && _shunit_skipped_="skipped=${__shunit_assertsSkipped}"
-
- if [ ${_shunit_ok_} -eq ${SHUNIT_TRUE} ]; then
- _shunit_msg_="$(basename $0) PASSED 100% OKAY"
- [ -n "${_shunit_skipped_}" ] \
- && _shunit_msg_="${_shunit_msg_} (${_shunit_skipped_})"
- else
- _shunit_msg_="$(basename $0) FAILED (${_shunit_failures_}"
- [ -n "${_shunit_skipped_}" ] \
- && _shunit_msg_="${_shunit_msg_},${_shunit_skipped_}"
- _shunit_msg_="${_shunit_msg_})"
- fi
-
- if [ -z "$suite_end" ]; then
- # make sure we don't get confused, since suite aborted early.
- suite_end=$(date +%s)
- fi
- # we keep duration_s for later printing.
- duration_s=$(($suite_end - $suite_start))
- # calculate full minutes count based on seconds.
- duration_m=$(($duration_s / 60))
- # calculate how many hours that is.
- duration_h=$(($duration_m / 60))
- # fix the minutes since we chopped those hours out.
- duration_m=$(($duration_m - $duration_h * 60))
- if [ $duration_m -lt 10 ]; then duration_m="0$duration_m"; fi
- if [ $duration_h -lt 10 ]; then duration_h="0$duration_h"; fi
- echo "Test suite ran for $duration_s total seconds [$duration_h:$duration_m hh:mm]"
- echo
- echo "$(date): ${_shunit_msg_}"
- __shunit_reportGenerated=${SHUNIT_TRUE}
-
- unset _shunit_failures_ _shunit_msg_ _shunit_ok_ _shunit_skipped_
-}
-
-# Test for whether a function should be skipped.
-#
-# Args:
-# None
-# Returns:
-# boolean: whether the test should be skipped (TRUE/FALSE constant)
-_shunit_shouldSkip()
-{
- [ ${__shunit_skip} -eq ${SHUNIT_FALSE} ] && return ${SHUNIT_FALSE}
- _shunit_assertSkip
-}
-
-# Records a successful test.
-#
-# Args:
-# None
-_shunit_assertPass()
-{
- __shunit_assertsPassed=`expr ${__shunit_assertsPassed} + 1`
- __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1`
-}
-
-# Records a test failure.
-#
-# Args:
-# message: string: failure message to provide user
-_shunit_assertFail()
-{
- _shunit_msg_=$1
-
- __shunit_testSuccess=${SHUNIT_FALSE}
- __shunit_assertsFailed=`expr ${__shunit_assertsFailed} + 1`
- __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1`
- echo "${__SHUNIT_ASSERT_MSG_PREFIX}${_shunit_msg_}"
-
- unset _shunit_msg_
-}
-
-# Records a skipped test.
-#
-# Args:
-# None
-_shunit_assertSkip()
-{
- __shunit_assertsSkipped=`expr ${__shunit_assertsSkipped} + 1`
- __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1`
-}
-
-# Prepare a script filename for sourcing.
-#
-# Args:
-# script: string: path to a script to source
-# Returns:
-# string: filename prefixed with ./ (if necessary)
-_shunit_prepForSourcing()
-{
- _shunit_script_=$1
- case "${_shunit_script_}" in
- /*|./*) echo "${_shunit_script_}" ;;
- *) echo "./${_shunit_script_}" ;;
- esac
- unset _shunit_script_
-}
-
-# Escape a character in a string.
-#
-# Args:
-# c: string: unescaped character
-# s: string: to escape character in
-# Returns:
-# string: with escaped character(s)
-_shunit_escapeCharInStr()
-{
- [ -n "$2" ] || return # no point in doing work on an empty string
-
- # Note: using shorter variable names to prevent conflicts with
- # _shunit_escapeCharactersInString().
- _shunit_c_=$1
- _shunit_s_=$2
-
-
- # escape the character
- echo ''${_shunit_s_}'' |sed 's/\'${_shunit_c_}'/\\\'${_shunit_c_}'/g'
-
- unset _shunit_c_ _shunit_s_
-}
-
-# Escape a character in a string.
-#
-# Args:
-# str: string: to escape characters in
-# Returns:
-# string: with escaped character(s)
-_shunit_escapeCharactersInString()
-{
- [ -n "$1" ] || return # no point in doing work on an empty string
-
- _shunit_str_=$1
-
- # Note: using longer variable names to prevent conflicts with
- # _shunit_escapeCharInStr().
- for _shunit_char_ in '"' '$' "'" '`'; do
- _shunit_str_=`_shunit_escapeCharInStr "${_shunit_char_}" "${_shunit_str_}"`
- done
-
- echo "${_shunit_str_}"
- unset _shunit_char_ _shunit_str_
-}
-
-# Extract list of functions to run tests against.
-#
-# Args:
-# script: string: name of script to extract functions from
-# Returns:
-# string: of function names
-_shunit_extractTestFunctions()
-{
- _shunit_script_=$1
-
- # extract the lines with test function names, strip of anything besides the
- # function name, and output everything on a single line.
- _shunit_regex_='^[ ]*(function )*test[A-Za-z0-9_]* *\(\)'
- egrep "${_shunit_regex_}" "${_shunit_script_}" \
- |sed 's/^[^A-Za-z0-9_]*//;s/^function //;s/\([A-Za-z0-9_]*\).*/\1/g' \
- |xargs
-
- unset _shunit_regex_ _shunit_script_
-}
-
-#------------------------------------------------------------------------------
-# main
-#
-
-# determine the operating mode
-if [ $# -eq 0 ]; then
- __shunit_script=${__SHUNIT_PARENT}
- __shunit_mode=${__SHUNIT_MODE_SOURCED}
-else
- __shunit_script=$1
- [ -r "${__shunit_script}" ] || \
- _shunit_fatal "unable to read from ${__shunit_script}"
- __shunit_mode=${__SHUNIT_MODE_STANDALONE}
-fi
-
-# create a temporary storage location
-__shunit_tmpDir=`_shunit_mktempDir`
-
-# provide a public temporary directory for unit test scripts
-# TODO(kward): document this
-SHUNIT_TMPDIR="${__shunit_tmpDir}/tmp"
-mkdir "${SHUNIT_TMPDIR}"
-
-# setup traps to clean up after ourselves
-trap '_shunit_cleanup EXIT' 0
-trap '_shunit_cleanup INT' 2
-trap '_shunit_cleanup TERM' 15
-
-# create phantom functions to work around issues with Cygwin
-_shunit_mktempFunc
-PATH="${__shunit_tmpDir}:${PATH}"
-
-# make sure phantom functions are executable. this will bite if /tmp (or the
-# current $TMPDIR) points to a path on a partition that was mounted with the
-# 'noexec' option. the noexec command was created with _shunit_mktempFunc().
-noexec 2>/dev/null || _shunit_fatal \
- 'please declare TMPDIR with path on partition with exec permission'
-
-# we must manually source the tests in standalone mode
-if [ "${__shunit_mode}" = "${__SHUNIT_MODE_STANDALONE}" ]; then
- . "`_shunit_prepForSourcing \"${__shunit_script}\"`"
-fi
-
-# record when the tests started running.
-suite_start=$(date +%s)
-
-# execute the oneTimeSetUp function (if it exists)
-oneTimeSetUp
-
-# execute the suite function defined in the parent test script
-# deprecated as of 2.1.0
-suite
-
-# if no suite function was defined, dynamically build a list of functions
-if [ -z "${__shunit_suite}" ]; then
- shunit_funcs_=`_shunit_extractTestFunctions "${__shunit_script}"`
- for shunit_func_ in ${shunit_funcs_}; do
- suite_addTest ${shunit_func_}
- done
-fi
-unset shunit_func_ shunit_funcs_
-
-# execute the tests
-_shunit_execSuite
-
-# execute the oneTimeTearDown function (if it exists)
-oneTimeTearDown
-
-suite_end=$(date +%s)
-
-# generate the report
-_shunit_generateReport
-
-# that's it folks
-[ ${__shunit_testsFailed} -eq 0 ]
-exit $?
+++ /dev/null
-#! /bin/sh
-# $Id: shunit2_test.sh 322 2011-04-24 00:09:45Z kate.ward@forestent.com $
-# vim:et:ft=sh:sts=2:sw=2
-#
-# Copyright 2008 Kate Ward. All Rights Reserved.
-# Released under the LGPL (GNU Lesser General Public License)
-# Author: kate.ward@forestent.com (Kate Ward)
-#
-# shUnit2 unit test suite runner.
-#
-# This script runs all the unit tests that can be found, and generates a nice
-# report of the tests.
-
-MY_NAME=`basename $0`
-MY_PATH=`dirname $0`
-
-PREFIX='shunit2_test_'
-SHELLS='/bin/sh /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh'
-TESTS=''
-for test in ${PREFIX}[a-z]*.sh; do
- TESTS="${TESTS} ${test}"
-done
-
-# load common unit test functions
-. ../lib/versions
-. ./shunit2_test_helpers
-
-usage()
-{
- echo "usage: ${MY_NAME} [-e key=val ...] [-s shell(s)] [-t test(s)]"
-}
-
-env=''
-
-# process command line flags
-while getopts 'e:hs:t:' opt; do
- case ${opt} in
- e) # set an environment variable
- key=`expr "${OPTARG}" : '\([^=]*\)='`
- val=`expr "${OPTARG}" : '[^=]*=\(.*\)'`
- if [ -z "${key}" -o -z "${val}" ]; then
- usage
- exit 1
- fi
- eval "${key}='${val}'"
- export ${key}
- env="${env:+${env} }${key}"
- ;;
- h) usage; exit 0 ;; # output help
- s) shells=${OPTARG} ;; # list of shells to run
- t) tests=${OPTARG} ;; # list of tests to run
- *) usage; exit 1 ;;
- esac
-done
-shift `expr ${OPTIND} - 1`
-
-# fill shells and/or tests
-shells=${shells:-${SHELLS}}
-tests=${tests:-${TESTS}}
-
-# error checking
-if [ -z "${tests}" ]; then
- th_error 'no tests found to run; exiting'
- exit 1
-fi
-
-cat <<EOF
-#------------------------------------------------------------------------------
-# System data
-#
-
-# test run info
-shells: ${shells}
-tests: ${tests}
-EOF
-for key in ${env}; do
- eval "echo \"${key}=\$${key}\""
-done
-echo
-
-# output system data
-echo "# system info"
-echo "$ date"
-date
-echo
-
-echo "$ uname -mprsv"
-uname -mprsv
-
-#
-# run tests
-#
-
-for shell in ${shells}; do
- echo
-
- # check for existance of shell
- if [ ! -x ${shell} ]; then
- th_warn "unable to run tests with the ${shell} shell"
- continue
- fi
-
- cat <<EOF
-
-#------------------------------------------------------------------------------
-# Running the test suite with ${shell}
-#
-EOF
-
- SHUNIT_SHELL=${shell} # pass shell onto tests
- shell_name=`basename ${shell}`
- shell_version=`versions_shellVersion "${shell}"`
-
- echo "shell name: ${shell_name}"
- echo "shell version: ${shell_version}"
-
- # execute the tests
- for suite in ${tests}; do
- suiteName=`expr "${suite}" : "${PREFIX}\(.*\).sh"`
- echo
- echo "--- Executing the '${suiteName}' test suite ---"
- ( exec ${shell} ./${suite} 2>&1; )
- done
-done
+++ /dev/null
-#! /bin/sh
-# $Id: shunit2_test_asserts.sh 312 2011-03-14 22:41:29Z kate.ward@forestent.com $
-# vim:et:ft=sh:sts=2:sw=2
-#
-# Copyright 2008 Kate Ward. All Rights Reserved.
-# Released under the LGPL (GNU Lesser General Public License)
-#
-# Author: kate.ward@forestent.com (Kate Ward)
-#
-# shUnit2 unit test for assert functions
-
-# load test helpers
-. ./shunit2_test_helpers
-
-#------------------------------------------------------------------------------
-# suite tests
-#
-
-commonEqualsSame()
-{
- fn=$1
-
- ( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'equal' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'equal; with msg' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} 'abc def' 'abc def' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'equal with spaces' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'not equal' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'null values' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-commonNotEqualsSame()
-{
- fn=$1
-
- ( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'not same' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} "${MSG}" 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'not same, with msg' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
-
- ( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-testAssertEquals()
-{
- commonEqualsSame 'assertEquals'
-}
-
-testAssertNotEquals()
-{
- commonNotEqualsSame 'assertNotEquals'
-}
-
-testAssertSame()
-{
- commonEqualsSame 'assertSame'
-}
-
-testAssertNotSame()
-{
- commonNotEqualsSame 'assertNotSame'
-}
-
-testAssertNull()
-{
- ( assertNull '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'null' $? "${stdoutF}" "${stderrF}"
-
- ( assertNull "${MSG}" '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'null, with msg' $? "${stdoutF}" "${stderrF}"
-
- ( assertNull 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'not null' $? "${stdoutF}" "${stderrF}"
-
- ( assertNull >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
-
- ( assertNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-testAssertNotNull()
-{
- ( assertNotNull 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'not null' $? "${stdoutF}" "${stderrF}"
-
- ( assertNotNull "${MSG}" 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'not null, with msg' $? "${stdoutF}" "${stderrF}"
-
- ( assertNotNull 'x"b' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'not null, with double-quote' $? \
- "${stdoutF}" "${stderrF}"
-
- ( assertNotNull "x'b" >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'not null, with single-quote' $? \
- "${stdoutF}" "${stderrF}"
-
- ( assertNotNull 'x$b' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'not null, with dollar' $? \
- "${stdoutF}" "${stderrF}"
-
- ( assertNotNull 'x`b' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'not null, with backtick' $? \
- "${stdoutF}" "${stderrF}"
-
- ( assertNotNull '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
-
- # there is no test for too few arguments as $1 might actually be null
-
- ( assertNotNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-testAssertTrue()
-{
- ( assertTrue 0 >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'true' $? "${stdoutF}" "${stderrF}"
-
- ( assertTrue "${MSG}" 0 >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'true, with msg' $? "${stdoutF}" "${stderrF}"
-
- ( assertTrue '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'true condition' $? "${stdoutF}" "${stderrF}"
-
- ( assertTrue 1 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'false' $? "${stdoutF}" "${stderrF}"
-
- ( assertTrue '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'false condition' $? "${stdoutF}" "${stderrF}"
-
- ( assertTrue '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
-
- ( assertTrue >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
-
- ( assertTrue arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-testAssertFalse()
-{
- ( assertFalse 1 >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'false' $? "${stdoutF}" "${stderrF}"
-
- ( assertFalse "${MSG}" 1 >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'false, with msg' $? "${stdoutF}" "${stderrF}"
-
- ( assertFalse '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
- th_assertTrueWithNoOutput 'false condition' $? "${stdoutF}" "${stderrF}"
-
- ( assertFalse 0 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'true' $? "${stdoutF}" "${stderrF}"
-
- ( assertFalse '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'true condition' $? "${stdoutF}" "${stderrF}"
-
- ( assertFalse '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'true condition' $? "${stdoutF}" "${stderrF}"
-
- ( assertFalse >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
-
- ( assertFalse arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-#------------------------------------------------------------------------------
-# suite functions
-#
-
-oneTimeSetUp()
-{
- tmpDir="${__shunit_tmpDir}/output"
- mkdir "${tmpDir}"
- stdoutF="${tmpDir}/stdout"
- stderrF="${tmpDir}/stderr"
-
- MSG='This is a test message'
-}
-
-# load and run shUnit2
-[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
-. ${TH_SHUNIT}
+++ /dev/null
-#! /bin/sh
-# $Id: shunit2_test_failures.sh 286 2008-11-24 21:42:34Z kate.ward@forestent.com $
-# vim:et:ft=sh:sts=2:sw=2
-#
-# Copyright 2008 Kate Ward. All Rights Reserved.
-# Released under the LGPL (GNU Lesser General Public License)
-#
-# Author: kate.ward@forestent.com (Kate Ward)
-#
-# shUnit2 unit test for failure functions
-
-# load common unit-test functions
-. ./shunit2_test_helpers
-
-#-----------------------------------------------------------------------------
-# suite tests
-#
-
-testFail()
-{
- ( fail >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'fail' $? "${stdoutF}" "${stderrF}"
-
- ( fail "${MSG}" >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'fail with msg' $? "${stdoutF}" "${stderrF}"
-
- ( fail arg1 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-testFailNotEquals()
-{
- ( failNotEquals 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
-
- ( failNotEquals "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
-
- ( failNotEquals 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
-
- ( failNotEquals '' '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
-
- ( failNotEquals >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
-
- ( failNotEquals arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-testFailSame()
-{
- ( failSame 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
-
- ( failSame "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
-
- ( failSame 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
-
- ( failSame '' '' >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
-
- ( failSame >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
-
- ( failSame arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
- th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
-}
-
-#-----------------------------------------------------------------------------
-# suite functions
-#
-
-oneTimeSetUp()
-{
- tmpDir="${__shunit_tmpDir}/output"
- mkdir "${tmpDir}"
- stdoutF="${tmpDir}/stdout"
- stderrF="${tmpDir}/stderr"
-
- MSG='This is a test message'
-}
-
-# load and run shUnit2
-[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
-. ${TH_SHUNIT}
+++ /dev/null
-# $Id: shunit2_test_helpers 286 2008-11-24 21:42:34Z kate.ward@forestent.com $
-# vim:et:ft=sh:sts=2:sw=2
-#
-# Copyright 2008 Kate Ward. All Rights Reserved.
-# Released under the LGPL (GNU Lesser General Public License)
-#
-# Author: kate.ward@forestent.com (Kate Ward)
-#
-# shUnit2 unit test common functions
-
-# treat unset variables as an error when performing parameter expansion
-set -u
-
-# set shwordsplit for zsh
-[ -n "${ZSH_VERSION:-}" ] && setopt shwordsplit
-
-#
-# constants
-#
-
-# path to shUnit2 library. can be overridden by setting SHUNIT_INC
-TH_SHUNIT=${SHUNIT_INC:-./shunit2}
-
-# configure debugging. set the DEBUG environment variable to any
-# non-empty value to enable debug output, or TRACE to enable trace
-# output.
-TRACE=${TRACE:+'th_trace '}
-[ -n "${TRACE}" ] && DEBUG=1
-[ -z "${TRACE}" ] && TRACE=':'
-
-DEBUG=${DEBUG:+'th_debug '}
-[ -z "${DEBUG}" ] && DEBUG=':'
-
-#
-# variables
-#
-
-th_RANDOM=0
-
-#
-# functions
-#
-
-# message functions
-th_trace() { echo "${MY_NAME}:TRACE $@" >&2; }
-th_debug() { echo "${MY_NAME}:DEBUG $@" >&2; }
-th_info() { echo "${MY_NAME}:INFO $@" >&2; }
-th_warn() { echo "${MY_NAME}:WARN $@" >&2; }
-th_error() { echo "${MY_NAME}:ERROR $@" >&2; }
-th_fatal() { echo "${MY_NAME}:FATAL $@" >&2; }
-
-# output subtest name
-th_subtest() { echo " $@" >&2; }
-
-# generate a random number
-th_generateRandom()
-{
- tfgr_random=${th_RANDOM}
-
- while [ "${tfgr_random}" = "${th_RANDOM}" ]; do
- if [ -n "${RANDOM:-}" ]; then
- # $RANDOM works
- tfgr_random=${RANDOM}${RANDOM}${RANDOM}$$
- elif [ -r '/dev/urandom' ]; then
- tfgr_random=`od -vAn -N4 -tu4 </dev/urandom |sed 's/^[^0-9]*//'`
- else
- tfgr_date=`date '+%H%M%S'`
- tfgr_random=`expr ${tfgr_date} \* $$`
- unset tfgr_date
- fi
- [ "${tfgr_random}" = "${th_RANDOM}" ] && sleep 1
- done
-
- th_RANDOM=${tfgr_random}
- unset tfgr_random
-}
-
-# this section returns the data section from the specified section of a file. a
-# datasection is defined by a [header], one or more lines of data, and then a
-# blank line.
-th_getDataSect()
-{
- th_sgrep "\\[$1\\]" "$2" |sed '1d'
-}
-
-# this function greps a section from a file. a section is defined as a group of
-# lines preceeded and followed by blank lines.
-th_sgrep()
-{
- th_pattern_=$1
- shift
-
- sed -e '/./{H;$!d;}' -e "x;/${th_pattern_}/"'!d;' $@ |sed '1d'
-
- unset th_pattern_
-}
-
-# Custom assert that checks for true return value (0), and no output to STDOUT
-# or STDERR. If a non-zero return value is encountered, the output of STDERR
-# will be output.
-#
-# Args:
-# th_test_: string: name of the subtest
-# th_rtrn_: integer: the return value of the subtest performed
-# th_stdout_: string: filename where stdout was redirected to
-# th_stderr_: string: filename where stderr was redirected to
-th_assertTrueWithNoOutput()
-{
- th_test_=$1
- th_rtrn_=$2
- th_stdout_=$3
- th_stderr_=$4
-
- assertTrue "${th_test_}; expected return value of zero" ${th_rtrn_}
- [ ${th_rtrn_} -ne ${SHUNIT_TRUE} ] && cat "${th_stderr_}"
- assertFalse "${th_test_}; expected no output to STDOUT" \
- "[ -s '${th_stdout_}' ]"
- assertFalse "${th_test_}; expected no output to STDERR" \
- "[ -s '${th_stderr_}' ]"
-
- unset th_test_ th_rtrn_ th_stdout_ th_stderr_
-}
-
-# Custom assert that checks for non-zero return value, output to STDOUT, but no
-# output to STDERR.
-#
-# Args:
-# th_test_: string: name of the subtest
-# th_rtrn_: integer: the return value of the subtest performed
-# th_stdout_: string: filename where stdout was redirected to
-# th_stderr_: string: filename where stderr was redirected to
-th_assertFalseWithOutput()
-{
- th_test_=$1
- th_rtrn_=$2
- th_stdout_=$3
- th_stderr_=$4
-
- assertFalse "${th_test_}; expected non-zero return value" ${th_rtrn_}
- assertTrue "${th_test_}; expected output to STDOUT" \
- "[ -s '${th_stdout_}' ]"
- assertFalse "${th_test_}; expected no output to STDERR" \
- "[ -s '${th_stderr_}' ]"
-
- unset th_test_ th_rtrn_ th_stdout_ th_stderr_
-}
-
-# Custom assert that checks for non-zero return value, no output to STDOUT, but
-# output to STDERR.
-#
-# Args:
-# th_test_: string: name of the subtest
-# th_rtrn_: integer: the return value of the subtest performed
-# th_stdout_: string: filename where stdout was redirected to
-# th_stderr_: string: filename where stderr was redirected to
-th_assertFalseWithError()
-{
- th_test_=$1
- th_rtrn_=$2
- th_stdout_=$3
- th_stderr_=$4
-
- assertFalse "${th_test_}; expected non-zero return value" ${th_rtrn_}
- assertFalse "${th_test_}; expected no output to STDOUT" \
- "[ -s '${th_stdout_}' ]"
- assertTrue "${th_test_}; expected output to STDERR" \
- "[ -s '${th_stderr_}' ]"
-
- unset th_test_ th_rtrn_ th_stdout_ th_stderr_
-}
-
-#
-# main
-#
-
-${TRACE} 'trace output enabled'
-${DEBUG} 'debug output enabled'
+++ /dev/null
-#! /bin/sh
-# $Id: shunit2_test_macros.sh 299 2010-05-03 12:44:20Z kate.ward@forestent.com $
-# vim:et:ft=sh:sts=2:sw=2
-#
-# Copyright 2008 Kate Ward. All Rights Reserved.
-# Released under the LGPL (GNU Lesser General Public License)
-# Author: kate.ward@forestent.com (Kate Ward)
-#
-# shUnit2 unit test for macros.
-
-# load test helpers
-. ./shunit2_test_helpers
-
-#------------------------------------------------------------------------------
-# suite tests
-#
-
-testAssertEquals()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_ASSERT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_EQUALS_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_ASSERT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_EQUALS_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testAssertNotEquals()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_ASSERT_NOT_EQUALS_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_NOT_EQUALS_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_ASSERT_NOT_EQUALS_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_NOT_EQUALS_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testSame()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_ASSERT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_SAME_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_ASSERT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_SAME_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testNotSame()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_ASSERT_NOT_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_NOT_SAME_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_ASSERT_NOT_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_NOT_SAME_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testNull()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_ASSERT_NULL_} 'x' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_NULL_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_ASSERT_NULL_} '"some msg"' 'x' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_NULL_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testNotNull()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_ASSERT_NOT_NULL_} '' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_NOT_NULL_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_ASSERT_NOT_NULL_} '"some msg"' '""' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_NOT_NULL_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stdoutF}" "${stderrF}" >&2
-}
-
-testAssertTrue()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_ASSERT_TRUE_} ${SHUNIT_FALSE} >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_TRUE_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
-
- ( ${_ASSERT_TRUE_} '"some msg"' ${SHUNIT_FALSE} >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_TRUE_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testAssertFalse()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_ASSERT_FALSE_} ${SHUNIT_TRUE} >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_FALSE_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_ASSERT_FALSE_} '"some msg"' ${SHUNIT_TRUE} >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_ASSERT_FALSE_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testFail()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_FAIL_} >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_FAIL_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_FAIL_} '"some msg"' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_FAIL_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testFailNotEquals()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_FAIL_NOT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_FAIL_NOT_EQUALS_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_FAIL_NOT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_FAIL_NOT_EQUALS_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testFailSame()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_FAIL_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_FAIL_SAME_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_FAIL_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_FAIL_SAME_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testFailNotSame()
-{
- # start skipping if LINENO not available
- [ -z "${LINENO:-}" ] && startSkipping
-
- ( ${_FAIL_NOT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_FAIL_NOT_SAME_ failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-
- ( ${_FAIL_NOT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
- grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
- rtrn=$?
- assertTrue '_FAIL_NOT_SAME_ w/ msg failure' ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-#------------------------------------------------------------------------------
-# suite functions
-#
-
-oneTimeSetUp()
-{
- tmpDir="${__shunit_tmpDir}/output"
- mkdir "${tmpDir}"
- stdoutF="${tmpDir}/stdout"
- stderrF="${tmpDir}/stderr"
-}
-
-# load and run shUnit2
-[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
-. ${TH_SHUNIT}
+++ /dev/null
-#! /bin/sh
-# $Id: shunit2_test_misc.sh 322 2011-04-24 00:09:45Z kate.ward@forestent.com $
-# vim:et:ft=sh:sts=2:sw=2
-#
-# Copyright 2008 Kate Ward. All Rights Reserved.
-# Released under the LGPL (GNU Lesser General Public License)
-#
-# Author: kate.ward@forestent.com (Kate Ward)
-#
-# shUnit2 unit tests of miscellaneous things
-
-# load test helpers
-. ./shunit2_test_helpers
-
-#------------------------------------------------------------------------------
-# suite tests
-#
-
-# Note: the test script is prefixed with '#' chars so that shUnit2 does not
-# incorrectly interpret the embedded functions as real functions.
-testUnboundVariable()
-{
- sed 's/^#//' >"${unittestF}" <<EOF
-## treat unset variables as an error when performing parameter expansion
-#set -u
-#
-#boom() { x=\$1; } # this function goes boom if no parameters are passed!
-#test_boom()
-#{
-# assertEquals 1 1
-# boom # No parameter given
-# assertEquals 0 \$?
-#}
-#. ${TH_SHUNIT}
-EOF
- ( exec ${SHUNIT_SHELL:-sh} "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
- assertFalse 'expected a non-zero exit value' $?
- grep '^ASSERT:Unknown failure' "${stdoutF}" >/dev/null
- assertTrue 'assert message was not generated' $?
- grep '^Ran [0-9]* test' "${stdoutF}" >/dev/null
- assertTrue 'test count message was not generated' $?
- grep '^FAILED' "${stdoutF}" >/dev/null
- assertTrue 'failure message was not generated' $?
-}
-
-testIssue7()
-{
- ( assertEquals 'Some message.' 1 2 >"${stdoutF}" 2>"${stderrF}" )
- diff "${stdoutF}" - >/dev/null <<EOF
-ASSERT:Some message. expected:<1> but was:<2>
-EOF
- rtrn=$?
- assertEquals ${SHUNIT_TRUE} ${rtrn}
- [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
-}
-
-testPrepForSourcing()
-{
- assertEquals '/abc' `_shunit_prepForSourcing '/abc'`
- assertEquals './abc' `_shunit_prepForSourcing './abc'`
- assertEquals './abc' `_shunit_prepForSourcing 'abc'`
-}
-
-testEscapeCharInStr()
-{
- actual=`_shunit_escapeCharInStr '\' ''`
- assertEquals '' "${actual}"
- assertEquals 'abc\\' `_shunit_escapeCharInStr '\' 'abc\'`
- assertEquals 'abc\\def' `_shunit_escapeCharInStr '\' 'abc\def'`
- assertEquals '\\def' `_shunit_escapeCharInStr '\' '\def'`
-
- actual=`_shunit_escapeCharInStr '"' ''`
- assertEquals '' "${actual}"
- assertEquals 'abc\"' `_shunit_escapeCharInStr '"' 'abc"'`
- assertEquals 'abc\"def' `_shunit_escapeCharInStr '"' 'abc"def'`
- assertEquals '\"def' `_shunit_escapeCharInStr '"' '"def'`
-
- actual=`_shunit_escapeCharInStr '$' ''`
- assertEquals '' "${actual}"
- assertEquals 'abc\$' `_shunit_escapeCharInStr '$' 'abc$'`
- assertEquals 'abc\$def' `_shunit_escapeCharInStr '$' 'abc$def'`
- assertEquals '\$def' `_shunit_escapeCharInStr '$' '$def'`
-
-# actual=`_shunit_escapeCharInStr "'" ''`
-# assertEquals '' "${actual}"
-# assertEquals "abc\\'" `_shunit_escapeCharInStr "'" "abc'"`
-# assertEquals "abc\\'def" `_shunit_escapeCharInStr "'" "abc'def"`
-# assertEquals "\\'def" `_shunit_escapeCharInStr "'" "'def"`
-
-# # must put the backtick in a variable so the shell doesn't misinterpret it
-# # while inside a backticked sequence (e.g. `echo '`'` would fail).
-# backtick='`'
-# actual=`_shunit_escapeCharInStr ${backtick} ''`
-# assertEquals '' "${actual}"
-# assertEquals '\`abc' \
-# `_shunit_escapeCharInStr "${backtick}" ${backtick}'abc'`
-# assertEquals 'abc\`' \
-# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}`
-# assertEquals 'abc\`def' \
-# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}'def'`
-}
-
-testEscapeCharInStr_specialChars()
-{
- # make sure our forward slash doesn't upset sed
- assertEquals '/' `_shunit_escapeCharInStr '\' '/'`
-
- # some shells escape these differently
- #assertEquals '\\a' `_shunit_escapeCharInStr '\' '\a'`
- #assertEquals '\\b' `_shunit_escapeCharInStr '\' '\b'`
-}
-
-# Test the various ways of declaring functions.
-#
-# Prefixing (then stripping) with comment symbol so these functions aren't
-# treated as real functions by shUnit2.
-testExtractTestFunctions()
-{
- f="${tmpD}/extract_test_functions"
- sed 's/^#//' <<EOF >"${f}"
-#testABC() { echo 'ABC'; }
-#test_def() {
-# echo 'def'
-#}
-#testG3 ()
-#{
-# echo 'G3'
-#}
-#function test4() { echo '4'; }
-# test5() { echo '5'; }
-#some_test_function() { echo 'some func'; }
-#func_with_test_vars() {
-# testVariable=1234
-#}
-EOF
-
- actual=`_shunit_extractTestFunctions "${f}"`
- assertEquals 'testABC test_def testG3 test4 test5' "${actual}"
-}
-
-#------------------------------------------------------------------------------
-# suite functions
-#
-
-setUp()
-{
- for f in ${expectedF} ${stdoutF} ${stderrF}; do
- cp /dev/null ${f}
- done
- rm -fr "${tmpD}"
- mkdir "${tmpD}"
-}
-
-oneTimeSetUp()
-{
- tmpD="${SHUNIT_TMPDIR}/tmp"
- expectedF="${SHUNIT_TMPDIR}/expected"
- stdoutF="${SHUNIT_TMPDIR}/stdout"
- stderrF="${SHUNIT_TMPDIR}/stderr"
- unittestF="${SHUNIT_TMPDIR}/unittest"
-}
-
-# load and run shUnit2
-[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
-. ${TH_SHUNIT}
+++ /dev/null
-#! /bin/sh
-# $Id: shunit2_test_standalone.sh 303 2010-05-03 13:11:27Z kate.ward@forestent.com $
-# vim:et:ft=sh:sts=2:sw=2
-#
-# Copyright 2010 Kate Ward. All Rights Reserved.
-# Released under the LGPL (GNU Lesser General Public License)
-# Author: kate.ward@forestent.com (Kate Ward)
-#
-# shUnit2 unit test for standalone operation.
-#
-# This unit test is purely to test that calling shunit2 directly, while passing
-# the name of a unit test script, works. When run, this script determines if it
-# is running as a standalone program, and calls main() if it is.
-
-ARGV0=`basename "$0"`
-
-# load test helpers
-. ./shunit2_test_helpers
-
-#------------------------------------------------------------------------------
-# suite tests
-#
-
-testStandalone()
-{
- assertTrue ${SHUNIT_TRUE}
-}
-
-#------------------------------------------------------------------------------
-# main
-#
-
-main()
-{
- ${TH_SHUNIT} "${ARGV0}"
-}
-
-# are we running as a standalone?
-if [ "${ARGV0}" = 'shunit2_test_standalone.sh' ]; then
- if [ $# -gt 0 ]; then main "$@"; else main; fi
-fi
--- /dev/null
+<html>
+ <head>
+ <meta http-equiv="content-type" content="text/html; charset=utf-8">
+ <title>TestKit Reference Manual</title>
+ </head>
+ <body vlink="purple" link="blue" lang="EN-US">
+ <h1 style=" text-align:center">Feisty Meow® TestKit Reference Manual</h1>
+    <address style=" text-align:center">Version 1.0 ― Updated August 12, 2020</address>
+ <h1>The Feisty Meow® TestKit</h1>
+ <p>The TestKit is a collection of scripts that leverages the ShUnit unit
+ testing environment. TestKit provides a pattern for creating test
+ suites using a simple configuration file approach. Full reporting on
+ test runs is provided in a convenient tabular format.</p>
+ <p>Generally, writing a test script using the TestKit is a matter of
+ minutes. A blank test is provided as a template, and that can be
+ expanded with whatever test steps are needed.</p>
+ <p>TestKit (and ShUnit) are implemented in the GNU Bash script language, but
+ a TestKit test script can invoke external applications, written in
+ whatever programming language or scripting tool is desired, using the
+ standard POSIX interfaces.<br>
+ </p>
+ <h2> Getting the TestKit</h2>
+ <p>Follow these steps to download and install the Feisty Meow TestKit:<br>
+ </p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.1in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:6.0pt;
+margin-left:0in;background:#DDD9C3"><span style="font-family: monospace;">sudo
+ mkdir /opt/feistymeow.org<br>
+ sudo chown -R $USER /opt/feistymeow.org<br>
+ cd /opt/feistymeow.org<br>
+ git clone git://feistymeow.org/feisty_meow</span></p>
+ </div>
+ <h3>Preparing the TestKit on Linux</h3>
+ <p>Linux is the easiest environment for running the TestKit, given that the
+ tests were built using the bash shell within a Linux environment. If
+ some of the packages used in the tests are missing (such as expect and gnu
+ awk), these may need to be installed from the appropriate repository for
+ your Linux distribution. Most distributions include these packages
+ automatically however.</p>
+ <h3> Preparing the TestKit on Mac OS X</h3>
+ <p>The test suite runs well on modern Macs with Intel CPUs. Due to
+ some differences in the versions of a few applications on the Mac, some
+ GNU tools may need to be installed to run the TestKit. These are
+ available via the Brew installer tool. <br>
+ </p>
+ <h3> </h3>
+ <h3> Preparing the TestKit on MS-Windows</h3>
+ <p>The Cygwin Unix emulation system is required to run the TestKit on
+ Windows. This package is available at: <a href="http://cygwin.com/install.html">http://cygwin.com/install.html</a></p>
+ <p>The default packages selected by Cygwin are the starting point of the
+ install. In addition to those packages, the following packages are
+ also required (see list below). Rather than using the cygwin setup
+ program for this task, the next section describes how to install Cygwin
+ with the apt-cyg tool. Apt-cyg is the preferred method, since it
+ involves less interaction with the somewhat clunky Cygwin installer.
+ If necessary, it is possible to install all the packages without apt-cyg
+ just by using the Cygwin setup program. To find each of these
+ packages more easily, try switching the “View” button on the Cygwin setup
+ program to “Full” to get an alphabetized list.</p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.1in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:0in;
+margin-left:0in;margin-bottom:.0001pt;background:#DDD9C3">bc <br>
+ crypt <br>
+ cygutils <br>
+ emacs <br>
+ email <br>
+ expect <br>
+ gcc-g++<br>
+ git <br>
+ gitk <br>
+ gvim <br>
+ inetutils <br>
+ less <br>
+ make <br>
+ mutt <br>
+ ncftp <br>
+ openssh <br>
+ perl <br>
+ procps<br>
+ python <br>
+ sharutils <br>
+ shutdown <br>
+ subversion <br>
+ time <br>
+ unzip <br>
+ util-linux <br>
+ vim<br>
+ wget<br>
+ xinit <br>
+ xterm <br>
+ zip </p>
+ </div>
+ <p> </p>
+ <h3>Apt-cyg Installation Process</h3>
+ <p>The apt-cyg program brings the convenience of the Debian and Ubuntu
+ installer application (apt-get) to Cygwin. This program does require
+ a couple of additional setup steps. This material is drawn from the
+ apt-cyg home page: <a href="https://github.com/transcode-open/apt-cyg">https://github.com/transcode-open/apt-cyg</a></p>
+ <p class="MsoListParagraphCxSpFirst" style="text-indent:-.25in">1.
+ Install the basic Cygwin packages with setup.exe (rather than the long
+ list above), but add these two packages which are not selected by default:</p>
+ <ul>
+ <li>subversion</li>
+ <li>wget</li>
+ </ul>
+ <p class="MsoListParagraphCxSpLast" style="text-indent:-.25in">2.
+ Download and install the apt-cyg program from within a Cygwin bash prompt:</p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.5in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:6.0pt;
+margin-left:0in;background:#DDD9C3">lynx -source
+ rawgit.com/transcode-open/apt-cyg/master/apt-cyg > apt-cyg<br>
+ install apt-cyg /bin</p>
+ </div>
+ <p class="MsoListParagraph" style="text-indent:-.25in">3.
+ Install the packages required for the TestKit:</p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.5in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:0in;
+margin-left:0in;margin-bottom:.0001pt;background:#DDD9C3">apt-cyg install bc
+ crypt cygutils emacs email expect gcc-g++ git gitk gvim \<br>
+ inetutils less make mutt ncftp openssh perl procps python
+ sharutils \<br>
+ shutdown time unzip util-linux vim xinit xterm zip</p>
+ </div>
+ <p class="MsoListParagraph" style="text-indent:-.25in">4.
+ The installation will run for a while but then should conclude with all
+ required packages installed.</p>
+ <h2> Setting up a Test Suite</h2>
+ <p>Running tests in TestKit uses a configuration file called
+ “testkit.config” to define the environment and, optionally, which test
+ scripts to run. This file is the main switchboard that defines where
+ the tests will find users, home directories, queues, containers, and so
+ forth. The configuration file can be specified via the environment
+ variable “TESTKIT_CFG_FILE”. This variable can be set to any
+ location, enabling the configuration file to reside in a directory other
+ than the toolkit folder. If the variable is not defined, then the
+ testing config file defaults to “$TESTKIT_ROOT/testkit.config”.</p>
+ The TESTKIT_ROOT variable is frequently referred to in command
+ examples. It is set up automatically by the prepare_tools script (see
+ below).
+ <h2>Running a Test Suite</h2>
+ <p>Once the TestKit configuration file has been established, running a whole
+ test suite can be accomplished with this command:<br>
+ </p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.1in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:6.0pt;
+margin-left:0in;background:#DDD9C3"> bash <i>{TESTKIT_FOLDER}</i>/test_driver.sh
+ </p> </div>
+ <p>Where the <i>{TESTKIT_FOLDER}</i> should be replaced with whatever path
+ the TestKit is stored in.</p>
+ <p>Alternatively, if the TESTKIT_ROOT folder is already established, the
+ tests can be run with:</p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.1in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:6.0pt;
+margin-left:0in;background:#DDD9C3"> bash "$TESTKIT_ROOT/test_driver.sh"</p>
+ </div>
+ <p></p>
+ <h3> What to Expect From the Test Run</h3>
+ <p>The test_driver.sh script will output a few informative lines of text
+ before printing a table of the tests that it intends to run.</p>
+ <p>After the test plan is shown, all of the tests listed will be executed in
+ the order they are listed in, and they will each produce output.
+ Each individual test (usually a separate bash script) produces a summary
+ at the end of its run with a count of tests and a report of the test's
+ success or failure.</p>
+ <p>At the end of all the tests in the suite, the table of tests is printed
+ again with the results for each test. For example, this is a test
+ run that had no errors in any test (that's good, since it is our super
+ simple example test):</p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.1in;margin-right:.1in">$ cd
+ $FEISTY_MEOW_APEX/testkit<br>
+ $ ./test_driver.sh summary<br>
+ ===========================================================<br>
+ Testkit environment loaded.<br>
+ TESTKIT_ROOT=/opt/feistymeow.org/feisty_meow/testkit<br>
+ TESTKIT_CFG_FILE=/opt/feistymeow.org/feisty_meow/testkit/testkit.config<br>
+ TMP=/Users/fred/.tmp<br>
+ TEST_TEMP=/Users/fred/.tmp/testkit_logs_fred<br>
+ ===========================================================<br>
+ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++<br>
+ TestKit running from: /opt/feistymeow.org/feisty_meow/testkit<br>
+ TestKit config file:
+ /opt/feistymeow.org/feisty_meow/testkit/testkit.config<br>
+ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++<br>
+ Full set of tests:<br>
+ 1: /opt/feistymeow.org/feisty_meow/testkit/examples/blank_test.sh<br>
+ <br>
+ ======================================================================<br>
+ Wed Aug 12 14:11:00 EDT 2020: Now running test 1:
+ /opt/feistymeow.org/feisty_meow/testkit/examples/blank_test.sh<br>
+ Test output file:
+ /Users/fred/.tmp/testkit_logs_fred/run_2020_08_12/test_log.vKf7J3<br>
+ OK: successful test run for test
+ /opt/feistymeow.org/feisty_meow/testkit/examples/blank_test.sh<br>
+ <br>
+ <br>
+ Results table for this test run:<br>
+ <br>
+ 01: OKAY -- /opt/feistymeow.org/feisty_meow/testkit/examples/blank_test.sh<br>
+ <br>
+ Total testing duration: 00:00 hh:mm (1 seconds total)<br>
+ OK: All 1 Tests Ran Successfully.<br>
+ <br>
+ <br>
+ <br>
+ <br>
+ <br>
+ <br>
+ </div>
+ <p class="Textbody">The above shows the "summary" view, which does not allow
+ the individual tests to output to the console. If the "summary" flag
+ is not passed, then the output from all the test runs is also shown.</p>
+ <p class="Textbody">Even when the summary view is used, all output files can
+ be examined after the run. For example, in the above, the mentioned
+ output file "test_log.vKf7J3" can be checked to see exactly what happened
+ during the test.</p>
+ <p class="Textbody">A test with a failure in it will have “FAIL” next to the
+ test that failed, and the final output line will start with
+ “FAILURE”. For example:</p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.1in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:6.0pt;
+margin-left:0in;background:#DDD9C3">01: FAIL –
+ AckPfft_Tests/Gorp_Tests/deslagToaster.sh<br>
+ 02: OKAY – AckPfft_Tests/Gorp_Tests/spumeMerchantry.sh<br>
+ 03: OKAY – AckPfft_Tests/Gorp_Tests/octopusLauncher.sh<br>
+ …<br>
+ 22: OKAY -- Snargle_Tests/scramTests/scramForPetunias.sh</p>
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:6.0pt;
+margin-left:0in;background:#DDD9C3">FAILURE: 1 Tests Failed out of 22 Tests.</p>
+ </div>
+ <p>A failed test will also return a non-zero value from the test execution,
+ enabling the run of a test suite to be tested for success when launched
+ externally, such as from a continuous integration system.</p>
+ <h2>Loading the TestKit Environment</h2>
+ <p>If one wishes to run individual tests within the test suite, rather than
+ the entire suite, this is done by loading the TestKit variables into the
+ current shell environment, like so:</p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;
+background:#DDD9C3;margin-left:.1in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:6.0pt;
+margin-left:0in;background:#DDD9C3">cd <i>{TESTKIT_FOLDER}</i> # replace
+ with actual location of TestKit.<br>
+ source prepare_tools.sh prepare_tools.sh<br>
+ source $TESTKIT_ROOT/library/process_configuration.sh<br>
+ define_and_export_variables</p>
+ # Show the important variables.<br>
+ var $TESTKIT_ROOT $TESTKIT_CFG_FILE</div>
+ <p>After loading the TestKit environment, one can execute a specific test
+ and see its results, for example:</p>
+ <div style="border:solid windowtext 1.0pt;padding:1.0pt 4.0pt 1.0pt 4.0pt;background:#DDD9C3;margin-left:.1in;margin-right:.1in">
+ <p class="Code-Box" style="margin-top:6.0pt;margin-right:0in;margin-bottom:6.0pt;
+margin-left:0in;background:#DDD9C3">cd examples<br>
+ bash blank_test.sh</p>
+ </div>
+ <p>The test will run and output its results to the console (that is, output
+ is sent to standard out and standard error, to be more precise).</p>
+ <p><br>
+ </p>
+ <h3></h3>
+ </body>
+</html>
--- /dev/null
+#!/bin/bash
+
+# Test: X
+# Author: Y
+
+export WORKDIR="$( \cd "$(\dirname "$0")" && \pwd )" # obtain the script's working directory.
+cd "$WORKDIR"
+
+# this needs to be relative to where the test will actually reside; the ../../../../../etc
+# should get to the top of the tools and tests hierarchy.
+source "../prepare_tools.sh" "../prepare_tools.sh"
+if [ -z "$TEST_TEMP" ]; then
+ echo The TestKit could not be automatically located.
+ exit 1
+fi
+
+if [ -z "$TESTKIT_SENTINEL" ]; then echo Please run prepare_tools.sh before testing.; exit 3; fi
+source "$TESTKIT_ROOT/library/establish_environment.sh"
+
+oneTimeSetUp()
+{
+ # a test environment initializer method called directly by shunit.
+ # you can add your own code here that is needed before the test run starts.
+ true
+}
+
+# this exact test should always be the first one in a test suite.
+testSanity()
+{
+ # make sure the test environment is good.
+ sanity_test_and_init
+ assertEquals "sanity test" 0 $?
+}
+
+testCleaningPriorTestRun()
+{
+ echo taking steps to clean last test...
+#if any.
+}
+
+testDoAThing()
+{
+ echo doing one thing
+ assertEquals "doing that thing should work" 0 $?
+}
+
+testDoAnotherThing()
+{
+ echo doing another thing here
+ assertEquals "doing that other thing should work" 0 $?
+
+ echo "about to cause a failure, to test assertNotEquals..."
+ false
+ assertNotEquals "an explicit failure should be seen" 0 $?
+}
+
+oneTimeTearDown() {
+ echo cleaning up after test now...
+#if anything to do.
+}
+
+# load and run shUnit2
+source "$SHUNIT_DIR/shunit2"
+
--- /dev/null
+# This is an example configuration file for the TestKit.
+
+####
+# this is fred t. hamster's personal testkit config file.
+####
+
+##############
+
+# This section defines variables that are used throughout the tests.
+# Many of these need to change to suit your particular configuration.
+
+# The base user name is used for any paths below that refer to the user who
+# will be running the tools and tests. This should be changed to the actual
+# user account under which the tools and tests will be run, if the default
+# value based on USER cannot be relied upon.
+BASE_USER=${USER}
+
+# Used for windows testing; provides the path to the binaries directory of cygwin.
+#CYGWIN_BIN_PATH=c:/cygwin/bin
+
+##############
+
+# define the tests to run. this is the most convenient place to put this.
+# the test suite list obviously will vary a lot based on what is being tested.
+TESTKIT_TEST_SUITE=( \
+ $TESTKIT_ROOT/examples/blank_test.sh \
+)
+
+##############
+
--- /dev/null
+#!/bin/bash
+
+# Supports the TestKit with a few handy functions and many variables.
+#
+# Author: Chris Koeritz
+
+##############
+
+# pull in the really basic functions...
+source "$TESTKIT_ROOT/library/helper_methods.sh"
+
+##############
+
+# this check should be called first, in oneTimeSetUp, in every test script that uses shunit.
+# it will make sure that important facts are true about the test environment.
+#
+#hmmm: need to extend this to allow them to add their own sanity checks to it,
+# similarly to how we need to add log parsing capability as an extension.
+#
+function sanity_test_and_init()
+{
+ if [ -z "$WORKDIR" ]; then
+ echo "The WORKDIR variable is not set. This should be established by each test, near the top."
+ exit 1
+ fi
+ # establish this for shunit so tests do not have to run in current directory.
+ export SHUNIT_PARENT="$WORKDIR/$(basename "$0")"
+
+#hmmm: add other checks here, including the user provided ones.
+
+ return 0
+}
+
+##############
+
+# this is the main source of parameters for the tests.
+export TESTKIT_CFG_FILE
+if [ ! -f "$TESTKIT_CFG_FILE" ]; then
+ # well, no config file variable defined, so we go with our default.
+ # this file must exist or we're stumped.
+ TESTKIT_CFG_FILE="$TESTKIT_ROOT/testkit.config"
+fi
+if [ ! -f "$TESTKIT_CFG_FILE" -a -z "$BADNESS" ]; then
+ echo "----"
+ echo "This script requires that you prepare a customized file in:"
+ echo " $TESTKIT_CFG_FILE"
+ echo " (above is current value of TESTKIT_CFG_FILE variable)"
+ echo "with the details of your testing configuration."
+ echo "There are some example config files in the folder:"
+ echo " $TESTKIT_ROOT/examples"
+ BADNESS=true
+fi
+
+##############
+
+# make sure we aren't premature in processing the config file.
+if [ -z "$TESTKIT_BOOTSTRAPPING" ]; then
+
+ # read the config file and generate environment variables for all the entries.
+ source "$TESTKIT_ROOT/library/process_configuration.sh"
+ define_and_export_variables
+ check_if_failed "Not all variables could be imported properly from the configuration file '$TESTKIT_CFG_FILE'"
+
+fi
+
+##############
+
+# announce status if in debugging mode.
+
+if [ ! -z "$DEBUGGING" -a -z "$SHOWED_SETTINGS_ALREADY" ]; then
+ echo +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ echo TestKit running from: $TESTKIT_ROOT
+ echo TestKit config file: $TESTKIT_CFG_FILE
+ echo +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+fi
+
+##############
+
+# more steps we're not ready for if still bootstrapping our environment.
+if [ -z "$TESTKIT_BOOTSTRAPPING" ]; then
+ # try to not blast out the above block of info again during this run.
+ export SHOWED_SETTINGS_ALREADY=true
+
+ # now that we have the environment set up, we can pull in all the functions
+ # we use for running the tests...
+ source "$TESTKIT_ROOT/library/runner_functions.sh"
+ source "$TESTKIT_ROOT/library/random_ids_manager.sh"
+ #hmmm: random ids are not used yet, are they? are they working?
+ source "$TESTKIT_ROOT/library/file_management.sh"
+
+fi
+
+##############
+
--- /dev/null
+
+pfx_file="$1"; shift
+if [ -z "$pfx_file" -o ! -f "$pfx_file" ]; then
+ echo "This script requires the full path to a PFX file which will have its"
+ echo "certificate extracted into a DER formatted file ending in '.cer'."
+ exit 1
+fi
+
+intermed_file="$TMP/$(basename "$pfx_file" .pfx).pem"
+final_file="$(dirname "$pfx_file")/$(basename "$pfx_file" .pfx).cer"
+
+echo input PFX file is $pfx_file
+echo generating intermediate PEM file...
+openssl pkcs12 -in "$pfx_file" -out "$intermed_file" -nodes -nokeys
+if [ $? -ne 0 ]; then echo "previous step failed!"; exit 1; fi
+echo generated intermediate PEM file $intermed_file
+echo generating final file in DER format...
+openssl x509 -outform der -in "$intermed_file" -out "$final_file"
+if [ $? -ne 0 ]; then echo "previous step failed!"; exit 1; fi
+echo final DER file successfully saved as $final_file
+
--- /dev/null
+
+# these are the pieces that we'll use to assemble mostly random files.
+RANDOM_CHUNK_FILES=($TEST_TEMP/random.0 $TEST_TEMP/random.1 $TEST_TEMP/random.2 $TEST_TEMP/random.3 $TEST_TEMP/random.4)
+
+# largest chunk of random data we'll actually generate at a time, in each chunk file.
+export MAX_CHUNK_FILE=65536
+
+# returns the file size for the first argument.
+function getFileSize()
+{
+ local file="$1"; shift
+ if isMacOSX; then
+ stat -f%z "$file"
+ else
+ stat --printf="%s" "$file"
+ fi
+}
+
+# outputs the number of seconds since the epoch.
+function getTimeStamp()
+{
+ date +%s
+}
+
+# makes sure the chunk files are all generated.
+function prepareRandomChunks()
+{
+ local i
+ for ((i = 0; i < ${#RANDOM_CHUNK_FILES[@]}; i++)); do
+ # make the chunk files if they don't exist.
+ local currfile="${RANDOM_CHUNK_FILES[$i]}"
+ if [ ! -f "$currfile" ]; then
+ local filesize=$MAX_CHUNK_FILE
+ # pick a value to add or subtract from the constant sized chunk, so we won't always be
+ # using files at the same boundaries or with a power of 2 size.
+ local moddy=$(( ($(echo $RANDOM) % 128) - 64 ))
+ ((filesize -= $moddy))
+#echo creating chunk file $currfile of size $filesize
+ dd if=/dev/urandom of=$currfile bs=1 count=$filesize &>/dev/null
+ assertEquals "creating random chunk file $currfile" 0 $?
+ fi
+ done
+}
+
+# creates a somewhat random file for testing. this will be assembled out of
+# our chunks of random files, so is not truly random, but we've found that the
+# random number generator is a HUGE drag on our testing speed. this is still
+# pretty random data. the first argument is the file name and the second is
+# the desired file size.
+function createRandomFile()
+{
+ local file="$1"; shift
+ local size="$1"; shift
+
+ prepareRandomChunks
+
+ local stampBefore=$(getTimeStamp)
+
+ # truncate any existing stuff.
+ echo -n >"$file"
+
+ while [ $(getFileSize "$file") -lt $size ]; do
+ which_chunker=$(expr $(echo $RANDOM) % ${#RANDOM_CHUNK_FILES[@]})
+#echo choosing chunk file $which_chunker
+ cat "${RANDOM_CHUNK_FILES[$which_chunker]}" >>"$file"
+ done
+
+#echo file size after random chunkings is $(getFileSize "$file")
+
+ local fsizenow="$(getFileSize "$file")"
+#echo size now is $fsizenow and desired is $size
+ if [ $fsizenow -gt $size ]; then
+#echo trying to truncate file
+ truncate -s $size "$file"
+ fi
+#echo file size after truncate is $(getFileSize "$file") and expected size is $size
+
+ local stampAfter=$(getTimeStamp)
+ local secs=$(($stampAfter - $stampBefore))
+ if [ $secs -le 0 ]; then
+ # even though it claims it took zero time, we know better, but we also don't want to
+ # divide by zero, so it loses its credit for being very fast here.
+ secs=1
+ fi
+ local kbs=$(( $size / $secs / 1024))
+
+ local fsizenow="$(getFileSize "$file")"
+ assertEquals "Creating random file of $size bytes at ${kbs} kbps in: $file" $size $fsizenow
+}
+
--- /dev/null
+#!/bin/bash
+
+#hmmm: NOT ported to testkit yet.
+
+# creates a new certificate based on the grid's signing cert.
+#
+# Author: Chris Koeritz
+
+##############
+
+####
+
+export WORKDIR="$( \cd "$(\dirname "$0")" && \pwd )" # obtain the script's working directory.
+
+# pull in the xsede test base support.
+source "$WORKDIR/../prepare_tools.sh" "$WORKDIR/../prepare_tools.sh"
+
+# if that didn't work, complain.
+if [ -z "$TESTKIT_SENTINEL" ]; then echo Please run prepare_tools.sh before testing.; exit 3; fi
+
+# load the bash libraries we need.
+source "$TESTKIT_ROOT/library/helper_methods.sh"
+#source "$TESTKIT_ROOT/library/runner_functions.sh"
+source "$TESTKIT_ROOT/library/security_management.sh"
+
+####
+
+if [ $# -lt 7 ]; then
+ echo "this script needs 7 parameters:"
+ echo "1: signing cert PFX file to base new cert on."
+ echo "2: the password for the signing cert PFX."
+ echo "3: the alias of the key to use within the signing PFX."
+ echo "4: output file to create with new certificate in PFX format."
+ echo "5: password for new certificate PFX file."
+ echo "6: alias for the certificate within the PFX file."
+ echo "7: Common Name (CN) of the identity signified by the new certificate."
+ echo
+ echo "This is an example run using a signing cert for a container:"
+ echo
+ echo 'bash $TESTKIT_ROOT/library/gen_cert_from_ca.sh signing-cert.pfx signer signing-cert $HOME/new_cert.pfx myPassword certalias "Fred Powers"'
+ exit 1
+fi
+
+signing_cert="$1"; shift
+signing_passwd="$1"; shift
+signing_alias="$1"; shift
+output_file="$1"; shift
+output_passwd="$1"; shift
+output_alias="$1"; shift
+output_cn="$1"; shift
+
+create_pfx_using_CA "$signing_cert" "$signing_passwd" "$signing_alias" "$output_file" "$output_passwd" "$output_alias" "$output_cn"
+check_if_failed "generating '$output_file' from '$signing_cert'"
+
+echo "New certificate was generated into: $output_file"
+
+exit 0
+
--- /dev/null
+#!/bin/bash
+
+privkey="$1"; shift
+subject="$1"; shift
+csrfile="$1"; shift
+
+function print_instructions()
+{
+ echo -e "\n\
+This script creates a new CSR (certificate signing request) file for you from\n\
+an existing private key. Getting a new certificate using this CSR ensures\n\
+that previously signed resources can still be considered properly signed, even\n\
+after the original certificate has expired, by using the new certificate for\n\
+validation. After the new CSR file is generated, it must be sent to the\n\
+certificate authority and they can generate a new certificate for you.\n\
+\n\
+The script takes three parameters. The first is the file in which the\n\
+private key is stored in PEM format. The second parameter is the subject\n\
+to use in the certificate (who the certificate is issued to). The third\n\
+parameter is the output file for the generated CSR (certificate signing\n\
+request).\n\
+\n\
+For example:\n\
+ $(basename $0) my-private.key \"Julius Orange\" orange-new-cert-request.csr\n\
+"
+}
+
+if [ -z "$privkey" -o -z "$subject" -o -z "$csrfile" -o ! -f "$privkey" ]; then
+ print_instructions
+ echo -e "\nThere was a missing parameter or the private key file did not exist."
+ exit 1
+fi
+
+openssl req -new -key "$privkey" -nodes -subj "$subject" -out "$csrfile"
+
+
--- /dev/null
+#!/bin/bash
+
+# useful functions that are somewhat general. these are not needed for
+# the basic setup of the test environment, but they are used by other
+# test and tool functions and also by specific tests.
+#
+# Author: Chris Koeritz
+
+# prints out a timestamp with the current date and time up to seconds.
+function date_string()
+{
+ date +"%Y_%b_%e_%H%M_%S" | sed -e 's/ //g'
+}
+
+# prints out the timestamp in a somewhat readable way.
+function readable_date_string()
+{
+ date +"%Y-%m-%d %T"
+}
+
+########
+# (donated by the feisty meow scripts at http://feistymeow.org)
+
+ function is_array() {
+ [[ "$(declare -p $1)" =~ "declare -a" ]]
+ }
+
+ function is_alias() {
+ alias $1 &>/dev/null
+ return $?
+ }
+
+ # displays the value of a variable in bash friendly format.
+ function var() {
+ HOLDIFS="$IFS"
+ IFS=""
+ while true; do
+ local varname="$1"; shift
+ if [ -z "$varname" ]; then
+ break
+ fi
+
+ if is_alias "$varname"; then
+#echo found $varname is alias
+ local tmpfile="$(mktemp $TMP/aliasout.XXXXXX)"
+ alias $varname | sed -e 's/.*=//' >$tmpfile
+ echo "alias $varname=$(cat $tmpfile)"
+ \rm $tmpfile
+ elif [ -z "${!varname}" ]; then
+ echo "$varname undefined"
+ else
+ if is_array "$varname"; then
+#echo found $varname is array var
+ local temparray
+ eval temparray="(\${$varname[@]})"
+ echo "$varname=(${temparray[@]})"
+#hmmm: would be nice to print above with elements enclosed in quotes, so that we can properly
+# see ones that have spaces in them.
+ else
+#echo found $varname is simple
+ echo "$varname=${!varname}"
+ fi
+ fi
+ done
+ IFS="$HOLDIFS"
+ }
+########
+
+# given a file name and a phrase to look for, this replaces all instances of
+# it with a piece of replacement text. note that slashes are okay in the two
+# text pieces, but we are using pound signs as the regular expression
+# separator; phrases including the octothorpe (#) will cause syntax errors.
+function replace_phrase_in_file()
+{
+ local file="$1"; shift
+ local phrase="$1"; shift
+ local replacement="$1"; shift
+ if [ -z "$file" -o -z "$phrase" ]; then
+ echo "replace_phrase_in_file: needs a filename, a phrase to replace, and the"
+ echo "text to replace that phrase with."
+ return 1
+ fi
+ sed -i -e "s%$phrase%$replacement%g" "$file"
+}
+
+# prints an error message (from parameters) and exits if the previous command failed.
+function check_if_failed()
+{
+ if [ $? -ne 0 ]; then
+ echo Step failed: $*
+ exit 1
+ fi
+}
+
+# takes a first parameter that is the name for a combined error and output log,
+# and then runs all the other parameters as a command.
+function logged_command()
+{
+ local my_output="$1"; shift
+# echo "logged_command args: $(printf -- "[%s] " "${@}")"
+ eval "${@}" >>"$my_output" 2>&1
+ local retval=$?
+ if [ $retval == 0 ]; then
+ # good so far, but check for more subtle ways of failing; if there is
+ # an occurrence of our fail message in the output, that also indicates
+ # the command did not succeed.
+ grep "\[FAILURE\]" $my_output
+ # we do not want to see that phrase in the log.
+ if [ $? != 0 ]; then
+ return 0 # fine exit, can ignore log.
+ fi
+ fi
+ if [[ ! "$my_output" =~ .*fuse_output.* ]]; then
+ # this was a failure, so we need to see the log.
+ # fuse errors currently don't count since they are multifarious.
+ cat "$my_output"
+ fi
+ return 1
+}
+
+# runs an arbitrary command. if the command fails, then the output from it is
+# displayed and an error code is returned. otherwise the output is discarded.
+function run_any_command()
+{
+ local my_output="$(mktemp $TEST_TEMP/test_logs/out_run_any_cmd_$(date_string).XXXXXX)"
+ logged_command "$my_output" "${@}"
+ local retval=$?
+ # make the external version of the log file available. if we're multiplexing users,
+ # this will be clobbered constantly, which is why we used unique names above.
+ \cp -f "$my_output" "$TESTKIT_OUTPUT_FILE"
+ # then add the logging results to our huge mongo log of all actions.
+ echo >> "$CONGLOMERATED_TESTKIT_OUTPUT"
+ echo "$(readable_date_string) log from: $my_output" >> "$CONGLOMERATED_TESTKIT_OUTPUT"
+ echo "=======" >> "$CONGLOMERATED_TESTKIT_OUTPUT"
+ cat "$my_output" >> "$CONGLOMERATED_TESTKIT_OUTPUT"
+ echo "=======" >> "$CONGLOMERATED_TESTKIT_OUTPUT"
+ # and now remove the tiny individual log file so we don't litter.
+ \rm -f "$my_output"
+ return $retval
+}
+
+
+# returns 0 if there should be no problems using fuse, or non-zero if this platform
+# does not currently support fuse.
+function fuse_supported()
+{
+ local retval=0
+ local platform="$(uname -a | tr A-Z a-z)"
+ if [[ $platform =~ .*darwin.* ]]; then retval=1; fi
+ if [[ $platform =~ .*cygwin.* ]]; then retval=1; fi
+ if [[ $platform =~ .*ming.* ]]; then retval=1; fi
+ return $retval
+}
+
+# returns 0 if there should be no problems creating links in the file system,
+# or non-zero if this platform does not support symbolic links.
+function links_supported()
+{
+ local retval=0
+ local platform="$(uname -a | tr A-Z a-z)"
+ if [[ $platform =~ .*cygwin.* ]]; then retval=1; fi
+ if [[ $platform =~ .*ming.* ]]; then retval=1; fi
+ return $retval
+}
+
+# Create a test directory (in the first parameter) with $2 subdirectories,
+# each with $3 subdirs, each with $4 files.
+fan_out_directories()
+{
+ local dir_name="$1"; shift
+ local top_count=$1; shift
+ local mid_count=$1; shift
+ local file_count=$1; shift
+ mkdir "$dir_name"
+ for (( di=0 ; di<$top_count ; di++ )); do
+ mkdir "$dir_name"/sub$di
+ for (( dj=0 ; dj<$mid_count ; dj++ )); do
+ mkdir "$dir_name"/sub$di/sub$dj
+ for (( dk=0 ; dk<$file_count ; dk++ )); do
+ echo "file $di$dj$dk" > "$dir_name"/sub$di/sub$dj/file$di$dj$dk
+ done
+ done
+ done
+}
+##############
+
+# copied from open source codebase at: http://feistymeow.org with permission of author (chris koeritz),
+# assigned apache ASL license for this usage.
+ # locates a process given a search pattern to match in the process list.
+ # supports a single command line flag style parameter of "-u USERNAME";
+ # if the -u flag is found, a username is expected afterwards, and only the
+ # processes of that user are considered.
+ function psfind() {
+ local -a patterns=("${@}")
+#echo ====
+#echo patterns list is: "${patterns[@]}"
+#echo ====
+
+ local user_flag
+ if [ "${patterns[0]}" == "-u" ]; then
+ user_flag="-u ${patterns[1]}"
+#echo "found a -u parm and user=${patterns[1]}"
+ # void the two elements with that user flag so we don't use them as patterns.
+ unset patterns[0] patterns[1]
+ else
+ # select all users.
+ user_flag="-e"
+ fi
+
+ local PID_DUMP="$(mktemp "$TMP/zz_pidlist.XXXXXX")"
+ local -a PIDS_SOUGHT
+
+ if [ "$OS" == "Windows_NT" ]; then
+ # gets cygwin's (god awful) ps to show windoze processes also.
+ local EXTRA_DOZER_FLAGS="-W"
+ # pattern to use for peeling off the process numbers.
+ local pid_finder_pattern='s/ *\([0-9][0-9]*\) *.*$/\1/p'
+
+ else
+ # flags which clean up the output on unixes, which apparently cygwin
+ # doesn't count as. their crappy specialized ps doesn't support this.
+ local EXTRA_UNIX_FLAGS="-o pid,args"
+ # pattern to use for peeling off the process numbers.
+ local pid_finder_pattern='s/^[[:space:]]*\([0-9][0-9]*\).*$/\1/p'
+ fi
+
+ /bin/ps $EXTRA_DOZER_FLAGS $EXTRA_UNIX_FLAGS $user_flag | tail -n +2 >$PID_DUMP
+#echo ====
+#echo got all this stuff in the pid dump file:
+#cat $PID_DUMP
+#echo ====
+
+ # search for the pattern the user wants to find, and just pluck the process
+ # ids out of the results.
+ local i
+ for i in "${patterns[@]}"; do
+ PIDS_SOUGHT+=($(cat $PID_DUMP \
+ | grep -i "$i" \
+ | sed -n -e "$pid_finder_pattern"))
+ done
+#echo ====
+#echo pids sought list became:
+#echo "${PIDS_SOUGHT[@]}"
+#echo ====
+
+ if [ ${#PIDS_SOUGHT[*]} -ne 0 ]; then
+ # de-duplicate the process ids gathered across all the patterns.
+ local PIDS_SOUGHT2=$(printf -- '%s\n' ${PIDS_SOUGHT[@]} | sort | uniq)
+ PIDS_SOUGHT=()
+ # rebuild the array from the de-duplicated list. the previous version
+ # assigned the string scalar-style (PIDS_SOUGHT=${PIDS_SOUGHT2[*]}),
+ # which collapsed the result into a single array element.
+ PIDS_SOUGHT=($PIDS_SOUGHT2)
+ echo ${PIDS_SOUGHT[*]}
+ fi
+ /bin/rm $PID_DUMP
+ }
+
+
+#######
+
+# tests the supposed fuse mount that is passed in as the first parameter.
+# returns 0 when the mount shows up in the mount table and its directory is
+# listable; non-zero otherwise.
+function test_fuse_mount()
+{
+ local mount_point="$1"; shift
+ # reduce the mount point to its last two path components, joined by a dot.
+ # in the regex match below that dot acts as a wildcard, so it also matches
+ # the '/' separator as printed in the real mount table entry.
+ local trunc_mount="$(basename "$(dirname $mount_point)").$(basename "$mount_point")"
+
+ # NOTE(review): checkMount is not declared local, so it leaks into the
+ # caller's environment -- confirm nothing depends on that before changing.
+ checkMount="$(mount)"
+#echo "checkmount is: '$checkMount'"
+#echo "mount point seeking is: '$trunc_mount'"
+ local retval=1
+ if [[ "$checkMount" =~ .*$trunc_mount.* ]]; then
+#echo found the mount in the list
+ retval=0
+ fi
+ if [ $retval -ne 0 ]; then
+ echo "Finding mount point '$trunc_mount' failed."
+ return 1
+ fi
+ # final sanity check: the mounted directory must actually be readable.
+ ls -l "$mount_point" &>/dev/null
+ return $?
+}
+
+#######
+
+# also borrowed from feisty meow scripts... by consent of author (chris koeritz).
+
+ # is this the Mac OS X operating system?
+ # answers via exit status: returns 0 (true) when $OSTYPE mentions "darwin"
+ # (case-insensitively), non-zero otherwise.
+ function isMacOSX()
+ {
+ if [ ! -z "$(echo $OSTYPE | grep -i darwin)" ]; then
+ true
+ else
+ false
+ fi
+ }
+
+ # switches from a /X/path form to an X:/ form. this also processes cygwin paths.
+ # the result normally keeps forward slashes; when SERIOUS_SLASH_TREATMENT is
+ # non-empty, every slash is converted to a backslash instead.
+ function unix_to_dos_path() {
+ # we usually remove dos slashes in favor of forward slashes.
+ if [ ! -z "$SERIOUS_SLASH_TREATMENT" ]; then
+ # unless this flag is set, in which case we force dos slashes.
+ echo "$1" | sed -e 's/\\/\//g' | sed -e 's/\/cygdrive//' | sed -e 's/\/\([a-zA-Z]\)\/\(.*\)/\1:\/\2/' | sed -e 's/\//\\/g'
+ else
+ echo "$1" | sed -e 's/\\/\//g' | sed -e 's/\/cygdrive//' | sed -e 's/\/\([a-zA-Z]\)\/\(.*\)/\1:\/\2/'
+ fi
+ }
+
+ # switches from an X:/ form to an /X/path form.
+ # e.g. "C:/temp" (or "C:\temp") becomes "/C/temp".
+ function dos_to_unix_path() {
+ # we always remove dos slashes in favor of forward slashes.
+ echo "$1" | sed -e 's/\\/\//g' | sed -e 's/\([a-zA-Z]\):\/\(.*\)/\/\1\/\2/'
+ }
+
+#######
+
--- /dev/null
+#!/bin/bash
+
+# Processes the testkit config file to turn variables listed in the
+# file into exported variables in the environment.
+#
+# Author: Chris Koeritz
+
+##############
+
+# this processes the single file of input parameters at the test root and
+# turns it into a collection of environment variables. we then load all those
+# variables into the current environment. we will also automatically fill in
+# some important variables here that we'll use later. in some cases, we will
+# use the existing value if the variable is already set.
+define_and_export_variables()
+{
+ if [ -z "$TESTKIT_SENTINEL" ]; then echo Please run prepare_tools.sh before testing.; return 3; fi
+
+ # create our output folder so we can store logs and temporaries.
+ mkdir -p "$TEST_TEMP" &>/dev/null
+
+ # start writing the environment file.
+ echo > $TEST_TEMP/env_file
+
+ # turn each useful line in input file into an environment variable declaration.
+ while read line; do
+ # match lines that are comments or blank.
+ echo "$line" | grep -e '^[#;]' -e '^[ ]*$' &>/dev/null
+ # only export non-useless lines.
+ if [ $? != 0 ]; then
+ # detect array assignments (name=( ... )); bash drops array contents
+ # that pass through "export", so those lines are written verbatim.
+ echo "$line" | grep '[a-z0-9A-Z]=(' &>/dev/null
+ if [ $? == 0 ]; then
+ # it's an array variable so don't try to export it or bash loses it for us.
+ echo $line >> $TEST_TEMP/env_file
+ else
+ echo "export" $line >> $TEST_TEMP/env_file
+ fi
+ fi
+ done < "$TESTKIT_CFG_FILE"
+
+ # now run the environment file to add those settings to our environment.
+ chmod +x $TEST_TEMP/env_file
+ source $TEST_TEMP/env_file &>/dev/null
+}
+
--- /dev/null
+#!/bin/bash
+
+# Creates an archive from the test scripts.
+#
+# Author: Chris Koeritz
+
+export WORKDIR="$( \cd "$(\dirname "$0")" && \pwd )" # obtain the script's working directory.
+cd "$WORKDIR"
+export SHOWED_SETTINGS_ALREADY=true
+if [ -z "$TESTKIT_SENTINEL" ]; then
+ source ../prepare_tools.sh ../prepare_tools.sh
+fi
+source "$TESTKIT_ROOT/library/establish_environment.sh"
+
+pushd "$TESTKIT_ROOT/.." &>/dev/null
+justdir="$(basename "$TESTKIT_ROOT")"
+
+date_string="$(date +"%Y_%b_%e_%H%M" | sed -e 's/ //g')"
+
+EXCLUDES=(--exclude=".svn" --exclude="docs" --exclude="random*.dat" --exclude=gzip-1.2.4 --exclude=iozone3_397 --exclude="mount-*" --exclude="releases" --exclude="passwords.txt" --exclude="saved_deployment_info.txt" --exclude="generated_certs" --exclude="gridwide_certs" --exclude="testkit.config*" --exclude="inputfile.txt*")
+
+# quote the array expansion so each exclude pattern reaches tar verbatim;
+# the previous unquoted ${EXCLUDES[*]} let the shell attempt pathname
+# expansion on patterns like "random*.dat" in the current directory.
+tar -czf "$HOME/testkit_${date_string}.tar.gz" "$justdir" "${EXCLUDES[@]}"
+
+popd &>/dev/null
+
+
--- /dev/null
+#!/bin/bash
+
+# an unfinished idea for how to manage identifiers that are randomly
+# generated but are also uniquely identified for later use.
+#
+# Author: Chris Koeritz
+
+# map of id names to their generated random values.
+# NOTE(fix): this must be an associative array (declare -A). the original
+# indexed-array declaration evaluated every textual subscript as arithmetic
+# (yielding index 0), so all names collided on one slot. also, bash does not
+# actually export array contents to child processes, so the map is
+# effectively shell-local regardless.
+declare -A RANDOM_IDS=()
+
+# given a name for a randomly assigned number, this will generate a
+# new random value and add it to the RANDOM_IDS list. it is an error
+# to try to change an existing random id, because the id may already have
+# been used in file generation and so forth.
+function setup_random_id()
+{
+ local name="$1"; shift
+ if [ ! -z "${RANDOM_IDS[$name]}" ]; then
+ echo "FAILURE: trying to reassign already generated random id for '$name'"
+ return 1
+ fi
+ # three 15-bit random segments, e.g. "12345-6789-31000".
+ local new_id=$RANDOM-$RANDOM-$RANDOM
+ RANDOM_IDS[$name]=$new_id
+ return 0
+}
+
+# returns the random value assigned under the name. it is an error
+# to request one that does not exist yet; this implies the test has
+# not properly configured the random ids it will use.
+function get_random_id()
+{
+ local name="$1"; shift
+ if [ -z "${RANDOM_IDS[$name]}" ]; then
+ echo "FAILURE-to-find-$name"
+ return 1
+ fi
+ echo "${RANDOM_IDS[$name]}"
+ return 0
+}
+
+##
+## # test suite
+## setup_random_id "george"
+## if [ $? -ne 0 ]; then echo TEST failed to set george; fi
+## setup_random_id "lucy"
+## if [ $? -ne 0 ]; then echo TEST failed to set lucy; fi
+## echo "lucy's id is: $(get_random_id lucy)"
+## lucy_id=$(get_random_id lucy)
+## if [ $? -ne 0 ]; then echo TEST failed to get lucy; fi
+## echo "george's id is: $(get_random_id george)"
+## george_id=$(get_random_id george)
+## if [ $? -ne 0 ]; then echo TEST failed to get george; fi
+##
+## setup_random_id "george" &>/dev/null
+## if [ $? -eq 0 ]; then echo TEST failed to trap george being reset; fi
+## setup_random_id "lucy" &>/dev/null
+## if [ $? -eq 0 ]; then echo TEST failed to trap lucy being reset; fi
+## get_random_id tony
+## if [ $? -eq 0 ]; then echo TEST failed to trap non-existent id request; fi
+##
+
+
--- /dev/null
+#!/bin/bash
+
+# assorted useful ways of running executables.
+#
+# Author: Chris Koeritz
+
+##############
+
+# similar to timed_grid, but for arbitrary commands, and fits in with the timing
+# calculators.
+function timed_command()
+{
+ echo "[started timer $(readable_date_string)]"
+ $(\which time) -p -o "$TESTKIT_TIMING_FILE" $*
+ local retval=$?
+ echo "[stopped timer $(readable_date_string)]"
+ return $retval
+}
+
+##############
+
+# uses the timing file to determine how long the last activity took in
+# seconds and then prints out the value.
+# reads the first line of TESTKIT_TIMING_FILE (the "real" line emitted by
+# "time -p") and prints its second field.
+calculateTimeTaken()
+{
+ head -n 1 $TESTKIT_TIMING_FILE | awk '{print $2}'
+}
+
+# calculates the bandwidth for a transfer. this takes the elapsed time as
+# the first parameter and the size transferred as second parameter.
+# prints the computed rate (kilobytes per second, three decimal places).
+calculateBandwidth()
+{
+ local real_time="$1"; shift
+ local size="$1"; shift
+ # drop down to kilobytes rather than bytes.
+ size=$(echo $size / 1024 | $(\which bc) -l)
+
+#echo "time=$real_time size=$size"
+
+ # strip a minutes suffix ("m") and the fractional part to get whole seconds.
+ local total_sec="$(echo "$real_time" | awk -Fm '{print $1}'| awk -F. '{print $1}' )"
+ local bandwidth=""
+ # NOTE(review): this presumably matches an error string such as "Command
+ # exited with non-zero status" leaking into the timing value -- confirm.
+ if [[ "$total_sec" =~ .*exited.* ]]; then
+ echo "FAILURE: Test run failed in some way; setting total seconds to very large number."
+ total_sec="99999999"
+ fi
+ if [ $total_sec -eq 0 ]; then
+ # fake it when we get a math issue where something took less than a second.
+ total_sec=1
+ fi
+ bandwidth="$(echo "scale=3; $size / $total_sec" | $(\which bc) -l)"
+ echo "$bandwidth"
+}
+
+# a wrapper for calculateBandwidth that prints out a nicer form of the
+# bandwidth. it requires the same parameters as calculateBandwidth.
+# NOTE(review): the label reads "kbps" although calculateBandwidth works in
+# kilobytes per second -- confirm intended units before changing the label.
+showBandwidth()
+{
+ echo " Bandwidth $(calculateBandwidth $*) kbps"
+}
+
+##############
+
+# connects to a host as a particular user and executes a command there.
+# parameters: host, username (passwordless login expected), then the command
+# as the third through Nth arguments. returns non-zero when the remote side
+# reports failure.
+function run_command_remotely()
+{
+ if [ $# -lt 3 ]; then
+ echo This function connects to a remote host to run a command. It requires
+ echo at least three parameters: the host to connect to, the user name on
+ echo that host which supports passwordless logins, and the command to run.
+ echo The command to run is the third through Nth parameters.
+ # nothing to do without the required parameters; previously this fell
+ # through and tried to run anyway.
+ return 1
+ fi
+ local host="$1"; shift
+ local username="$1"; shift
+ # run our expecter to feed commands in, and the last one needs to be exit so we
+ # return to the original host.
+ local OUTFILE="$(mktemp $TMP/ssh_run.XXXXXX)"
+ expect $TESTKIT_ROOT/library/ssh_expecter.tcl "$username" "" "$host" "${@}" >"$OUTFILE"
+ # capture the expect exit status; this was previously assigned to a
+ # misspelled variable ("reval"), leaving retval unset below.
+ local retval=$?
+ # make sure we didn't experience a failure on the other side.
+ grep "YO_FAILURE" $OUTFILE &>/dev/null
+ if [ $? -eq 0 ]; then
+ echo Detected failure running command via ssh.
+ ((retval++))
+ fi
+
+#debugging
+echo ========== output from command ============
+cat "$OUTFILE"
+echo ===========================================
+
+ rm "$OUTFILE"
+ return $retval
+}
+
+#testing
+#run_command_remotely serene fred "ls /"
+
+##############
+
--- /dev/null
+#!/bin/bash
+
+# Tears down the demo users previously set up for multi-user testing.
+#
+# Author: Chris Koeritz
+
+export WORKDIR="$( \cd "$(\dirname "$0")" && \pwd )" # obtain the script's working directory.
+cd "$WORKDIR"
+
+if [ -z "$TESTKIT_SENTINEL" ]; then echo Please run prepare_tools.sh before testing.; exit 3; fi
+source "$TESTKIT_ROOT/library/establish_environment.sh"
+
+progname="$(basename "$0")"
+
+if [ $# -lt 1 ]; then
+ echo "$progname: This script needs a single parameter, which is the container"
+ echo "path to use for the authentication (e.g. $STS_LOC)"
+ exit 3
+fi
+
+ADMIN_CONTAINER=$1; shift
+echo "container location is $ADMIN_CONTAINER "
+
+# we test for ten users currently.
+user_count=10
+
+# login the right power user that can delete other user entries.
+# skipped entirely when NON_INTERACTIVE is set.
+testLoginAsAdmin()
+{
+ if [ -z "$NON_INTERACTIVE" ]; then
+ login_a_user admin
+ fi
+}
+
+# now that we're logged in appropriately, delete our corresponding set of users.
+# iterates MULTI_USER_LIST and runs the deletion script against the container
+# path supplied to this script.
+testDeleteUsers()
+{
+ local x
+ for (( x=0; x < ${#MULTI_USER_LIST[*]}; x++ )); do
+ username="${MULTI_USER_LIST[$x]}"
+ echo "Whacking user '$username'..."
+ # NOTE(review): passwd is captured but not used by the deletion call.
+ passwd="${MULTI_PASSWORD_LIST[$x]}"
+ # invoke the deletion script for this user. use the container path the
+ # script was given (ADMIN_CONTAINER); the previous reference to an
+ # undefined CONTAINERPATH ignored the required parameter entirely.
+ silent_grid script "local:'$TESTKIT_ROOT/library/delete_one_user.xml'" "$ADMIN_CONTAINER" "$(basename $username)" "$username" "$SUBMIT_GROUP"
+ assertEquals "Should delete user '$username' successfully" 0 $?
+ done
+}
+
+# make sure we don't leave them logged in as an administrator.
+# skipped when NON_INTERACTIVE is set.
+testLogoutAgain()
+{
+ if [ -z "$NON_INTERACTIVE" ]; then
+ silent_grid logout --all
+ assertEquals "Final logout of the grid" 0 $?
+ fi
+}
+
+# logs back in under the "normal" user role via login_a_user; skipped when
+# NON_INTERACTIVE is set.
+testLoginNormalUser()
+{
+ if [ -z "$NON_INTERACTIVE" ]; then
+ login_a_user normal
+ fi
+}
+
+# load and run shUnit2
+source "$SHUNIT_DIR/shunit2"
+
--- /dev/null
+# makefile for the testkit project: pulls in the shared build definitions and
+# registers the self-check target to run first.
+include variables.def
+
+PROJECT = testkit
+FIRST_TARGETS += run_testkit_check
+
+include rules.def
+
+# launches the main test driver script in a subordinate shell.
+run_testkit_check:
+ $(HIDESH) -c 'bash test_driver.sh'
--- /dev/null
+#!/bin/bash
+
+# Author: Chris Koeritz
+#
+# Note:
+# We do not want to "exit" from this file at all (nor from any file that it
+# invokes either), since this script is intended for use by the bash 'source'
+# command. If we exit, that will exit from the calling shell as well, which
+# torpedoes whatever one was doing in that shell.
+# There is a variable below called BADNESS that indicates when errors
+# occurred during processing, and if it's not empty at the end of the script
+# then we will consider this a failed run, and we will not set the test's
+# sentinel variable which other scripts check to see if the environment
+# was loaded properly.
+
+# make sure whether they have defined the top-level location for us.
+if [ ! -z "$1" ]; then
+ # first attempt is to use the first parameter, if one is provided. this should
+ # be an absolute path reference to this very file, from which we can deduce the
+ # starting directory.
+ GRITTY_TESTING_TOP_LEVEL="$( cd "$( dirname "$1" )" && \pwd )"
+ # for this case, they also don't need to be stranded in a new shell, because we
+ # assume they have sourced this file instead of bashing it.
+ NO_SUBSHELL=true
+fi
+if [ -z "$GRITTY_TESTING_TOP_LEVEL" ]; then
+ # otherwise, if they didn't explicitly set the top-level directory, we will
+ # do it using some unix trickery.
+ if [[ "$0" =~ .*bash ]]; then
+ echo "----"
+ echo "This script was not launched properly with 'source'. The script should"
+ echo "be started like this: source prepare_tools.sh prepare_tools.sh"
+ echo "The double entry is required for bash's source command to find the path."
+ BADNESS=true
+ fi
+ GRITTY_TESTING_TOP_LEVEL="$( cd "$( dirname "$0" 2>/dev/null )" && \pwd )"
+else
+ # we assume they are managing this script more closely and do not need (or want) a bash sub-shell.
+ NO_SUBSHELL=true
+fi
+GRITTY_TESTING_TOP_LEVEL="$(echo "$GRITTY_TESTING_TOP_LEVEL" | sed -e 's/\/cygdrive\/\(.\)/\1:/')"
+
+# the top-level directory for tests, i.e. the root of testing hierarchy.
+export TESTKIT_ROOT="$GRITTY_TESTING_TOP_LEVEL"
+
+# a bit of a dance to not pull in code too early...
+export TESTKIT_BOOTSTRAPPING=true
+source "$TESTKIT_ROOT/library/establish_environment.sh"
+unset TESTKIT_BOOTSTRAPPING
+# done with dancing, ready to pull in anything else from testkit.
+
+#source "$TESTKIT_ROOT/library/helper_methods.sh"
+
+# where the shunit library resides.
+export SHUNIT_DIR="$TESTKIT_ROOT/shunit"
+
+# establish the TMP variable if it's not already set.
+export TMP
+if [ -z "$TMP" ]; then
+ TMP="$HOME/tmp"
+ if [ ! -d "$TMP" ]; then mkdir "$TMP"; fi
+fi
+TMP="$(echo "$TMP" | sed -e 's/\/cygdrive\/\(.\)/\1:/')"
+if [ ! -d "$TMP" ]; then
+ echo "The TMP directory was set as $TMP but cannot be created or found."
+ echo "If there is a file at that location, please move or delete it."
+ # flag the failure instead of exiting: this script is intended to be
+ # sourced (see the note at the top), and an exit here would terminate the
+ # caller's shell. BADNESS prevents the sentinel from being set at the end.
+ BADNESS=true
+fi
+
+##############
+
+# commonly used environment variables...
+
+# TEST_TEMP is a folder where we can generate a collection of junk files.
+export TEST_TEMP="$TMP/testkit_logs_${USER}"
+if [ ! -d "$TEST_TEMP" ]; then
+ mkdir -p "$TEST_TEMP"
+fi
+
+# this variable points to the last output from a grid command.
+export TESTKIT_OUTPUT_FILE="$TEST_TEMP/testkit_output.log"
+export TESTKIT_TIMING_FILE="$TEST_TEMP/testkit_times.log"
+export CONGLOMERATED_TESTKIT_OUTPUT="$TEST_TEMP/full_testkit_output.log"
+
+##############
+
+# uncomment this to enable extra output.
+export DEBUGGING=true
+
+##############
+
+# turn this printout off in non-debugging mode or if the terminal setting
+# seems to indicate that we're running in a login environment (where any
+# echoing to standard out can screw up scp and sftp for that account).
+if [ ! -z "$DEBUGGING" -a -z "$SHOWED_SETTINGS_ALREADY" \
+ -a -z "$BADNESS" -a -z "$SILENT_RUNNING" -a "${TERM}" != "dumb" \
+ -a -z "$PBS_ENVIRONMENT" ]; then
+ echo "==========================================================="
+ echo "Testkit environment loaded."
+ var TESTKIT_ROOT TESTKIT_CFG_FILE TMP TEST_TEMP
+ echo "==========================================================="
+fi
+
+if [ ! -z "$(uname -a | grep -i darwin)" -a -z "$BADNESS" ]; then
+ # add in the mac binaries if this is darwin.
+ export PATH="$TESTKIT_ROOT/bin/macosx:$PATH"
+else
+ # no change, but we want to make sure sub-shells inherit the path.
+ export PATH="$PATH"
+fi
+
+if [ -z "$NO_SUBSHELL" -a -z "$BADNESS" ]; then
+ # at this point we go into a new interactive shell, so as to ensure the
+ # environment parameters stay right.
+ # the self-location code at the top doesn't work properly if this file is
+ # sourced into a current environment.
+ bash
+fi
+
+if [ ! -z "$BADNESS" ]; then
+ echo
+ echo "----"
+ # message updated to match the project's rename from xsede tools to testkit.
+ echo "There were errors in setting up the testkit tests--see above messages."
+ unset TESTKIT_SENTINEL TESTKIT_ROOT GRITTY_TESTING_TOP_LEVEL SHUNIT_DIR BADNESS
+else
+ # if things were successful, we can finally set our indicator for the scripts to check.
+ export TESTKIT_SENTINEL=initialized
+fi
+
--- /dev/null
+
+Testkit is a platform agnostic set of tools for running unit tests.
+
+Testkit relies on the ShUnit scripts to run the tests.
+We give our thanks to ShUnit.
+
+NOTE: this product is in active development and may not be suitable for
+any purpose yet.
+Please check back...
--- /dev/null
+#!/bin/bash
+
+# An example of using shunit2.
+#
+# Author: Chris Koeritz
+# license gnu gpl v3
+
+export THISDIR="$( \cd "$(\dirname "$0")" && /bin/pwd )" # obtain the script's working directory.
+if [[ ! "$0" =~ ^/.* ]]; then
+ # re-run the script with an absolute path if it didn't start out that way; otherwise,
+ # shunit is not happy with finding the script.
+ exec "$THISDIR/$(basename $0)" $*
+fi
+cd $THISDIR
+
+# one-time fixture setup; runs once before any test case.
+oneTimeSetUp()
+{
+ echo "into oneTimeSetUp."
+}
+
+# a trivial passing assertion followed by a deliberate long sleep.
+# NOTE(review): the 83 second sleep makes this example very slow; presumably
+# it exists to demonstrate timing behavior -- confirm before shortening.
+testOneThing()
+{
+ echo "got to primary test case."
+ zero=0
+ assertEquals "zero should be equal to 0" 0 $zero
+ echo "passed tautological test."
+ sleep_time=83
+ echo "$(date): now sleeping for $sleep_time seconds."
+ sleep $sleep_time
+ echo "$(date): woke up."
+}
+
+# one-time fixture teardown; runs once after all test cases.
+oneTimeTearDown()
+{
+ echo "into oneTimeTearDown."
+}
+
+# load and run shUnit2
+source $SHUNIT_PATH/shunit2
+
--- /dev/null
+#! /bin/sh
+# $Id: shunit2 335 2011-05-01 20:10:33Z kate.ward@forestent.com $
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008 Kate Ward. All Rights Reserved.
+# Released under the LGPL (GNU Lesser General Public License)
+#
+# shUnit2 -- Unit testing framework for Unix shell scripts.
+# http://code.google.com/p/shunit2/
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 is a xUnit based unit test framework for Bourne shell scripts. It is
+# based on the popular JUnit unit testing framework for Java.
+
+# return if shunit already loaded
+[ -n "${SHUNIT_VERSION:-}" ] && exit 0
+
+SHUNIT_VERSION='2.1.6'
+
+SHUNIT_TRUE=0
+SHUNIT_FALSE=1
+SHUNIT_ERROR=2
+
+# enable strict mode by default
+SHUNIT_STRICT=${SHUNIT_STRICT:-${SHUNIT_TRUE}}
+
+_shunit_warn() { echo "shunit2:WARN $@" >&2; }
+_shunit_error() { echo "shunit2:ERROR $@" >&2; }
+_shunit_fatal() { echo "shunit2:FATAL $@" >&2; exit ${SHUNIT_ERROR}; }
+
+# specific shell checks
+if [ -n "${ZSH_VERSION:-}" ]; then
+ setopt |grep "^shwordsplit$" >/dev/null
+ if [ $? -ne ${SHUNIT_TRUE} ]; then
+ _shunit_fatal 'zsh shwordsplit option is required for proper operation'
+ fi
+ if [ -z "${SHUNIT_PARENT:-}" ]; then
+ _shunit_fatal "zsh does not pass \$0 through properly. please declare \
+\"SHUNIT_PARENT=\$0\" before calling shUnit2"
+ fi
+fi
+
+#
+# constants
+#
+
+__SHUNIT_ASSERT_MSG_PREFIX='ASSERT:'
+__SHUNIT_MODE_SOURCED='sourced'
+__SHUNIT_MODE_STANDALONE='standalone'
+__SHUNIT_PARENT=${SHUNIT_PARENT:-$0}
+
+# set the constants readonly
+shunit_constants_=`set |grep '^__SHUNIT_' |cut -d= -f1`
+echo "${shunit_constants_}" |grep '^Binary file' >/dev/null && \
+ shunit_constants_=`set |grep -a '^__SHUNIT_' |cut -d= -f1`
+for shunit_constant_ in ${shunit_constants_}; do
+ shunit_ro_opts_=''
+ case ${ZSH_VERSION:-} in
+ '') ;; # this isn't zsh
+ [123].*) ;; # early versions (1.x, 2.x, 3.x)
+ *) shunit_ro_opts_='-g' ;; # all later versions. declare readonly globally
+ esac
+ readonly ${shunit_ro_opts_} ${shunit_constant_}
+done
+unset shunit_constant_ shunit_constants_ shunit_ro_opts_
+
+# variables
+__shunit_lineno='' # line number of executed test
+__shunit_mode=${__SHUNIT_MODE_SOURCED} # operating mode
+__shunit_reportGenerated=${SHUNIT_FALSE} # is report generated
+__shunit_script='' # filename of unittest script (standalone mode)
+__shunit_skip=${SHUNIT_FALSE} # is skipping enabled
+__shunit_suite='' # suite of tests to execute
+
+# counts of tests
+__shunit_testSuccess=${SHUNIT_TRUE}
+__shunit_testsTotal=0
+__shunit_testsPassed=0
+__shunit_testsFailed=0
+
+# counts of asserts
+__shunit_assertsTotal=0
+__shunit_assertsPassed=0
+__shunit_assertsFailed=0
+__shunit_assertsSkipped=0
+
+# macros
+_SHUNIT_LINENO_='eval __shunit_lineno=""; if [ "${1:-}" = "--lineno" ]; then [ -n "$2" ] && __shunit_lineno="[$2] "; shift 2; fi'
+
+#-----------------------------------------------------------------------------
+# assert functions
+#
+
+# Assert that two values are equal to one another.
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertEquals()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertEquals() requires two or three arguments; $# given"
+ _shunit_error "1: ${1:+$1} 2: ${2:+$2} 3: ${3:+$3}${4:+ 4: $4}"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ # remember the message text so passing asserts can be reported by name when
+ # DEBUGGING is set (appears to be a testkit-local addition to stock shunit2).
+ local assertion_name=""
+ if [ $# -eq 3 ]; then
+ assertion_name="$1"
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_expected_=$1
+ shunit_actual_=$2
+
+ shunit_return=${SHUNIT_TRUE}
+ if [ "${shunit_expected_}" = "${shunit_actual_}" ]; then
+ if [ ! -z "$DEBUGGING" -a ! -z "$assertion_name" ]; then
+ echo " OKAY: $assertion_name"
+ fi
+ _shunit_assertPass
+ else
+ failNotEquals "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}"
+ shunit_return=${SHUNIT_FALSE}
+ fi
+
+ unset shunit_message_ shunit_expected_ shunit_actual_
+ return ${shunit_return}
+}
+_ASSERT_EQUALS_='eval assertEquals --lineno "${LINENO:-}"'
+
+# Assert that two values are not equal to one another.
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertNotEquals()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertNotEquals() requires two or three arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ # named-assert reporting for DEBUGGING mode (testkit-local addition).
+ local assertion_name=""
+ if [ $# -eq 3 ]; then
+ assertion_name="$1"
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_expected_=$1
+ shunit_actual_=$2
+
+ shunit_return=${SHUNIT_TRUE}
+ if [ "${shunit_expected_}" != "${shunit_actual_}" ]; then
+ if [ ! -z "$DEBUGGING" -a ! -z "$assertion_name" ]; then
+ echo " OKAY: $assertion_name"
+ fi
+ _shunit_assertPass
+ else
+ # values were equal: report via failSame with the remaining args.
+ failSame "${shunit_message_}" "$@"
+ shunit_return=${SHUNIT_FALSE}
+ fi
+
+ unset shunit_message_ shunit_expected_ shunit_actual_
+ return ${shunit_return}
+}
+_ASSERT_NOT_EQUALS_='eval assertNotEquals --lineno "${LINENO:-}"'
+
+# Assert that a value is null (i.e. an empty string)
+#
+# Args:
+# message: string: failure message [optional]
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertNull()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -lt 1 -o $# -gt 2 ]; then
+ _shunit_error "assertNull() requires one or two arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if [ $# -eq 2 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ # NOTE(review): the value is interpolated into a single-quoted test
+ # expression, so a value containing a single quote would break the check.
+ assertTrue "${shunit_message_}" "[ -z '$1' ]"
+ shunit_return=$?
+
+ unset shunit_message_
+ return ${shunit_return}
+}
+_ASSERT_NULL_='eval assertNull --lineno "${LINENO:-}"'
+
+# Assert that a value is not null (i.e. a non-empty string)
+#
+# Args:
+# message: string: failure message [optional]
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertNotNull()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -gt 2 ]; then # allowing 0 arguments as $1 might actually be null
+ _shunit_error "assertNotNull() requires one or two arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if [ $# -eq 2 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ # escape the value before testing, then delegate the verdict to assertTrue
+ # via the exit status of "test -n".
+ shunit_actual_=`_shunit_escapeCharactersInString "${1:-}"`
+ test -n "${shunit_actual_}"
+ assertTrue "${shunit_message_}" $?
+ shunit_return=$?
+
+ unset shunit_actual_ shunit_message_
+ return ${shunit_return}
+}
+_ASSERT_NOT_NULL_='eval assertNotNull --lineno "${LINENO:-}"'
+
+# Assert that two values are the same (i.e. equal to one another).
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+# alias of assertEquals: "same" here means string equality, not identity.
+assertSame()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertSame() requires two or three arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ assertEquals "${shunit_message_}" "$1" "$2"
+ shunit_return=$?
+
+ unset shunit_message_
+ return ${shunit_return}
+}
+_ASSERT_SAME_='eval assertSame --lineno "${LINENO:-}"'
+
+# Assert that two values are not the same (i.e. not equal to one another).
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+# alias of assertNotEquals: "not same" here means string inequality.
+assertNotSame()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "assertNotSame() requires two or three arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_:-}$1"
+ shift
+ fi
+ assertNotEquals "${shunit_message_}" "$1" "$2"
+ shunit_return=$?
+
+ unset shunit_message_
+ return ${shunit_return}
+}
+_ASSERT_NOT_SAME_='eval assertNotSame --lineno "${LINENO:-}"'
+
+# Assert that a value or shell test condition is true.
+#
+# In shell, a value of 0 is true and a non-zero value is false. Any integer
+# value passed can thereby be tested.
+#
+# Shell supports much more complicated tests though, and a means to support
+# them was needed. As such, this function tests that conditions are true or
+# false through evaluation rather than just looking for a true or false.
+#
+# The following test will succeed:
+# assertTrue 0
+# assertTrue "[ 34 -gt 23 ]"
+# The following test will fail with a message:
+# assertTrue 123
+# assertTrue "test failed" "[ -r '/non/existant/file' ]"
+#
+# Args:
+# message: string: failure message [optional]
+# condition: string: integer value or shell conditional statement
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertTrue()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -gt 2 ]; then
+ # message fixed: upstream text read "takes one two arguments".
+ _shunit_error "assertTrue() requires one or two arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ # named-assert reporting for DEBUGGING mode (testkit-local addition).
+ local assertion_name=""
+ if [ $# -eq 2 ]; then
+ assertion_name="$1"
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_condition_=$1
+
+ # see if condition is an integer, i.e. a return value
+ shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'`
+ shunit_return=${SHUNIT_TRUE}
+ if [ -z "${shunit_condition_}" ]; then
+ # null condition
+ shunit_return=${SHUNIT_FALSE}
+ elif [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ]
+ then
+ # possible return value. treating 0 as true, and non-zero as false.
+ [ ${shunit_condition_} -ne 0 ] && shunit_return=${SHUNIT_FALSE}
+ else
+ # (hopefully) a condition
+ ( eval ${shunit_condition_} ) >/dev/null 2>&1
+ [ $? -ne 0 ] && shunit_return=${SHUNIT_FALSE}
+ fi
+
+ # record the test
+ if [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then
+ if [ ! -z "$DEBUGGING" -a ! -z "$assertion_name" ]; then
+ echo " OKAY: $assertion_name"
+ fi
+ _shunit_assertPass
+ else
+ _shunit_assertFail "${shunit_message_}"
+ fi
+
+ unset shunit_message_ shunit_condition_ shunit_match_
+ return ${shunit_return}
+}
+_ASSERT_TRUE_='eval assertTrue --lineno "${LINENO:-}"'
+
+# Assert that a value or shell test condition is false.
+#
+# In shell, a value of 0 is true and a non-zero value is false. Any integer
+# value passed can thereby be tested.
+#
+# Shell supports much more complicated tests though, and a means to support
+# them was needed. As such, this function tests that conditions are true or
+# false through evaluation rather than just looking for a true or false.
+#
+# The following test will succeed:
+# assertFalse 1
+# assertFalse "[ 'apples' = 'oranges' ]"
+# The following test will fail with a message:
+# assertFalse 0
+# assertFalse "test failed" "[ 1 -eq 1 -a 2 -eq 2 ]"
+#
+# Args:
+# message: string: failure message [optional]
+# condition: string: integer value or shell conditional statement
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+assertFalse()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -lt 1 -o $# -gt 2 ]; then
+ # message fixed: upstream text misspelled "requires" as "quires".
+ _shunit_error "assertFalse() requires one or two arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ # named-assert reporting for DEBUGGING mode (testkit-local addition).
+ local assertion_name=""
+ if [ $# -eq 2 ]; then
+ assertion_name="$1"
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_condition_=$1
+
+ # see if condition is an integer, i.e. a return value
+ shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'`
+ shunit_return=${SHUNIT_TRUE}
+ if [ -z "${shunit_condition_}" ]; then
+ # null condition
+ shunit_return=${SHUNIT_FALSE}
+ elif [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ]
+ then
+ # possible return value. treating 0 as true, and non-zero as false.
+ [ ${shunit_condition_} -eq 0 ] && shunit_return=${SHUNIT_FALSE}
+ else
+ # (hopefully) a condition
+ ( eval ${shunit_condition_} ) >/dev/null 2>&1
+ [ $? -eq 0 ] && shunit_return=${SHUNIT_FALSE}
+ fi
+
+ # record the test
+ if [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then
+ if [ ! -z "$DEBUGGING" -a ! -z "$assertion_name" ]; then
+ echo " OKAY: $assertion_name"
+ fi
+ _shunit_assertPass
+ else
+ _shunit_assertFail "${shunit_message_}"
+ fi
+
+ unset shunit_message_ shunit_condition_ shunit_match_
+ return ${shunit_return}
+}
+_ASSERT_FALSE_='eval assertFalse --lineno "${LINENO:-}"'
+
+#-----------------------------------------------------------------------------
+# failure functions
+#
+
+# Records a test failure.
+#
+# Args:
+# message: string: failure message [optional]
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+# records an unconditional test failure with an optional message.
+fail()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -gt 1 ]; then
+ _shunit_error "fail() requires zero or one arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if [ $# -eq 1 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+
+ _shunit_assertFail "${shunit_message_}"
+
+ unset shunit_message_
+ return ${SHUNIT_FALSE}
+}
+_FAIL_='eval fail --lineno "${LINENO:-}"'
+
+# Records a test failure, stating two values were not equal.
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+failNotEquals()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -lt 2 -o $# -gt 3 ]; then
+ # message fixed to match the guard above (upstream said "one or two").
+ _shunit_error "failNotEquals() requires two or three arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+ shunit_expected_=$1
+ shunit_actual_=$2
+
+ _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected:<${shunit_expected_}> but was:<${shunit_actual_}>"
+
+ unset shunit_message_ shunit_expected_ shunit_actual_
+ return ${SHUNIT_FALSE}
+}
+_FAIL_NOT_EQUALS_='eval failNotEquals --lineno "${LINENO:-}"'
+
+# Records a test failure, stating two values should have been the same.
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+failSame()
+{
+ ${_SHUNIT_LINENO_}
+ if [ $# -lt 2 -o $# -gt 3 ]; then
+ _shunit_error "failSame() requires two or three arguments; $# given"
+ return ${SHUNIT_ERROR}
+ fi
+ _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+ shunit_message_=${__shunit_lineno}
+ if [ $# -eq 3 ]; then
+ shunit_message_="${shunit_message_}$1"
+ shift
+ fi
+
+ _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected not same"
+
+ unset shunit_message_
+ return ${SHUNIT_FALSE}
+}
+_FAIL_SAME_='eval failSame --lineno "${LINENO:-}"'
+
+# Records a test failure, stating two values were not equal.
+#
+# This is functionally equivalent to calling failNotEquals().
+#
+# Args:
+# message: string: failure message [optional]
+# expected: string: expected value
+# actual: string: actual value
+# Returns:
+# integer: success (TRUE/FALSE/ERROR constant)
+# Records a test failure, stating two values were not the same.
+#
+# Functionally equivalent to failNotEquals(); accepts (expected, actual) or
+# (message, expected, actual). Returns SHUNIT_FALSE, or SHUNIT_ERROR on bad
+# usage.
+failNotSame()
+{
+  ${_SHUNIT_LINENO_}
+  # Fixed: the error message previously named the wrong function
+  # ("failNotEquals()") and the wrong arity ("one or two").
+  if [ $# -lt 2 -o $# -gt 3 ]; then
+    _shunit_error "failNotSame() requires two or three arguments; $# given"
+    return ${SHUNIT_ERROR}
+  fi
+  _shunit_shouldSkip && return ${SHUNIT_TRUE}
+
+  shunit_message_=${__shunit_lineno}
+  if [ $# -eq 3 ]; then
+    shunit_message_="${shunit_message_}$1"
+    shift
+  fi
+  # Delegate to failNotEquals for the actual message formatting/recording.
+  failNotEquals "${shunit_message_}" "$1" "$2"
+  shunit_return=$?
+
+  unset shunit_message_
+  return ${shunit_return}
+}
+_FAIL_NOT_SAME_='eval failNotSame --lineno "${LINENO:-}"'
+
+#-----------------------------------------------------------------------------
+# skipping functions
+#
+
+# Force remaining assert and fail functions to be "skipped".
+#
+# This function forces the remaining assert and fail functions to be "skipped",
+# i.e. they will have no effect. Each function skipped will be recorded so that
+# the total of asserts and fails will not be altered.
+#
+# Args:
+# None
+startSkipping()
+{
+ __shunit_skip=${SHUNIT_TRUE}
+}
+
+# Resume the normal recording behavior of assert and fail calls.
+#
+# Args:
+# None
+endSkipping()
+{
+ __shunit_skip=${SHUNIT_FALSE}
+}
+
+# Returns the state of assert and fail call skipping.
+#
+# Args:
+# None
+# Returns:
+# boolean: (TRUE/FALSE constant)
+isSkipping()
+{
+ return ${__shunit_skip}
+}
+
+#-----------------------------------------------------------------------------
+# suite functions
+#
+
+# Stub. This function should contains all unit test calls to be made.
+#
+# DEPRECATED (as of 2.1.0)
+#
+# This function can be optionally overridden by the user in their test suite.
+#
+# If this function exists, it will be called when shunit2 is sourced. If it
+# does not exist, shunit2 will search the parent script for all functions
+# beginning with the word 'test', and they will be added dynamically to the
+# test suite.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#suite() { :; } # DO NOT UNCOMMENT THIS FUNCTION
+
+# Adds a function name to the list of tests schedule for execution.
+#
+# This function should only be called from within the suite() function.
+#
+# Args:
+# function: string: name of a function to add to current unit test suite
+suite_addTest()
+{
+ shunit_func_=${1:-}
+
+ __shunit_suite="${__shunit_suite:+${__shunit_suite} }${shunit_func_}"
+ __shunit_testsTotal=`expr ${__shunit_testsTotal} + 1`
+
+ unset shunit_func_
+}
+
+# Stub. This function will be called once before any tests are run.
+#
+# Common one-time environment preparation tasks shared by all tests can be
+# defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#oneTimeSetUp() { :; } # DO NOT UNCOMMENT THIS FUNCTION
+
+# Stub. This function will be called once after all tests are finished.
+#
+# Common one-time environment cleanup tasks shared by all tests can be defined
+# here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#oneTimeTearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION
+
+# Stub. This function will be called before each test is run.
+#
+# Common environment preparation tasks shared by all tests can be defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#setUp() { :; }
+
+# Note: see _shunit_mktempFunc() for actual implementation
+# Stub. This function will be called after each test is run.
+#
+# Common environment cleanup tasks shared by all tests can be defined here.
+#
+# This function should be overridden by the user in their unit test suite.
+# Note: see _shunit_mktempFunc() for actual implementation
+#
+# Args:
+# None
+#tearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION
+
+#------------------------------------------------------------------------------
+# internal shUnit2 functions
+#
+
+# Create a temporary directory to store various run-time files in.
+#
+# This function is a cross-platform temporary directory creation tool. Not all
+# OSes have the mktemp function, so one is included here.
+#
+# Args:
+# None
+# Outputs:
+# string: the temporary directory that was created
+_shunit_mktempDir()
+{
+ # try the standard mktemp function
+ ( exec mktemp -dqt shunit.XXXXXX 2>/dev/null ) && return
+
+ # the standard mktemp didn't work. doing our own.
+ if [ -r '/dev/urandom' -a -x '/usr/bin/od' ]; then
+ _shunit_random_=`/usr/bin/od -vAn -N4 -tx4 </dev/urandom \
+ |sed 's/^[^0-9a-f]*//'`
+ elif [ -n "${RANDOM:-}" ]; then
+ # $RANDOM works
+ _shunit_random_=${RANDOM}${RANDOM}${RANDOM}$$
+ else
+ # $RANDOM doesn't work
+ _shunit_date_=`date '+%Y%m%d%H%M%S'`
+ _shunit_random_=`expr ${_shunit_date_} / $$`
+ fi
+
+ _shunit_tmpDir_="${TMPDIR:-/tmp}/shunit.${_shunit_random_}"
+ ( umask 077 && mkdir "${_shunit_tmpDir_}" ) || \
+ _shunit_fatal 'could not create temporary directory! exiting'
+
+ echo ${_shunit_tmpDir_}
+ unset _shunit_date_ _shunit_random_ _shunit_tmpDir_
+}
+
+# This function is here to work around issues in Cygwin.
+#
+# Args:
+# None
+_shunit_mktempFunc()
+{
+ for _shunit_func_ in oneTimeSetUp oneTimeTearDown setUp tearDown suite noexec
+ do
+ _shunit_file_="${__shunit_tmpDir}/${_shunit_func_}"
+ cat <<EOF >"${_shunit_file_}"
+#! /bin/sh
+exit ${SHUNIT_TRUE}
+EOF
+ chmod +x "${_shunit_file_}"
+ done
+
+ unset _shunit_file_
+}
+
+# Final cleanup function to leave things as we found them.
+#
+# Besides removing the temporary directory, this function is in charge of the
+# final exit code of the unit test. The exit code is based on how the script
+# was ended (e.g. normal exit, or via Ctrl-C).
+#
+# Args:
+# name: string: name of the trap called (specified when trap defined)
+_shunit_cleanup()
+{
+ _shunit_name_=$1
+
+ case ${_shunit_name_} in
+ EXIT) _shunit_signal_=0 ;;
+ INT) _shunit_signal_=2 ;;
+ TERM) _shunit_signal_=15 ;;
+ *)
+ _shunit_warn "unrecognized trap value (${_shunit_name_})"
+ _shunit_signal_=0
+ ;;
+ esac
+
+ # do our work
+ rm -fr "${__shunit_tmpDir}"
+
+ # exit for all non-EXIT signals
+ if [ ${_shunit_name_} != 'EXIT' ]; then
+ _shunit_warn "trapped and now handling the (${_shunit_name_}) signal"
+ # disable EXIT trap
+ trap 0
+ # add 128 to signal and exit
+ exit `expr ${_shunit_signal_} + 128`
+ elif [ ${__shunit_reportGenerated} -eq ${SHUNIT_FALSE} ] ; then
+ _shunit_assertFail 'Unknown failure encountered running a test'
+ _shunit_generateReport
+ exit ${SHUNIT_ERROR}
+ fi
+
+ unset _shunit_name_ _shunit_signal_
+}
+
+# The actual running of the tests happens here.
+#
+# Args:
+# None
+_shunit_execSuite()
+{
+ for _shunit_test_ in ${__shunit_suite}; do
+ __shunit_testSuccess=${SHUNIT_TRUE}
+
+ # disable skipping
+ endSkipping
+
+ # execute the per-test setup function
+ setUp
+
+ # execute the test
+ echo
+ echo
+ echo "-------------------------------------------------------"
+ echo "$(date): [${_shunit_test_}]"
+ eval ${_shunit_test_}
+
+ # execute the per-test tear-down function
+ tearDown
+
+ # update stats
+ if [ ${__shunit_testSuccess} -eq ${SHUNIT_TRUE} ]; then
+ __shunit_testsPassed=`expr ${__shunit_testsPassed} + 1`
+ else
+ __shunit_testsFailed=`expr ${__shunit_testsFailed} + 1`
+ fi
+ done
+
+ unset _shunit_test_
+}
+
+# Generates the user friendly report with appropriate OKAY/FAILED message.
+#
+# Args:
+# None
+# Output:
+# string: the report of successful and failed tests, as well as totals.
+_shunit_generateReport()
+{
+  _shunit_ok_=${SHUNIT_TRUE}
+
+  # if no exit code was provided one, determine an appropriate one
+  [ ${__shunit_testsFailed} -gt 0 \
+      -o ${__shunit_testSuccess} -eq ${SHUNIT_FALSE} ] \
+      && _shunit_ok_=${SHUNIT_FALSE}
+
+  echo
+  # Singular/plural phrasing for the test count.
+  if [ ${__shunit_testsTotal} -eq 1 ]; then
+    echo "$(date): Ran ${__shunit_testsTotal} test."
+  else
+    echo "$(date): Ran ${__shunit_testsTotal} tests."
+  fi
+
+  _shunit_failures_=''
+  _shunit_skipped_=''
+  [ ${__shunit_assertsFailed} -gt 0 ] \
+      && _shunit_failures_="failures=${__shunit_assertsFailed}"
+  [ ${__shunit_assertsSkipped} -gt 0 ] \
+      && _shunit_skipped_="skipped=${__shunit_assertsSkipped}"
+
+  if [ ${_shunit_ok_} -eq ${SHUNIT_TRUE} ]; then
+    _shunit_msg_="$(basename $0) PASSED 100% OKAY"
+    [ -n "${_shunit_skipped_}" ] \
+        && _shunit_msg_="${_shunit_msg_} (${_shunit_skipped_})"
+  else
+    _shunit_msg_="$(basename $0) FAILED (${_shunit_failures_}"
+    [ -n "${_shunit_skipped_}" ] \
+        && _shunit_msg_="${_shunit_msg_},${_shunit_skipped_}"
+    _shunit_msg_="${_shunit_msg_})"
+  fi
+
+  # Duration reporting added by this port. Assumes suite_start was set in
+  # main before the suite ran; suite_end may be missing if the suite aborted
+  # early (e.g. via the cleanup trap), hence the fallback below.
+  if [ -z "$suite_end" ]; then
+    # make sure we don't get confused, since suite aborted early.
+    suite_end=$(date +%s)
+  fi
+  # we keep duration_s for later printing.
+  duration_s=$(($suite_end - $suite_start))
+  # calculate full minutes count based on seconds.
+  duration_m=$(($duration_s / 60))
+  # calculate how many hours that is.
+  duration_h=$(($duration_m / 60))
+  # fix the minutes since we chopped those hours out.
+  duration_m=$(($duration_m - $duration_h * 60))
+  # zero-pad minutes and hours for hh:mm display.
+  if [ $duration_m -lt 10 ]; then duration_m="0$duration_m"; fi
+  if [ $duration_h -lt 10 ]; then duration_h="0$duration_h"; fi
+  echo "Test suite ran for $duration_s total seconds [$duration_h:$duration_m hh:mm]"
+  echo
+  echo "$(date): ${_shunit_msg_}"
+  # Mark the report as generated so the EXIT trap does not add a spurious
+  # 'Unknown failure' (see _shunit_cleanup).
+  __shunit_reportGenerated=${SHUNIT_TRUE}
+
+  unset _shunit_failures_ _shunit_msg_ _shunit_ok_ _shunit_skipped_
+}
+
+# Test for whether a function should be skipped.
+#
+# Args:
+# None
+# Returns:
+# boolean: whether the test should be skipped (TRUE/FALSE constant)
+_shunit_shouldSkip()
+{
+  # Not in skip mode: return FALSE so the caller proceeds with the real
+  # assert/fail logic.
+  [ ${__shunit_skip} -eq ${SHUNIT_FALSE} ] && return ${SHUNIT_FALSE}
+  # In skip mode: record the skip; its (TRUE) status becomes ours.
+  _shunit_assertSkip
+}
+
+# Records a successful test.
+#
+# Args:
+# None
+_shunit_assertPass()
+{
+ __shunit_assertsPassed=`expr ${__shunit_assertsPassed} + 1`
+ __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1`
+}
+
+# Records a test failure.
+#
+# Args:
+# message: string: failure message to provide user
+_shunit_assertFail()
+{
+ _shunit_msg_=$1
+
+ __shunit_testSuccess=${SHUNIT_FALSE}
+ __shunit_assertsFailed=`expr ${__shunit_assertsFailed} + 1`
+ __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1`
+ echo "${__SHUNIT_ASSERT_MSG_PREFIX}${_shunit_msg_}"
+
+ unset _shunit_msg_
+}
+
+# Records a skipped test.
+#
+# Args:
+# None
+_shunit_assertSkip()
+{
+ __shunit_assertsSkipped=`expr ${__shunit_assertsSkipped} + 1`
+ __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1`
+}
+
+# Prepare a script filename for sourcing.
+#
+# Args:
+# script: string: path to a script to source
+# Returns:
+# string: filename prefixed with ./ (if necessary)
+_shunit_prepForSourcing()
+{
+ _shunit_script_=$1
+ case "${_shunit_script_}" in
+ /*|./*) echo "${_shunit_script_}" ;;
+ *) echo "./${_shunit_script_}" ;;
+ esac
+ unset _shunit_script_
+}
+
+# Escape a character in a string.
+#
+# Args:
+# c: string: unescaped character
+# s: string: to escape character in
+# Returns:
+# string: with escaped character(s)
+_shunit_escapeCharInStr()
+{
+  [ -n "$2" ] || return  # no point in doing work on an empty string
+
+  # Note: using shorter variable names to prevent conflicts with
+  # _shunit_escapeCharactersInString().
+  _shunit_c_=$1
+  _shunit_s_=$2
+
+
+  # escape the character: prepend a backslash to every occurrence of
+  # ${_shunit_c_} in the string. The leading '\' spliced before the
+  # character in the sed pattern keeps regex metacharacters (e.g. '$')
+  # matching literally.
+  echo ''${_shunit_s_}'' |sed 's/\'${_shunit_c_}'/\\\'${_shunit_c_}'/g'
+
+  unset _shunit_c_ _shunit_s_
+}
+
+# Escape a character in a string.
+#
+# Args:
+# str: string: to escape characters in
+# Returns:
+# string: with escaped character(s)
+_shunit_escapeCharactersInString()
+{
+ [ -n "$1" ] || return # no point in doing work on an empty string
+
+ _shunit_str_=$1
+
+ # Note: using longer variable names to prevent conflicts with
+ # _shunit_escapeCharInStr().
+ for _shunit_char_ in '"' '$' "'" '`'; do
+ _shunit_str_=`_shunit_escapeCharInStr "${_shunit_char_}" "${_shunit_str_}"`
+ done
+
+ echo "${_shunit_str_}"
+ unset _shunit_char_ _shunit_str_
+}
+
+# Extract list of functions to run tests against.
+#
+# Args:
+# script: string: name of script to extract functions from
+# Returns:
+# string: of function names
+_shunit_extractTestFunctions()
+{
+  _shunit_script_=$1
+
+  # extract the lines with test function names, strip of anything besides the
+  # function name, and output everything on a single line.
+  # The regex matches optional leading spaces, an optional 'function' keyword,
+  # and a name starting with 'test', followed by '()'. The sed expression then
+  # drops leading punctuation and the keyword, keeping just the name; xargs
+  # joins all names onto one space-separated line.
+  _shunit_regex_='^[ ]*(function )*test[A-Za-z0-9_]* *\(\)'
+  egrep "${_shunit_regex_}" "${_shunit_script_}" \
+  |sed 's/^[^A-Za-z0-9_]*//;s/^function //;s/\([A-Za-z0-9_]*\).*/\1/g' \
+  |xargs
+
+  unset _shunit_regex_ _shunit_script_
+}
+
+#------------------------------------------------------------------------------
+# main
+#
+
+# determine the operating mode
+if [ $# -eq 0 ]; then
+ __shunit_script=${__SHUNIT_PARENT}
+ __shunit_mode=${__SHUNIT_MODE_SOURCED}
+else
+ __shunit_script=$1
+ [ -r "${__shunit_script}" ] || \
+ _shunit_fatal "unable to read from ${__shunit_script}"
+ __shunit_mode=${__SHUNIT_MODE_STANDALONE}
+fi
+
+# create a temporary storage location
+__shunit_tmpDir=`_shunit_mktempDir`
+
+# provide a public temporary directory for unit test scripts
+# TODO(kward): document this
+SHUNIT_TMPDIR="${__shunit_tmpDir}/tmp"
+mkdir "${SHUNIT_TMPDIR}"
+
+# setup traps to clean up after ourselves
+trap '_shunit_cleanup EXIT' 0
+trap '_shunit_cleanup INT' 2
+trap '_shunit_cleanup TERM' 15
+
+# create phantom functions to work around issues with Cygwin
+_shunit_mktempFunc
+PATH="${__shunit_tmpDir}:${PATH}"
+
+# make sure phantom functions are executable. this will bite if /tmp (or the
+# current $TMPDIR) points to a path on a partition that was mounted with the
+# 'noexec' option. the noexec command was created with _shunit_mktempFunc().
+noexec 2>/dev/null || _shunit_fatal \
+ 'please declare TMPDIR with path on partition with exec permission'
+
+# we must manually source the tests in standalone mode
+if [ "${__shunit_mode}" = "${__SHUNIT_MODE_STANDALONE}" ]; then
+ . "`_shunit_prepForSourcing \"${__shunit_script}\"`"
+fi
+
+# record when the tests started running.
+suite_start=$(date +%s)
+
+# execute the oneTimeSetUp function (if it exists)
+oneTimeSetUp
+
+# execute the suite function defined in the parent test script
+# deprecated as of 2.1.0
+suite
+
+# if no suite function was defined, dynamically build a list of functions
+if [ -z "${__shunit_suite}" ]; then
+ shunit_funcs_=`_shunit_extractTestFunctions "${__shunit_script}"`
+ for shunit_func_ in ${shunit_funcs_}; do
+ suite_addTest ${shunit_func_}
+ done
+fi
+unset shunit_func_ shunit_funcs_
+
+# execute the tests
+_shunit_execSuite
+
+# execute the oneTimeTearDown function (if it exists)
+oneTimeTearDown
+
+suite_end=$(date +%s)
+
+# generate the report
+_shunit_generateReport
+
+# that's it folks
+[ ${__shunit_testsFailed} -eq 0 ]
+exit $?
--- /dev/null
+#! /bin/sh
+# $Id: shunit2_test.sh 322 2011-04-24 00:09:45Z kate.ward@forestent.com $
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008 Kate Ward. All Rights Reserved.
+# Released under the LGPL (GNU Lesser General Public License)
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 unit test suite runner.
+#
+# This script runs all the unit tests that can be found, and generates a nice
+# report of the tests.
+
+MY_NAME=`basename $0`
+MY_PATH=`dirname $0`
+
+PREFIX='shunit2_test_'
+SHELLS='/bin/sh /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh'
+TESTS=''
+for test in ${PREFIX}[a-z]*.sh; do
+ TESTS="${TESTS} ${test}"
+done
+
+# load common unit test functions
+. ../lib/versions
+. ./shunit2_test_helpers
+
+usage()
+{
+ echo "usage: ${MY_NAME} [-e key=val ...] [-s shell(s)] [-t test(s)]"
+}
+
+env=''
+
+# process command line flags
+while getopts 'e:hs:t:' opt; do
+ case ${opt} in
+ e) # set an environment variable
+ key=`expr "${OPTARG}" : '\([^=]*\)='`
+ val=`expr "${OPTARG}" : '[^=]*=\(.*\)'`
+ if [ -z "${key}" -o -z "${val}" ]; then
+ usage
+ exit 1
+ fi
+ eval "${key}='${val}'"
+ export ${key}
+ env="${env:+${env} }${key}"
+ ;;
+ h) usage; exit 0 ;; # output help
+ s) shells=${OPTARG} ;; # list of shells to run
+ t) tests=${OPTARG} ;; # list of tests to run
+ *) usage; exit 1 ;;
+ esac
+done
+shift `expr ${OPTIND} - 1`
+
+# fill shells and/or tests
+shells=${shells:-${SHELLS}}
+tests=${tests:-${TESTS}}
+
+# error checking
+if [ -z "${tests}" ]; then
+ th_error 'no tests found to run; exiting'
+ exit 1
+fi
+
+cat <<EOF
+#------------------------------------------------------------------------------
+# System data
+#
+
+# test run info
+shells: ${shells}
+tests: ${tests}
+EOF
+for key in ${env}; do
+ eval "echo \"${key}=\$${key}\""
+done
+echo
+
+# output system data
+echo "# system info"
+echo "$ date"
+date
+echo
+
+echo "$ uname -mprsv"
+uname -mprsv
+
+#
+# run tests
+#
+
+for shell in ${shells}; do
+ echo
+
+ # check for existance of shell
+ if [ ! -x ${shell} ]; then
+ th_warn "unable to run tests with the ${shell} shell"
+ continue
+ fi
+
+ cat <<EOF
+
+#------------------------------------------------------------------------------
+# Running the test suite with ${shell}
+#
+EOF
+
+ SHUNIT_SHELL=${shell} # pass shell onto tests
+ shell_name=`basename ${shell}`
+ shell_version=`versions_shellVersion "${shell}"`
+
+ echo "shell name: ${shell_name}"
+ echo "shell version: ${shell_version}"
+
+ # execute the tests
+ for suite in ${tests}; do
+ suiteName=`expr "${suite}" : "${PREFIX}\(.*\).sh"`
+ echo
+ echo "--- Executing the '${suiteName}' test suite ---"
+ ( exec ${shell} ./${suite} 2>&1; )
+ done
+done
--- /dev/null
+#! /bin/sh
+# $Id: shunit2_test_asserts.sh 312 2011-03-14 22:41:29Z kate.ward@forestent.com $
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008 Kate Ward. All Rights Reserved.
+# Released under the LGPL (GNU Lesser General Public License)
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 unit test for assert functions
+
+# load test helpers
+. ./shunit2_test_helpers
+
+#------------------------------------------------------------------------------
+# suite tests
+#
+
+commonEqualsSame()
+{
+ fn=$1
+
+ ( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'equal' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'equal; with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} 'abc def' 'abc def' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'equal with spaces' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not equal' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'null values' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+commonNotEqualsSame()
+{
+ fn=$1
+
+ ( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not same' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} "${MSG}" 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not same, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertEquals()
+{
+ commonEqualsSame 'assertEquals'
+}
+
+testAssertNotEquals()
+{
+ commonNotEqualsSame 'assertNotEquals'
+}
+
+testAssertSame()
+{
+ commonEqualsSame 'assertSame'
+}
+
+testAssertNotSame()
+{
+ commonNotEqualsSame 'assertNotSame'
+}
+
+testAssertNull()
+{
+ ( assertNull '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNull "${MSG}" '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'null, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNull 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNull >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertNotNull()
+{
+ ( assertNotNull 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull "${MSG}" 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull 'x"b' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with double-quote' $? \
+ "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull "x'b" >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with single-quote' $? \
+ "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull 'x$b' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with dollar' $? \
+ "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull 'x`b' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'not null, with backtick' $? \
+ "${stdoutF}" "${stderrF}"
+
+ ( assertNotNull '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
+
+ # there is no test for too few arguments as $1 might actually be null
+
+ ( assertNotNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testAssertTrue()
+{
+ ( assertTrue 0 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'true' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue "${MSG}" 0 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'true, with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertTrueWithNoOutput 'true condition' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue 1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'false' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'false condition' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( assertTrue arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+# Exercises assertFalse: exit-status inputs, condition-string inputs, the
+# null-argument case, and argument-count error handling.
+testAssertFalse()
+{
+  ( assertFalse 1 >"${stdoutF}" 2>"${stderrF}" )
+  th_assertTrueWithNoOutput 'false' $? "${stdoutF}" "${stderrF}"
+
+  ( assertFalse "${MSG}" 1 >"${stdoutF}" 2>"${stderrF}" )
+  th_assertTrueWithNoOutput 'false, with msg' $? "${stdoutF}" "${stderrF}"
+
+  ( assertFalse '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
+  th_assertTrueWithNoOutput 'false condition' $? "${stdoutF}" "${stderrF}"
+
+  ( assertFalse 0 >"${stdoutF}" 2>"${stderrF}" )
+  th_assertFalseWithOutput 'true' $? "${stdoutF}" "${stderrF}"
+
+  ( assertFalse '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
+  th_assertFalseWithOutput 'true condition' $? "${stdoutF}" "${stderrF}"
+
+  # Fixed: this subtest previously reused the 'true condition' label from the
+  # case above, making failure reports ambiguous; label the null case as such.
+  ( assertFalse '' >"${stdoutF}" 2>"${stderrF}" )
+  th_assertFalseWithOutput 'null value' $? "${stdoutF}" "${stderrF}"
+
+  ( assertFalse >"${stdoutF}" 2>"${stderrF}" )
+  th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+  ( assertFalse arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
+  th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+#------------------------------------------------------------------------------
+# suite functions
+#
+
+oneTimeSetUp()
+{
+ tmpDir="${__shunit_tmpDir}/output"
+ mkdir "${tmpDir}"
+ stdoutF="${tmpDir}/stdout"
+ stderrF="${tmpDir}/stderr"
+
+ MSG='This is a test message'
+}
+
+# load and run shUnit2
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. ${TH_SHUNIT}
--- /dev/null
+#! /bin/sh
+# $Id: shunit2_test_failures.sh 286 2008-11-24 21:42:34Z kate.ward@forestent.com $
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008 Kate Ward. All Rights Reserved.
+# Released under the LGPL (GNU Lesser General Public License)
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 unit test for failure functions
+
+# load common unit-test functions
+. ./shunit2_test_helpers
+
+#-----------------------------------------------------------------------------
+# suite tests
+#
+
+testFail()
+{
+ ( fail >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'fail' $? "${stdoutF}" "${stderrF}"
+
+ ( fail "${MSG}" >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'fail with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( fail arg1 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testFailNotEquals()
+{
+ ( failNotEquals 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals '' '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( failNotEquals arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+testFailSame()
+{
+ ( failSame 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame '' '' >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
+
+ ( failSame arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
+ th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
+}
+
+#-----------------------------------------------------------------------------
+# suite functions
+#
+
+oneTimeSetUp()
+{
+ tmpDir="${__shunit_tmpDir}/output"
+ mkdir "${tmpDir}"
+ stdoutF="${tmpDir}/stdout"
+ stderrF="${tmpDir}/stderr"
+
+ MSG='This is a test message'
+}
+
+# load and run shUnit2
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. ${TH_SHUNIT}
--- /dev/null
+# $Id: shunit2_test_helpers 286 2008-11-24 21:42:34Z kate.ward@forestent.com $
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008 Kate Ward. All Rights Reserved.
+# Released under the LGPL (GNU Lesser General Public License)
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 unit test common functions
+
+# treat unset variables as an error when performing parameter expansion
+set -u
+
+# set shwordsplit for zsh
+[ -n "${ZSH_VERSION:-}" ] && setopt shwordsplit
+
+#
+# constants
+#
+
+# path to shUnit2 library. can be overridden by setting SHUNIT_INC
+TH_SHUNIT=${SHUNIT_INC:-./shunit2}
+
+# configure debugging. set the DEBUG environment variable to any
+# non-empty value to enable debug output, or TRACE to enable trace
+# output.
+TRACE=${TRACE:+'th_trace '}
+[ -n "${TRACE}" ] && DEBUG=1
+[ -z "${TRACE}" ] && TRACE=':'
+
+DEBUG=${DEBUG:+'th_debug '}
+[ -z "${DEBUG}" ] && DEBUG=':'
+
+#
+# variables
+#
+
+th_RANDOM=0
+
+#
+# functions
+#
+
+# message functions
+th_trace() { echo "${MY_NAME}:TRACE $@" >&2; }
+th_debug() { echo "${MY_NAME}:DEBUG $@" >&2; }
+th_info() { echo "${MY_NAME}:INFO $@" >&2; }
+th_warn() { echo "${MY_NAME}:WARN $@" >&2; }
+th_error() { echo "${MY_NAME}:ERROR $@" >&2; }
+th_fatal() { echo "${MY_NAME}:FATAL $@" >&2; }
+
+# output subtest name
+th_subtest() { echo " $@" >&2; }
+
+# generate a random number
+th_generateRandom()
+{
+ tfgr_random=${th_RANDOM}
+
+ while [ "${tfgr_random}" = "${th_RANDOM}" ]; do
+ if [ -n "${RANDOM:-}" ]; then
+ # $RANDOM works
+ tfgr_random=${RANDOM}${RANDOM}${RANDOM}$$
+ elif [ -r '/dev/urandom' ]; then
+ tfgr_random=`od -vAn -N4 -tu4 </dev/urandom |sed 's/^[^0-9]*//'`
+ else
+ tfgr_date=`date '+%H%M%S'`
+ tfgr_random=`expr ${tfgr_date} \* $$`
+ unset tfgr_date
+ fi
+ [ "${tfgr_random}" = "${th_RANDOM}" ] && sleep 1
+ done
+
+ th_RANDOM=${tfgr_random}
+ unset tfgr_random
+}
+
+# this section returns the data section from the specified section of a file. a
+# datasection is defined by a [header], one or more lines of data, and then a
+# blank line.
+th_getDataSect()
+{
+ th_sgrep "\\[$1\\]" "$2" |sed '1d'
+}
+
+# this function greps a section from a file. a section is defined as a group of
+# lines preceeded and followed by blank lines.
+th_sgrep()
+{
+ th_pattern_=$1
+ shift
+
+ sed -e '/./{H;$!d;}' -e "x;/${th_pattern_}/"'!d;' $@ |sed '1d'
+
+ unset th_pattern_
+}
+
+# Custom assert that checks for true return value (0), and no output to STDOUT
+# or STDERR. If a non-zero return value is encountered, the output of STDERR
+# will be output.
+#
+# Args:
+# th_test_: string: name of the subtest
+# th_rtrn_: integer: the return value of the subtest performed
+# th_stdout_: string: filename where stdout was redirected to
+# th_stderr_: string: filename where stderr was redirected to
+th_assertTrueWithNoOutput()
+{
+ th_test_=$1
+ th_rtrn_=$2
+ th_stdout_=$3
+ th_stderr_=$4
+
+ assertTrue "${th_test_}; expected return value of zero" ${th_rtrn_}
+ [ ${th_rtrn_} -ne ${SHUNIT_TRUE} ] && cat "${th_stderr_}"
+ assertFalse "${th_test_}; expected no output to STDOUT" \
+ "[ -s '${th_stdout_}' ]"
+ assertFalse "${th_test_}; expected no output to STDERR" \
+ "[ -s '${th_stderr_}' ]"
+
+ unset th_test_ th_rtrn_ th_stdout_ th_stderr_
+}
+
+# Custom assert that checks for non-zero return value, output to STDOUT, but no
+# output to STDERR.
+#
+# Args:
+#   th_test_: string: name of the subtest
+#   th_rtrn_: integer: the return value of the subtest performed
+#   th_stdout_: string: filename where stdout was redirected to
+#   th_stderr_: string: filename where stderr was redirected to
+th_assertFalseWithOutput()
+{
+  th_test_=$1
+  th_rtrn_=$2
+  th_stdout_=$3
+  th_stderr_=$4
+
+  assertFalse "${th_test_}; expected non-zero return value" ${th_rtrn_}
+  # '-s' is true when the file exists and is non-empty, i.e. output occurred.
+  assertTrue "${th_test_}; expected output to STDOUT" \
+      "[ -s '${th_stdout_}' ]"
+  assertFalse "${th_test_}; expected no output to STDERR" \
+      "[ -s '${th_stderr_}' ]"
+
+  unset th_test_ th_rtrn_ th_stdout_ th_stderr_
+}
+
+# Custom assert that checks for non-zero return value, no output to STDOUT, but
+# output to STDERR.
+#
+# Args:
+#   th_test_: string: name of the subtest
+#   th_rtrn_: integer: the return value of the subtest performed
+#   th_stdout_: string: filename where stdout was redirected to
+#   th_stderr_: string: filename where stderr was redirected to
+th_assertFalseWithError()
+{
+  th_test_=$1
+  th_rtrn_=$2
+  th_stdout_=$3
+  th_stderr_=$4
+
+  assertFalse "${th_test_}; expected non-zero return value" ${th_rtrn_}
+  # '-s' is true when the file exists and is non-empty, i.e. output occurred.
+  assertFalse "${th_test_}; expected no output to STDOUT" \
+      "[ -s '${th_stdout_}' ]"
+  assertTrue "${th_test_}; expected output to STDERR" \
+      "[ -s '${th_stderr_}' ]"
+
+  unset th_test_ th_rtrn_ th_stdout_ th_stderr_
+}
+
+#
+# main
+#
+
+${TRACE} 'trace output enabled'
+${DEBUG} 'debug output enabled'
--- /dev/null
+#! /bin/sh
+# $Id: shunit2_test_macros.sh 299 2010-05-03 12:44:20Z kate.ward@forestent.com $
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008 Kate Ward. All Rights Reserved.
+# Released under the LGPL (GNU Lesser General Public License)
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 unit test for macros.
+
+# load test helpers
+. ./shunit2_test_helpers
+
+#------------------------------------------------------------------------------
+# suite tests
+#
+# each test below deliberately makes an assert macro fail inside a subshell,
+# then greps the captured stdout for an 'ASSERT:[<line>]' message — the macros
+# exist to record ${LINENO} at the call site, so the line number must appear.
+
+testAssertEquals()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_ASSERT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_EQUALS_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_ASSERT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_EQUALS_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+testAssertNotEquals()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_ASSERT_NOT_EQUALS_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_NOT_EQUALS_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_ASSERT_NOT_EQUALS_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_NOT_EQUALS_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+testSame()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_ASSERT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_SAME_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_ASSERT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_SAME_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+testNotSame()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_ASSERT_NOT_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_NOT_SAME_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_ASSERT_NOT_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_NOT_SAME_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+# null/not-null macro checks; same pattern as above — force a failure in a
+# subshell and require a line-numbered ASSERT message in the captured stdout.
+testNull()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_ASSERT_NULL_} 'x' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_NULL_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_ASSERT_NULL_} '"some msg"' 'x' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_NULL_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+testNotNull()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_ASSERT_NOT_NULL_} '' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_NOT_NULL_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_ASSERT_NOT_NULL_} '"some msg"' '""' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_NOT_NULL_ w/ msg failure' ${rtrn}
+  # NOTE(review): unlike the other cases this dumps stdout as well as stderr
+  # on failure — presumably intentional for extra diagnostics; confirm against
+  # upstream shunit2 history before "fixing" the asymmetry.
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stdoutF}" "${stderrF}" >&2
+}
+
+# true/false macro checks; feed each macro the condition that must trip it
+# (SHUNIT_FALSE into _ASSERT_TRUE_, SHUNIT_TRUE into _ASSERT_FALSE_).
+testAssertTrue()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_ASSERT_TRUE_} ${SHUNIT_FALSE} >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_TRUE_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+
+  ( ${_ASSERT_TRUE_} '"some msg"' ${SHUNIT_FALSE} >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_TRUE_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+testAssertFalse()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_ASSERT_FALSE_} ${SHUNIT_TRUE} >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_FALSE_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_ASSERT_FALSE_} '"some msg"' ${SHUNIT_TRUE} >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_ASSERT_FALSE_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+# fail-macro checks; the _FAIL_* macros always fail, so only the presence of a
+# line-numbered ASSERT message needs to be verified.
+testFail()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_FAIL_} >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_FAIL_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_FAIL_} '"some msg"' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_FAIL_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+testFailNotEquals()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_FAIL_NOT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_FAIL_NOT_EQUALS_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_FAIL_NOT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_FAIL_NOT_EQUALS_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+testFailSame()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_FAIL_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_FAIL_SAME_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_FAIL_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_FAIL_SAME_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+testFailNotSame()
+{
+  # start skipping if LINENO not available
+  [ -z "${LINENO:-}" ] && startSkipping
+
+  ( ${_FAIL_NOT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_FAIL_NOT_SAME_ failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+
+  ( ${_FAIL_NOT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
+  grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
+  rtrn=$?
+  assertTrue '_FAIL_NOT_SAME_ w/ msg failure' ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+#------------------------------------------------------------------------------
+# suite functions
+#
+
+oneTimeSetUp()
+{
+  # carve out a per-run output directory under shUnit2's managed temp area
+  # for the stdout/stderr captured from each subshelled assertion.
+  tmpDir="${__shunit_tmpDir}/output"
+  mkdir "${tmpDir}"
+  stdoutF="${tmpDir}/stdout"
+  stderrF="${tmpDir}/stderr"
+}
+
+# load and run shUnit2
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. ${TH_SHUNIT}
--- /dev/null
+#! /bin/sh
+# $Id: shunit2_test_misc.sh 322 2011-04-24 00:09:45Z kate.ward@forestent.com $
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2008 Kate Ward. All Rights Reserved.
+# Released under the LGPL (GNU Lesser General Public License)
+#
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 unit tests of miscellaneous things
+
+# load test helpers
+. ./shunit2_test_helpers
+
+#------------------------------------------------------------------------------
+# suite tests
+#
+
+# Note: the test script is prefixed with '#' chars so that shUnit2 does not
+# incorrectly interpret the embedded functions as real functions.
+testUnboundVariable()
+{
+  # write a child unit test (un-commenting it via sed) that dereferences an
+  # unset variable under 'set -u', then execute it in a separate shell and
+  # verify shUnit2 reports the failure rather than silently dying.
+  sed 's/^#//' >"${unittestF}" <<EOF
+## treat unset variables as an error when performing parameter expansion
+#set -u
+#
+#boom() { x=\$1; }  # this function goes boom if no parameters are passed!
+#test_boom()
+#{
+#   assertEquals 1 1
+#   boom  # No parameter given
+#   assertEquals 0 \$?
+#}
+#. ${TH_SHUNIT}
+EOF
+  ( exec ${SHUNIT_SHELL:-sh} "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
+  assertFalse 'expected a non-zero exit value' $?
+  grep '^ASSERT:Unknown failure' "${stdoutF}" >/dev/null
+  assertTrue 'assert message was not generated' $?
+  grep '^Ran [0-9]* test' "${stdoutF}" >/dev/null
+  assertTrue 'test count message was not generated' $?
+  grep '^FAILED' "${stdoutF}" >/dev/null
+  assertTrue 'failure message was not generated' $?
+}
+
+# regression test for shunit2 issue #7: the assertEquals failure message must
+# have the exact 'expected:<…> but was:<…>' format.
+testIssue7()
+{
+  ( assertEquals 'Some message.' 1 2 >"${stdoutF}" 2>"${stderrF}" )
+  diff "${stdoutF}" - >/dev/null <<EOF
+ASSERT:Some message. expected:<1> but was:<2>
+EOF
+  rtrn=$?
+  assertEquals ${SHUNIT_TRUE} ${rtrn}
+  [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
+}
+
+# _shunit_prepForSourcing must leave absolute and ./-relative paths alone and
+# prefix bare names with './' so they can be dotted in.
+testPrepForSourcing()
+{
+  assertEquals '/abc' `_shunit_prepForSourcing '/abc'`
+  assertEquals './abc' `_shunit_prepForSourcing './abc'`
+  assertEquals './abc' `_shunit_prepForSourcing 'abc'`
+}
+
+# exercise _shunit_escapeCharInStr for each character it must backslash-escape
+# (backslash, double-quote, dollar) in empty, trailing, embedded, and leading
+# positions. the single-quote and backtick cases are disabled below —
+# presumably because quoting them portably inside backticks is shell-specific;
+# confirm against upstream before re-enabling.
+testEscapeCharInStr()
+{
+  actual=`_shunit_escapeCharInStr '\' ''`
+  assertEquals '' "${actual}"
+  assertEquals 'abc\\' `_shunit_escapeCharInStr '\' 'abc\'`
+  assertEquals 'abc\\def' `_shunit_escapeCharInStr '\' 'abc\def'`
+  assertEquals '\\def' `_shunit_escapeCharInStr '\' '\def'`
+
+  actual=`_shunit_escapeCharInStr '"' ''`
+  assertEquals '' "${actual}"
+  assertEquals 'abc\"' `_shunit_escapeCharInStr '"' 'abc"'`
+  assertEquals 'abc\"def' `_shunit_escapeCharInStr '"' 'abc"def'`
+  assertEquals '\"def' `_shunit_escapeCharInStr '"' '"def'`
+
+  actual=`_shunit_escapeCharInStr '$' ''`
+  assertEquals '' "${actual}"
+  assertEquals 'abc\$' `_shunit_escapeCharInStr '$' 'abc$'`
+  assertEquals 'abc\$def' `_shunit_escapeCharInStr '$' 'abc$def'`
+  assertEquals '\$def' `_shunit_escapeCharInStr '$' '$def'`
+
+#  actual=`_shunit_escapeCharInStr "'" ''`
+#  assertEquals '' "${actual}"
+#  assertEquals "abc\\'" `_shunit_escapeCharInStr "'" "abc'"`
+#  assertEquals "abc\\'def" `_shunit_escapeCharInStr "'" "abc'def"`
+#  assertEquals "\\'def" `_shunit_escapeCharInStr "'" "'def"`
+
+#  # must put the backtick in a variable so the shell doesn't misinterpret it
+#  # while inside a backticked sequence (e.g. `echo '`'` would fail).
+#  backtick='`'
+#  actual=`_shunit_escapeCharInStr ${backtick} ''`
+#  assertEquals '' "${actual}"
+#  assertEquals '\`abc' \
+#      `_shunit_escapeCharInStr "${backtick}" ${backtick}'abc'`
+#  assertEquals 'abc\`' \
+#      `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}`
+#  assertEquals 'abc\`def' \
+#      `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}'def'`
+}
+
+testEscapeCharInStr_specialChars()
+{
+  # make sure our forward slash doesn't upset sed
+  assertEquals '/' `_shunit_escapeCharInStr '\' '/'`
+
+  # some shells escape these differently
+  #assertEquals '\\a' `_shunit_escapeCharInStr '\' '\a'`
+  #assertEquals '\\b' `_shunit_escapeCharInStr '\' '\b'`
+}
+
+# Test the various ways of declaring functions.
+#
+# Prefixing (then stripping) with comment symbol so these functions aren't
+# treated as real functions by shUnit2.
+testExtractTestFunctions()
+{
+  # the fixture covers: one-liner, multi-line, space-before-paren, 'function'
+  # keyword, leading whitespace, a non-test function, and a function whose
+  # body merely assigns a test*-named variable — only the first five names
+  # must be extracted.
+  f="${tmpD}/extract_test_functions"
+  sed 's/^#//' <<EOF >"${f}"
+#testABC() { echo 'ABC'; }
+#test_def() {
+#  echo 'def'
+#}
+#testG3 ()
+#{
+#  echo 'G3'
+#}
+#function test4() { echo '4'; }
+#  test5() { echo '5'; }
+#some_test_function() { echo 'some func'; }
+#func_with_test_vars() {
+#  testVariable=1234
+#}
+EOF
+
+  actual=`_shunit_extractTestFunctions "${f}"`
+  assertEquals 'testABC test_def testG3 test4 test5' "${actual}"
+}
+
+#------------------------------------------------------------------------------
+# suite functions
+#
+
+setUp()
+{
+  # truncate the capture files before every test so stale output from a
+  # previous test cannot satisfy (or break) an assertion.
+  for f in ${expectedF} ${stdoutF} ${stderrF}; do
+    cp /dev/null ${f}
+  done
+  # give each test a fresh scratch directory.
+  rm -fr "${tmpD}"
+  mkdir "${tmpD}"
+}
+
+oneTimeSetUp()
+{
+  # scratch paths under shUnit2's managed temp directory.
+  tmpD="${SHUNIT_TMPDIR}/tmp"
+  expectedF="${SHUNIT_TMPDIR}/expected"
+  stdoutF="${SHUNIT_TMPDIR}/stdout"
+  stderrF="${SHUNIT_TMPDIR}/stderr"
+}
+
+# load and run shUnit2
+[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
+. ${TH_SHUNIT}
--- /dev/null
+#! /bin/sh
+# $Id: shunit2_test_standalone.sh 303 2010-05-03 13:11:27Z kate.ward@forestent.com $
+# vim:et:ft=sh:sts=2:sw=2
+#
+# Copyright 2010 Kate Ward. All Rights Reserved.
+# Released under the LGPL (GNU Lesser General Public License)
+# Author: kate.ward@forestent.com (Kate Ward)
+#
+# shUnit2 unit test for standalone operation.
+#
+# This unit test is purely to test that calling shunit2 directly, while passing
+# the name of a unit test script, works. When run, this script determines if it
+# is running as a standalone program, and calls main() if it is.
+
+ARGV0=`basename "$0"`
+
+# load test helpers
+. ./shunit2_test_helpers
+
+#------------------------------------------------------------------------------
+# suite tests
+#
+
+# trivial always-true test: its only purpose is to prove that the standalone
+# shunit2 invocation actually located and ran this suite.
+testStandalone()
+{
+  assertTrue ${SHUNIT_TRUE}
+}
+
+#------------------------------------------------------------------------------
+# main
+#
+
+# invoke the shunit2 program directly, handing it this script's own name as
+# the unit test to execute.
+main()
+{
+  ${TH_SHUNIT} "${ARGV0}"
+}
+
+# are we running as a standalone?
+# (when shunit2 re-sources this file, $0 presumably names shunit2 instead, so
+# main() is not re-entered — TODO confirm against the shunit2 sourcing logic.)
+if [ "${ARGV0}" = 'shunit2_test_standalone.sh' ]; then
+  if [ $# -gt 0 ]; then main "$@"; else main; fi
+fi
--- /dev/null
+#!/bin/bash
+
+# This script runs through all the known tests by performing a test sweep.
+# It should claim that all tests succeeded for a new build/configuration/etc.
+# to be considered successful.
+#
+# The way to add tests to the full test set is to add full paths to the
+# "TESTKIT_TEST_SUITE" variable. This can be done in your personal
+# testkit.config file or in an exported variable set prior to running
+# this script.
+#
+# Author: Chris Koeritz
+
+export TESTKIT_DIR="$( \cd "$(\dirname "$0")" && \pwd )" # obtain the script's working directory.
+cd $TESTKIT_DIR
+
+# record start time (epoch seconds) for the duration report at the end.
+TIME_START="$(date +"%s")"
+
+# prepare_tools.sh is passed its own name — presumably so it can locate itself
+# when sourced rather than executed; TODO confirm against prepare_tools.sh.
+source prepare_tools.sh prepare_tools.sh
+
+# if that didn't work, complain.
+if [ -z "$TESTKIT_SENTINEL" ]; then echo Please run prepare_tools.sh before testing.; exit 3; fi
+source "$TESTKIT_ROOT/library/establish_environment.sh"
+
+# first command-line argument selects the reporting mode (see --help below).
+verbosity="$1"; shift
+
+# default to full logging unless 'summary' is requested.
+VERBOSE=1
+
+if [ "$verbosity" == "--help" -o "$verbosity" == "-help" -o "$verbosity" == "-h" ]; then
+  echo "$(basename $0): Runs the available suite of tests."
+  echo
+  echo "  $(basename $0) {summary | [full]}"
+  echo
+  echo "By default, the report will be a 'full' listing that includes all test"
+  echo "run logging.  If 'summary' is passed as the first parameter, then only"
+  echo "the test results will be displayed."
+  exit 0
+fi
+
+if [ "$verbosity" == "summary" ]; then
+  VERBOSE=0
+fi
+
+##############
+
+# clean up any conglomerated log file.
+\rm -f "$CONGLOMERATED_TESTKIT_OUTPUT"
+
+##############
+
+# define the sets of tests we'd like to run.
+
+# NOTE(review): this set is defined but not appended to TESTKIT_TEST_SUITE
+# anywhere in this script — presumably it is consumed by the user's
+# testkit.config; confirm before removing.
+NETBADGE_TESTS=( \
+  netbadge_integrations/basic_integrations_test.sh \
+)
+
+##############
+
+if [ ! -z "$AUTOBUILD_RUNNING" ]; then
+  # only add some tests needed by automated testing/bootstrap builds.
+
+true  # placeholder
+
+fi
+
+##############
+
+# now that all tests have been defined, we build up our total list of tests.
+
+echo Full set of tests:
+for ((test_iter=0; $test_iter < ${#TESTKIT_TEST_SUITE[*]}; test_iter++)); do
+  # print a 1-based index next to each configured test path.
+  echo "$(expr $test_iter + 1): ${TESTKIT_TEST_SUITE[$test_iter]}"
+done
+
+##############
+
+# tally of failed tests; drives the final report and the exit status.
+FAIL_COUNT=0
+
+# per-day scratch directory that collects the individual test logs.
+REG_TEMP="$TEST_TEMP/run_$(date +"%Y_%m_%d")"
+# -p creates missing parent directories and is a no-op when the directory
+# already exists, so the previous [ ! -d ] guard is unnecessary and a missing
+# $TEST_TEMP no longer makes the mkdir fail.
+mkdir -p "$REG_TEMP"
+
+# go to the top of the hierarchy; stop here if that is impossible, since every
+# relative test path below depends on it.
+cd "$TESTKIT_ROOT" || exit 1
+
+for ((test_iter=0; $test_iter < ${#TESTKIT_TEST_SUITE[*]}; test_iter++)); do
+  echo -e "\n======================================================================"
+  echo -n `date`": "
+  echo "Now running test $(expr $test_iter + 1): ${TESTKIT_TEST_SUITE[$test_iter]}"
+  # unique log file per test, kept under the per-day scratch directory.
+  output_file="$(mktemp $REG_TEMP/test_log.XXXXXX)"
+  echo "  Test output file: $output_file"
+
+#
+#hmmm: no real way to check for errors in the general case, unless we define a
+#      set of sentinels for this.  not done yet.
+#-->
+#  echo "==============" >"$output_file"
+#  echo "Log state prior to test:" >>"$output_file"
+#  check_logs_for_errors >>"$output_file"
+#  echo "==============" >>"$output_file"
+
+  if [ $VERBOSE -ne 1 ]; then
+    # summary mode: capture all output in the log only.
+    bash "${TESTKIT_TEST_SUITE[$test_iter]}" >>"$output_file" 2>&1
+    retval=$?
+  else
+    # full mode: tee output to both console and log; PIPESTATUS[0] recovers
+    # the test's exit code, since $? would report tee's status instead.
+    bash "${TESTKIT_TEST_SUITE[$test_iter]}" 2>&1 | tee -a "$output_file"
+    retval=${PIPESTATUS[0]}
+  fi
+
+  # record pass/fail for the results table printed after the run.
+  if [ $retval -ne 0 ]; then
+    ((FAIL_COUNT++))
+    echo "FAILURE: exit code $retval for test ${TESTKIT_TEST_SUITE[$test_iter]}"
+    TEST_RESULTS[$test_iter]="FAIL"
+  else
+    echo "OK: successful test run for test ${TESTKIT_TEST_SUITE[$test_iter]}"
+    TEST_RESULTS[$test_iter]="OKAY"
+  fi
+
+#hmmm: same comment re error checking...  define some tags to look for!
+#  echo "==============" >>"$output_file"
+#  echo "Log state after test:" >>"$output_file"
+#  check_logs_for_errors >>"$output_file"
+#  echo "==============" >>"$output_file"
+
+done
+
+# final analysis--how did the test run do?
+
+echo -e "\n\nResults table for this test run:\n"
+for ((test_iter=0; $test_iter < ${#TESTKIT_TEST_SUITE[*]}; test_iter++)); do
+  num=$(expr $test_iter + 1)
+  # zero-pad single digits so the table columns line up.
+  if [ $num -lt 10 ]; then num="0$num"; fi
+  echo "$num: ${TEST_RESULTS[$test_iter]} -- ${TESTKIT_TEST_SUITE[$test_iter]}"
+done
+echo
+
+# figure out how long things took.
+TIME_END="$(date +"%s")"
+duration="$(($TIME_END - $TIME_START))"
+# prepare to print duration in hours and minutes.
+minutes="$(($duration / 60))"
+hours="$(($minutes / 60))"
+# grab out the hours we calculated from the minutes sum.
+minutes="$(($minutes - $hours * 60))"
+# zero-pad for an hh:mm style display.
+if (($minutes < 10)); then minutes="0$minutes"; fi
+if (($hours < 10)); then hours="0$hours"; fi
+echo "Total testing duration: $hours:$minutes hh:mm ($duration seconds total)"
+
+# non-zero exit when any test failed, so callers (e.g. CI) can gate on it.
+if [ $FAIL_COUNT -ne 0 ]; then
+  echo "FAILURE: $FAIL_COUNT Tests Failed out of ${#TESTKIT_TEST_SUITE[*]} Tests."
+  exit 1
+else
+  echo "OK: All ${#TESTKIT_TEST_SUITE[*]} Tests Ran Successfully."
+  exit 0
+fi
+
+
--- /dev/null
+# This is an example configuration file for the TestKit.
+
+####
+# this is fred t. hamster's personal testkit config file.
+####
+
+##############
+
+# This section defines variables that are used throughout the tests.
+# Many of these need to change to suit your particular configuration.
+
+# The base user name is used for any paths below that refer to the user who
+# will be running the tools and tests. This should be changed to the actual
+# user account under which the tools and tests will be run, if the default
+# value based on USER cannot be relied upon.
+BASE_USER=${USER}
+
+# Used for windows testing; provides the path to the binaries directory of cygwin.
+#CYGWIN_BIN_PATH=c:/cygwin/bin
+
+##############
+
+# define the tests to run.  this is the most convenient place to put this.
+# the test suite list obviously will vary a lot based on what is being tested.
+# note: entries should be absolute paths (hence the $TESTKIT_ROOT prefix) so
+# the suite can be launched from any working directory.
+TESTKIT_TEST_SUITE=( \
+  $TESTKIT_ROOT/examples/blank_test.sh \
+)
+
+##############
+
--- /dev/null
+#!/bin/bash
+
+# this script modifies the linux kernel for maximum tcp buffer size, which can
+# improve long-haul transfers over a wan.
+
+# new maximum buffer size to set.
+new_max=2097152
+
+echo "net.core.wmem_max=$new_max" >> /etc/sysctl.conf
+echo "net.core.rmem_max=$new_max" >> /etc/sysctl.conf
+
+echo "net.ipv4.tcp_rmem= 10240 87380 $new_max" >> /etc/sysctl.conf
+echo "net.ipv4.tcp_wmem= 10240 87380 $new_max" >> /etc/sysctl.conf
+
+echo "net.ipv4.tcp_window_scaling = 1" >> /etc/sysctl.conf
+
+echo "net.ipv4.tcp_timestamps = 1" >> /etc/sysctl.conf
+
+echo "net.ipv4.tcp_sack = 1" >> /etc/sysctl.conf
+
+echo "net.ipv4.tcp_no_metrics_save = 1" >> /etc/sysctl.conf
+
+echo "net.core.netdev_max_backlog = 5000" >> /etc/sysctl.conf
+
--- /dev/null
+
+# show_proc: print a divider and the command line, then execute the command.
+# Args: the command and its arguments.
+function show_proc()
+{
+  echo ----------------------------------------------
+  # "$@"/"$*" (rather than bare $*) keep each argument intact, so arguments
+  # containing spaces or glob characters are displayed and passed verbatim.
+  echo "$*"
+  "$@"
+}
+
+show_proc cat /proc/sys/net/ipv4/tcp_mem
+show_proc cat /proc/sys/net/core/rmem_default
+show_proc cat /proc/sys/net/core/rmem_max
+show_proc cat /proc/sys/net/core/wmem_default
+show_proc cat /proc/sys/net/core/wmem_max
+show_proc cat /proc/sys/net/core/optmem_max
+show_proc cat /proc/net/sockstat
+show_proc cat /proc/sys/net/ipv4/tcp_max_orphans
+