. "${TEST_SCRIPTS_DIR}/common.sh"
-# If we're not running on a real cluster then we need a local copy of
-# ctdb (and other stuff) in $PATH and we will use local daemons.
-if [ -n "$TEST_LOCAL_DAEMONS" ] ; then
- var_dir="${CTDB_DIR}/tests/var"
-
- export CTDB_NODES_SOCKETS=""
- for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
- CTDB_NODES_SOCKETS="${CTDB_NODES_SOCKETS}${CTDB_NODES_SOCKETS:+ }${var_dir}/sock.${i}"
- done
-
- PATH="${CTDB_DIR}/bin:${PATH}"
-
- export CTDB_NODES="$var_dir/nodes.txt"
-fi
-
######################################################################
export CTDB_TIMEOUT=60
if [ -n "$CTDB_TEST_REMOTE_DIR" ] ; then
    CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
 else
-    CTDB_TEST_WRAPPER="${TEST_SCRIPTS_DIR}/test_wrap"
+    # Canonicalise the scripts dir so the wrapper path survives later cd-s.
+    # Quote the expansions: the path may contain whitespace.
+    _d=$(cd "${TEST_SCRIPTS_DIR}" && echo "$PWD")
+    CTDB_TEST_WRAPPER="$_d/test_wrap"
fi
export CTDB_TEST_WRAPPER
######################################################################
-ctdb_check_time_logs ()
-{
- local threshold=20
-
- local jump=false
- local prev=""
- local ds_prev=""
- local node=""
-
- out=$(onnode all tail -n 20 /var/log/ctdb.test.time.log 2>&1)
-
- if [ $? -eq 0 ] ; then
- local line
- while read line ; do
- case "$line" in
- \>\>\ NODE:\ *\ \<\<)
- node="${line#>> NODE: }"
- node=${node% <<*}
- ds_prev=""
- ;;
- *\ *)
- set -- $line
- ds_curr="$1${2:0:1}"
- if [ -n "$ds_prev" ] && \
- [ $(($ds_curr - $ds_prev)) -ge $threshold ] ; then
- echo "Node $node had time jump of $(($ds_curr - $ds_prev))ds between $(date +'%T' -d @${ds_prev%?}) and $(date +'%T' -d @${ds_curr%?})"
- jump=true
- fi
- prev="$line"
- ds_prev="$ds_curr"
- ;;
- esac
- done <<<"$out"
- else
- echo Error getting time logs
- fi
- if $jump ; then
- echo "Check time sync (test client first):"
- date
- onnode -p all date
- echo "Information from test client:"
- hostname
- top -b -n 1
- echo "Information from cluster nodes:"
- onnode all "top -b -n 1 ; echo '/proc/slabinfo' ; cat /proc/slabinfo"
- fi
-}
-
ctdb_test_exit ()
{
local status=$?
echo "*** TEST COMPLETED (RC=$status) AT $(date '+%F %T'), CLEANING UP..."
- if [ -z "$TEST_LOCAL_DAEMONS" -a -n "$CTDB_TEST_TIME_LOGGING" -a \
- $status -ne 0 ] ; then
- ctdb_check_time_logs
- fi
-
eval "$ctdb_test_exit_hook" || true
unset ctdb_test_exit_hook
ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}
-ctdb_test_usage()
-{
- local status=${1:-2}
-
- cat <<EOF
-Usage: $0 [option]
-
-Options:
- -h, --help show this screen.
- -v, --version show test case version.
- --category show the test category (ACL, CTDB, Samba ...).
- -d, --description show test case description.
- --summary show short test case summary.
- -x trace test using set -x
-EOF
-
- exit $status
-}
-
-ctdb_test_version ()
-{
- [ -n "$CTDB_DIR" ] || die "Can not determine version."
-
- (cd "$CTDB_DIR" && git describe)
-}
-
-ctdb_test_cmd_options()
-{
- [ -n "$1" ] || return 0
-
- case "$1" in
- -h|--help) ctdb_test_usage 0 ;;
- -v|--version) ctdb_test_version ;;
- --category) echo "CTDB" ;;
- -d|--description) test_info ;;
- -x) set -x ; return 0 ;;
- *)
- echo "Error: Unknown parameter = $1"
- echo
- ctdb_test_usage 2
- ;;
- esac
-
- exit 0
-}
-
ctdb_test_init ()
{
scriptname=$(basename "$0")
testfailures=0
ctdb_test_restart_scheduled=false
- ctdb_test_cmd_options $@
-
trap "ctdb_test_exit" 0
}
if [ "$nodespec" = "-v" ] ; then
verbose=true
else
- onnode_opts="$nodespec"
+ onnode_opts="${onnode_opts}${onnode_opts:+ }${nodespec}"
fi
nodespec="$1" ; shift
done
prev="$ipp"
done <<<"$ips"
- echo "BAD: a node was -1 or IPs are only assigned to one node"
+ echo "BAD: a node was -1 or IPs are only assigned to one node:"
+ echo "$ips"
echo "Are you running an old version of CTDB?"
return 1
}
# This returns a list of "ip node" lines in $out
all_ips_on_node()
{
- local node=$@
- try_command_on_node $node "$CTDB ip -Y -n all | cut -d ':' -f1-3 | sed -e '1d' -e 's@^:@@' -e 's@:@ @g'"
+ local node="$1"
+ try_command_on_node $node \
+ "$CTDB ip -X | awk -F'|' 'NR > 1 { print \$2, \$3 }'"
}
-select_test_node_and_ips ()
+_select_test_node_and_ips ()
{
- all_ips_on_node 0
-
- # When selecting test_node we just want a node that has public
- # IPs. This will work and is economically semi-random. :-)
- local x
- read x test_node <<<"$out"
+ try_command_on_node any \
+ "$CTDB ip -X all | awk -F'|' 'NR > 1 { print \$2, \$3 }'"
+ test_node="" # this matches no PNN
test_node_ips=""
local ip pnn
while read ip pnn ; do
+ if [ -z "$test_node" -a "$pnn" != "-1" ] ; then
+ test_node="$pnn"
+ fi
if [ "$pnn" = "$test_node" ] ; then
test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
fi
echo "Selected node ${test_node} with IPs: ${test_node_ips}."
test_ip="${test_node_ips%% *}"
+
+ case "$test_ip" in
+ *:*) test_prefix="${test_ip}/128" ;;
+ *) test_prefix="${test_ip}/32" ;;
+ esac
+
+ [ -n "$test_node" ] || return 1
+}
+
+select_test_node_and_ips ()
+{
+ local timeout=10
+ while ! _select_test_node_and_ips ; do
+ echo "Unable to find a test node with IPs assigned"
+ if [ $timeout -le 0 ] ; then
+ echo "BAD: Too many attempts"
+ return 1
+ fi
+ sleep_for 1
+ timeout=$(($timeout - 1))
+ done
+
+ return 0
+}
+
+# Sets: mask, iface
+get_test_ip_mask_and_iface ()
+{
+ # Find the interface
+ try_command_on_node $test_node "$CTDB ip -v -X | awk -F'|' -v ip=$test_ip '\$2 == ip { print \$4 }'"
+ iface="$out"
+
+ if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
+ # Find the netmask
+ try_command_on_node $test_node ip addr show to $test_ip
+ mask="${out##*/}"
+ mask="${mask%% *}"
+ else
+ mask="24"
+ fi
+
+ echo "$test_ip/$mask is on $iface"
+}
+
+ctdb_get_all_pnns ()
+{
+ try_command_on_node -q all "$CTDB pnn"
+ all_pnns="$out"
+}
+
+# The subtlety is that "ctdb delip" will fail if the IP address isn't
+# configured on a node...
+delete_ip_from_all_nodes ()
+{
+ _ip="$1"
+
+ ctdb_get_all_pnns
+
+ _nodes=""
+
+ for _pnn in $all_pnns ; do
+ all_ips_on_node $_pnn
+ while read _i _n ; do
+ if [ "$_ip" = "$_i" ] ; then
+ _nodes="${_nodes}${_nodes:+,}${_pnn}"
+ fi
+ done <<<"$out" # bashism
+ done
+
+ try_command_on_node -pq "$_nodes" "$CTDB delip $_ip"
}
#######################################
# Wait until either timeout expires or command succeeds. The command
-# will be tried once per second.
+# will be tried once per second, unless timeout has format T/I, where
+# I is the recheck interval.
wait_until ()
{
local timeout="$1" ; shift # "$@" is the command...
+ local interval=1
+ case "$timeout" in
+ */*)
+ interval="${timeout#*/}"
+ timeout="${timeout%/*}"
+ esac
+
local negate=false
if [ "$1" = "!" ] ; then
negate=true
echo "OK"
return 0
fi
- echo -n .
- t=$(($t - 1))
- sleep 1
+ local i
+ for i in $(seq 1 $interval) ; do
+ echo -n .
+ done
+ t=$(($t - $interval))
+ sleep $interval
done
echo "*TIMEOUT*"
_cluster_is_healthy ()
{
- local out x count line
+ $CTDB nodestatus all >/dev/null
+}
- out=$($CTDB -Y status 2>/dev/null) || return 1
+_cluster_is_recovered ()
+{
+ node_has_status 0 recovered
+}
- {
- read x
- count=0
- while read line ; do
- # We need to see valid lines if we're going to be healthy.
- [ "${line#:[0-9]}" != "$line" ] && count=$(($count + 1))
- # A line indicating a node is unhealthy causes failure.
- [ "${line##:*:*:*1:}" != "$line" ] && return 1
- done
- [ $count -gt 0 ] && return $?
- } <<<"$out" # Yay bash!
+_cluster_is_ready ()
+{
+ _cluster_is_healthy && _cluster_is_recovered
}
cluster_is_healthy ()
{
if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
echo "Cluster is HEALTHY"
+ if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_recovered ; then
+ echo "WARNING: cluster in recovery mode!"
+ fi
return 0
else
echo "Cluster is UNHEALTHY"
fi
}
-wait_until_healthy ()
+wait_until_ready ()
{
local timeout="${1:-120}"
- echo "Waiting for cluster to become healthy..."
+ echo "Waiting for cluster to become ready..."
- wait_until 120 _cluster_is_healthy
+ wait_until $timeout onnode -q any $CTDB_TEST_WRAPPER _cluster_is_ready
}
# This function is becoming nicely overloaded. Soon it will collapse! :-)
local pnn="$1"
local status="$2"
- local bits fpat mpat
+ local bits fpat mpat rpat
case "$status" in
- (unhealthy) bits="?:?:?:1:*" ;;
- (healthy) bits="?:?:?:0:*" ;;
- (disconnected) bits="1:*" ;;
- (connected) bits="0:*" ;;
- (banned) bits="?:1:*" ;;
- (unbanned) bits="?:0:*" ;;
- (disabled) bits="?:?:1:*" ;;
- (enabled) bits="?:?:0:*" ;;
- (stopped) bits="?:?:?:?:1:*" ;;
- (notstopped) bits="?:?:?:?:0:*" ;;
+ (unhealthy) bits="?|?|?|1|*" ;;
+ (healthy) bits="?|?|?|0|*" ;;
+ (disconnected) bits="1|*" ;;
+ (connected) bits="0|*" ;;
+ (banned) bits="?|1|*" ;;
+ (unbanned) bits="?|0|*" ;;
+ (disabled) bits="?|?|1|*" ;;
+ (enabled) bits="?|?|0|*" ;;
+ (stopped) bits="?|?|?|?|1|*" ;;
+ (notstopped) bits="?|?|?|?|0|*" ;;
(frozen) fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
(unfrozen) fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
(monon) mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
(monoff) mpat='^Monitoring mode:DISABLED \(1\)$' ;;
+ (recovered) rpat='^Recovery mode:RECOVERY \(1\)$' ;;
*)
echo "node_has_status: unknown status \"$status\""
return 1
if [ -n "$bits" ] ; then
local out x line
- out=$($CTDB -Y status 2>&1) || return 1
+ out=$($CTDB -X status 2>&1) || return 1
{
read x
while read line ; do
# This needs to be done in 2 steps to avoid false matches.
- local line_bits="${line#:${pnn}:*:}"
+ local line_bits="${line#|${pnn}|*|}"
[ "$line_bits" = "$line" ] && continue
[ "${line_bits#${bits}}" != "$line_bits" ] && return 0
done
$CTDB statistics -n "$pnn" | egrep -q "$fpat"
elif [ -n "$mpat" ] ; then
$CTDB getmonmode -n "$pnn" | egrep -q "$mpat"
+ elif [ -n "$rpat" ] ; then
+ ! $CTDB status -n "$pnn" | egrep -q "$rpat"
else
echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
return 1
}
# Useful for superficially testing IP failover.
-# IPs must be on nodes matching nodeglob.
-ips_are_on_nodeglob ()
+# IPs must be on the given node.
+# If the first argument is '!' then the IPs must not be on the given node.
+ips_are_on_node ()
{
- local nodeglob="$1" ; shift
+ local negating=false
+ if [ "$1" = "!" ] ; then
+ negating=true ; shift
+ fi
+ local node="$1" ; shift
local ips="$*"
local out
- all_ips_on_node 1
+ all_ips_on_node $node
- while read ip pnn ; do
- for check in $ips ; do
+ local check
+ for check in $ips ; do
+ local ip pnn
+ while read ip pnn ; do
if [ "$check" = "$ip" ] ; then
- case "$pnn" in
- ($nodeglob) : ;;
- (*) return 1 ;;
- esac
+ if [ "$pnn" = "$node" ] ; then
+ if $negating ; then return 1 ; fi
+ else
+ if ! $negating ; then return 1 ; fi
+ fi
ips="${ips/${ip}}" # Remove from list
+ break
fi
- done
- done <<<"$out" # bashism to avoid problem setting variable in pipeline.
+ # If we're negating and we didn't see the address then it
+ # isn't hosted by anyone!
+ if $negating ; then
+ ips="${ips/${check}}"
+ fi
+ done <<<"$out" # bashism to avoid problem setting variable in pipeline.
+ done
ips="${ips// }" # Remove any spaces.
[ -z "$ips" ]
}
-wait_until_ips_are_on_nodeglob ()
+wait_until_ips_are_on_node ()
{
- echo "Waiting for IPs to fail over..."
+ # Go to some trouble to print a useful description of what is happening
+ local not=""
+ if [ "$1" == "!" ] ; then
+ not="no longer "
+ fi
+ local node=""
+ local ips=""
+ local i
+ for i ; do
+ [ "$i" != "!" ] || continue
+ if [ -z "$node" ] ; then
+ node="$i"
+ continue
+ fi
+ ips="${ips}${ips:+, }${i}"
+ done
+ echo "Waiting for ${ips} to ${not}be assigned to node ${node}"
- wait_until 60 ips_are_on_nodeglob "$@"
+ wait_until 60 ips_are_on_node "$@"
}
node_has_some_ips ()
local out
- all_ips_on_node 1
+ all_ips_on_node $node
while read ip pnn ; do
if [ "$node" = "$pnn" ] ; then
wait_until_node_has_some_ips ()
{
- echo "Waiting for node to have some IPs..."
+ echo "Waiting for some IPs to be assigned to node ${test_node}"
wait_until 60 node_has_some_ips "$@"
}
#######################################
-daemons_stop ()
-{
- echo "Attempting to politely shutdown daemons..."
- onnode 1 $CTDB shutdown -n all || true
-
- echo "Sleeping for a while..."
- sleep_for 1
-
- if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
- echo "Killing remaining daemons..."
- pkill -f $CTDB_DIR/bin/ctdbd
-
- if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
- echo "Once more with feeling.."
- pkill -9 $CTDB_DIR/bin/ctdbd
- fi
- fi
-
- local var_dir=$CTDB_DIR/tests/var
- rm -rf $var_dir/test.db
-}
-
-daemons_setup ()
-{
- local var_dir=$CTDB_DIR/tests/var
-
- mkdir -p $var_dir/test.db/persistent
-
- local public_addresses=$var_dir/public_addresses.txt
- local no_public_addresses=$var_dir/no_public_addresses.txt
- rm -f $CTDB_NODES $public_addresses $no_public_addresses
-
- # If there are (strictly) greater than 2 nodes then we'll randomly
- # choose a node to have no public addresses.
- local no_public_ips=-1
- [ $TEST_LOCAL_DAEMONS -gt 2 ] && no_public_ips=$(($RANDOM % $TEST_LOCAL_DAEMONS))
- echo "$no_public_ips" >$no_public_addresses
-
- local i
- for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
- if [ "${CTDB_USE_IPV6}x" != "x" ]; then
- echo ::$i >> $nodes
- ip addr add ::$i/128 dev lo
- else
- echo 127.0.0.$i >> $CTDB_NODES
- # 2 public addresses on most nodes, just to make things interesting.
- if [ $(($i - 1)) -ne $no_public_ips ] ; then
- echo "192.0.2.$i/24 lo" >> $public_addresses
- echo "192.0.2.$(($i + $TEST_LOCAL_DAEMONS))/24 lo" >> $public_addresses
- fi
- fi
- done
-}
-
-daemons_start_1 ()
-{
- local pnn="$1"
- shift # "$@" gets passed to ctdbd
-
- local var_dir=$CTDB_DIR/tests/var
-
- local public_addresses=$var_dir/public_addresses.txt
- local no_public_addresses=$var_dir/no_public_addresses.txt
-
- local no_public_ips=-1
- [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses
-
- if [ "$no_public_ips" = $pnn ] ; then
- echo "Node $no_public_ips will have no public IPs."
- fi
-
- local ctdb_options="--reclock=$var_dir/rec.lock --nlist $CTDB_NODES --nopublicipcheck --event-script-dir=$CTDB_DIR/tests/events.d --logfile=$var_dir/daemons.log -d 3 --dbdir=$var_dir/test.db --dbdir-persistent=$var_dir/test.db/persistent --dbdir-state=$var_dir/test.db/state"
-
- if [ -z "$CTDB_TEST_REAL_CLUSTER" ]; then
- ctdb_options="$ctdb_options --public-interface=lo"
- fi
-
- if [ $pnn -eq $no_public_ips ] ; then
- ctdb_options="$ctdb_options --public-addresses=/dev/null"
- else
- ctdb_options="$ctdb_options --public-addresses=$public_addresses"
- fi
-
- # Need full path so we can use "pkill -f" to kill the daemons.
- $VALGRIND $CTDB_DIR/bin/ctdbd --socket=$var_dir/sock.$pnn $ctdb_options "$@" ||return 1
-}
-
-daemons_start ()
-{
- # "$@" gets passed to ctdbd
-
- echo "Starting $TEST_LOCAL_DAEMONS ctdb daemons..."
-
- for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
- daemons_start_1 $i "$@"
- done
-
- local var_dir=$CTDB_DIR/tests/var
-
- if [ -L /tmp/ctdb.socket -o ! -S /tmp/ctdb.socket ] ; then
- ln -sf $var_dir/sock.0 /tmp/ctdb.socket || return 1
- fi
-}
-
-#######################################
-
-_ctdb_hack_options ()
-{
- local ctdb_options="$*"
-
- # We really just want to pass CTDB_OPTIONS but on RH
- # /etc/sysconfig/ctdb can, and frequently does, set that variable.
- # So instead, we hack badly. We'll add these as we use them.
- # Note that these may still be overridden by the above file... but
- # we tend to use the exotic options here... so that is unlikely.
-
- case "$ctdb_options" in
- *--start-as-stopped*)
- export CTDB_START_AS_STOPPED="yes"
- esac
-}
-
-_restart_ctdb ()
+_service_ctdb ()
{
- _ctdb_hack_options "$@"
+ cmd="$1"
if [ -e /etc/redhat-release ] ; then
- service ctdb restart
+ service ctdb "$cmd"
else
- /etc/init.d/ctdb restart
+ /etc/init.d/ctdb "$cmd"
fi
}
-_ctdb_start ()
+# Restart CTDB on all nodes. Override for local daemons.
+_restart_ctdb_all ()
{
- _ctdb_hack_options "$@"
-
- /etc/init.d/ctdb start
+ onnode -p all $CTDB_TEST_WRAPPER _service_ctdb restart
}
+# Nothing needed for a cluster. Override for local daemons.
setup_ctdb ()
{
- if [ -n "$CTDB_NODES_SOCKETS" ] ; then
- daemons_setup
- fi
+ :
}
-# Common things to do after starting one or more nodes.
-_ctdb_start_post ()
+start_ctdb_1 ()
{
- onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1
-
- echo "Setting RerecoveryTimeout to 1"
- onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
-
- # In recent versions of CTDB, forcing a recovery like this blocks
- # until the recovery is complete. Hopefully this will help the
- # cluster to stabilise before a subsequent test.
- echo "Forcing a recovery..."
- onnode -q 0 $CTDB recover
- sleep_for 1
- echo "Forcing a recovery..."
- onnode -q 0 $CTDB recover
-
- echo "ctdb is ready"
+ onnode "$1" $CTDB_TEST_WRAPPER _service_ctdb start
}
-# This assumes that ctdbd is not running on the given node.
-ctdb_start_1 ()
+stop_ctdb_1 ()
{
- local pnn="$1"
- shift # "$@" is passed to ctdbd start.
-
- echo -n "Starting CTDB on node ${pnn}..."
-
- if [ -n "$CTDB_NODES_SOCKETS" ] ; then
- daemons_start_1 $pnn "$@"
- else
- onnode $pnn $CTDB_TEST_WRAPPER _ctdb_start "$@"
- fi
+ onnode "$1" $CTDB_TEST_WRAPPER _service_ctdb stop
+}
- # If we're starting only 1 node then we're doing something weird.
- ctdb_restart_when_done
+restart_ctdb_1 ()
+{
+ onnode "$1" $CTDB_TEST_WRAPPER _service_ctdb restart
}
restart_ctdb ()
{
- # "$@" is passed to ctdbd start.
-
echo -n "Restarting CTDB"
if $ctdb_test_restart_scheduled ; then
echo -n " (scheduled)"
local i
for i in $(seq 1 5) ; do
- if [ -n "$CTDB_NODES_SOCKETS" ] ; then
- daemons_stop
- daemons_start "$@"
- else
- onnode -p all $CTDB_TEST_WRAPPER _restart_ctdb "$@"
- fi || {
+ _restart_ctdb_all || {
echo "Restart failed. Trying again in a few seconds..."
sleep_for 5
continue
}
- onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || {
- echo "Cluster didn't become healthy. Restarting..."
+ wait_until_ready || {
+ echo "Cluster didn't become ready. Restarting..."
continue
}
- local debug_out=$(onnode -p all ctdb status -Y 2>&1; onnode -p all ctdb scriptstatus 2>&1)
-
echo "Setting RerecoveryTimeout to 1"
onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
# help the cluster to stabilise before a subsequent test.
echo "Forcing a recovery..."
onnode -q 0 $CTDB recover
- sleep_for 1
- echo "Forcing a recovery..."
- onnode -q 0 $CTDB recover
+ sleep_for 2
+
+ if ! onnode -q any $CTDB_TEST_WRAPPER _cluster_is_recovered ; then
+ echo "Cluster has gone into recovery again, waiting..."
+ wait_until 30/2 onnode -q any $CTDB_TEST_WRAPPER _cluster_is_recovered
+ fi
+
# Cluster is still healthy. Good, we're done!
if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
- echo "Cluster become UNHEALTHY again. Restarting..."
+ echo "Cluster became UNHEALTHY again [$(date)]"
+ onnode -p all ctdb status -X 2>&1
+ onnode -p all ctdb scriptstatus 2>&1
+ echo "Restarting..."
continue
fi
done
echo "Cluster UNHEALTHY... too many attempts..."
- echo "$debug_out"
+ onnode -p all ctdb status -X 2>&1
+ onnode -p all ctdb scriptstatus 2>&1
+
# Try to make the calling test fail
status=1
return 1
}
+# Does nothing on cluster and should be overridden for local daemons
+maybe_stop_ctdb ()
+{
+ :
+}
+
ctdb_restart_when_done ()
{
ctdb_test_restart_scheduled=true
}
-#######################################
-
-install_eventscript ()
+get_ctdbd_command_line_option ()
{
- local script_name="$1"
- local script_contents="$2"
+ local pnn="$1"
+ local option="$2"
- if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
- # The quoting here is *very* fragile. However, we do
- # experience the joy of installing a short script using
- # onnode, and without needing to know the IP addresses of the
- # nodes.
- onnode all "f=\"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\" ; echo \"Installing \$f\" ; echo '${script_contents}' > \"\$f\" ; chmod 755 \"\$f\""
- else
- f="${CTDB_DIR}/tests/events.d/${script_name}"
- echo "$script_contents" >"$f"
- chmod 755 "$f"
- fi
-}
+ try_command_on_node "$pnn" "$CTDB getpid" || \
+ die "Unable to get PID of ctdbd on node $pnn"
-uninstall_eventscript ()
-{
- local script_name="$1"
+ local pid="${out#*:}"
+ try_command_on_node "$pnn" "ps -p $pid -o args hww" || \
+ die "Unable to get command-line of PID $pid"
- if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
- onnode all "rm -vf \"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\""
- else
- rm -vf "${CTDB_DIR}/tests/events.d/${script_name}"
- fi
+ # Strip everything up to and including --option
+ local t="${out#*--${option}}"
+ # Strip leading '=' or space if present
+ t="${t#=}"
+ t="${t# }"
+ # Strip any following options and print
+ echo "${t%% -*}"
}
#######################################
-# This section deals with the 99.ctdb_test eventscript.
-
-# Metafunctions: Handle a ctdb-test file on a node.
-# given event.
-ctdb_test_eventscript_file_create ()
+wait_for_monitor_event ()
{
local pnn="$1"
- local type="$2"
+ local timeout=120
- try_command_on_node $pnn touch "/tmp/ctdb-test-${type}.${pnn}"
-}
+ echo "Waiting for a monitor event on node ${pnn}..."
-ctdb_test_eventscript_file_remove ()
-{
- local pnn="$1"
- local type="$2"
+ try_command_on_node "$pnn" $CTDB scriptstatus || {
+ echo "Unable to get scriptstatus from node $pnn"
+ return 1
+ }
- try_command_on_node $pnn rm -f "/tmp/ctdb-test-${type}.${pnn}"
+ local ctdb_scriptstatus_original="$out"
+ wait_until 120 _ctdb_scriptstatus_changed
}
-ctdb_test_eventscript_file_exists ()
+_ctdb_scriptstatus_changed ()
{
- local pnn="$1"
- local type="$2"
+ try_command_on_node "$pnn" $CTDB scriptstatus || {
+ echo "Unable to get scriptstatus from node $pnn"
+ return 1
+ }
- try_command_on_node $pnn test -f "/tmp/ctdb-test-${type}.${pnn}" >/dev/null 2>&1
+ [ "$out" != "$ctdb_scriptstatus_original" ]
}
+#######################################
-# Handle a flag file on a node that is removed by 99.ctdb_test on the
-# given event.
-ctdb_test_eventscript_flag ()
+nfs_test_setup ()
{
- local cmd="$1"
- local pnn="$2"
- local event="$3"
+ select_test_node_and_ips
- ctdb_test_eventscript_file_${cmd} "$pnn" "flag-${event}"
-}
+ nfs_first_export=$(showmount -e $test_ip | sed -n -e '2s/ .*//p')
+ echo "Creating test subdirectory..."
+ try_command_on_node $test_node "mktemp -d --tmpdir=$nfs_first_export"
+ nfs_test_dir="$out"
+ try_command_on_node $test_node "chmod 777 $nfs_test_dir"
-# Handle a trigger that causes 99.ctdb_test to fail it's monitor
-# event.
-ctdb_test_eventscript_unhealthy_trigger ()
-{
- local cmd="$1"
- local pnn="$2"
+ nfs_mnt_d=$(mktemp -d)
+ nfs_local_file="${nfs_mnt_d}/${nfs_test_dir##*/}/TEST_FILE"
+ nfs_remote_file="${nfs_test_dir}/TEST_FILE"
- ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-trigger"
+ ctdb_test_exit_hook_add nfs_test_cleanup
+
+ echo "Mounting ${test_ip}:${nfs_first_export} on ${nfs_mnt_d} ..."
+ mount -o timeo=1,hard,intr,vers=3 \
+ "[${test_ip}]:${nfs_first_export}" ${nfs_mnt_d}
}
-# Handle the file that 99.ctdb_test created to show that it has marked
-# a node unhealthy because it detected the above trigger.
-ctdb_test_eventscript_unhealthy_detected ()
+nfs_test_cleanup ()
{
- local cmd="$1"
- local pnn="$2"
-
- ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-detected"
+ rm -f "$nfs_local_file"
+ umount -f "$nfs_mnt_d"
+ rmdir "$nfs_mnt_d"
+ onnode -q $test_node rmdir "$nfs_test_dir"
}
-# Handle a trigger that causes 99.ctdb_test to timeout it's monitor
-# event. This should cause the node to be banned.
-ctdb_test_eventscript_timeout_trigger ()
+#######################################
+
+# If the given IP is hosted then print 2 items: maskbits and iface
+ip_maskbits_iface ()
{
- local cmd="$1"
- local pnn="$2"
- local event="$3"
+ _addr="$1"
+
+ case "$_addr" in
+ *:*) _family="inet6" ; _bits=128 ;;
+ *) _family="inet" ; _bits=32 ;;
+ esac
- ctdb_test_eventscript_file_${cmd} "$pnn" "${event}-timeout"
+ ip addr show to "${_addr}/${_bits}" 2>/dev/null | \
+ awk -v family="${_family}" \
+ 'NR == 1 { iface = $2; sub(":$", "", iface) } \
+ $1 ~ /inet/ { mask = $2; sub(".*/", "", mask); \
+ print mask, iface, family }'
}
-# Note that the eventscript can't use the above functions!
-ctdb_test_eventscript_install ()
+drop_ip ()
{
+ _addr="${1%/*}" # Remove optional maskbits
- local script='#!/bin/sh
-out=$(ctdb pnn)
-pnn="${out#PNN:}"
-
-rm -vf "/tmp/ctdb-test-flag-${1}.${pnn}"
-
-trigger="/tmp/ctdb-test-unhealthy-trigger.${pnn}"
-detected="/tmp/ctdb-test-unhealthy-detected.${pnn}"
-timeout_trigger="/tmp/ctdb-test-${1}-timeout.${pnn}"
-case "$1" in
- monitor)
- if [ -e "$trigger" ] ; then
- echo "${0}: Unhealthy because \"$trigger\" detected"
- touch "$detected"
- exit 1
- elif [ -e "$detected" -a ! -e "$trigger" ] ; then
- echo "${0}: Healthy again, \"$trigger\" no longer detected"
- rm "$detected"
- fi
+ set -- $(ip_maskbits_iface $_addr)
+ if [ -n "$1" ] ; then
+ _maskbits="$1"
+ _iface="$2"
+ echo "Removing public address $_addr/$_maskbits from device $_iface"
+ # Use $_addr (set from "$1" above), not $_ip: $_ip is only defined
+ # when drop_ip is invoked via drop_ips, so "$_ip" here deleted the
+ # wrong (or an empty) address when drop_ip was called directly.
+ ip addr del "$_addr/$_maskbits" dev "$_iface" >/dev/null 2>&1 || true
+ fi
+}
- ;;
- *)
- if [ -e "$timeout_trigger" ] ; then
- echo "${0}: Sleeping for a long time because \"$timeout_trigger\" detected"
- sleep 9999
- fi
- ;;
- *)
+drop_ips ()
+{
+ for _ip ; do
+ drop_ip "$_ip"
+ done
+}
-esac
+#######################################
-exit 0
-'
- install_eventscript "99.ctdb_test" "$script"
+# $1: pnn, $2: DB name
+db_get_path ()
+{
+ try_command_on_node -v $1 $CTDB getdbstatus "$2" |
+ sed -n -e "s@^path: @@p"
}
-ctdb_test_eventscript_uninstall ()
+# $1: pnn, $2: DB name
+db_ctdb_cattdb_count_records ()
{
- uninstall_eventscript "99.ctdb_test"
+ try_command_on_node -v $1 $CTDB cattdb "$2" |
+ grep '^key' | grep -v '__db_sequence_number__' |
+ wc -l
}
-# Note that this only works if you know all other monitor events will
-# succeed. You also need to install the eventscript before using it.
-wait_for_monitor_event ()
+# $1: pnn, $2: DB name, $3: key string, $4: value string, $5: RSN (default 7)
+db_ctdb_tstore ()
{
- local pnn="$1"
+ _tdb=$(db_get_path $1 "$2")
+ _rsn="${5:-7}"
+ try_command_on_node $1 $CTDB tstore "$_tdb" "$3" "$4" "$_rsn"
+}
- echo "Waiting for a monitor event on node ${pnn}..."
- ctdb_test_eventscript_flag create $pnn "monitor"
+# $1: pnn, $2: DB name, $3: dbseqnum (must be < 255!!!!!)
+db_ctdb_tstore_dbseqnum ()
+{
+ # "__db_sequence_number__" + trailing 0x00
+ _key='0x5f5f64625f73657175656e63655f6e756d6265725f5f00'
- wait_until 120 ! ctdb_test_eventscript_flag exists $pnn "monitor"
+ # Construct 8 byte (unit64_t) database sequence number. This
+ # probably breaks if $3 > 255
+ _value=$(printf "0x%02x%014x" $3 0)
+ db_ctdb_tstore $1 "$2" "$_key" "$_value"
}
+#######################################
+
# Make sure that $CTDB is set.
: ${CTDB:=ctdb}