export CTDB_TIMEOUT=60
if [ -n "$CTDB_TEST_REMOTE_DIR" ] ; then
- CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
+ CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
else
- _d=$(cd ${TEST_SCRIPTS_DIR}; echo $PWD)
- CTDB_TEST_WRAPPER="$_d/test_wrap"
+ _d=$(cd "$TEST_SCRIPTS_DIR" && echo "$PWD")
+ CTDB_TEST_WRAPPER="$_d/test_wrap"
fi
export CTDB_TEST_WRAPPER
######################################################################
+ctdb_test_on_cluster ()
+{
+ [ -z "$CTDB_TEST_LOCAL_DAEMONS" ]
+}
+
ctdb_test_exit ()
{
local status=$?
trap - 0
- [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
- status=$(($testfailures+0))
+ # run_tests.sh pipes stdout into tee. If the tee process is
+ # killed then any attempt to write to stdout (e.g. echo) will
+ # result in SIGPIPE, terminating the caller. Ignore SIGPIPE to
+ # ensure that all clean-up is run.
+ trap '' PIPE
# Avoid making a test fail from this point onwards. The test is
# now complete.
eval "$ctdb_test_exit_hook" || true
unset ctdb_test_exit_hook
- if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then
- echo "Restarting CTDB (scheduled)..."
- ctdb_stop_all || true # Might be restarting some daemons were shutdown
-
- echo "Reconfiguring cluster..."
- setup_ctdb
-
- ctdb_start_all
- else
- # This could be made unconditional but then we might get
- # duplication from the recovery in ctdb_start_all(). We want to
- # leave the recovery in ctdb_start_all() so that future tests that
- # might do a manual restart mid-test will benefit.
- echo "Forcing a recovery..."
- onnode 0 $CTDB recover
- fi
+ echo "Stopping cluster..."
+ ctdb_nodes_stop || ctdb_test_error "Cluster shutdown failed"
exit $status
}
ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}
+# Setting ctdb_test_cleanup_pid to <pid>@<node> will cause <pid> to be
+# killed on <node> when the test completes.  To cancel, unset ctdb_test_cleanup_pid.
+ctdb_test_cleanup_pid=""
+ctdb_test_cleanup_pid_exit_hook ()
+{
+ if [ -n "$ctdb_test_cleanup_pid" ] ; then
+ local pid="${ctdb_test_cleanup_pid%@*}"
+ local node="${ctdb_test_cleanup_pid#*@}"
+
+ try_command_on_node "$node" "kill ${pid}"
+ fi
+}
+
+ctdb_test_exit_hook_add ctdb_test_cleanup_pid_exit_hook
+
+ctdb_test_cleanup_pid_set ()
+{
+ local node="$1"
+ local pid="$2"
+
+ ctdb_test_cleanup_pid="${pid}@${node}"
+}
+
+ctdb_test_cleanup_pid_clear ()
+{
+ ctdb_test_cleanup_pid=""
+}
+
+# -n option means do not configure/start cluster
ctdb_test_init ()
{
- scriptname=$(basename "$0")
- testfailures=0
- ctdb_test_restart_scheduled=false
+ trap "ctdb_test_exit" 0
+
+ ctdb_nodes_stop >/dev/null 2>&1 || true
+
+ if [ "$1" != "-n" ] ; then
+ echo "Configuring cluster..."
+ setup_ctdb || ctdb_test_error "Cluster configuration failed"
- trap "ctdb_test_exit" 0
+ echo "Starting cluster..."
+ ctdb_init || ctdb_test_error "Cluster startup failed"
+ fi
+
+ echo "*** SETUP COMPLETE AT $(date '+%F %T'), RUNNING TEST..."
+}
+
+ctdb_nodes_start_custom ()
+{
+ if ctdb_test_on_cluster ; then
+ ctdb_test_error "ctdb_nodes_start_custom() on real cluster"
+ fi
+
+ ctdb_nodes_stop >/dev/null 2>&1 || true
+
+ echo "Configuring cluster..."
+ setup_ctdb "$@" || ctdb_test_error "Cluster configuration failed"
+
+ echo "Starting cluster..."
+ ctdb_init || ctdb_test_fail "Cluster startup failed"
+}
+
+ctdb_test_skip_on_cluster ()
+{
+ if ctdb_test_on_cluster ; then
+ ctdb_test_skip \
+ "SKIPPING this test - only runs against local daemons"
+ fi
+}
+
+
+ctdb_nodes_restart ()
+{
+ ctdb_nodes_stop "$@"
+ ctdb_nodes_start "$@"
}
########################################
-# Sets: $out
+# Sets: $out, $outfile
+# * The first 1KB of output is put into $out
+# * Tests should use $outfile for handling large output
+# * $outfile is removed after each test
+out=""
+outfile="${CTDB_TEST_TMP_DIR}/try_command_on_node.out"
+
+outfile_cleanup ()
+{
+ rm -f "$outfile"
+}
+
+ctdb_test_exit_hook_add outfile_cleanup
+
try_command_on_node ()
{
local nodespec="$1" ; shift
local cmd="$*"
- out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
+ local status=0
+ # Intentionally unquoted - might be empty
+ # shellcheck disable=SC2086
+ onnode -q $onnode_opts "$nodespec" "$cmd" >"$outfile" 2>&1 || status=$?
+ out=$(dd if="$outfile" bs=1k count=1 2>/dev/null)
+ if [ $status -ne 0 ] ; then
echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
- echo "$out"
- return 1
- }
+ cat "$outfile"
+ return $status
+ fi
if $verbose ; then
echo "Output of \"$cmd\":"
- echo "$out"
+ cat "$outfile" || true
fi
}
+_run_onnode ()
+{
+ local thing="$1"
+ shift
+
+ local options nodespec
+
+ while : ; do
+ case "$1" in
+ -*)
+ options="${options}${options:+ }${1}"
+ shift
+ ;;
+ *)
+ nodespec="$1"
+ shift
+ break
+ esac
+ done
+
+ # shellcheck disable=SC2086
+ # $options can be multi-word
+ try_command_on_node $options "$nodespec" "${thing} $*"
+}
+
+ctdb_onnode ()
+{
+ _run_onnode "$CTDB" "$@"
+}
+
+testprog_onnode ()
+{
+ _run_onnode "${CTDB_TEST_WRAPPER} ${VALGRIND}" "$@"
+}
+
+function_onnode ()
+{
+ _run_onnode "${CTDB_TEST_WRAPPER}" "$@"
+}
+
sanity_check_output ()
{
local min_lines="$1"
local regexp="$2" # Should be anchored as necessary.
- local output="$3"
local ret=0
- local num_lines=$(echo "$output" | wc -l)
+ local num_lines
+ num_lines=$(wc -l <"$outfile" | tr -d '[:space:]')
echo "There are $num_lines lines of output"
- if [ $num_lines -lt $min_lines ] ; then
- echo "BAD: that's less than the required number (${min_lines})"
- ret=1
+ if [ "$num_lines" -lt "$min_lines" ] ; then
+ ctdb_test_fail "BAD: that's less than the required number (${min_lines})"
fi
local status=0
local unexpected # local doesn't pass through status of command on RHS.
- unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?
+ unexpected=$(grep -Ev "$regexp" "$outfile") || status=$?
# Note that this is reversed.
if [ $status -eq 0 ] ; then
return $ret
}
-sanity_check_ips ()
+select_test_node ()
{
- local ips="$1" # list of "ip node" lines
-
- echo "Sanity checking IPs..."
-
- local x ipp prev
- prev=""
- while read x ipp ; do
- [ "$ipp" = "-1" ] && break
- if [ -n "$prev" -a "$ipp" != "$prev" ] ; then
- echo "OK"
- return 0
- fi
- prev="$ipp"
- done <<<"$ips"
+ try_command_on_node any ctdb pnn || return 1
- echo "BAD: a node was -1 or IPs are only assigned to one node:"
- echo "$ips"
- echo "Are you running an old version of CTDB?"
- return 1
+ test_node="$out"
+ echo "Selected node ${test_node}"
}
-# This returns a list of "ip node" lines in $out
+# This returns a list of "ip node" lines in $outfile
all_ips_on_node()
{
local node="$1"
- try_command_on_node $node \
+ try_command_on_node "$node" \
"$CTDB ip -X | awk -F'|' 'NR > 1 { print \$2, \$3 }'"
}
test_node="" # this matches no PNN
test_node_ips=""
local ip pnn
- while read ip pnn ; do
- if [ -z "$test_node" -a "$pnn" != "-1" ] ; then
+ while read -r ip pnn ; do
+ if [ -z "$test_node" ] && [ "$pnn" != "-1" ] ; then
test_node="$pnn"
fi
if [ "$pnn" = "$test_node" ] ; then
- test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
+ test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
fi
- done <<<"$out" # bashism to avoid problem setting variable in pipeline.
+ done <"$outfile"
echo "Selected node ${test_node} with IPs: ${test_node_ips}."
test_ip="${test_node_ips%% *}"
+ # test_prefix used by caller
+ # shellcheck disable=SC2034
case "$test_ip" in
*:*) test_prefix="${test_ip}/128" ;;
*) test_prefix="${test_ip}/32" ;;
while ! _select_test_node_and_ips ; do
echo "Unable to find a test node with IPs assigned"
if [ $timeout -le 0 ] ; then
- echo "BAD: Too many attempts"
+ ctdb_test_error "BAD: Too many attempts"
return 1
fi
sleep_for 1
- timeout=$(($timeout - 1))
+ timeout=$((timeout - 1))
done
return 0
get_test_ip_mask_and_iface ()
{
# Find the interface
- try_command_on_node $test_node "$CTDB ip -v -X | awk -F'|' -v ip=$test_ip '\$2 == ip { print \$4 }'"
- iface="$out"
+ ctdb_onnode "$test_node" "ip -v -X"
+ iface=$(awk -F'|' -v ip="$test_ip" '$2 == ip { print $4 }' "$outfile")
- if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
+ if ctdb_test_on_cluster ; then
# Find the netmask
- try_command_on_node $test_node ip addr show to $test_ip
+ try_command_on_node "$test_node" ip addr show to "$test_ip"
mask="${out##*/}"
mask="${mask%% *}"
else
_nodes=""
for _pnn in $all_pnns ; do
- all_ips_on_node $_pnn
- while read _i _n ; do
+ all_ips_on_node "$_pnn"
+ while read -r _i _ ; do
if [ "$_ip" = "$_i" ] ; then
_nodes="${_nodes}${_nodes:+,}${_pnn}"
fi
- done <<<"$out" # bashism
+ done <"$outfile"
done
try_command_on_node -pq "$_nodes" "$CTDB delip $_ip"
sleep_for ()
{
echo -n "=${1}|"
- for i in $(seq 1 $1) ; do
+ for i in $(seq 1 "$1") ; do
echo -n '.'
sleep 1
done
cluster_is_healthy ()
{
- if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
- echo "Cluster is HEALTHY"
- if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_recovered ; then
- echo "WARNING: cluster in recovery mode!"
+ if onnode 0 "$CTDB_TEST_WRAPPER" _cluster_is_healthy ; then
+ echo "Cluster is HEALTHY"
+ if ! onnode 0 "$CTDB_TEST_WRAPPER" _cluster_is_recovered ; then
+ echo "WARNING: cluster in recovery mode!"
+ fi
+ return 0
fi
- return 0
- else
+
echo "Cluster is UNHEALTHY"
- if ! ${ctdb_test_restart_scheduled:-false} ; then
- echo "DEBUG AT $(date '+%F %T'):"
- local i
- for i in "onnode -q 0 $CTDB status" "onnode -q 0 onnode all $CTDB scriptstatus" ; do
+
+ echo "DEBUG AT $(date '+%F %T'):"
+ local i
+ for i in "onnode -q 0 $CTDB status" \
+ "onnode -q 0 onnode all $CTDB scriptstatus" ; do
echo "$i"
$i || true
- done
- fi
+ done
+
return 1
- fi
}
wait_until_ready ()
echo "Waiting for cluster to become ready..."
- wait_until $timeout onnode -q any $CTDB_TEST_WRAPPER _cluster_is_ready
+ wait_until "$timeout" onnode -q any "$CTDB_TEST_WRAPPER" _cluster_is_ready
}
# This function is becoming nicely overloaded. Soon it will collapse! :-)
node_has_status ()
{
- local pnn="$1"
- local status="$2"
-
- local bits fpat mpat rpat
- case "$status" in
- (unhealthy) bits="?|?|?|1|*" ;;
- (healthy) bits="?|?|?|0|*" ;;
- (disconnected) bits="1|*" ;;
- (connected) bits="0|*" ;;
- (banned) bits="?|1|*" ;;
- (unbanned) bits="?|0|*" ;;
- (disabled) bits="?|?|1|*" ;;
- (enabled) bits="?|?|0|*" ;;
- (stopped) bits="?|?|?|?|1|*" ;;
- (notstopped) bits="?|?|?|?|0|*" ;;
- (frozen) fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
- (unfrozen) fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
- (recovered) rpat='^Recovery mode:RECOVERY \(1\)$' ;;
- (notlmaster) rpat="^hash:.* lmaster:${pnn}\$" ;;
+ local pnn="$1"
+ local status="$2"
+
+ case "$status" in
+ recovered)
+ ! $CTDB status -n "$pnn" | \
+ grep -Eq '^Recovery mode:RECOVERY \(1\)$'
+ return
+ ;;
+ notlmaster)
+ ! $CTDB status | grep -Eq "^hash:.* lmaster:${pnn}\$"
+ return
+ ;;
+ esac
+
+ local bits
+ case "$status" in
+ unhealthy) bits="?|?|?|?|1|*" ;;
+ healthy) bits="?|?|?|?|0|*" ;;
+ disconnected) bits="1|*" ;;
+ connected) bits="0|*" ;;
+ banned) bits="?|?|1|*" ;;
+ unbanned) bits="?|?|0|*" ;;
+ disabled) bits="?|?|?|1|*" ;;
+ enabled) bits="?|?|?|0|*" ;;
+ stopped) bits="?|?|?|?|?|1|*" ;;
+ notstopped) bits="?|?|?|?|?|0|*" ;;
*)
- echo "node_has_status: unknown status \"$status\""
- return 1
- esac
-
- if [ -n "$bits" ] ; then
- local out x line
+ echo "node_has_status: unknown status \"$status\""
+ return 1
+ esac
+ local out _ line
out=$($CTDB -X status 2>&1) || return 1
{
- read x
- while read line ; do
- # This needs to be done in 2 steps to avoid false matches.
- local line_bits="${line#|${pnn}|*|}"
- [ "$line_bits" = "$line" ] && continue
- [ "${line_bits#${bits}}" != "$line_bits" ] && return 0
- done
- return 1
+ read -r _
+ while read -r line ; do
+ # This needs to be done in 2 steps to
+ # avoid false matches.
+ local line_bits="${line#|"${pnn}"|*|}"
+ [ "$line_bits" = "$line" ] && continue
+ # shellcheck disable=SC2295
+ # This depends on $bits being a pattern
+ [ "${line_bits#${bits}}" != "$line_bits" ] && \
+ return 0
+ done
+ return 1
} <<<"$out" # Yay bash!
- elif [ -n "$fpat" ] ; then
- $CTDB statistics -n "$pnn" | egrep -q "$fpat"
- elif [ -n "$rpat" ] ; then
- ! $CTDB status -n "$pnn" | egrep -q "$rpat"
- else
- echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
- return 1
- fi
}
wait_until_node_has_status ()
echo "Waiting until node $pnn has status \"$status\"..."
- if ! wait_until $timeout onnode $proxy_pnn $CTDB_TEST_WRAPPER node_has_status "$pnn" "$status" ; then
+ if ! wait_until "$timeout" onnode "$proxy_pnn" \
+ "$CTDB_TEST_WRAPPER" node_has_status "$pnn" "$status" ; then
+
for i in "onnode -q any $CTDB status" "onnode -q any onnode all $CTDB scriptstatus" ; do
echo "$i"
$i || true
local out
- all_ips_on_node $node
+ all_ips_on_node "$node"
local check
for check in $ips ; do
local ip pnn
- while read ip pnn ; do
+ while read -r ip pnn ; do
if [ "$check" = "$ip" ] ; then
if [ "$pnn" = "$node" ] ; then
if $negating ; then return 1 ; fi
if $negating ; then
ips="${ips/${check}}"
fi
- done <<<"$out" # bashism to avoid problem setting variable in pipeline.
+ done <"$outfile"
done
ips="${ips// }" # Remove any spaces.
local out
- all_ips_on_node $node
+ all_ips_on_node "$node"
- while read ip pnn ; do
+ while read -r ip pnn ; do
if [ "$node" = "$pnn" ] ; then
return 0
fi
- done <<<"$out" # bashism to avoid problem setting variable in pipeline.
+ done <"$outfile"
return 1
}
wait_until 60 node_has_some_ips "$@"
}
-#######################################
-
-_service_ctdb ()
+wait_until_node_has_no_ips ()
{
- cmd="$1"
+ echo "Waiting until no IPs are assigned to node ${test_node}"
- if [ -e /etc/redhat-release ] ; then
- service ctdb "$cmd"
- else
- /etc/init.d/ctdb "$cmd"
- fi
+ wait_until 60 ! node_has_some_ips "$@"
}
-# Stop/start CTDB on all nodes. Override for local daemons.
-ctdb_stop_all ()
-{
- onnode -p all $CTDB_TEST_WRAPPER _service_ctdb stop
-}
-_ctdb_start_all ()
-{
- onnode -p all $CTDB_TEST_WRAPPER _service_ctdb start
-}
+#######################################
-# Nothing needed for a cluster. Override for local daemons.
-setup_ctdb ()
+ctdb_init ()
{
- :
-}
+ if ! ctdb_nodes_start ; then
+ echo "Cluster start failed"
+ return 1
+ fi
-start_ctdb_1 ()
-{
- onnode "$1" $CTDB_TEST_WRAPPER _service_ctdb start
+ if ! wait_until_ready 120 ; then
+ echo "Cluster didn't become ready"
+ return 1
+ fi
+
+ echo "Setting RerecoveryTimeout to 1"
+ onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
+
+ echo "Forcing a recovery..."
+ onnode -q 0 "$CTDB recover"
+ sleep_for 2
+
+ if ! onnode -q all "$CTDB_TEST_WRAPPER _cluster_is_recovered" ; then
+ echo "Cluster has gone into recovery again, waiting..."
+ if ! wait_until 30/2 onnode -q all \
+ "$CTDB_TEST_WRAPPER _cluster_is_recovered" ; then
+ echo "Cluster did not come out of recovery"
+ return 1
+ fi
+ fi
+
+ if ! onnode 0 "$CTDB_TEST_WRAPPER _cluster_is_healthy" ; then
+ echo "Cluster became UNHEALTHY again [$(date)]"
+ return 1
+ fi
+
+ echo "Doing a sync..."
+ onnode -q 0 "$CTDB sync"
+
+ echo "ctdb is ready"
+ return 0
}
-stop_ctdb_1 ()
+ctdb_base_show ()
{
- onnode "$1" $CTDB_TEST_WRAPPER _service_ctdb stop
+ echo "${CTDB_BASE:-${CTDB_SCRIPTS_BASE}}"
}
-restart_ctdb_1 ()
+#######################################
+
+# sets: leader
+_leader_get ()
{
- onnode "$1" $CTDB_TEST_WRAPPER _service_ctdb restart
+ local node="$1"
+
+ ctdb_onnode "$node" leader
+ # shellcheck disable=SC2154
+ # $out set by ctdb_onnode() above
+ leader="$out"
}
-ctdb_start_all ()
+leader_get ()
{
- local i
- for i in $(seq 1 5) ; do
- _ctdb_start_all || {
- echo "Start failed. Trying again in a few seconds..."
- sleep_for 5
- continue
- }
+ local node="$1"
- wait_until_ready || {
- echo "Cluster didn't become ready. Restarting..."
- continue
- }
-
- echo "Setting RerecoveryTimeout to 1"
- onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
+ echo "Get leader"
+ _leader_get "$node"
+ echo "Leader is ${leader}"
+ echo
+}
- # In recent versions of CTDB, forcing a recovery like this
- # blocks until the recovery is complete. Hopefully this will
- # help the cluster to stabilise before a subsequent test.
- echo "Forcing a recovery..."
- onnode -q 0 $CTDB recover
- sleep_for 2
+_leader_has_changed ()
+{
+ local node="$1"
+ local leader_old="$2"
- if ! onnode -q any $CTDB_TEST_WRAPPER _cluster_is_recovered ; then
- echo "Cluster has gone into recovery again, waiting..."
- wait_until 30/2 onnode -q any $CTDB_TEST_WRAPPER _cluster_is_recovered
- fi
+ _leader_get "$node"
+ [ "$leader" != "$leader_old" ]
+}
- # Cluster is still healthy. Good, we're done!
- if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
- echo "Cluster became UNHEALTHY again [$(date)]"
- onnode -p all ctdb status -X 2>&1
- onnode -p all ctdb scriptstatus 2>&1
- echo "Restarting..."
- continue
- fi
+# uses: leader
+wait_until_leader_has_changed ()
+{
+ local node="$1"
- echo "Doing a sync..."
- onnode -q 0 $CTDB sync
+ echo
+ echo "Wait until leader changes..."
+ wait_until 30 _leader_has_changed "$node" "$leader"
+ echo "Leader changed to ${leader}"
+}
- echo "ctdb is ready"
- return 0
- done
+#######################################
- echo "Cluster UNHEALTHY... too many attempts..."
- onnode -p all ctdb status -X 2>&1
- onnode -p all ctdb scriptstatus 2>&1
+# sets: generation
+_generation_get ()
+{
+ local node="$1"
- # Try to make the calling test fail
- status=1
- return 1
+ ctdb_onnode "$node" status
+ # shellcheck disable=SC2154
+ # $outfile set by ctdb_onnode() above
+ generation=$(sed -n -e 's/^Generation:\([0-9]*\)/\1/p' "$outfile")
}
-# Does nothing on cluster and should be overridden for local daemons
-maybe_stop_ctdb ()
+generation_get ()
{
- :
+ local node="$1"
+
+ echo "Get generation"
+ _generation_get "$node"
+ echo "Generation is ${generation}"
+ echo
}
-ctdb_restart_when_done ()
+_generation_has_changed ()
{
- ctdb_test_restart_scheduled=true
+ local node="$1"
+ local generation_old="$2"
+
+ _generation_get "$node"
+
+ [ "$generation" != "$generation_old" ]
}
-ctdb_base_show ()
+# uses: generation
+wait_until_generation_has_changed ()
{
- echo "${CTDB_BASE:-${CTDB_SCRIPTS_BASE}}"
+ local node="$1"
+
+ echo "Wait until generation changes..."
+ wait_until 30 _generation_has_changed "$node" "$generation"
+ echo "Generation changed to ${generation}"
+ echo
}
#######################################
echo "Waiting for a monitor event on node ${pnn}..."
- try_command_on_node "$pnn" $CTDB scriptstatus || {
+ ctdb_onnode "$pnn" scriptstatus || {
echo "Unable to get scriptstatus from node $pnn"
return 1
}
- local ctdb_scriptstatus_original="$out"
+ mv "$outfile" "${outfile}.orig"
+
wait_until 120 _ctdb_scriptstatus_changed
}
_ctdb_scriptstatus_changed ()
{
- try_command_on_node "$pnn" $CTDB scriptstatus || {
+ ctdb_onnode "$pnn" scriptstatus || {
echo "Unable to get scriptstatus from node $pnn"
return 1
}
- [ "$out" != "$ctdb_scriptstatus_original" ]
-}
-
-#######################################
-
-nfs_test_setup ()
-{
- select_test_node_and_ips
-
- nfs_first_export=$(showmount -e $test_ip | sed -n -e '2s/ .*//p')
-
- echo "Creating test subdirectory..."
- try_command_on_node $test_node "mktemp -d --tmpdir=$nfs_first_export"
- nfs_test_dir="$out"
- try_command_on_node $test_node "chmod 777 $nfs_test_dir"
-
- nfs_mnt_d=$(mktemp -d)
- nfs_local_file="${nfs_mnt_d}/${nfs_test_dir##*/}/TEST_FILE"
- nfs_remote_file="${nfs_test_dir}/TEST_FILE"
-
- ctdb_test_exit_hook_add nfs_test_cleanup
-
- echo "Mounting ${test_ip}:${nfs_first_export} on ${nfs_mnt_d} ..."
- mount -o timeo=1,hard,intr,vers=3 \
- "[${test_ip}]:${nfs_first_export}" ${nfs_mnt_d}
-}
-
-nfs_test_cleanup ()
-{
- rm -f "$nfs_local_file"
- umount -f "$nfs_mnt_d"
- rmdir "$nfs_mnt_d"
- onnode -q $test_node rmdir "$nfs_test_dir"
+ ! diff "$outfile" "${outfile}.orig" >/dev/null
}
#######################################
*) _family="inet" ; _bits=32 ;;
esac
+ # Literal backslashes in awk script
+ # shellcheck disable=SC1004
ip addr show to "${_addr}/${_bits}" 2>/dev/null | \
awk -v family="${_family}" \
'NR == 1 { iface = $2; sub(":$", "", iface) } \
{
_addr="${1%/*}" # Remove optional maskbits
+ # Intentional word splitting
+ # shellcheck disable=SC2046,SC2086
set -- $(ip_maskbits_iface $_addr)
if [ -n "$1" ] ; then
_maskbits="$1"
# $1: pnn, $2: DB name
db_get_path ()
{
- try_command_on_node -v $1 $CTDB getdbstatus "$2" |
- sed -n -e "s@^path: @@p"
+ ctdb_onnode -v "$1" "getdbstatus $2" | sed -n -e "s@^path: @@p"
}
# $1: pnn, $2: DB name
db_ctdb_cattdb_count_records ()
{
- try_command_on_node -v $1 $CTDB cattdb "$2" |
- grep '^key' | grep -v '__db_sequence_number__' |
- wc -l
+ # Count the number of keys, excluding any that begin with '_'.
+ # This excludes at least the sequence number record in
+ # persistent/replicated databases. The trailing "|| :" forces
+ # the command to succeed when no records are matched.
+ ctdb_onnode "$1" "cattdb $2 | grep -c '^key([0-9][0-9]*) = \"[^_]' || :"
+ echo "$out"
}
# $1: pnn, $2: DB name, $3: key string, $4: value string, $5: RSN (default 7)
db_ctdb_tstore ()
{
- _tdb=$(db_get_path $1 "$2")
+ _tdb=$(db_get_path "$1" "$2")
_rsn="${5:-7}"
- try_command_on_node $1 $CTDB tstore "$_tdb" "$3" "$4" "$_rsn"
+ ctdb_onnode "$1" tstore "$_tdb" "$3" "$4" "$_rsn"
}
# $1: pnn, $2: DB name, $3: dbseqnum (must be < 255!!!!!)
# Construct 8 byte (unit64_t) database sequence number. This
# probably breaks if $3 > 255
- _value=$(printf "0x%02x%014x" $3 0)
+ _value=$(printf "0x%02x%014x" "$3" 0)
- db_ctdb_tstore $1 "$2" "$_key" "$_value"
+ db_ctdb_tstore "$1" "$2" "$_key" "$_value"
}
-#######################################
+########################################
# Make sure that $CTDB is set.
-: ${CTDB:=ctdb}
+if [ -z "$CTDB" ] ; then
+ CTDB="ctdb"
+fi
+
+if ctdb_test_on_cluster ; then
+ . "${TEST_SCRIPTS_DIR}/integration_real_cluster.bash"
+else
+ . "${TEST_SCRIPTS_DIR}/integration_local_daemons.bash"
+fi
+
-local="${TEST_SUBDIR}/scripts/local.bash"
+local="${CTDB_TEST_SUITE_DIR}/scripts/local.bash"
if [ -r "$local" ] ; then
. "$local"
fi