1 # Hey Emacs, this is a -*- shell-script -*- !!! :-)
# NOTE(review): this excerpt is a non-contiguous sample of a larger script
# (the embedded line numbers skip); comments below describe only what is
# visible here.
# Shared helper library for the CTDB test suite; pull in common helpers.
3 . "${TEST_SCRIPTS_DIR}/common.sh"
5 # If we're not running on a real cluster then we need a local copy of
6 # ctdb (and other stuff) in $PATH and we will use local daemons.
7 if [ -n "$TEST_LOCAL_DAEMONS" ] ; then
8 var_dir="${CTDB_DIR}/tests/var"
# Build a space-separated list of per-daemon unix domain socket paths:
# $var_dir/sock.0 .. sock.(N-1), one per local daemon.
10 export CTDB_NODES_SOCKETS=""
11 for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
12 CTDB_NODES_SOCKETS="${CTDB_NODES_SOCKETS}${CTDB_NODES_SOCKETS:+ }${var_dir}/sock.${i}"
# Prefer the locally built ctdb binaries over any installed ones.
15 PATH="${CTDB_DIR}/bin:${PATH}"
17 export CTDB_NODES="$var_dir/nodes.txt"
20 ######################################################################
# Default timeout (seconds) for ctdb client operations during tests.
22 export CTDB_TIMEOUT=60
# Choose the test_wrap helper: the remote copy when running against a
# real cluster, otherwise the one beside the local test scripts.
24 if [ -n "$CTDB_TEST_REMOTE_DIR" ] ; then
25 CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
# Resolve TEST_SCRIPTS_DIR to an absolute path via a subshell cd.
27 _d=$(cd ${TEST_SCRIPTS_DIR}; echo $PWD)
28 CTDB_TEST_WRAPPER="$_d/test_wrap"
30 export CTDB_TEST_WRAPPER
32 # If $VALGRIND is set then use it whenever ctdb is called, but only if
33 # $CTDB is not already set.
34 [ -n "$CTDB" ] || export CTDB="${VALGRIND}${VALGRIND:+ }ctdb"
37 PATH="${TEST_SCRIPTS_DIR}:${PATH}"
39 ######################################################################
41 ctdb_check_time_logs ()
# Fragment: scan the tail of each node's time log and report any jump
# between consecutive decisecond timestamps that reaches the threshold.
# (Parts of this function are not visible in this excerpt.)
50 out=$(onnode all tail -n 20 /var/log/ctdb.test.time.log 2>&1)
52 if [ $? -eq 0 ] ; then
# onnode prefixes each node's output with ">> NODE: <name>"; strip the
# prefix to recover the node name.
57 node="${line#>> NODE: }"
# Compare consecutive timestamps (deciseconds).  ${ds_prev%?} drops the
# last digit to get whole seconds for date(1)'s -d @<epoch> formatting.
64 if [ -n "$ds_prev" ] && \
65 [ $(($ds_curr - $ds_prev)) -ge $threshold ] ; then
66 echo "Node $node had time jump of $(($ds_curr - $ds_prev))ds between $(date +'%T' -d @${ds_prev%?}) and $(date +'%T' -d @${ds_curr%?})"
75 echo Error getting time logs
# Dump time-sync diagnostics from the test client and all cluster nodes.
78 echo "Check time sync (test client first):"
81 echo "Information from test client:"
84 echo "Information from cluster nodes:"
85 onnode all "top -b -n 1 ; echo '/proc/slabinfo' ; cat /proc/slabinfo"
# Fragment of the test exit/cleanup handler (the function header is not
# visible in this excerpt).
# Fold the final status into $testfailures, then use the combined value.
# NOTE(review): "-a" inside [ ] is obsolescent; separate [ ] tests joined
# with && would be more robust.
95 [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
96 status=$(($testfailures+0))
98 # Avoid making a test fail from this point onwards.  The test is
102 echo "*** TEST COMPLETED (RC=$status) AT $(date '+%F %T'), CLEANING UP..."
# Only check the time logs on a real cluster, when time logging is
# enabled and the test failed.
104 if [ -z "$TEST_LOCAL_DAEMONS" -a -n "$CTDB_TEST_TIME_LOGGING" -a \
105 $status -ne 0 ] ; then
# Run any registered exit hooks exactly once; "|| true" keeps cleanup
# going even if a hook fails, and unset prevents re-running them.
109 eval "$ctdb_test_exit_hook" || true
110 unset ctdb_test_exit_hook
# Restart if a test scheduled one, or if the cluster was left unhealthy.
112 if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then
116 # This could be made unconditional but then we might get
117 # duplication from the recovery in restart_ctdb. We want to
118 # leave the recovery in restart_ctdb so that future tests that
119 # might do a manual restart mid-test will benefit.
120 echo "Forcing a recovery..."
121 onnode 0 $CTDB recover
127 ctdb_test_exit_hook_add ()
# Append the given command(s) to the exit-hook string, separated by " ; "
# so the accumulated value can later be eval'ed as one command line.
129 ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
134 scriptname=$(basename "$0")
136 ctdb_test_restart_scheduled=false
# Run the exit/cleanup handler whenever the test script exits.
138 trap "ctdb_test_exit" 0
141 ########################################
144 try_command_on_node ()
# Fragment: run a command on the given node(s) via onnode, capturing
# combined stdout/stderr in $out; prints diagnostics on failure.
146 local nodespec="$1" ; shift
# Consume leading option-like arguments ("-v" and any other "-…" flags,
# which are passed through to onnode) before the real node spec.
151 while [ "${nodespec#-}" != "$nodespec" ] ; do
152 if [ "$nodespec" = "-v" ] ; then
155 onnode_opts="$nodespec"
157 nodespec="$1" ; shift
162 out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
164 echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
170 echo "Output of \"$cmd\":"
175 sanity_check_output ()
# Fragment: check that $output has at least the required number of lines
# and that every line matches the given (anchored) regexp.
178 local regexp="$2" # Should be anchored as necessary.
183 local num_lines=$(echo "$output" | wc -l)
184 echo "There are $num_lines lines of output"
185 if [ $num_lines -lt $min_lines ] ; then
186 echo "BAD: that's less than the required number (${min_lines})"
191 local unexpected # local doesn't pass through status of command on RHS.
# egrep -v selects lines that do NOT match, so a zero exit status here
# means unexpected lines were found.
192 unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?
194 # Note that this is reversed.
195 if [ $status -eq 0 ] ; then
196 echo "BAD: unexpected lines in output:"
# cat -A makes non-printing characters visible in the failure report.
197 echo "$unexpected" | cat -A
200 echo "Output lines look OK"
# Fragment: sanity check a list of "ip node" assignments, plus a helper
# that fetches the current assignments into $out.
208 local ips="$1" # list of "ip node" lines
210 echo "Sanity checking IPs..."
# Fail if any node is -1 (IP unassigned) or every IP sits on one node.
214 while read x ipp ; do
215 [ "$ipp" = "-1" ] && break
216 if [ -n "$prev" -a "$ipp" != "$prev" ] ; then
223 echo "BAD: a node was -1 or IPs are only assigned to one node"
224 echo "Are you running an old version of CTDB?"
228 # This returns a list of "ip node" lines in $out
# Machine-readable "ctdb ip" output is colon-separated; drop the header
# line and convert the remaining fields to space-separated "ip node".
232 try_command_on_node $node "$CTDB ip -Y -n all | cut -d ':' -f1-3 | sed -e '1d' -e 's@^:@@' -e 's@:@ @g'"
235 select_test_node_and_ips ()
# Fragment: pick a node that has public IPs ($test_node), collect all of
# that node's IPs into $test_node_ips and set $test_ip to the first one.
239 # When selecting test_node we just want a node that has public
240 # IPs. This will work and is economically semi-random. :-)
# Take the node from the first "ip node" line of $out.
242 read x test_node <<<"$out"
246 while read ip pnn ; do
247 if [ "$pnn" = "$test_node" ] ; then
248 test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
250 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
252 echo "Selected node ${test_node} with IPs: ${test_node_ips}."
# First IP in the space-separated list.
253 test_ip="${test_node_ips%% *}"
256 #######################################
258 # Wait until either timeout expires or command succeeds. The command
259 # will be tried once per second.
# Fragment: a leading "!" argument negates the success condition.
262 local timeout="$1" ; shift # "$@" is the command...
265 if [ "$1" = "!" ] ; then
# Progress marker: "<timeout|" up front, "|elapsed|" on success.
270 echo -n "<${timeout}|"
272 while [ $t -gt 0 ] ; do
# Success is command status 0 normally, non-zero when negated.
275 if { ! $negate && [ $rc -eq 0 ] ; } || \
276 { $negate && [ $rc -ne 0 ] ; } ; then
277 echo "|$(($timeout - $t))|"
# Fragment: following loop belongs to a later (not fully visible) helper.
294 for i in $(seq 1 $1) ; do
301 _cluster_is_healthy ()
# Fragment: inspect machine-readable "ctdb status" output.  Healthy
# means at least one valid node line and no line flagged unhealthy.
303 local out x count line
305 out=$($CTDB -Y status 2>/dev/null) || return 1
311 # We need to see valid lines if we're going to be healthy.
312 [ "${line#:[0-9]}" != "$line" ] && count=$(($count + 1))
313 # A line indicating a node is unhealthy causes failure.
314 [ "${line##:*:*:*1:}" != "$line" ] && return 1
316 [ $count -gt 0 ] && return $?
317 } <<<"$out" # Yay bash!
320 cluster_is_healthy ()
# Fragment: run _cluster_is_healthy on node 0; on failure, dump debug
# information unless a restart is already scheduled.
322 if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
323 echo "Cluster is HEALTHY"
326 echo "Cluster is UNHEALTHY"
327 if ! ${ctdb_test_restart_scheduled:-false} ; then
328 echo "DEBUG AT $(date '+%F %T'):"
# Each string is a diagnostic command to run (presumably eval'ed in the
# elided loop body — not visible here).
330 for i in "onnode -q 0 $CTDB status" "onnode -q 0 onnode all $CTDB scriptstatus" ; do
339 wait_until_healthy ()
# Wait for the cluster to become healthy; optional $1 overrides the
# default 120 second timeout.
341 local timeout="${1:-120}"
343 echo "Waiting for cluster to become healthy..."
# NOTE(review): $timeout is computed above but the literal 120 is passed
# here, so a caller-supplied timeout is silently ignored — looks like a
# bug; should presumably be: wait_until $timeout _cluster_is_healthy.
345 wait_until 120 _cluster_is_healthy
348 # This function is becoming nicely overloaded. Soon it will collapse! :-)
# Fragment: node_has_status <pnn> <status>.  Maps a symbolic status name
# onto one of three test mechanisms: $bits (flag-field glob matched
# against "ctdb -Y status" lines), $fpat (regexp against "ctdb
# statistics") or $mpat (regexp against "ctdb getmonmode").
# Flag fields in the status line are positional; "?" skips a field.
356 (unhealthy) bits="?:?:?:1:*" ;;
357 (healthy) bits="?:?:?:0:*" ;;
358 (disconnected) bits="1:*" ;;
359 (connected) bits="0:*" ;;
360 (banned) bits="?:1:*" ;;
361 (unbanned) bits="?:0:*" ;;
362 (disabled) bits="?:?:1:*" ;;
363 (enabled) bits="?:?:0:*" ;;
364 (stopped) bits="?:?:?:?:1:*" ;;
365 (notstopped) bits="?:?:?:?:0:*" ;;
366 (frozen) fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
367 (unfrozen) fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
368 (monon) mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
369 (monoff) mpat='^Monitoring mode:DISABLED \(1\)$' ;;
371 echo "node_has_status: unknown status \"$status\""
375 if [ -n "$bits" ] ; then
378 out=$($CTDB -Y status 2>&1) || return 1
383 # This needs to be done in 2 steps to avoid false matches.
# First strip the ":<pnn>:<ip>:" prefix; if nothing was stripped this
# line belongs to a different node, so skip it.
384 local line_bits="${line#:${pnn}:*:}"
385 [ "$line_bits" = "$line" ] && continue
386 [ "${line_bits#${bits}}" != "$line_bits" ] && return 0
389 } <<<"$out" # Yay bash!
390 elif [ -n "$fpat" ] ; then
391 $CTDB statistics -n "$pnn" | egrep -q "$fpat"
392 elif [ -n "$mpat" ] ; then
393 $CTDB getmonmode -n "$pnn" | egrep -q "$mpat"
395 echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
400 wait_until_node_has_status ()
# Fragment: wait (default 30s) until node <pnn> reports <status>,
# checking from node $proxy_pnn; dump diagnostics on timeout.
404 local timeout="${3:-30}"
405 local proxy_pnn="${4:-any}"
407 echo "Waiting until node $pnn has status \"$status\"..."
409 if ! wait_until $timeout onnode $proxy_pnn $CTDB_TEST_WRAPPER node_has_status "$pnn" "$status" ; then
# Each string is a diagnostic command (loop body elided in this excerpt).
410 for i in "onnode -q any $CTDB status" "onnode -q any onnode all $CTDB scriptstatus" ; do
420 # Useful for superficially testing IP failover.
421 # IPs must be on nodes matching nodeglob.
422 ips_are_on_nodeglob ()
# Fragment: succeeds when every given IP is hosted on a node matching
# $nodeglob (the final glob test is not visible in this excerpt).
424 local nodeglob="$1" ; shift
431 while read ip pnn ; do
432 for check in $ips ; do
433 if [ "$check" = "$ip" ] ; then
# ${ips/<ip>} deletes the matched IP from the remaining-work list.
438 ips="${ips/${ip}}" # Remove from list
441 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
# All IPs are accounted for when only whitespace remains.
443 ips="${ips// }" # Remove any spaces.
447 wait_until_ips_are_on_nodeglob ()
# Wait up to 60 seconds for the IP layout described by "$@".
449 echo "Waiting for IPs to fail over..."
451 wait_until 60 ips_are_on_nodeglob "$@"
# Fragment: node_has_some_ips — succeeds when the given node hosts at
# least one public IP (surrounding lines elided in this excerpt).
462 while read ip pnn ; do
463 if [ "$node" = "$pnn" ] ; then
466 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
471 wait_until_node_has_some_ips ()
# Wait up to 60 seconds for the node given in "$@" to host some IPs.
473 echo "Waiting for node to have some IPs..."
475 wait_until 60 node_has_some_ips "$@"
478 #######################################
# Fragment: daemons_stop — polite "ctdb shutdown" first, then escalate
# to pkill and finally pkill -9 for stragglers, then remove test DBs.
482 echo "Attempting to politely shutdown daemons..."
483 onnode 1 $CTDB shutdown -n all || true
485 echo "Sleeping for a while..."
# The full binary path is used so "pgrep/pkill -f" only match the test
# daemons, not any system ctdbd.
488 if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
489 echo "Killing remaining daemons..."
490 pkill -f $CTDB_DIR/bin/ctdbd
492 if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
493 echo "Once more with feeling.."
494 pkill -9 $CTDB_DIR/bin/ctdbd
498 local var_dir=$CTDB_DIR/tests/var
499 rm -rf $var_dir/test.db
# Fragment: daemons_setup — create DB directories and generate the
# nodes/public-addresses files used by the local daemons.
504 local var_dir=$CTDB_DIR/tests/var
506 mkdir -p $var_dir/test.db/persistent
508 local public_addresses=$var_dir/public_addresses.txt
509 local no_public_addresses=$var_dir/no_public_addresses.txt
510 rm -f $CTDB_NODES $public_addresses $no_public_addresses
512 # If there are (strictly) greater than 2 nodes then we'll randomly
513 # choose a node to have no public addresses.
514 local no_public_ips=-1
515 [ $TEST_LOCAL_DAEMONS -gt 2 ] && no_public_ips=$(($RANDOM % $TEST_LOCAL_DAEMONS))
# Record the chosen node so daemons_start_1 can look it up later.
516 echo "$no_public_ips" >$no_public_addresses
519 for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
520 if [ "${CTDB_USE_IPV6}x" != "x" ]; then
522 ip addr add ::$i/128 dev lo
524 echo 127.0.0.$i >> $CTDB_NODES
525 # 2 public addresses on most nodes, just to make things interesting.
526 if [ $(($i - 1)) -ne $no_public_ips ] ; then
527 echo "192.0.2.$i/24 lo" >> $public_addresses
528 echo "192.0.2.$(($i + $TEST_LOCAL_DAEMONS))/24 lo" >> $public_addresses
# Fragment: daemons_start_1 <pnn> [ctdbd options...] — start one local
# ctdbd with test-specific options.
537 shift # "$@" gets passed to ctdbd
539 local var_dir=$CTDB_DIR/tests/var
541 local public_addresses=$var_dir/public_addresses.txt
542 local no_public_addresses=$var_dir/no_public_addresses.txt
# Read back the node that daemons_setup chose to have no public IPs.
544 local no_public_ips=-1
545 [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses
547 if [ "$no_public_ips" = $pnn ] ; then
548 echo "Node $no_public_ips will have no public IPs."
551 local ctdb_options="--reclock=$var_dir/rec.lock --nlist $CTDB_NODES --nopublicipcheck --event-script-dir=$CTDB_DIR/tests/events.d --logfile=$var_dir/daemons.log -d 3 --dbdir=$var_dir/test.db --dbdir-persistent=$var_dir/test.db/persistent --dbdir-state=$var_dir/test.db/state"
553 if [ -z "$CTDB_TEST_REAL_CLUSTER" ]; then
554 ctdb_options="$ctdb_options --public-interface=lo"
# The chosen node gets an empty public-address list (/dev/null).
557 if [ $pnn -eq $no_public_ips ] ; then
558 ctdb_options="$ctdb_options --public-addresses=/dev/null"
560 ctdb_options="$ctdb_options --public-addresses=$public_addresses"
563 # Need full path so we can use "pkill -f" to kill the daemons.
564 $VALGRIND $CTDB_DIR/bin/ctdbd --socket=$var_dir/sock.$pnn $ctdb_options "$@" ||return 1
569 # "$@" gets passed to ctdbd
# Fragment: daemons_start — start all local daemons, then point the
# default client socket at node 0's socket.
571 echo "Starting $TEST_LOCAL_DAEMONS ctdb daemons..."
573 for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
574 daemons_start_1 $i "$@"
577 local var_dir=$(cd $CTDB_DIR/tests/var; echo $PWD)
# Replace /tmp/ctdb.socket with a symlink to node 0's socket unless a
# real (non-symlink) socket is already there.
579 if [ -L /tmp/ctdb.socket -o ! -S /tmp/ctdb.socket ] ; then
580 ln -sf $var_dir/sock.0 /tmp/ctdb.socket || return 1
586 _ctdb_hack_options ()
588 local ctdb_options="$*"
590 # We really just want to pass CTDB_OPTIONS but on RH
591 # /etc/sysconfig/ctdb can, and frequently does, set that variable.
592 # So instead, we hack badly. We'll add these as we use them.
593 # Note that these may still be overridden by the above file... but
594 # we tend to use the exotic options here... so that is unlikely.
596 case "$ctdb_options" in
597 *--start-as-stopped*)
598 export CTDB_START_AS_STOPPED="yes"
604 _ctdb_hack_options "$@"
606 if [ -e /etc/redhat-release ] ; then
609 /etc/init.d/ctdb restart
615 _ctdb_hack_options "$@"
617 /etc/init.d/ctdb start
# Fragment: start path plus common post-start stabilisation — wait for
# health, shorten the re-recovery interval, then force recoveries.
622 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
627 # Common things to do after starting one or more nodes.
630 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1
632 echo "Setting RerecoveryTimeout to 1"
633 onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
635 # In recent versions of CTDB, forcing a recovery like this blocks
636 # until the recovery is complete. Hopefully this will help the
637 # cluster to stabilise before a subsequent test.
638 echo "Forcing a recovery..."
639 onnode -q 0 $CTDB recover
# NOTE(review): a second recovery is forced here; presumably to cover
# versions where the first does not block — confirm before changing.
641 echo "Forcing a recovery..."
642 onnode -q 0 $CTDB recover
647 # This assumes that ctdbd is not running on the given node.
# Fragment: ctdb_start_1 <pnn> [ctdbd args...] — start one node: a local
# daemon when using sockets, otherwise via the init script over onnode.
651 shift # "$@" is passed to ctdbd start.
653 echo -n "Starting CTDB on node ${pnn}..."
655 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
656 daemons_start_1 $pnn "$@"
658 onnode $pnn $CTDB_TEST_WRAPPER _ctdb_start "$@"
661 # If we're starting only 1 node then we're doing something weird.
662 ctdb_restart_when_done
667 # "$@" is passed to ctdbd start.
# Fragment: restart_ctdb — restart the whole cluster, retrying up to 5
# times until it becomes healthy and stays healthy.
669 echo -n "Restarting CTDB"
670 if $ctdb_test_restart_scheduled ; then
671 echo -n " (scheduled)"
676 for i in $(seq 1 5) ; do
677 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
681 onnode -p all $CTDB_TEST_WRAPPER _restart_ctdb "$@"
683 echo "Restart failed. Trying again in a few seconds..."
688 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || {
689 echo "Cluster didn't become healthy. Restarting..."
# NOTE(review): $debug_out is captured but not printed in the visible
# lines — verify it is used in the elided part before removing.
693 local debug_out=$(onnode -p all ctdb status -Y 2>&1; onnode -p all ctdb scriptstatus 2>&1)
695 echo "Setting RerecoveryTimeout to 1"
696 onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
698 # In recent versions of CTDB, forcing a recovery like this
699 # blocks until the recovery is complete. Hopefully this will
700 # help the cluster to stabilise before a subsequent test.
701 echo "Forcing a recovery..."
702 onnode -q 0 $CTDB recover
704 echo "Forcing a recovery..."
705 onnode -q 0 $CTDB recover
707 # Cluster is still healthy. Good, we're done!
708 if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
709 echo "Cluster become UNHEALTHY again. Restarting..."
713 echo "Doing a sync..."
714 onnode -q 0 $CTDB sync
720 echo "Cluster UNHEALTHY... too many attempts..."
722 # Try to make the calling test fail
727 ctdb_restart_when_done ()
# Schedule a full cluster restart to happen during test exit cleanup.
729 ctdb_test_restart_scheduled=true
732 #######################################
734 install_eventscript ()
# Install an eventscript: pushed to every node via onnode on a real
# cluster, or written into the local tests/events.d directory.
736 local script_name="$1"
737 local script_contents="$2"
739 if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
740 # The quoting here is *very* fragile. However, we do
741 # experience the joy of installing a short script using
742 # onnode, and without needing to know the IP addresses of the
744 onnode all "f=\"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\" ; echo \"Installing \$f\" ; echo '${script_contents}' > \"\$f\" ; chmod 755 \"\$f\""
746 f="${CTDB_DIR}/tests/events.d/${script_name}"
747 echo "$script_contents" >"$f"
752 uninstall_eventscript ()
# Remove a previously installed eventscript from all nodes (or locally).
754 local script_name="$1"
756 if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
757 onnode all "rm -vf \"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\""
759 rm -vf "${CTDB_DIR}/tests/events.d/${script_name}"
763 #######################################
765 # This section deals with the 99.ctdb_test eventscript.
767 # Metafunctions: Handle a ctdb-test file on a node.
# Each helper performs a file operation on /tmp/ctdb-test-<type>.<pnn>
# on the given node via try_command_on_node.
769 ctdb_test_eventscript_file_create ()
774 try_command_on_node $pnn touch "/tmp/ctdb-test-${type}.${pnn}"
777 ctdb_test_eventscript_file_remove ()
782 try_command_on_node $pnn rm -f "/tmp/ctdb-test-${type}.${pnn}"
785 ctdb_test_eventscript_file_exists ()
790 try_command_on_node $pnn test -f "/tmp/ctdb-test-${type}.${pnn}" >/dev/null 2>&1
794 # Handle a flag file on a node that is removed by 99.ctdb_test on the
# Dispatch to file_create/remove/exists for the per-event flag file.
796 ctdb_test_eventscript_flag ()
802 ctdb_test_eventscript_file_${cmd} "$pnn" "flag-${event}"
806 # Handle a trigger that causes 99.ctdb_test to fail its monitor
808 ctdb_test_eventscript_unhealthy_trigger ()
813 ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-trigger"
816 # Handle the file that 99.ctdb_test created to show that it has marked
817 # a node unhealthy because it detected the above trigger.
818 ctdb_test_eventscript_unhealthy_detected ()
823 ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-detected"
826 # Handle a trigger that causes 99.ctdb_test to time out its monitor
827 # event.  This should cause the node to be banned.
828 ctdb_test_eventscript_timeout_trigger ()
834 ctdb_test_eventscript_file_${cmd} "$pnn" "${event}-timeout"
837 # Note that the eventscript can't use the above functions!
838 ctdb_test_eventscript_install ()
# Build and install the 99.ctdb_test eventscript.  The script body below
# is a single-quoted literal: it clears the per-event flag file, reports
# unhealthy while the unhealthy-trigger file exists (and healthy again
# once it is gone), and sleeps when the timeout-trigger file exists so
# the monitor event times out.  No comments are inserted inside the
# literal so the installed script stays byte-identical.
841 local script='#!/bin/sh
845 rm -vf "/tmp/ctdb-test-flag-${1}.${pnn}"
847 trigger="/tmp/ctdb-test-unhealthy-trigger.${pnn}"
848 detected="/tmp/ctdb-test-unhealthy-detected.${pnn}"
849 timeout_trigger="/tmp/ctdb-test-${1}-timeout.${pnn}"
852 if [ -e "$trigger" ] ; then
853 echo "${0}: Unhealthy because \"$trigger\" detected"
856 elif [ -e "$detected" -a ! -e "$trigger" ] ; then
857 echo "${0}: Healthy again, \"$trigger\" no longer detected"
863 if [ -e "$timeout_trigger" ] ; then
864 echo "${0}: Sleeping for a long time because \"$timeout_trigger\" detected"
874 install_eventscript "99.ctdb_test" "$script"
877 ctdb_test_eventscript_uninstall ()
# Remove the 99.ctdb_test eventscript from all nodes.
879 uninstall_eventscript "99.ctdb_test"
882 # Note that this only works if you know all other monitor events will
883 # succeed.  You also need to install the eventscript before using it.
884 wait_for_monitor_event ()
# Create a monitor flag file and wait (up to 120s) for 99.ctdb_test to
# remove it, which proves a monitor event has run on the node.
888 echo "Waiting for a monitor event on node ${pnn}..."
889 ctdb_test_eventscript_flag create $pnn "monitor"
891 wait_until 120 ! ctdb_test_eventscript_flag exists $pnn "monitor"
895 # Make sure that $CTDB is set.
# Source test-suite-local overrides if a local.bash exists.  Note that
# "local" here is a plain top-level variable name (legal outside
# functions), not the shell builtin.
898 local="${TEST_SUBDIR}/scripts/local.bash"
899 if [ -r "$local" ] ; then