1 # Hey Emacs, this is a -*- shell-script -*- !!! :-)
# Integration-test support library: sourced (not executed) by CTDB test
# cases.  Sets up PATH, node socket lists and the $CTDB command wrapper.
# NOTE(review): this listing is elided — interior lines (loop "done",
# "fi", etc.) are missing; code bytes below are preserved verbatim.
3 . "${TEST_SCRIPTS_DIR}/common.sh"
5 # If we're not running on a real cluster then we need a local copy of
6 # ctdb (and other stuff) in $PATH and we will use local daemons.
7 if [ -n "$TEST_LOCAL_DAEMONS" ] ; then
8 export CTDB_NODES_SOCKETS=""
# Build a space-separated list of per-daemon Unix socket paths,
# one per local daemon: ${TEST_VAR_DIR}/sock.0 .. sock.N-1.
9 for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
10 CTDB_NODES_SOCKETS="${CTDB_NODES_SOCKETS}${CTDB_NODES_SOCKETS:+ }${TEST_VAR_DIR}/sock.${i}"
13 # Use in-tree binaries if running against local daemons.
14 # Otherwise CTDB needs to be installed on all nodes.
15 if [ -n "$ctdb_dir" -a -d "${ctdb_dir}/bin" ] ; then
16 PATH="${ctdb_dir}/bin:${PATH}"
19 export CTDB_NODES="${TEST_VAR_DIR}/nodes.txt"
22 ######################################################################
24 export CTDB_TIMEOUT=60
# Prefer a remote wrapper when testing against a real cluster;
# otherwise use the test_wrap script next to the test scripts.
26 if [ -n "$CTDB_TEST_REMOTE_DIR" ] ; then
27 CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
29 _d=$(cd ${TEST_SCRIPTS_DIR}; echo $PWD)
30 CTDB_TEST_WRAPPER="$_d/test_wrap"
32 export CTDB_TEST_WRAPPER
34 # If $VALGRIND is set then use it whenever ctdb is called, but only if
35 # $CTDB is not already set.
36 [ -n "$CTDB" ] || export CTDB="${VALGRIND}${VALGRIND:+ }ctdb"
39 PATH="${TEST_SCRIPTS_DIR}:${PATH}"
41 ######################################################################
# Scan per-node time logs for jumps of at least $threshold deciseconds
# and dump time-sync diagnostics if anything looks off.
# NOTE(review): body is elided here — the loop/parsing lines between the
# fragments below are missing from this listing.
43 ctdb_check_time_logs ()
52 out=$(onnode all tail -n 20 "${TEST_VAR_DIR}/ctdb.test.time.log" 2>&1)
54 if [ $? -eq 0 ] ; then
# onnode prefixes each node's output with ">> NODE: <name>" — strip it.
59 node="${line#>> NODE: }"
# ds_* values appear to be timestamps in deciseconds; a gap >= threshold
# between consecutive entries is reported as a time jump.
66 if [ -n "$ds_prev" ] && \
67 [ $(($ds_curr - $ds_prev)) -ge $threshold ] ; then
68 echo "Node $node had time jump of $(($ds_curr - $ds_prev))ds between $(date +'%T' -d @${ds_prev%?}) and $(date +'%T' -d @${ds_curr%?})"
77 echo Error getting time logs
80 echo "Check time sync (test client first):"
83 echo "Information from test client:"
86 echo "Information from cluster nodes:"
87 onnode all "top -b -n 1 ; echo '/proc/slabinfo' ; cat /proc/slabinfo"
# Exit handler fragment (installed via trap below): folds any recorded
# test failures into $status, runs registered exit hooks and restarts
# the cluster if a restart was scheduled or the cluster is unhealthy.
97 [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
98 status=$(($testfailures+0))
100 # Avoid making a test fail from this point onwards.  The test is
104 echo "*** TEST COMPLETED (RC=$status) AT $(date '+%F %T'), CLEANING UP..."
# Only check time logs on a real cluster, when time logging was enabled
# and the test actually failed.
106 if [ -z "$TEST_LOCAL_DAEMONS" -a -n "$CTDB_TEST_TIME_LOGGING" -a \
107 $status -ne 0 ] ; then
# Run accumulated cleanup hooks best-effort, then clear them so a
# re-entered handler does not run them twice.
111 eval "$ctdb_test_exit_hook" || true
112 unset ctdb_test_exit_hook
114 if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then
118 # This could be made unconditional but then we might get
119 # duplication from the recovery in restart_ctdb.  We want to
120 # leave the recovery in restart_ctdb so that future tests that
121 # might do a manual restart mid-test will benefit.
122 echo "Forcing a recovery..."
123 onnode 0 $CTDB recover
# Append "$*" to the command string run by the exit handler; multiple
# hooks are chained with " ; " so each runs in registration order.
129 ctdb_test_exit_hook_add ()
131 ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
136 scriptname=$(basename "$0")
138 ctdb_test_restart_scheduled=false
# Run ctdb_test_exit on any script exit (trap on signal 0 / EXIT).
140 trap "ctdb_test_exit" 0
143 ########################################
143 ########################################
146 try_command_on_node ()
148 local nodespec="$1" ; shift
153 while [ "${nodespec#-}" != "$nodespec" ] ; do
154 if [ "$nodespec" = "-v" ] ; then
157 onnode_opts="$nodespec"
159 nodespec="$1" ; shift
164 out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
166 echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
172 echo "Output of \"$cmd\":"
# Validate $output (set by caller): it must contain at least $1 lines
# and every line must match the extended regexp $2.
177 sanity_check_output ()
180 local regexp="$2" # Should be anchored as necessary.
185 local num_lines=$(echo "$output" | wc -l)
186 echo "There are $num_lines lines of output"
187 if [ $num_lines -lt $min_lines ] ; then
188 echo "BAD: that's less than the required number (${min_lines})"
# Split declaration from assignment so egrep's exit status is not
# masked by "local" (local always succeeds).
193 local unexpected # local doesn't pass through status of command on RHS.
194 unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?
196 # Note that this is reversed: egrep -v succeeding (status 0) means
# non-matching lines were found, which is the failure case.
197 if [ $status -eq 0 ] ; then
198 echo "BAD: unexpected lines in output:"
# cat -A makes invisible characters (tabs, line ends) visible.
199 echo "$unexpected" | cat -A
202 echo "Output lines look OK"
# Sanity-check an "ip node" list: fail if any IP is unassigned (-1) or
# all IPs sit on a single node.
210 local ips="$1" # list of "ip node" lines
212 echo "Sanity checking IPs..."
# $prev tracks the previous node number so we can detect whether at
# least two different nodes hold IPs.
216 while read x ipp ; do
217 [ "$ipp" = "-1" ] && break
218 if [ -n "$prev" -a "$ipp" != "$prev" ] ; then
225 echo "BAD: a node was -1 or IPs are only assigned to one node"
226 echo "Are you running an old version of CTDB?"
230 # This returns a list of "ip node" lines in $out
# (machine-readable "ctdb ip -Y" output reduced to "addr node" pairs).
234 try_command_on_node $node "$CTDB ip -Y -n all | cut -d ':' -f1-3 | sed -e '1d' -e 's@^:@@' -e 's@:@ @g'"
# Pick the first node that has at least one public IP assigned; export
# $test_node, $test_node_ips (all its IPs) and $test_ip (the first).
# Returns non-zero if no suitable node was found.
237 _select_test_node_and_ips ()
241 test_node="" # this matches no PNN
244 while read ip pnn ; do
245 if [ -z "$test_node" -a "$pnn" != "-1" ] ; then
248 if [ "$pnn" = "$test_node" ] ; then
249 test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
251 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
253 echo "Selected node ${test_node} with IPs: ${test_node_ips}."
254 test_ip="${test_node_ips%% *}"
256 [ -n "$test_node" ] || return 1
# Retry wrapper: keep calling the helper above until it succeeds or
# the (elided) timeout counter runs out.
259 select_test_node_and_ips ()
262 while ! _select_test_node_and_ips ; do
263 echo "Unable to find a test node with IPs assigned"
264 if [ $timeout -le 0 ] ; then
265 echo "BAD: Too many attempts"
269 timeout=$(($timeout - 1))
275 #######################################
277 # Wait until either timeout expires or command succeeds.  The command
278 # will be tried once per second.  A leading "!" negates the condition
# (wait until the command *fails*).  Progress is printed as "<T|...|n|".
281 local timeout="$1" ; shift # "$@" is the command...
284 if [ "$1" = "!" ] ; then
289 echo -n "<${timeout}|"
291 while [ $t -gt 0 ] ; do
# Success when (command ok and not negated) or (command failed and
# negated); then report how many seconds were used.
294 if { ! $negate && [ $rc -eq 0 ] ; } || \
295 { $negate && [ $rc -ne 0 ] ; } ; then
296 echo "|$(($timeout - $t))|"
# Fragment of a separate sleep/retry helper — presumably sleeps $1
# seconds one second at a time; surrounding lines are elided.
313 for i in $(seq 1 $1) ; do
# Parse machine-readable "ctdb -Y status": healthy iff at least one
# valid node line is seen and no line flags a node as unhealthy.
320 _cluster_is_healthy ()
322 local out x count line
324 out=$($CTDB -Y status 2>/dev/null) || return 1
330 # We need to see valid lines if we're going to be healthy.
331 [ "${line#:[0-9]}" != "$line" ] && count=$(($count + 1))
332 # A line indicating a node is unhealthy causes failure.
333 [ "${line##:*:*:*1:}" != "$line" ] && return 1
335 [ $count -gt 0 ] && return $?
336 } <<<"$out" # Yay bash!
# Public wrapper: run the check on node 0 via the test wrapper and
# dump status/scriptstatus diagnostics when the cluster is unhealthy.
339 cluster_is_healthy ()
341 if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
342 echo "Cluster is HEALTHY"
345 echo "Cluster is UNHEALTHY"
346 if ! ${ctdb_test_restart_scheduled:-false} ; then
347 echo "DEBUG AT $(date '+%F %T'):"
349 for i in "onnode -q 0 $CTDB status" "onnode -q 0 onnode all $CTDB scriptstatus" ; do
# Wait (default 120s) for the whole cluster to become healthy.
358 wait_until_healthy ()
360 local timeout="${1:-120}"
362 echo "Waiting for cluster to become healthy..."
# NOTE(review): hard-coded 120 ignores the $timeout parameter declared
# above — callers passing a custom timeout get 120s regardless.
# Likely should be: wait_until $timeout _cluster_is_healthy
364 wait_until 120 _cluster_is_healthy
367 # This function is becoming nicely overloaded.  Soon it will collapse!  :-)
# Test whether node $pnn has the given status.  Status names map either
# to flag-field patterns ($bits) matched against "ctdb -Y status" lines,
# or to regexps matched against "ctdb statistics" ($fpat) or
# "ctdb getmonmode" ($mpat) output.
375 (unhealthy) bits="?:?:?:1:*" ;;
376 (healthy) bits="?:?:?:0:*" ;;
377 (disconnected) bits="1:*" ;;
378 (connected) bits="0:*" ;;
379 (banned) bits="?:1:*" ;;
380 (unbanned) bits="?:0:*" ;;
381 (disabled) bits="?:?:1:*" ;;
382 (enabled) bits="?:?:0:*" ;;
383 (stopped) bits="?:?:?:?:1:*" ;;
384 (notstopped) bits="?:?:?:?:0:*" ;;
385 (frozen) fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
386 (unfrozen) fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
387 (monon) mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
388 (monoff) mpat='^Monitoring mode:DISABLED \(1\)$' ;;
390 echo "node_has_status: unknown status \"$status\""
394 if [ -n "$bits" ] ; then
397 out=$($CTDB -Y status 2>&1) || return 1
402 # This needs to be done in 2 steps to avoid false matches.
# Step 1: strip the ":pnn:addr:" prefix; a no-op means wrong node.
403 local line_bits="${line#:${pnn}:*:}"
404 [ "$line_bits" = "$line" ] && continue
# Step 2: the remaining flag fields must match the $bits pattern.
405 [ "${line_bits#${bits}}" != "$line_bits" ] && return 0
408 } <<<"$out" # Yay bash!
409 elif [ -n "$fpat" ] ; then
410 $CTDB statistics -n "$pnn" | egrep -q "$fpat"
411 elif [ -n "$mpat" ] ; then
412 $CTDB getmonmode -n "$pnn" | egrep -q "$mpat"
414 echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
# Poll (default 30s) until node $pnn reports the wanted status, using
# node $proxy_pnn (default "any") to run the check; dump diagnostics
# on timeout.
419 wait_until_node_has_status ()
423 local timeout="${3:-30}"
424 local proxy_pnn="${4:-any}"
426 echo "Waiting until node $pnn has status \"$status\"..."
428 if ! wait_until $timeout onnode $proxy_pnn $CTDB_TEST_WRAPPER node_has_status "$pnn" "$status" ; then
429 for i in "onnode -q any $CTDB status" "onnode -q any onnode all $CTDB scriptstatus" ; do
439 # Useful for superficially testing IP failover.
440 # IPs must be on nodes matching nodeglob.
# Succeeds iff every IP in the list is currently hosted by a node whose
# PNN matches $nodeglob; each found IP is removed from the working list.
441 ips_are_on_nodeglob ()
443 local nodeglob="$1" ; shift
450 while read ip pnn ; do
451 for check in $ips ; do
452 if [ "$check" = "$ip" ] ; then
457 ips="${ips/${ip}}" # Remove from list
460 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
462 ips="${ips// }" # Remove any spaces.
466 wait_until_ips_are_on_nodeglob ()
468 echo "Waiting for IPs to fail over..."
470 wait_until 60 ips_are_on_nodeglob "$@"
# Fragment: succeeds if node $node currently hosts at least one IP.
481 while read ip pnn ; do
482 if [ "$node" = "$pnn" ] ; then
485 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
490 wait_until_node_has_some_ips ()
492 echo "Waiting for node to have some IPs..."
494 wait_until 60 node_has_some_ips "$@"
497 #######################################
# Stop all local test daemons: polite "ctdb shutdown" first, then
# escalating pkill passes matched on the distinctive ctdbd argv, and
# finally remove the test databases.
501 echo "Attempting to politely shutdown daemons..."
502 onnode 1 $CTDB shutdown -n all || true
504 echo "Sleeping for a while..."
507 local pat="ctdbd --socket=.* --nlist .* --nopublicipcheck"
508 if pgrep -f "$pat" >/dev/null ; then
509 echo "Killing remaining daemons..."
512 if pgrep -f "$pat" >/dev/null ; then
513 echo "Once more with feeling.."
518 rm -rf "${TEST_VAR_DIR}/test.db"
# Prepare the on-disk environment for local daemons: db directories,
# nodes file, public-address files and a writable copy of events.d/.
523 mkdir -p "${TEST_VAR_DIR}/test.db/persistent"
525 local public_addresses_all="${TEST_VAR_DIR}/public_addresses_all"
526 local no_public_addresses="${TEST_VAR_DIR}/no_public_addresses.txt"
527 rm -f $CTDB_NODES $public_addresses_all $no_public_addresses
529 # If there are (strictly) greater than 2 nodes then we'll randomly
530 # choose a node to have no public addresses.
531 local no_public_ips=-1
532 [ $TEST_LOCAL_DAEMONS -gt 2 ] && no_public_ips=$(($RANDOM % $TEST_LOCAL_DAEMONS))
533 echo "$no_public_ips" >$no_public_addresses
535 # When running certain tests we add and remove eventscripts, so we
536 # need to be able to modify the events.d/ directory.  Therefore,
537 # we use a temporary events.d/ directory under $TEST_VAR_DIR.  We
538 # copy the actual test eventscript(s) in there from the original
539 # events.d/ directory that sits alongside $TEST_SCRIPT_DIR.
540 local top=$(dirname "$TEST_SCRIPTS_DIR")
541 local events_d="${top}/events.d"
542 mkdir -p "${TEST_VAR_DIR}/events.d"
543 cp -p "${events_d}/"* "${TEST_VAR_DIR}/events.d/"
# Emit one node address per daemon: loopback-based IPv4 (127.0.0.N) or
# IPv6 (::N, added to lo) depending on $CTDB_USE_IPV6.
546 for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
547 if [ "${CTDB_USE_IPV6}x" != "x" ]; then
548 echo ::$i >>"$CTDB_NODES"
549 ip addr add ::$i/128 dev lo
551 echo 127.0.0.$i >>"$CTDB_NODES"
552 # 2 public addresses on most nodes, just to make things interesting.
553 if [ $(($i - 1)) -ne $no_public_ips ] ; then
554 echo "192.0.2.$i/24 lo" >>"$public_addresses_all"
555 echo "192.0.2.$(($i + $TEST_LOCAL_DAEMONS))/24 lo" >>"$public_addresses_all"
# Start one local ctdbd for node $pnn; remaining "$@" is passed through
# to ctdbd.  A node listed in no_public_addresses.txt gets no public IPs.
564 shift # "$@" gets passed to ctdbd
566 local public_addresses_all="${TEST_VAR_DIR}/public_addresses_all"
567 local public_addresses_mine="${TEST_VAR_DIR}/public_addresses.${pnn}"
568 local no_public_addresses="${TEST_VAR_DIR}/no_public_addresses.txt"
570 local no_public_ips=-1
571 [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses
573 if [ "$no_public_ips" = $pnn ] ; then
574 echo "Node $no_public_ips will have no public IPs."
# Node address is line pnn+1 of the nodes file (PNNs are 0-based).
577 local node_ip=$(sed -n -e "$(($pnn + 1))p" "$CTDB_NODES")
578 local ctdb_options="--reclock=${TEST_VAR_DIR}/rec.lock --nlist $CTDB_NODES --nopublicipcheck --node-ip=${node_ip} --event-script-dir=${TEST_VAR_DIR}/events.d --logfile=${TEST_VAR_DIR}/daemon.${pnn}.log -d 3 --log-ringbuf-size=10000 --dbdir=${TEST_VAR_DIR}/test.db --dbdir-persistent=${TEST_VAR_DIR}/test.db/persistent --dbdir-state=${TEST_VAR_DIR}/test.db/state"
580 if [ -n "$TEST_LOCAL_DAEMONS" ] ; then
581 ctdb_options="$ctdb_options --public-interface=lo"
584 if [ $pnn -eq $no_public_ips ] ; then
585 ctdb_options="$ctdb_options --public-addresses=/dev/null"
587 cp "$public_addresses_all" "$public_addresses_mine"
588 ctdb_options="$ctdb_options --public-addresses=$public_addresses_mine"
591 # We'll use "pkill -f" to kill the daemons with
592 # "--socket=.* --nlist .* --nopublicipcheck" as context.
593 $VALGRIND ctdbd --socket="${TEST_VAR_DIR}/sock.$pnn" $ctdb_options "$@" ||return 1
# Start all local daemons, then point /tmp/ctdb.socket at node 0's
# socket so plain "ctdb" invocations work.
598 # "$@" gets passed to ctdbd
600 echo "Starting $TEST_LOCAL_DAEMONS ctdb daemons..."
602 for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
603 daemons_start_1 $i "$@"
606 if [ -L /tmp/ctdb.socket -o ! -S /tmp/ctdb.socket ] ; then
607 ln -sf "${TEST_VAR_DIR}/sock.0" /tmp/ctdb.socket || return 1
611 #######################################
# Translate selected ctdbd command-line options into the CTDB_*
# environment variables consumed by the init script.
613 _ctdb_hack_options ()
615 local ctdb_options="$*"
617 # We really just want to pass CTDB_OPTIONS but on RH
618 # /etc/sysconfig/ctdb can, and frequently does, set that variable.
619 # So instead, we hack badly.  We'll add these as we use them.
620 # Note that these may still be overridden by the above file... but
621 # we tend to use the exotic options here... so that is unlikely.
623 case "$ctdb_options" in
624 *--start-as-stopped*)
625 export CTDB_START_AS_STOPPED="yes"
# Restart fragment: translate options, then restart via the init
# script (Red Hat path shown; other branches elided).
631 _ctdb_hack_options "$@"
633 if [ -e /etc/redhat-release ] ; then
636 /etc/init.d/ctdb restart
# Start fragment: same translation, then a plain init-script start.
642 _ctdb_hack_options "$@"
644 /etc/init.d/ctdb start
# Local-daemon vs real-cluster dispatch fragment (body elided).
649 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
654 # Common things to do after starting one or more nodes.
657 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1
659 echo "Setting RerecoveryTimeout to 1"
660 onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
662 # In recent versions of CTDB, forcing a recovery like this blocks
663 # until the recovery is complete.  Hopefully this will help the
664 # cluster to stabilise before a subsequent test.
665 echo "Forcing a recovery..."
666 onnode -q 0 $CTDB recover
# NOTE(review): second recovery appears deliberate (repeated below in
# restart_ctdb too) — presumably for older CTDB versions where one
# recovery did not block; confirm before removing.
668 echo "Forcing a recovery..."
669 onnode -q 0 $CTDB recover
674 # This assumes that ctdbd is not running on the given node.
# Start CTDB on a single node: directly for local daemons, via the
# init-script wrapper on a real cluster.
678 shift # "$@" is passed to ctdbd start.
680 echo -n "Starting CTDB on node ${pnn}..."
682 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
683 daemons_start_1 $pnn "$@"
685 onnode $pnn $CTDB_TEST_WRAPPER _ctdb_start "$@"
688 # If we're starting only 1 node then we're doing something weird.
689 ctdb_restart_when_done
694 # "$@" is passed to ctdbd start.
# Restart the whole cluster and wait for it to stabilise: up to 5
# restart attempts, each followed by a health wait, RerecoveryTimeout
# tweak, forced recoveries, a re-check and a final sync.
696 echo -n "Restarting CTDB"
697 if $ctdb_test_restart_scheduled ; then
698 echo -n " (scheduled)"
703 for i in $(seq 1 5) ; do
704 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
708 onnode -p all $CTDB_TEST_WRAPPER _restart_ctdb "$@"
710 echo "Restart failed.  Trying again in a few seconds..."
715 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || {
716 echo "Cluster didn't become healthy.  Restarting..."
# Captured for post-mortem debugging; presumably dumped by elided code.
720 local debug_out=$(onnode -p all ctdb status -Y 2>&1; onnode -p all ctdb scriptstatus 2>&1)
722 echo "Setting RerecoveryTimeout to 1"
723 onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
725 # In recent versions of CTDB, forcing a recovery like this
726 # blocks until the recovery is complete.  Hopefully this will
727 # help the cluster to stabilise before a subsequent test.
728 echo "Forcing a recovery..."
729 onnode -q 0 $CTDB recover
731 echo "Forcing a recovery..."
732 onnode -q 0 $CTDB recover
734 # Cluster is still healthy.  Good, we're done!
735 if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
736 echo "Cluster become UNHEALTHY again.  Restarting..."
740 echo "Doing a sync..."
741 onnode -q 0 $CTDB sync
747 echo "Cluster UNHEALTHY... too many attempts..."
749 # Try to make the calling test fail
# Mark the cluster for restart by the exit handler (see ctdb_test_exit).
754 ctdb_restart_when_done ()
756 ctdb_test_restart_scheduled=true
# Print the value of a given --option from the running ctdbd's command
# line on node $pnn, found via "ctdb getpid" + ps.
759 get_ctdbd_command_line_option ()
764 try_command_on_node "$pnn" "$CTDB getpid" || \
765 die "Unable to get PID of ctdbd on node $pnn"
# "ctdb getpid" output looks like "Pid:NNN" — keep the part after ":".
767 local pid="${out#*:}"
768 try_command_on_node "$pnn" "ps -p $pid -o args hww" || \
769 die "Unable to get command-line of PID $pid"
771 # Strip everything up to and including --option
772 local t="${out#*--${option}}"
773 # Strip leading '=' or space if present
776 # Strip any following options and print
780 #######################################
# Install an eventscript by name/content: via onnode into each node's
# events.d/ on a real cluster, or into the local copy under
# $TEST_VAR_DIR for local daemons.
782 install_eventscript ()
784 local script_name="$1"
785 local script_contents="$2"
787 if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
788 # The quoting here is *very* fragile.  However, we do
789 # experience the joy of installing a short script using
790 # onnode, and without needing to know the IP addresses of the
792 onnode all "f=\"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\" ; echo \"Installing \$f\" ; echo '${script_contents}' > \"\$f\" ; chmod 755 \"\$f\""
794 f="${TEST_VAR_DIR}/events.d/${script_name}"
795 echo "$script_contents" >"$f"
# Remove a previously installed eventscript from the same location.
800 uninstall_eventscript ()
802 local script_name="$1"
804 if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
805 onnode all "rm -vf \"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\""
807 rm -vf "${TEST_VAR_DIR}/events.d/${script_name}"
811 #######################################
813 # This section deals with the 99.ctdb_test eventscript.
815 # Metafunctions: Handle a ctdb-test file on a node.
# Generic create/remove/exists operations on /tmp/ctdb-test-TYPE.PNN
# marker files, used below to communicate with the eventscript.
817 ctdb_test_eventscript_file_create ()
822 try_command_on_node $pnn touch "/tmp/ctdb-test-${type}.${pnn}"
825 ctdb_test_eventscript_file_remove ()
830 try_command_on_node $pnn rm -f "/tmp/ctdb-test-${type}.${pnn}"
833 ctdb_test_eventscript_file_exists ()
838 try_command_on_node $pnn test -f "/tmp/ctdb-test-${type}.${pnn}" >/dev/null 2>&1
842 # Handle a flag file on a node that is removed by 99.ctdb_test on the
# given event, so its disappearance proves the event fired.
844 ctdb_test_eventscript_flag ()
850 ctdb_test_eventscript_file_${cmd} "$pnn" "flag-${event}"
854 # Handle a trigger that causes 99.ctdb_test to fail its monitor
856 ctdb_test_eventscript_unhealthy_trigger ()
861 ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-trigger"
864 # Handle the file that 99.ctdb_test created to show that it has marked
865 # a node unhealthy because it detected the above trigger.
866 ctdb_test_eventscript_unhealthy_detected ()
871 ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-detected"
874 # Handle a trigger that causes 99.ctdb_test to timeout its monitor
875 # event.  This should cause the node to be banned.
876 ctdb_test_eventscript_timeout_trigger ()
882 ctdb_test_eventscript_file_${cmd} "$pnn" "${event}-timeout"
885 # Note that the eventscript can't use the above functions!
# Install 99.ctdb_test: a literal script (single-quoted here-body, no
# expansion at install time) that reacts to the marker files above.
886 ctdb_test_eventscript_install ()
889 local script='#!/bin/sh
893 rm -vf "/tmp/ctdb-test-flag-${1}.${pnn}"
895 trigger="/tmp/ctdb-test-unhealthy-trigger.${pnn}"
896 detected="/tmp/ctdb-test-unhealthy-detected.${pnn}"
897 timeout_trigger="/tmp/ctdb-test-${1}-timeout.${pnn}"
900 if [ -e "$trigger" ] ; then
901 echo "${0}: Unhealthy because \"$trigger\" detected"
904 elif [ -e "$detected" -a ! -e "$trigger" ] ; then
905 echo "${0}: Healthy again, \"$trigger\" no longer detected"
911 if [ -e "$timeout_trigger" ] ; then
912 echo "${0}: Sleeping for a long time because \"$timeout_trigger\" detected"
922 install_eventscript "99.ctdb_test" "$script"
925 ctdb_test_eventscript_uninstall ()
927 uninstall_eventscript "99.ctdb_test"
930 # Note that this only works if you know all other monitor events will
931 # succeed.  You also need to install the eventscript before using it.
# Create a monitor flag file, then wait for the eventscript to remove
# it — which proves a monitor event has run on node $pnn.
932 wait_for_monitor_event ()
936 echo "Waiting for a monitor event on node ${pnn}..."
937 ctdb_test_eventscript_flag create $pnn "monitor"
939 wait_until 120 ! ctdb_test_eventscript_flag exists $pnn "monitor"
943 # Make sure that $CTDB is set.
# Source per-test-suite overrides if present.
# NOTE(review): "local" as a variable name shadows the shell builtin
# keyword only inside functions; at top level it is just a variable,
# but a less confusing name would help.
946 local="${TEST_SUBDIR}/scripts/local.bash"
947 if [ -r "$local" ] ; then