# Hey Emacs, this is a -*- shell-script -*- !!! :-)

######################################################################
    teststarttime=$(date '+%s')

    echo "--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"
    echo "Running test $name ($(date '+%T'))"
    echo "--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"
    local name="$1" ; shift
    local status="$1" ; shift
    # "$@" is command-line

    local interp="SKIPPED"
    local statstr=" (reason $*)"
    if [ -n "$status" ] ; then
        if [ $status -eq 0 ] ; then
            statstr=" (status $status)"
            testfailures=$(($testfailures+1))

    testduration=$(($(date +%s)-$teststarttime))

    echo "=========================================================================="
    echo "TEST ${interp}: ${name}${statstr} (duration: ${testduration}s)"
    echo "=========================================================================="

    exit $(($testfailures+0))
ctdb_check_time_logs ()
    out=$(onnode all tail -n 20 /var/log/ctdb.test.time.log 2>&1)
    if [ $? -eq 0 ] ; then
        node="${line#>> NODE: }"
        if [ -n "$ds_prev" ] && \
           [ $(($ds_curr - $ds_prev)) -ge $threshold ] ; then
            echo "Node $node had time jump of $(($ds_curr - $ds_prev))ds between $(date +'%T' -d @${ds_prev%?}) and $(date +'%T' -d @${ds_curr%?})"
        echo "Error getting time logs"
    echo "Check time sync (test client first):"
    echo "Information from test client:"
    echo "Information from cluster nodes:"
    onnode all "top -b -n 1 ; echo '/proc/slabinfo' ; cat /proc/slabinfo"
    [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
    status=$(($testfailures+0))

    # Avoid making a test fail from this point onwards. The test is
    # now complete and everything that follows is just cleanup.

    echo "*** TEST COMPLETED (RC=$status) AT $(date '+%F %T'), CLEANING UP..."

    if [ -n "$CTDB_TEST_REAL_CLUSTER" -a $status -ne 0 ] ; then
    eval "$ctdb_test_exit_hook" || true
    unset ctdb_test_exit_hook
    if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then

        # This could be made unconditional but then we might get
        # duplication from the recovery in restart_ctdb. We want to
        # leave the recovery in restart_ctdb so that future tests that
        # might do a manual restart mid-test will benefit.
        echo "Forcing a recovery..."
        onnode 0 $CTDB recover
ctdb_test_exit_hook_add ()
    ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
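    # The ${ctdb_test_exit_hook:+ ; } expansion adds a " ; " separator
    # only when a hook is already registered, so repeated calls build up
    # a single command line that is eval'ed at exit. Illustrative usage
    # ($my_tmpfile is a hypothetical variable from a test):
    #   ctdb_test_exit_hook_add "rm -f $my_tmpfile"
    #   ctdb_test_exit_hook_add "onnode 0 $CTDB enable"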
    local name="$1" ; shift
    [ -n "$1" ] || set -- "$name"
    ctdb_test_begin "$name"
    ctdb_test_end "$name" "$status" "$*"
  -h, --help          show this screen.
  -v, --version       show test case version.
  --category          show the test category (ACL, CTDB, Samba ...).
  -d, --description   show test case description.
  --summary           show short test case summary.
  -x                  trace test using set -x.
    [ -n "$CTDB_DIR" ] || fail "Cannot determine version."
    (cd "$CTDB_DIR" && git describe)
ctdb_test_cmd_options ()
    [ -n "$1" ] || return 0

        -h|--help)        ctdb_test_usage 0 ;;
        -v|--version)     ctdb_test_version ;;
        --category)       echo "CTDB" ;;
        -d|--description) test_info ;;
        -x)               set -x ; return 0 ;;
            echo "Error: Unknown parameter = $1"
scriptname=$(basename "$0")

ctdb_test_restart_scheduled=false

ctdb_test_cmd_options "$@"

trap "ctdb_test_exit" 0
ctdb_test_check_real_cluster ()
    [ -n "$CTDB_TEST_REAL_CLUSTER" ] && return 0

    echo "ERROR: This test must be run on a real/virtual cluster, not local daemons."
########################################

try_command_on_node ()
    local nodespec="$1" ; shift

    while [ "${nodespec#-}" != "$nodespec" ] ; do
        if [ "$nodespec" = "-v" ] ; then
            onnode_opts="$nodespec"
        nodespec="$1" ; shift

    out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
        echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
        echo "Output of \"$cmd\":"
sanity_check_output ()
    local regexp="$2" # Should be anchored as necessary.

    local num_lines=$(echo "$output" | wc -l)
    echo "There are $num_lines lines of output"
    if [ $num_lines -lt $min_lines ] ; then
        echo "BAD: that's less than the required number (${min_lines})"

    local unexpected # local doesn't pass through the status of a command on the RHS.
    unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?

    # Note that this is reversed: egrep succeeding (status 0) means it
    # found lines that do NOT match the expected pattern.
    if [ $status -eq 0 ] ; then
        echo "BAD: unexpected lines in output:"
        echo "$unexpected" | cat -A
        echo "Output lines look OK"
    local ips="$1" # list of "ip node" lines

    echo "Sanity checking IPs..."

    while read x ipp ; do
        [ "$ipp" = "-1" ] && break
        if [ -n "$prev" -a "$ipp" != "$prev" ] ; then
        echo "BAD: a node was -1 or IPs are only assigned to one node"
        echo "Are you running an old version of CTDB?"
# This returns a list of "ip node" lines in $out
    try_command_on_node $node "$CTDB ip -Y -n all | cut -d ':' -f1-3 | sed -e '1d' -e 's@^:@@' -e 's@:@ @g'"
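    # (Illustrative: the pipeline above turns a machine-readable line
    #  such as ":192.0.2.1:0" into "192.0.2.1 0", i.e. one "ip node"
    #  pair per line, with the header row dropped by the '1d'.)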
select_test_node_and_ips ()

    # When selecting test_node we just want a node that has public
    # IPs. This will work and is economically semi-random. :-)
    read x test_node <<<"$out"

    while read ip pnn ; do
        if [ "$pnn" = "$test_node" ] ; then
            test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    echo "Selected node ${test_node} with IPs: ${test_node_ips}."
    test_ip="${test_node_ips%% *}"
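    # Example outcome (illustrative, using the local-daemons addressing
    # scheme set up below): test_node=1, test_node_ips="192.0.2.2
    # 192.0.2.5" and test_ip="192.0.2.2".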
#######################################

# Wait until either timeout expires or command succeeds. The command
# will be tried once per second.
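# Example usage (illustrative): wait up to 30 seconds for a flag file
# to appear, or prefix the command with "!" to wait for the opposite
# condition:
#   wait_until 30 test -f /tmp/some.flag
#   wait_until 30 ! test -f /tmp/some.flag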
    local timeout="$1" ; shift # "$@" is the command...

    if [ "$1" = "!" ] ; then

    echo -n "<${timeout}|"
    while [ $t -gt 0 ] ; do
        if { ! $negate && [ $rc -eq 0 ] ; } || \
           { $negate && [ $rc -ne 0 ] ; } ; then
            echo "|$(($timeout - $t))|"
    for i in $(seq 1 $1) ; do
_cluster_is_healthy ()
    local out x count line

    out=$($CTDB -Y status 2>/dev/null) || return 1

            # We need to see valid lines if we're going to be healthy.
            [ "${line#:[0-9]}" != "$line" ] && count=$(($count + 1))
            # A line indicating a node is unhealthy causes failure.
            [ "${line##:*:*:*1:}" != "$line" ] && return 1
        [ $count -gt 0 ] && return $?
    } <<<"$out" # Yay bash!
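# (Assumed "-Y" layout: after a header row each node produces one line
#  of ":"-separated fields, roughly
#    :<pnn>:<ip>:<disconnected>:<banned>:<disabled>:<unhealthy>:...
#  so a "1" in a flag column marks the node, and hence the cluster, as
#  unhealthy.)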
cluster_is_healthy ()
    if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
        echo "Cluster is HEALTHY"
        echo "Cluster is UNHEALTHY"
        if ! ${ctdb_test_restart_scheduled:-false} ; then
            echo "DEBUG AT $(date '+%F %T'):"
            for i in "onnode -q 0 $CTDB status" "onnode -q 0 onnode all $CTDB scriptstatus" ; do
wait_until_healthy ()
    local timeout="${1:-120}"

    echo "Waiting for cluster to become healthy..."

    wait_until $timeout _cluster_is_healthy
# This function is becoming nicely overloaded. Soon it will collapse! :-)
        (unhealthy)    bits="?:?:?:1:*" ;;
        (healthy)      bits="?:?:?:0:*" ;;
        (disconnected) bits="1:*" ;;
        (connected)    bits="0:*" ;;
        (banned)       bits="?:1:*" ;;
        (unbanned)     bits="?:0:*" ;;
        (disabled)     bits="?:?:1:*" ;;
        (enabled)      bits="?:?:0:*" ;;
        (stopped)      bits="?:?:?:?:1:*" ;;
        (notstopped)   bits="?:?:?:?:0:*" ;;
        (frozen)       fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
        (unfrozen)     fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
        (monon)        mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
        (monoff)       mpat='^Monitoring mode:DISABLED \(1\)$' ;;
457 echo "node_has_status: unknown status \"$status\""
    if [ -n "$bits" ] ; then
        out=$($CTDB -Y status 2>&1) || return 1

            # This needs to be done in 2 steps to avoid false matches.
            local line_bits="${line#:${pnn}:*:}"
            [ "$line_bits" = "$line" ] && continue
            [ "${line_bits#${bits}}" != "$line_bits" ] && return 0
        } <<<"$out" # Yay bash!
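        # (Illustrative: for pnn=1 and bits="?:?:?:0:*" the status line
        #  ":1:192.0.2.2:0:0:0:0:..." is first reduced to "0:0:0:0:..."
        #  and only then matched, so a stray "1" earlier in the line
        #  cannot cause a false match.)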
    elif [ -n "$fpat" ] ; then
        $CTDB statistics -n "$pnn" | egrep -q "$fpat"
    elif [ -n "$mpat" ] ; then
        $CTDB getmonmode -n "$pnn" | egrep -q "$mpat"
        echo 'node_has_status: unknown mode, none of $bits, $fpat or $mpat is set'
wait_until_node_has_status ()
    local timeout="${3:-30}"
    local proxy_pnn="${4:-any}"

    echo "Waiting until node $pnn has status \"$status\"..."

    if ! wait_until $timeout onnode $proxy_pnn $CTDB_TEST_WRAPPER node_has_status "$pnn" "$status" ; then
        for i in "onnode -q any $CTDB status" "onnode -q any onnode all $CTDB scriptstatus" ; do
# Useful for superficially testing IP failover.
# IPs must be on nodes matching nodeglob.
ips_are_on_nodeglob ()
    local nodeglob="$1" ; shift

    while read ip pnn ; do
        for check in $ips ; do
            if [ "$check" = "$ip" ] ; then
                ips="${ips/${ip}}" # Remove from list
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    ips="${ips// }" # Remove any spaces.
wait_until_ips_are_on_nodeglob ()
    echo "Waiting for IPs to fail over..."

    wait_until 60 ips_are_on_nodeglob "$@"
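# Example usage (illustrative): after disabling the test node, wait for
# its public IPs to move to any other node:
#   wait_until_ips_are_on_nodeglob "[!${test_node}]" $test_node_ips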
    while read ip pnn ; do
        if [ "$node" = "$pnn" ] ; then
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.
wait_until_node_has_some_ips ()
    echo "Waiting for node to have some IPs..."

    wait_until 60 node_has_some_ips "$@"
    local dst_socket="$2"

    local pat="^${proto}[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[^[:space:]]+[[:space:]]+${dst_socket//./\\.}[[:space:]]+ESTABLISHED[[:space:]]+${pid}/${prog}[[:space:]]*\$"
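    # (Illustrative: the pattern is built to match a netstat line such
    #  as "tcp  0  0 10.0.0.1:44444  10.0.0.2:445  ESTABLISHED 1234/smbd",
    #  anchored on the destination socket, the ESTABLISHED state and the
    #  pid/program pair.)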
    out=$(netstat -tanp |
wait_until_get_src_socket ()
    local dst_socket="$2"

    echo "Waiting for ${prog} to establish connection to ${dst_socket}..."

    wait_until 5 get_src_socket "$@"
#######################################

# filename will be in $tcpdump_filename, pid in $tcpdump_pid
    tcpdump_filter="$1" # global

    echo "Running tcpdump..."
    tcpdump_filename=$(mktemp)
    ctdb_test_exit_hook_add "rm -f $tcpdump_filename"
    # The only way of being sure that tcpdump is listening is to send
    # some packets that it will see. So we use dummy pings - the -U
    # option to tcpdump ensures that packets are flushed to the file
    # as they are captured.
    local dummy_addr="127.3.2.1"
    local dummy="icmp and dst host ${dummy_addr} and icmp[icmptype] == icmp-echo"
    tcpdump -n -p -s 0 -e -U -w $tcpdump_filename -i any "($tcpdump_filter) or ($dummy)" &
    ctdb_test_exit_hook_add "kill $! >/dev/null 2>&1"

    echo "Waiting for tcpdump output file to be ready..."
    ping -q "$dummy_addr" >/dev/null 2>&1 &
    ctdb_test_exit_hook_add "kill $! >/dev/null 2>&1"
    tcpdump_listen_for_dummy ()
        tcpdump -n -r $tcpdump_filename -c 1 "$dummy" >/dev/null 2>&1

    wait_until 10 tcpdump_listen_for_dummy
# By default, wait for 1 matching packet.
    local count="${1:-1}"
    local filter="${2:-${tcpdump_filter}}"

    local found=$(tcpdump -n -r $tcpdump_filename "$filter" 2>/dev/null | wc -l)
    [ $found -ge $count ]
635 echo "Waiting for tcpdump to capture some packets..."
636 if ! wait_until 30 tcpdump_check ; then
637 echo "DEBUG AT $(date '+%F %T'):"
639 for i in "onnode -q 0 $CTDB status" "netstat -tanp" "tcpdump -n -e -r $tcpdump_filename" ; do
    local filter="${1:-${tcpdump_filter}}"

    tcpdump -n -r $tcpdump_filename "$filter" 2>/dev/null
tcptickle_sniff_start ()
    local in="src host ${dst%:*} and tcp src port ${dst##*:} and dst host ${src%:*} and tcp dst port ${src##*:}"
    local out="src host ${src%:*} and tcp src port ${src##*:} and dst host ${dst%:*} and tcp dst port ${dst##*:}"
    local tickle_ack="${in} and (tcp[tcpflags] & tcp-ack != 0) and (tcp[14] == 4) and (tcp[15] == 210)" # win == 1234
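    # (tcp[14] and tcp[15] are the high and low bytes of the TCP window
    #  field, so the filter matches a window of 4 * 256 + 210 == 1234,
    #  the distinctive value used by CTDB tickle ACKs.)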
    local ack_ack="${out} and (tcp[tcpflags] & tcp-ack != 0)"
    tcptickle_reset="${in} and tcp[tcpflags] & tcp-rst != 0"
    local filter="(${tickle_ack}) or (${ack_ack}) or (${tcptickle_reset})"

    tcpdump_start "$filter"
tcptickle_sniff_wait_show ()
    tcpdump_wait 1 "$tcptickle_reset"

    echo "GOOD: here are some TCP tickle packets:"
gratarp_sniff_start ()
    tcpdump_start "arp host ${test_ip}"

gratarp_sniff_wait_show ()
    echo "GOOD: these should be some gratuitous ARPs:"
#######################################

    echo "Attempting to politely shut down daemons..."
    onnode 1 $CTDB shutdown -n all || true

    echo "Sleeping for a while..."

    if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
        echo "Killing remaining daemons..."
        pkill -f $CTDB_DIR/bin/ctdbd

    if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
        echo "Once more with feeling..."
        pkill -9 $CTDB_DIR/bin/ctdbd

    local var_dir=$CTDB_DIR/tests/var
    rm -rf $var_dir/test.db
    local num_nodes="${CTDB_TEST_NUM_DAEMONS:-2}" # default is 2 nodes

    local var_dir=$CTDB_DIR/tests/var

    mkdir -p $var_dir/test.db/persistent

    local public_addresses=$var_dir/public_addresses.txt
    local no_public_addresses=$var_dir/no_public_addresses.txt
    rm -f $CTDB_NODES $public_addresses $no_public_addresses

    # If there are (strictly) greater than 2 nodes then we'll randomly
    # choose a node to have no public addresses.
    local no_public_ips=-1
    [ $num_nodes -gt 2 ] && no_public_ips=$(($RANDOM % $num_nodes))
    echo "$no_public_ips" >$no_public_addresses

    for i in $(seq 1 $num_nodes) ; do
        if [ "${CTDB_USE_IPV6}x" != "x" ] ; then
            ip addr add ::$i/128 dev lo
            echo 127.0.0.$i >> $CTDB_NODES
        # 2 public addresses on most nodes, just to make things interesting.
        if [ $(($i - 1)) -ne $no_public_ips ] ; then
            echo "192.0.2.$i/24 lo" >> $public_addresses
            echo "192.0.2.$(($i + $num_nodes))/24 lo" >> $public_addresses
    shift # "$@" gets passed to ctdbd

    local var_dir=$CTDB_DIR/tests/var

    local public_addresses=$var_dir/public_addresses.txt
    local no_public_addresses=$var_dir/no_public_addresses.txt

    local no_public_ips=-1
    [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses

    if [ "$no_public_ips" = $pnn ] ; then
        echo "Node $no_public_ips will have no public IPs."

    local ctdb_options="--reclock=$var_dir/rec.lock --nlist $CTDB_NODES --nopublicipcheck --event-script-dir=$CTDB_DIR/tests/events.d --logfile=$var_dir/daemons.log -d 3 --dbdir=$var_dir/test.db --dbdir-persistent=$var_dir/test.db/persistent --dbdir-state=$var_dir/test.db/state"

    if [ -z "$CTDB_TEST_REAL_CLUSTER" ] ; then
        ctdb_options="$ctdb_options --public-interface=lo"

    if [ $pnn -eq $no_public_ips ] ; then
        ctdb_options="$ctdb_options --public-addresses=/dev/null"
        ctdb_options="$ctdb_options --public-addresses=$public_addresses"

    # Need full path so we can use "pkill -f" to kill the daemons.
    $VALGRIND $CTDB_DIR/bin/ctdbd --socket=$var_dir/sock.$pnn $ctdb_options "$@" || return 1
784 # "$@" gets passed to ctdbd
786 local num_nodes="${CTDB_TEST_NUM_DAEMONS:-2}" # default is 2 nodes
788 echo "Starting $num_nodes ctdb daemons..."
790 for i in $(seq 0 $(($num_nodes - 1))) ; do
791 daemons_start_1 $i "$@"
794 local var_dir=$CTDB_DIR/tests/var
796 if [ -L /tmp/ctdb.socket -o ! -S /tmp/ctdb.socket ] ; then
797 ln -sf $var_dir/sock.0 /tmp/ctdb.socket || return 1
#######################################

_ctdb_hack_options ()
    local ctdb_options="$*"

    # We really just want to pass CTDB_OPTIONS but on RH
    # /etc/sysconfig/ctdb can, and frequently does, set that variable.
    # So instead, we hack badly. We'll add these as we use them.
    # Note that these may still be overridden by the above file... but
    # we tend to use the exotic options here... so that is unlikely.

    case "$ctdb_options" in
        *--start-as-stopped*)
            export CTDB_START_AS_STOPPED="yes"
    _ctdb_hack_options "$@"

    if [ -e /etc/redhat-release ] ; then
        /etc/init.d/ctdb restart

    _ctdb_hack_options "$@"

    /etc/init.d/ctdb start
    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
# Common things to do after starting one or more nodes.
    onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1

    echo "Setting RerecoveryTimeout to 1"
    onnode -pq all "$CTDB setvar RerecoveryTimeout 1"

    # In recent versions of CTDB, forcing a recovery like this blocks
    # until the recovery is complete. Hopefully this will help the
    # cluster to stabilise before a subsequent test.
    echo "Forcing a recovery..."
    onnode -q 0 $CTDB recover

    echo "Forcing a recovery..."
    onnode -q 0 $CTDB recover
# This assumes that ctdbd is not running on the given node.
    shift # "$@" is passed to ctdbd start.

    echo -n "Starting CTDB on node ${pnn}..."

    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
        daemons_start_1 $pnn "$@"
        onnode $pnn $CTDB_TEST_WRAPPER _ctdb_start "$@"

    # If we're starting only 1 node then we're doing something weird.
    ctdb_restart_when_done
884 # "$@" is passed to ctdbd start.
886 echo -n "Restarting CTDB"
887 if $ctdb_test_restart_scheduled ; then
888 echo -n " (scheduled)"
893 for i in $(seq 1 5) ; do
894 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
898 onnode -p all $CTDB_TEST_WRAPPER _restart_ctdb "$@"
900 echo "Restart failed. Trying again in a few seconds..."
905 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || {
906 echo "Cluster didn't become healthy. Restarting..."
910 local debug_out=$(onnode -p all ctdb status -Y 2>&1; onnode -p all ctdb scriptstatus 2>&1)
912 echo "Setting RerecoveryTimeout to 1"
913 onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
915 # In recent versions of CTDB, forcing a recovery like this
916 # blocks until the recovery is complete. Hopefully this will
917 # help the cluster to stabilise before a subsequent test.
918 echo "Forcing a recovery..."
919 onnode -q 0 $CTDB recover
921 echo "Forcing a recovery..."
922 onnode -q 0 $CTDB recover
924 # Cluster is still healthy. Good, we're done!
925 if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
926 echo "Cluster become UNHEALTHY again. Restarting..."
930 echo "Doing a sync..."
931 onnode -q 0 $CTDB sync
937 echo "Cluster UNHEALTHY... too many attempts..."
    # Try to make the calling test fail

ctdb_restart_when_done ()
    ctdb_test_restart_scheduled=true
#######################################

install_eventscript ()
    local script_name="$1"
    local script_contents="$2"

    if [ -n "$CTDB_TEST_REAL_CLUSTER" ] ; then
        # The quoting here is *very* fragile. However, we do get the
        # joy of installing a short script using onnode, without
        # needing to know the IP addresses of the nodes.
        onnode all "f=\"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\" ; echo \"Installing \$f\" ; echo '${script_contents}' > \"\$f\" ; chmod 755 \"\$f\""
        f="${CTDB_DIR}/tests/events.d/${script_name}"
        echo "$script_contents" >"$f"
uninstall_eventscript ()
    local script_name="$1"

    if [ -n "$CTDB_TEST_REAL_CLUSTER" ] ; then
        onnode all "rm -vf \"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\""
        rm -vf "${CTDB_DIR}/tests/events.d/${script_name}"
#######################################

# This section deals with the 99.ctdb_test eventscript.

# Metafunctions: Handle a ctdb-test file on a node.
ctdb_test_eventscript_file_create ()
    try_command_on_node $pnn touch "/tmp/ctdb-test-${type}.${pnn}"

ctdb_test_eventscript_file_remove ()
    try_command_on_node $pnn rm -f "/tmp/ctdb-test-${type}.${pnn}"

ctdb_test_eventscript_file_exists ()
    try_command_on_node $pnn test -f "/tmp/ctdb-test-${type}.${pnn}" >/dev/null 2>&1
# Handle a flag file on a node that is removed by 99.ctdb_test on the
# node's next monitor event.
ctdb_test_eventscript_flag ()
    ctdb_test_eventscript_file_${cmd} "$pnn" "flag-${event}"

# Handle a trigger that causes 99.ctdb_test to fail its monitor
# event. This should cause the node to become unhealthy.
ctdb_test_eventscript_unhealthy_trigger ()
    ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-trigger"
# Handle the file that 99.ctdb_test creates to show that it has marked
# a node unhealthy because it detected the above trigger.
ctdb_test_eventscript_unhealthy_detected ()
    ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-detected"

# Handle a trigger that causes 99.ctdb_test to time out its monitor
# event. This should cause the node to be banned.
ctdb_test_eventscript_timeout_trigger ()
    ctdb_test_eventscript_file_${cmd} "$pnn" "${event}-timeout"
# Note that the eventscript can't use the above functions!
ctdb_test_eventscript_install ()
    local script='#!/bin/sh
rm -vf "/tmp/ctdb-test-flag-${1}.${pnn}"

trigger="/tmp/ctdb-test-unhealthy-trigger.${pnn}"
detected="/tmp/ctdb-test-unhealthy-detected.${pnn}"
timeout_trigger="/tmp/ctdb-test-${1}-timeout.${pnn}"

        if [ -e "$trigger" ] ; then
            echo "${0}: Unhealthy because \"$trigger\" detected"
        elif [ -e "$detected" -a ! -e "$trigger" ] ; then
            echo "${0}: Healthy again, \"$trigger\" no longer detected"

        if [ -e "$timeout_trigger" ] ; then
            echo "${0}: Sleeping for a long time because \"$timeout_trigger\" detected"

    install_eventscript "99.ctdb_test" "$script"
ctdb_test_eventscript_uninstall ()
    uninstall_eventscript "99.ctdb_test"
# Note that this only works if you know all other monitor events will
# succeed. You also need to install the eventscript before using it.
wait_for_monitor_event ()
    echo "Waiting for a monitor event on node ${pnn}..."
    ctdb_test_eventscript_flag create $pnn "monitor"

    wait_until 120 ! ctdb_test_eventscript_flag exists $pnn "monitor"
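    # (How this works: 99.ctdb_test removes the flag file during its
    #  next monitor event, so waiting for the file to disappear proves
    #  that at least one monitor event has run since the flag was
    #  created.)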
# Make sure that $CTDB is set.