# Hey Emacs, this is a -*- shell-script -*- !!! :-)

######################################################################

ctdb_test_begin ()
{
    local name="$1"

    teststarttime=$(date '+%s')
    testfailures=0

    echo "--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"
    echo "Running test $name ($(date '+%T'))"
    echo "--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"
}

ctdb_test_end ()
{
    local name="$1" ; shift
    local status="$1" ; shift
    # "$@" is command-line

    local interp="SKIPPED"
    local statstr=" (reason $*)"
    if [ -n "$status" ] ; then
        if [ $status -eq 0 ] ; then
            interp="PASSED"
            statstr=""
        else
            interp="FAILED"
            statstr=" (status $status)"
            testfailures=$(($testfailures+1))
        fi
    fi

    testduration=$(($(date +%s)-$teststarttime))

    echo "=========================================================================="
    echo "TEST ${interp}: ${name}${statstr} (duration: ${testduration}s)"
    echo "=========================================================================="
}

test_exit ()
{
    exit $(($testfailures+0))
}

ctdb_test_exit ()
{
    local status=$?

    trap - 0

    [ $(($testfailures+0)) -eq 0 ] && [ $status -ne 0 ] && testfailures=$status
    status=$(($testfailures+0))

    # Avoid making a test fail from this point onwards.  The test is
    # now complete.
    set +e

    echo "*** TEST COMPLETE (RC=$status), CLEANING UP..."

    eval "$ctdb_test_exit_hook" || true
    unset ctdb_test_exit_hook

    if $ctdb_test_restart_scheduled || \
        ! onnode 0 CTDB_TEST_CLEANING_UP=1 $CTDB_TEST_WRAPPER cluster_is_healthy ; then
        restart_ctdb
    else
        # This could be made unconditional but then we might get
        # duplication from the recovery in restart_ctdb.  We want to
        # leave the recovery in restart_ctdb so that future tests that
        # might do a manual restart mid-test will benefit.
        echo "Forcing a recovery..."
        onnode -q 0 ctdb recover
    fi

    test_exit
}

ctdb_test_exit_hook_add ()
{
    ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}
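
# Example (hypothetical names): schedule cleanup to run when the test
# exits.  Hooks accumulate and run as a single "cmd1 ; cmd2" list:
#   tmpfile=$(mktemp)
#   ctdb_test_exit_hook_add "rm -f $tmpfile"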

ctdb_test_run ()
{
    local name="$1" ; shift

    [ -n "$1" ] || set -- "$name"

    ctdb_test_begin "$name"

    local status=0
    "$@" || status=$?

    ctdb_test_end "$name" "$status" "$*"
}
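
# Example (hypothetical): run a test body function between the begin/end
# banners, recording its exit status:
#   main () { try_command_on_node 0 "ctdb status" ; }
#   ctdb_test_run "$scriptname" main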

ctdb_test_usage ()
{
    local status=${1:-2}

    cat <<EOF
Usage: $0 [option]

Options:
    -h, --help          show this screen.
    -v, --version       show test case version.
    --category          show the test category (ACL, CTDB, Samba ...).
    -d, --description   show test case description.
    --summary           show short test case summary.
EOF

    exit $status
}

ctdb_test_version ()
{
    [ -n "$CTDB_DIR" ] || fail "Cannot determine version."

    (cd "$CTDB_DIR" && git describe)
}

ctdb_test_cmd_options ()
{
    [ -n "$1" ] || return 0

    case "$1" in
        -h|--help)        ctdb_test_usage 0 ;;
        -v|--version)     ctdb_test_version ;;
        --category)       echo "CTDB" ;;
        -d|--description) test_info ;;
        --summary)        test_info | head -1 ;;
        *)
            echo "Error: Unknown parameter = $1"
            ctdb_test_usage 2
            ;;
    esac

    exit 0
}

scriptname=$(basename "$0")

ctdb_test_restart_scheduled=false

ctdb_test_cmd_options "$@"

trap "ctdb_test_exit" 0

ctdb_test_check_real_cluster ()
{
    [ -n "$CTDB_TEST_REAL_CLUSTER" ] && return 0

    echo "ERROR: This test must be run on a real/virtual cluster, not local daemons."
    return 1
}

########################################

# Runs the given command on the given node(s) via onnode and leaves the
# output in $out.
try_command_on_node ()
{
    local nodespec="$1" ; shift

    local verbose=false
    local onnode_opts=""

    while [ "${nodespec#-}" != "$nodespec" ] ; do
        if [ "$nodespec" = "-v" ] ; then
            verbose=true
        else
            onnode_opts="$nodespec"
        fi
        nodespec="$1" ; shift
    done

    local cmd="$*"

    out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
        echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
        echo "$out"
        return 1
    }

    if $verbose ; then
        echo "Output of \"$cmd\":"
        echo "$out"
    fi
}
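
# Example usage, as seen elsewhere in this library; -v also echoes the
# captured output:
#   try_command_on_node -v 1 ctdb ip -n all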

# Checks that $output contains at least $min_lines lines and that every
# line matches $regexp.
sanity_check_output ()
{
    local min_lines="$1"
    local regexp="$2" # Should be anchored as necessary.
    local output="$3"

    local ret=0

    local num_lines=$(echo "$output" | wc -l)
    echo "There are $num_lines lines of output"
    if [ $num_lines -lt $min_lines ] ; then
        echo "BAD: that's less than the required number (${min_lines})"
        ret=1
    fi

    local status=0
    local unexpected # local doesn't pass through status of command on RHS.
    unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?

    # Note that this is reversed.
    if [ $status -eq 0 ] ; then
        echo "BAD: unexpected lines in output:"
        echo "$unexpected" | cat -A
        ret=1
    else
        echo "Output lines look OK"
    fi

    return $ret
}
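
# Example (hypothetical): require at least 2 lines of output, each
# beginning with "Node" or "pnn":
#   sanity_check_output 2 '^(Node|pnn)' "$out"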

# Takes the output of "ctdb ip -n all" and checks that IPs are assigned
# to more than one node and that no assignment is -1 (unassigned).
sanity_check_ips ()
{
    local ips="$1" # Output of "ctdb ip -n all"

    echo "Sanity checking IPs..."

    local x ipp prev
    prev=""
    while read x ipp ; do
        [ "$ipp" = "-1" ] && break
        if [ -n "$prev" ] && [ "$ipp" != "$prev" ] ; then
            echo "OK"
            return 0
        fi
        prev="$ipp"
    done <<<"$ips"

    echo "BAD: a node was -1 or IPs are only assigned to one node"
    echo "Are you running an old version of CTDB?"
    return 1
}

#######################################

# Wait until either timeout expires or command succeeds.  The command
# will be tried once per second.
wait_until ()
{
    local timeout="$1" ; shift # "$@" is the command...

    echo -n "<${timeout}|"
    local t=$timeout
    while [ $t -gt 0 ] ; do
        if "$@" ; then
            echo "|$(($timeout - $t))|"
            echo "OK"
            return 0
        fi
        echo -n .
        t=$(($t - 1))
        sleep 1
    done

    echo "*TIMEOUT*"
    return 1
}
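
# Example (hypothetical): wait up to 10 seconds for a file to appear:
#   wait_until 10 test -f /tmp/some_file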

sleep_for ()
{
    echo -n "=${1}|"
    for i in $(seq 1 $1) ; do
        echo -n '.'
        sleep 1
    done
    echo '|'
}

_cluster_is_healthy ()
{
    local out x count line

    out=$(ctdb -Y status 2>&1) || return 1

    {
        read x # skip the header line
        count=0
        while read line ; do
            count=$(($count + 1))
            # With ":pnn:ip:" stripped, all 4 remaining flag fields
            # must be 0 for a node to be healthy.
            [ "${line#:*:*:}" != "0:0:0:0:" ] && return 1
        done
        [ $count -gt 0 ] && return $?
    } <<<"$out" # Yay bash!
}
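
# For illustration, "ctdb -Y status" output looks something like:
#   :Node:IP:Disconnected:Banned:Disabled:Unhealthy:
#   :0:127.0.0.1:0:0:0:0: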

cluster_is_healthy ()
{
    if _cluster_is_healthy ; then
        echo "Cluster is HEALTHY"
        return 0
    else
        echo "Cluster is UNHEALTHY"
        if [ -z "$CTDB_TEST_CLEANING_UP" ] ; then
            echo "DEBUG:"
            local i
            for i in "ctdb status" "onnode -q 0 onnode all ctdb scriptstatus" ; do
                echo "$i"
                $i || true
            done
        fi
        return 1
    fi
}

wait_until_healthy ()
{
    local timeout="${1:-120}"

    echo "Waiting for cluster to become healthy..."

    wait_until $timeout _cluster_is_healthy
}

# This function is becoming nicely overloaded.  Soon it will collapse!  :-)
node_has_status ()
{
    local pnn="$1"
    local status="$2"

    local bits fpat mpat
    case "$status" in
        (unhealthy)    bits="?:?:?:1" ;;
        (healthy)      bits="?:?:?:0" ;;
        (disconnected) bits="1:?:?:?" ;;
        (connected)    bits="0:?:?:?" ;;
        (banned)       bits="?:1:?:?" ;;
        (unbanned)     bits="?:0:?:?" ;;
        (disabled)     bits="?:?:1:?" ;;
        (enabled)      bits="?:?:0:?" ;;
        (frozen)       fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
        (unfrozen)     fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
        (monon)        mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
        (monoff)       mpat='^Monitoring mode:DISABLED \(1\)$' ;;
        *)
            echo "node_has_status: unknown status \"$status\""
            return 1
            ;;
    esac

    if [ -n "$bits" ] ; then
        local out x line

        out=$(ctdb -Y status 2>&1) || return 1

        {
            read x # skip the header line
            while read line ; do
                [ "${line#:${pnn}:*:${bits}:}" = "" ] && return 0
            done
            return 1
        } <<<"$out" # Yay bash!
    elif [ -n "$fpat" ] ; then
        ctdb statistics -n "$pnn" | egrep -q "$fpat"
    elif [ -n "$mpat" ] ; then
        ctdb getmonmode -n "$pnn" | egrep -q "$mpat"
    else
        echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
        return 1
    fi
}

wait_until_node_has_status ()
{
    local pnn="$1"
    local status="$2"
    local timeout="${3:-30}"

    echo "Waiting until node $pnn has status \"$status\"..."

    wait_until $timeout node_has_status "$pnn" "$status"
}
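
# Example (hypothetical): wait up to 60 seconds for node 1 to be healthy:
#   wait_until_node_has_status 1 healthy 60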

# Useful for superficially testing IP failover.
# IPs must be on nodes matching nodeglob.
ips_are_on_nodeglob ()
{
    local nodeglob="$1" ; shift
    local ips="$*"

    local out

    try_command_on_node 1 ctdb ip -n all

    while read ip pnn ; do
        for check in $ips ; do
            if [ "$check" = "$ip" ] ; then
                case "$pnn" in
                    ($nodeglob) : ;;
                    (*) return 1 ;;
                esac
                ips="${ips/${ip}}" # Remove from list
            fi
        done
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    ips="${ips// }" # Remove any spaces.
    [ -z "$ips" ]   # Succeed only if every given IP was seen.
}

wait_until_ips_are_on_nodeglob ()
{
    echo "Waiting for IPs to fail over..."

    wait_until 60 ips_are_on_nodeglob "$@"
}
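
# Example (hypothetical): wait for 192.0.2.1 to be hosted by any node
# other than node 2:
#   wait_until_ips_are_on_nodeglob '[!2]' 192.0.2.1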

get_src_socket ()
{
    local proto="$1"
    local dst_socket="$2"
    local pid="$3"
    local prog="$4"

    local pat="^${proto}[[:space:]]+[[:digit:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[^[:space:]]+[[:space:]]+${dst_socket//./\\.}[[:space:]]+ESTABLISHED[[:space:]]+${pid}/${prog}[[:space:]]*\$"
    out=$(netstat -tanp |
        egrep "$pat" |
        awk '{ print $4 }')

    [ -n "$out" ]
}

wait_until_get_src_socket ()
{
    local proto="$1"
    local dst_socket="$2"
    local pid="$3"
    local prog="$4"

    echo "Waiting for ${prog} to establish connection to ${dst_socket}..."

    wait_until 5 get_src_socket "$@"
}
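
# Example (hypothetical PID and program): wait for "nc" with PID 1234 to
# establish a connection to 192.0.2.1:445, leaving the source socket
# (netstat's local-address column) in $out:
#   wait_until_get_src_socket tcp 192.0.2.1:445 1234 nc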

#######################################

# filename will be in $tcpdump_filename, pid in $tcpdump_pid
tcpdump_start ()
{
    tcpdump_filter="$1" # global

    echo "Running tcpdump..."
    tcpdump_filename=$(mktemp)
    ctdb_test_exit_hook_add "rm -f $tcpdump_filename"

    # The only way of being sure that tcpdump is listening is to send
    # some packets that it will see.  So we use dummy pings - the -U
    # option to tcpdump ensures that packets are flushed to the file
    # as they are captured.
    local dummy_addr="127.3.2.1"
    local dummy="icmp and dst host ${dummy_addr} and icmp[icmptype] == icmp-echo"
    tcpdump -n -p -s 0 -e -U -w $tcpdump_filename -i any "($tcpdump_filter) or ($dummy)" &
    ctdb_test_exit_hook_add "kill $! >/dev/null 2>&1"

    echo "Waiting for tcpdump output file to be ready..."
    ping -q "$dummy_addr" >/dev/null 2>&1 &
    ctdb_test_exit_hook_add "kill $! >/dev/null 2>&1"

    tcpdump_listen_for_dummy ()
    {
        tcpdump -n -r $tcpdump_filename -c 1 "$dummy" >/dev/null 2>&1
    }

    wait_until 10 tcpdump_listen_for_dummy
}
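
# Example (hypothetical filter): capture RSTs sent to port 445, provoke
# some traffic, then wait for and display the matching packets:
#   tcpdump_start "tcp dst port 445 and tcp[tcpflags] & tcp-rst != 0"
#   ... provoke some traffic ...
#   tcpdump_wait 1
#   tcpdump_show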

# By default, wait for 1 matching packet.
tcpdump_wait ()
{
    local count="${1:-1}"
    local filter="${2:-${tcpdump_filter}}"

    tcpdump_check ()
    {
        local found=$(tcpdump -n -r $tcpdump_filename "$filter" 2>/dev/null | wc -l)
        [ $found -ge $count ]
    }

    echo "Waiting for tcpdump to capture some packets..."
    if ! wait_until 30 tcpdump_check ; then
        echo "DEBUG:"
        local i
        for i in "ctdb status" "netstat -tanp" "tcpdump -n -e -r $tcpdump_filename" ; do
            echo "$i"
            $i || true
        done
        return 1
    fi
}

tcpdump_show ()
{
    local filter="${1:-${tcpdump_filter}}"

    tcpdump -n -r $tcpdump_filename "$filter" 2>/dev/null
}

tcptickle_sniff_start ()
{
    local src="$1"
    local dst="$2"

    local in="src host ${dst%:*} and tcp src port ${dst##*:} and dst host ${src%:*} and tcp dst port ${src##*:}"
    local out="src host ${src%:*} and tcp src port ${src##*:} and dst host ${dst%:*} and tcp dst port ${dst##*:}"
    # tcp[14] and tcp[15] are the 2 bytes of the TCP window size field:
    # 4 * 256 + 210 == 1234, the marker window used by CTDB tickles.
    local tickle_ack="${in} and (tcp[tcpflags] & tcp-ack != 0) and (tcp[14] == 4) and (tcp[15] == 210)" # win == 1234
    local ack_ack="${out} and (tcp[tcpflags] & tcp-ack != 0)"
    tcptickle_reset="${in} and tcp[tcpflags] & tcp-rst != 0" # global
    local filter="(${tickle_ack}) or (${ack_ack}) or (${tcptickle_reset})"

    tcpdump_start "$filter"
}

tcptickle_sniff_wait_show ()
{
    tcpdump_wait 1 "$tcptickle_reset"

    echo "GOOD: here are some TCP tickle packets:"
    tcpdump_show
}
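
# Example (hypothetical addresses): watch for the tickle of a connection
# from 192.0.2.101:12345 to public IP 192.0.2.1:445:
#   tcptickle_sniff_start 192.0.2.101:12345 192.0.2.1:445
#   ... move the public IP to another node ...
#   tcptickle_sniff_wait_show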

#######################################

daemons_stop ()
{
    echo "Attempting to politely shutdown daemons..."
    onnode 1 ctdb shutdown -n all || true

    echo "Sleeping for a while..."
    sleep_for 1

    if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
        echo "Killing remaining daemons..."
        pkill -f $CTDB_DIR/bin/ctdbd

        if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
            echo "Once more with feeling..."
            pkill -9 -f $CTDB_DIR/bin/ctdbd
        fi
    fi

    local var_dir=$CTDB_DIR/tests/var
    rm -rf $var_dir/test.db
}

daemons_setup ()
{
    local num_nodes="${1:-2}" # default is 2 nodes

    local var_dir=$CTDB_DIR/tests/var

    mkdir -p $var_dir/test.db/persistent

    local nodes=$var_dir/nodes.txt
    local public_addresses=$var_dir/public_addresses.txt
    local no_public_addresses=$var_dir/no_public_addresses.txt
    rm -f $nodes $public_addresses $no_public_addresses

    # If there are (strictly) greater than 2 nodes then we'll randomly
    # choose a node to have no public addresses.
    local no_public_ips=-1
    [ $num_nodes -gt 2 ] && no_public_ips=$(($RANDOM % $num_nodes))
    echo "$no_public_ips" >$no_public_addresses

    local i
    for i in $(seq 1 $num_nodes) ; do
        if [ "${CTDB_USE_IPV6}x" != "x" ]; then
            echo ::$i >> $nodes
            ip addr add ::$i/128 dev lo
        else
            echo 127.0.0.$i >> $nodes
            # 2 public addresses on most nodes, just to make things interesting.
            if [ $(($i - 1)) -ne $no_public_ips ] ; then
                echo "192.0.2.$i/24 lo" >> $public_addresses
                echo "192.0.2.$(($i + $num_nodes))/24 lo" >> $public_addresses
            fi
        fi
    done
}
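
# For illustration, with the default 2 nodes this writes 127.0.0.1 and
# 127.0.0.2 to nodes.txt and four public addresses (192.0.2.1 through
# 192.0.2.4, two per node) to public_addresses.txt, all on interface lo.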

daemons_start ()
{
    local num_nodes="${1:-2}" # default is 2 nodes
    shift # "$@" gets passed to ctdbd

    local var_dir=$CTDB_DIR/tests/var

    local nodes=$var_dir/nodes.txt
    local public_addresses=$var_dir/public_addresses.txt
    local no_public_addresses=$var_dir/no_public_addresses.txt

    local no_public_ips=-1
    [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses

    local ctdb_options="--reclock=$var_dir/rec.lock --nlist $nodes --nopublicipcheck --event-script-dir=$CTDB_DIR/tests/events.d --logfile=$var_dir/daemons.log -d 0 --dbdir=$var_dir/test.db --dbdir-persistent=$var_dir/test.db/persistent"

    echo "Starting $num_nodes ctdb daemons..."
    if [ "$no_public_ips" != -1 ] ; then
        echo "Node $no_public_ips will have no public IPs."
    fi

    local i
    for i in $(seq 0 $(($num_nodes - 1))) ; do
        if [ $(id -u) -eq 0 ]; then
            ctdb_options="$ctdb_options --public-interface=lo"
        fi

        if [ $i -eq $no_public_ips ] ; then
            ctdb_options="$ctdb_options --public-addresses=/dev/null"
        else
            ctdb_options="$ctdb_options --public-addresses=$public_addresses"
        fi

        # Need full path so we can use "pkill -f" to kill the daemons.
        $VALGRIND $CTDB_DIR/bin/ctdbd --socket=$var_dir/sock.$i $ctdb_options "$@" || return 1
    done

    if [ -L /tmp/ctdb.socket ] || [ ! -S /tmp/ctdb.socket ] ; then
        ln -sf $var_dir/sock.0 /tmp/ctdb.socket || return 1
    fi
}
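
# Example (hypothetical): start 4 local daemons, passing an extra option
# through to ctdbd:
#   daemons_start 4 --nosetsched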

#######################################

_restart_ctdb ()
{
    if [ -e /etc/redhat-release ] ; then
        service ctdb restart
    else
        /etc/init.d/ctdb restart
    fi
}

setup_ctdb ()
{
    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
        daemons_setup $CTDB_TEST_NUM_DAEMONS
    fi
}

restart_ctdb ()
{
    echo -n "Restarting CTDB"
    if $ctdb_test_restart_scheduled ; then
        echo -n " (scheduled)"
    fi
    echo "..."

    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
        daemons_stop
        daemons_start $CTDB_TEST_NUM_DAEMONS
    else
        onnode -pq all $CTDB_TEST_WRAPPER _restart_ctdb
    fi

    onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1

    echo "Setting RerecoveryTimeout to 1"
    onnode -pq all "ctdb setvar RerecoveryTimeout 1"

    # In recent versions of CTDB, forcing a recovery like this blocks
    # until the recovery is complete.  Hopefully this will help the
    # cluster to stabilise before a subsequent test.
    echo "Forcing a recovery..."
    onnode -q 0 ctdb recover

    echo "Forcing a recovery..."
    onnode -q 0 ctdb recover
}

# Schedule a restart of CTDB, via the exit handler, when the current
# test completes.
ctdb_restart_when_done ()
{
    ctdb_test_restart_scheduled=true
}

#######################################

install_eventscript ()
{
    local script_name="$1"
    local script_contents="$2"

    if [ -n "$CTDB_TEST_REAL_CLUSTER" ] ; then
        # The quoting here is *very* fragile.  However, we do
        # experience the joy of installing a short script using
        # onnode, and without needing to know the IP addresses of the
        # nodes.
        onnode all "f=\"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\" ; echo \"Installing \$f\" ; echo '${script_contents}' > \"\$f\" ; chmod 755 \"\$f\""
    else
        f="${CTDB_DIR}/tests/events.d/${script_name}"
        echo "$script_contents" >"$f"
        chmod 755 "$f"
    fi
}
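
# Example (hypothetical script name): install a trivial eventscript that
# always succeeds, and remove it again when the test exits:
#   install_eventscript "99.test" '#!/bin/sh
#   exit 0'
#   ctdb_test_exit_hook_add 'uninstall_eventscript "99.test"'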

uninstall_eventscript ()
{
    local script_name="$1"

    if [ -n "$CTDB_TEST_REAL_CLUSTER" ] ; then
        onnode all "rm -vf \"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\""
    else
        rm -vf "${CTDB_DIR}/tests/events.d/${script_name}"
    fi
}