# Hey Emacs, this is a -*- shell-script -*- !!!  :-)

. "${TEST_SCRIPTS_DIR}/common.sh"

######################################################################

if [ -n "$CTDB_TEST_REMOTE_DIR" ] ; then
    CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
else
    _d=$(cd "${TEST_SCRIPTS_DIR}" && echo "$PWD")
    CTDB_TEST_WRAPPER="$_d/test_wrap"
fi
export CTDB_TEST_WRAPPER

# If $VALGRIND is set then use it whenever ctdb is called, but only if
# $CTDB is not already set.
[ -n "$CTDB" ] || export CTDB="${VALGRIND}${VALGRIND:+ }ctdb"

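# For example (hypothetical value): with VALGRIND="valgrind -q" this
# yields CTDB="valgrind -q ctdb", while with VALGRIND unset it yields
# plain CTDB="ctdb", since ${VALGRIND:+ } supplies the separating space
# only when $VALGRIND is non-empty.
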
PATH="${TEST_SCRIPTS_DIR}:${PATH}"

######################################################################

ctdb_check_time_logs ()
{
    local threshold=20	# minimum reportable time jump, in deciseconds
    local line node ds_prev ds_curr

    out=$(onnode all tail -n 20 "${TEST_VAR_DIR}/ctdb.test.time.log" 2>&1)

    if [ $? -eq 0 ] ; then
	while read line ; do
	    case "$line" in
		(\>\>\ NODE:\ *\ \<\<)
		    node="${line#>> NODE: }"
		    node="${node% <<*}"
		    ds_prev=""
		    ;;
		(*)
		    # Timestamps are logged in deciseconds.
		    set -- $line
		    ds_curr="$1${2:0:1}"
		    if [ -n "$ds_prev" ] && \
			[ $(($ds_curr - $ds_prev)) -ge $threshold ] ; then
			echo "Node $node had time jump of $(($ds_curr - $ds_prev))ds between $(date +'%T' -d @${ds_prev%?}) and $(date +'%T' -d @${ds_curr%?})"
		    fi
		    ds_prev="$ds_curr"
		    ;;
	    esac
	done <<<"$out"
    else
	echo "Error getting time logs"
    fi

63 echo "Check time sync (test client first):"
66 echo "Information from test client:"
69 echo "Information from cluster nodes:"
70 onnode all "top -b -n 1 ; echo '/proc/slabinfo' ; cat /proc/slabinfo"
ctdb_test_exit ()
{
    local status=$?

    trap - 0

    [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
    status=$(($testfailures+0))

    # Avoid making a test fail from this point onwards.  The test is
    # now complete.
    set +e

    echo "*** TEST COMPLETED (RC=$status) AT $(date '+%F %T'), CLEANING UP..."

    if [ -z "$TEST_LOCAL_DAEMONS" -a -n "$CTDB_TEST_TIME_LOGGING" -a \
	$status -ne 0 ] ; then
	ctdb_check_time_logs
    fi

    eval "$ctdb_test_exit_hook" || true
    unset ctdb_test_exit_hook

    if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then
	restart_ctdb
    else
	# This could be made unconditional but then we might get
	# duplication from the recovery in restart_ctdb.  We want to
	# leave the recovery in restart_ctdb so that future tests that
	# might do a manual restart mid-test will benefit.
	echo "Forcing a recovery..."
	onnode 0 $CTDB recover
    fi

    exit $status
}

ctdb_test_exit_hook_add ()
{
    ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}

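# Usage sketch (hypothetical cleanup commands): hooks accumulate into a
# single "cmd1 ; cmd2" string that ctdb_test_exit later runs via eval:
#   ctdb_test_exit_hook_add "rm -f /tmp/my_test_file"
#   ctdb_test_exit_hook_add my_cleanup_func
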
ctdb_test_init ()
{
    scriptname=$(basename "$0")
    testfailures=0
    ctdb_test_restart_scheduled=false

    trap "ctdb_test_exit" 0
}

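# Usage sketch: a test script sources this file, then calls
# ctdb_test_init early so the EXIT trap cleans up even if the test dies:
#   ctdb_test_init
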
########################################

# Run a command on a node via onnode, capturing its output in $out.
try_command_on_node ()
{
    local nodespec="$1" ; shift

    local verbose=false
    local onnode_opts=""

    # Leading options (e.g. -v) come before the node specification.
    while [ "${nodespec#-}" != "$nodespec" ] ; do
	if [ "$nodespec" = "-v" ] ; then
	    verbose=true
	else
	    onnode_opts="${onnode_opts}${onnode_opts:+ }${nodespec}"
	fi
	nodespec="$1" ; shift
    done

    local cmd="$*"

    out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
	echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
	echo "$out"
	return 1
    }

    if $verbose ; then
	echo "Output of \"$cmd\":"
	echo "$out"
    fi
}

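# Usage sketch: the captured output lands in the global $out (not in a
# pipeline), so it remains visible to the caller:
#   try_command_on_node 0 "$CTDB pnn"
#   echo "Node 0 says: $out"
#   try_command_on_node -v all "$CTDB listnodes"   # -v also echoes $out
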
sanity_check_output ()
{
    local min_lines="$1"
    local regexp="$2" # Should be anchored as necessary.
    local output="$3"

    local ret=0

    local num_lines=$(echo "$output" | wc -l)
    echo "There are $num_lines lines of output"
    if [ $num_lines -lt $min_lines ] ; then
	echo "BAD: that's less than the required number (${min_lines})"
	ret=1
    fi

    local status=0
    local unexpected # local doesn't pass through status of command on RHS.
    unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?

    # Note that this is reversed.
    if [ $status -eq 0 ] ; then
	echo "BAD: unexpected lines in output:"
	echo "$unexpected" | cat -A
	ret=1
    else
	echo "Output lines look OK"
    fi

    return $ret
}

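# Usage sketch (illustrative pattern): require at least 2 output lines,
# all matching an anchored regexp:
#   sanity_check_output 2 '^[[:digit:]]+$' "$out"
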
sanity_check_ips ()
{
    local ips="$1" # list of "ip node" lines

    echo "Sanity checking IPs..."

    local x ipp prev
    prev=""
    while read x ipp ; do
	[ "$ipp" = "-1" ] && break
	if [ -n "$prev" -a "$ipp" != "$prev" ] ; then
	    echo "OK"
	    return 0
	fi
	prev="$ipp"
    done <<<"$ips"

    echo "BAD: a node was -1 or IPs are only assigned to one node"
    echo "Are you running an old version of CTDB?"
    return 1
}

# This returns a list of "ip node" lines in $out
all_ips_on_node ()
{
    local node="$1"
    try_command_on_node $node "$CTDB ip -Y -n all | cut -d ':' -f1-3 | sed -e '1d' -e 's@^:@@' -e 's@:@ @g'"
}

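# Output sketch (illustrative addresses): $out then holds one
# "<ip> <pnn>" pair per line, e.g.:
#   10.0.0.31 0
#   10.0.0.32 1
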
_select_test_node_and_ips ()
{
    all_ips_on_node 0

    test_node=""      # this matches no PNN
    test_node_ips=""
    local ip pnn
    while read ip pnn ; do
	if [ -z "$test_node" -a "$pnn" != "-1" ] ; then
	    test_node="$pnn"
	fi
	if [ "$pnn" = "$test_node" ] ; then
	    test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
	fi
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    echo "Selected node ${test_node} with IPs: ${test_node_ips}."
    test_ip="${test_node_ips%% *}"

    [ -n "$test_node" ] || return 1
}

select_test_node_and_ips ()
{
    local timeout=10
    while ! _select_test_node_and_ips ; do
	echo "Unable to find a test node with IPs assigned"
	if [ $timeout -le 0 ] ; then
	    echo "BAD: Too many attempts"
	    return 1
	fi
	sleep_for 1
	timeout=$(($timeout - 1))
    done

    return 0
}

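# Usage sketch: on success the globals $test_node, $test_node_ips and
# $test_ip are populated for later commands:
#   select_test_node_and_ips || exit 1
#   try_command_on_node $test_node "$CTDB listnodes"
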
#######################################

# Wait until either timeout expires or command succeeds.  The command
# will be tried once per second.
wait_until ()
{
    local timeout="$1" ; shift # "$@" is the command...

    local negate=false
    if [ "$1" = "!" ] ; then
	negate=true
	shift
    fi

    echo -n "<${timeout}|"
    local t=$timeout
    while [ $t -gt 0 ] ; do
	local rc=0
	"$@" || rc=$?
	if { ! $negate && [ $rc -eq 0 ] ; } || \
	    { $negate && [ $rc -ne 0 ] ; } ; then
	    echo "|$(($timeout - $t))|"
	    echo "OK"
	    return 0
	fi
	echo -n .
	t=$(($t - 1))
	sleep 1
    done

    echo "*TIMEOUT*"

    return 1
}

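# Usage sketch (hypothetical predicate): poll once per second for up to
# 30 seconds; a leading "!" waits for the command to fail instead:
#   wait_until 30 test -f /tmp/some_flag_file
#   wait_until 30 ! test -f /tmp/some_flag_file
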
# Sleep for $1 seconds, printing a progress dot per second.
sleep_for ()
{
    echo -n "=${1}|"
    for i in $(seq 1 $1) ; do
	echo -n '.'
	sleep 1
    done
    echo '|'
}

_cluster_is_healthy ()
{
    $CTDB nodestatus all >/dev/null && \
	node_has_status 0 recovered
}

cluster_is_healthy ()
{
    if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
	echo "Cluster is HEALTHY"
	return 0
    else
	echo "Cluster is UNHEALTHY"
	if ! ${ctdb_test_restart_scheduled:-false} ; then
	    echo "DEBUG AT $(date '+%F %T'):"
	    local i
	    for i in "onnode -q 0 $CTDB status" "onnode -q 0 onnode all $CTDB scriptstatus" ; do
		echo "$i"
		$i || true
	    done
	fi
	return 1
    fi
}

wait_until_healthy ()
{
    local timeout="${1:-120}"

    echo "Waiting for cluster to become healthy..."

    wait_until "$timeout" _cluster_is_healthy
}

# This function is becoming nicely overloaded.  Soon it will collapse!  :-)
node_has_status ()
{
    local pnn="$1"
    local status="$2"

    local bits fpat mpat rpat
    case "$status" in
	(unhealthy)    bits="?:?:?:1:*" ;;
	(healthy)      bits="?:?:?:0:*" ;;
	(disconnected) bits="1:*" ;;
	(connected)    bits="0:*" ;;
	(banned)       bits="?:1:*" ;;
	(unbanned)     bits="?:0:*" ;;
	(disabled)     bits="?:?:1:*" ;;
	(enabled)      bits="?:?:0:*" ;;
	(stopped)      bits="?:?:?:?:1:*" ;;
	(notstopped)   bits="?:?:?:?:0:*" ;;
	(frozen)       fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
	(unfrozen)     fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
	(monon)        mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
	(monoff)       mpat='^Monitoring mode:DISABLED \(1\)$' ;;
	(recovered)    rpat='^Recovery mode:NORMAL \(0\)$' ;;
	*)
	    echo "node_has_status: unknown status \"$status\""
	    return 1
    esac

    if [ -n "$bits" ] ; then
	local out x line

	out=$($CTDB -Y status 2>&1) || return 1

	{
	    read x # skip the header line
	    while read line ; do
		# This needs to be done in 2 steps to avoid false matches.
		local line_bits="${line#:${pnn}:*:}"
		[ "$line_bits" = "$line" ] && continue
		[ "${line_bits#${bits}}" != "$line_bits" ] && return 0
	    done
	    return 1
	} <<<"$out" # Yay bash!
    elif [ -n "$fpat" ] ; then
	$CTDB statistics -n "$pnn" | egrep -q "$fpat"
    elif [ -n "$mpat" ] ; then
	$CTDB getmonmode -n "$pnn" | egrep -q "$mpat"
    elif [ -n "$rpat" ] ; then
	$CTDB status -n "$pnn" | egrep -q "$rpat"
    else
	echo 'node_has_status: unknown mode, none of $bits/$fpat/$mpat/$rpat is set'
	return 1
    fi
}

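# Matching sketch (illustrative values): "ctdb -Y status" prints one
# colon-separated line per node, e.g. ":0:10.0.0.31:0:0:0:0:0:...".
# Stripping the leading ":<pnn>:<ip>:" leaves the flag columns, so
# $bits="?:?:?:1:*" (unhealthy) tests the fourth flag field.
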
wait_until_node_has_status ()
{
    local pnn="$1"
    local status="$2"
    local timeout="${3:-30}"
    local proxy_pnn="${4:-any}"

    echo "Waiting until node $pnn has status \"$status\"..."

    if ! wait_until $timeout onnode $proxy_pnn $CTDB_TEST_WRAPPER node_has_status "$pnn" "$status" ; then
	local i
	for i in "onnode -q any $CTDB status" "onnode -q any onnode all $CTDB scriptstatus" ; do
	    echo "$i"
	    $i || true
	done

	return 1
    fi
}

# Useful for superficially testing IP failover.
# IPs must be on nodes matching nodeglob.
# If the first argument is '!' then the IPs must not be on nodes
# matching nodeglob.
ips_are_on_nodeglob ()
{
    local negating=false
    if [ "$1" = "!" ] ; then
	negating=true ; shift
    fi
    local nodeglob="$1" ; shift
    local ips="$*"

    local out

    all_ips_on_node 1

    for check in $ips ; do
	while read ip pnn ; do
	    if [ "$check" = "$ip" ] ; then
		case "$pnn" in
		    ($nodeglob) if $negating ; then return 1 ; fi ;;
		    (*) if ! $negating ; then return 1 ; fi ;;
		esac
		ips="${ips/${ip}}" # Remove from list
		break
	    fi
	    # If we're negating and we didn't see the address then it
	    # isn't hosted by anyone!
	    if $negating ; then
		ips="${ips/${check}}"
	    fi
	done <<<"$out" # bashism to avoid problem setting variable in pipeline.
    done

    ips="${ips// }" # Remove any spaces.
    [ -z "$ips" ]
}

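# Usage sketch (illustrative addresses): assert that both IPs are hosted
# on node 1, or with a leading "!" that neither is:
#   ips_are_on_nodeglob 1 10.0.0.31 10.0.0.32
#   ips_are_on_nodeglob ! 1 10.0.0.31 10.0.0.32
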
wait_until_ips_are_on_nodeglob ()
{
    echo "Waiting for IPs to fail over..."

    wait_until 60 ips_are_on_nodeglob "$@"
}

node_has_some_ips ()
{
    local node="$1"

    local out

    all_ips_on_node 1

    while read ip pnn ; do
	if [ "$node" = "$pnn" ] ; then
	    return 0
	fi
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    return 1
}

wait_until_node_has_some_ips ()
{
    echo "Waiting for node to have some IPs..."

    wait_until 60 node_has_some_ips "$@"
}

#######################################

_ctdb_hack_options ()
{
    local ctdb_options="$*"

    case "$ctdb_options" in
	*--start-as-stopped*)
	    export CTDB_START_AS_STOPPED="yes"
	    ;;
    esac
}

_restart_ctdb ()
{
    _ctdb_hack_options "$@"

    if [ -e /etc/redhat-release ] ; then
	service ctdb restart
    else
	/etc/init.d/ctdb restart
    fi
}

_ctdb_start ()
{
    _ctdb_hack_options "$@"

    /etc/init.d/ctdb start
}

setup_ctdb ()
{
    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
	daemons_setup
    fi
}

# Common things to do after starting one or more nodes.
_ctdb_start_post ()
{
    onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1

    echo "Setting RerecoveryTimeout to 1"
    onnode -pq all "$CTDB setvar RerecoveryTimeout 1"

    # In recent versions of CTDB, forcing a recovery like this blocks
    # until the recovery is complete.  Hopefully this will help the
    # cluster to stabilise before a subsequent test.
    echo "Forcing a recovery..."
    onnode -q 0 $CTDB recover

    echo "ctdb is ready"
}

# This assumes that ctdbd is not running on the given node.
ctdb_start_1 ()
{
    local pnn="$1"
    shift # "$@" is passed to ctdbd start.

    echo -n "Starting CTDB on node ${pnn}..."

    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
	daemons_start_1 $pnn "$@"
    else
	onnode $pnn $CTDB_TEST_WRAPPER _ctdb_start "$@"
    fi

    # If we're starting only 1 node then we're doing something weird.
    ctdb_restart_when_done
}

557 # "$@" is passed to ctdbd start.
559 echo -n "Restarting CTDB"
560 if $ctdb_test_restart_scheduled ; then
561 echo -n " (scheduled)"
566 for i in $(seq 1 5) ; do
567 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
571 onnode -p all $CTDB_TEST_WRAPPER _restart_ctdb "$@"
573 echo "Restart failed. Trying again in a few seconds..."
578 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || {
579 echo "Cluster didn't become healthy. Restarting..."
583 echo "Setting RerecoveryTimeout to 1"
584 onnode -pq all "$CTDB setvar RerecoveryTimeout 1"
586 # In recent versions of CTDB, forcing a recovery like this
587 # blocks until the recovery is complete. Hopefully this will
588 # help the cluster to stabilise before a subsequent test.
589 echo "Forcing a recovery..."
590 onnode -q 0 $CTDB recover
593 # Cluster is still healthy. Good, we're done!
594 if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
595 echo "Cluster became UNHEALTHY again [$(date)]"
596 onnode -p all ctdb status -Y 2>&1
597 onnode -p all ctdb scriptstatus 2>&1
602 echo "Doing a sync..."
603 onnode -q 0 $CTDB sync
609 echo "Cluster UNHEALTHY... too many attempts..."
610 onnode -p all ctdb status -Y 2>&1
611 onnode -p all ctdb scriptstatus 2>&1
613 # Try to make the calling test fail
ctdb_restart_when_done ()
{
    ctdb_test_restart_scheduled=true
}

get_ctdbd_command_line_option ()
{
    local pnn="$1"
    local option="$2"

    try_command_on_node "$pnn" "$CTDB getpid" || \
	die "Unable to get PID of ctdbd on node $pnn"

    local pid="${out#*:}"
    try_command_on_node "$pnn" "ps -p $pid -o args hww" || \
	die "Unable to get command-line of PID $pid"

    # Strip everything up to and including --option
    local t="${out#*--${option}}"
    # Strip leading '=' or space if present
    t="${t#=}"
    t="${t# }"
    # Strip any following options and print
    echo "${t%% -*}"
}

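# Usage sketch (hypothetical option): read the value that ctdbd on node
# 0 was started with for --dbdir:
#   dbdir=$(get_ctdbd_command_line_option 0 "dbdir")
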
#######################################

wait_for_monitor_event ()
{
    local pnn="$1"

    echo "Waiting for a monitor event on node ${pnn}..."

    try_command_on_node "$pnn" $CTDB scriptstatus || {
	echo "Unable to get scriptstatus from node $pnn"
	return 1
    }

    local ctdb_scriptstatus_original="$out"
    wait_until 120 _ctdb_scriptstatus_changed
}

_ctdb_scriptstatus_changed ()
{
    try_command_on_node "$pnn" $CTDB scriptstatus || {
	echo "Unable to get scriptstatus from node $pnn"
	return 1
    }

    [ "$out" != "$ctdb_scriptstatus_original" ]
}

#######################################

nfs_test_setup ()
{
    select_test_node_and_ips

    nfs_first_export=$(showmount -e $test_ip | sed -n -e '2s/ .*//p')

    echo "Creating test subdirectory..."
    try_command_on_node $test_node "mktemp -d --tmpdir=$nfs_first_export"
    nfs_test_dir="$out"
    try_command_on_node $test_node "chmod 777 $nfs_test_dir"

    nfs_mnt_d=$(mktemp -d)
    nfs_local_file="${nfs_mnt_d}/${nfs_test_dir##*/}/TEST_FILE"
    nfs_remote_file="${nfs_test_dir}/TEST_FILE"

    ctdb_test_exit_hook_add nfs_test_cleanup

    echo "Mounting ${test_ip}:${nfs_first_export} on ${nfs_mnt_d} ..."
    mount -o timeo=1,hard,intr,vers=3 \
	${test_ip}:${nfs_first_export} ${nfs_mnt_d}
}

nfs_test_cleanup ()
{
    rm -f "$nfs_local_file"
    umount -f "$nfs_mnt_d"
    rmdir "$nfs_mnt_d"
    onnode -q $test_node rmdir "$nfs_test_dir"
}

#######################################

# $1: pnn, $2: DB name
db_get_path ()
{
    try_command_on_node -v $1 $CTDB getdbstatus "$2" |
    sed -n -e "s@^path: @@p"
}

# $1: pnn, $2: DB name
db_ctdb_cattdb_count_records ()
{
    try_command_on_node -v $1 $CTDB cattdb "$2" |
    grep '^key' | grep -v '__db_sequence_number__' |
    wc -l
}

# $1: pnn, $2: DB name, $3: key string, $4: value string, $5: RSN (default 7)
db_ctdb_tstore ()
{
    _tdb=$(db_get_path $1 "$2")
    _rsn="${5:-7}"
    try_command_on_node $1 $CTDB tstore "$_tdb" "$3" "$4" "$_rsn"
}

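# Usage sketch (illustrative arguments): store key "foo" with value
# "bar" and the default RSN in database "test.tdb" on node 0:
#   db_ctdb_tstore 0 "test.tdb" "foo" "bar"
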
# $1: pnn, $2: DB name, $3: dbseqnum (must fit in a single byte, i.e. <= 255)
db_ctdb_tstore_dbseqnum ()
{
    # "__db_sequence_number__" + trailing 0x00
    _key='0x5f5f64625f73657175656e63655f6e756d6265725f5f00'

    # Construct an 8 byte (uint64_t) database sequence number.  This
    # breaks if $3 > 255.
    _value=$(printf "0x%02x%014x" $3 0)

    db_ctdb_tstore $1 "$2" "$_key" "$_value"
}

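# Worked example: dbseqnum 7 gives _value="0x0700000000000000" (the
# sequence number in the leading byte, seven zero bytes of padding),
# which is why values over 255 overflow the single %02x byte.
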
#######################################

# Make sure that $CTDB is set.
: ${CTDB:=ctdb}

local="${TEST_SUBDIR}/scripts/local.bash"
if [ -r "$local" ] ; then
    . "$local"
fi