1 # Hey Emacs, this is a -*- shell-script -*- !!! :-)
9 ######################################################################
15 teststarttime=$(date '+%s')
18 echo "--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"
19 echo "Running test $name ($(date '+%T'))"
20 echo "--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"
25 local name="$1" ; shift
26 local status="$1" ; shift
27 # "$@" is command-line
29 local interp="SKIPPED"
30 local statstr=" (reason $*)"
31 if [ -n "$status" ] ; then
32 if [ $status -eq 0 ] ; then
38 statstr=" (status $status)"
39 testfailures=$(($testfailures+1))
43 testduration=$(($(date +%s)-$teststarttime))
45 echo "=========================================================================="
46 echo "TEST ${interp}: ${name}${statstr} (duration: ${testduration}s)"
47 echo "=========================================================================="
53 exit $(($testfailures+0))
62 [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
64 eval "$ctdb_test_exit_hook"
65 unset ctdb_test_exit_hook
67 if ! onnode 0 $CTDB_TEST_WRAPPER cluster_is_healthy ; then
68 echo "Restarting ctdb on all nodes to get back into known state..."
71 # This could be made unconditional but then we might get
72 # duplication from the recovery in restart_ctdb. We want to
73 # leave the recovery in restart_ctdb so that future tests that
74 # might do a manual restart mid-test will benefit.
75 echo "Forcing a recovery..."
84 local name="$1" ; shift
86 [ -n "$1" ] || set -- "$name"
88 ctdb_test_begin "$name"
93 ctdb_test_end "$name" "$status" "$*"
106 -h, --help show this screen.
107 -v, --version show test case version.
108 --category show the test category (ACL, CTDB, Samba ...).
109 -d, --description show test case description.
110 --summary show short test case summary.
118 [ -n "$CTDB_DIR" ] || fail "Can not determine version."
120 (cd "$CTDB_DIR" && git describe)
123 ctdb_test_cmd_options()
125 [ -n "$1" ] || return 0
128 -h|--help) ctdb_test_usage 0 ;;
129 -v|--version) ctdb_test_version ;;
130 --category) echo "CTDB" ;;
131 -d|--description) test_info ;;
132 --summary) test_info | head -1 ;;
134 echo "Error: Unknown parameter = $1"
scriptname=$(basename "$0")

# Parse any test-runner command-line options (--help, --version, ...).
# "$@" must be quoted: unquoted $@ would word-split and glob-expand any
# argument containing whitespace or wildcard characters.
ctdb_test_cmd_options "$@"

# Ensure the exit handler runs on any script exit path.
trap "ctdb_test_exit" 0
153 ########################################
156 try_command_on_node ()
158 local nodespec="$1" ; shift
163 while [ "${nodespec#-}" != "$nodespec" ] ; do
164 if [ "$nodespec" = "-v" ] ; then
167 onnode_opts="$nodespec"
169 nodespec="$1" ; shift
174 out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
176 echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
182 echo "Output of \"$cmd\":"
187 sanity_check_output ()
190 local regexp="$2" # Should be anchored as necessary.
195 local num_lines=$(echo "$output" | wc -l)
196 echo "There are $num_lines lines of output"
197 if [ $num_lines -lt $min_lines ] ; then
198 echo "BAD: that's less than the required number (${min_lines})"
203 local unexpected # local doesn't pass through status of command on RHS.
204 unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?
206 # Note that this is reversed.
207 if [ $status -eq 0 ] ; then
208 echo "BAD: unexpected lines in output:"
209 echo "$unexpected" | cat -A
212 echo "Output lines look OK"
218 #######################################
220 # Wait until either timeout expires or command succeeds. The command
221 # will be tried once per second.
224 local timeout="$1" ; shift # "$@" is the command...
226 echo -n "<${timeout}|"
227 while [ $timeout -gt 0 ] ; do
234 timeout=$(($timeout - 1))
246 for i in $(seq 1 $1) ; do
253 _cluster_is_healthy ()
255 local out x count line
257 out=$(ctdb -Y status 2>&1) || return 1
263 count=$(($count + 1))
264 [ "${line#:*:*:}" != "0:0:0:0:" ] && return 1
266 [ $count -gt 0 ] && return $?
267 } <<<"$out" # Yay bash!
270 cluster_is_healthy ()
272 if _cluster_is_healthy ; then
273 echo "Cluster is HEALTHY"
276 echo "Cluster is UNHEALTHY"
wait_until_healthy ()
{
    # Wait for the cluster to report a healthy status, polling once per
    # second via wait_until/_cluster_is_healthy.
    # $1 - timeout in seconds (default: 120)
    local timeout="${1:-120}"

    echo "Waiting for cluster to become healthy..."

    # Bug fix: this previously passed a hard-coded 120, silently
    # ignoring the caller-supplied timeout argument.
    wait_until "$timeout" _cluster_is_healthy
}
290 # This function is becoming nicely overloaded. Soon it will collapse! :-)
298 (unhealthy) bits="?:?:?:1" ;;
299 (healthy) bits="?:?:?:0" ;;
300 (disconnected) bits="1:?:?:?" ;;
301 (connected) bits="0:?:?:?" ;;
302 (banned) bits="?:1:?:?" ;;
303 (unbanned) bits="?:0:?:?" ;;
304 (disabled) bits="?:?:1:?" ;;
305 (enabled) bits="?:?:0:?" ;;
306 (frozen) fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
307 (unfrozen) fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
308 (monon) mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
309 (monoff) mpat='^Monitoring mode:DISABLED \(1\)$' ;;
311 echo "node_has_status: unknown status \"$status\""
315 if [ -n "$bits" ] ; then
318 out=$(ctdb -Y status 2>&1) || return 1
323 [ "${line#:${pnn}:*:${bits}:}" = "" ] && return 0
326 } <<<"$out" # Yay bash!
327 elif [ -n "$fpat" ] ; then
328 ctdb statistics -n "$pnn" | egrep -q "$fpat"
329 elif [ -n "$mpat" ] ; then
330 ctdb getmonmode -n "$pnn" | egrep -q "$mpat"
332 echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
wait_until_node_has_status ()
{
    # Wait until node $1 reaches status $2 (as interpreted by
    # node_has_status), polling once per second.
    # $1 - node PNN
    # $2 - status keyword (healthy, banned, frozen, ...)
    # $3 - timeout in seconds (default: 30)
    local pnn="$1"
    local status="$2"
    local timeout="${3:-30}"

    echo "Waiting until node $pnn has status \"$status\"..."

    # Quote "$timeout" to avoid word-splitting/globbing of the expansion.
    wait_until "$timeout" node_has_status "$pnn" "$status"
}
348 # Useful for superficially testing IP failover.
349 # IPs must be on nodes matching nodeglob.
350 ips_are_on_nodeglob ()
352 local nodeglob="$1" ; shift
357 try_command_on_node 1 ctdb ip -n all
359 while read ip pnn ; do
360 for check in $ips ; do
361 if [ "$check" = "$ip" ] ; then
366 ips="${ips/${ip}}" # Remove from list
369 done <<<"$out" # bashism to avoid problem setting variable in pipeline.
371 ips="${ips// }" # Remove any spaces.
375 wait_until_ips_are_on_nodeglob ()
377 echo "Waiting for IPs to fail over..."
379 wait_until 60 ips_are_on_nodeglob "$@"
382 #######################################
386 echo "Attempting to politely shutdown daemons..."
387 onnode 1 ctdb shutdown -n all || true
389 echo "Sleeping for a while..."
392 if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
393 echo "Killing remaining daemons..."
394 pkill -f $CTDB_DIR/bin/ctdbd
396 if pgrep -f $CTDB_DIR/bin/ctdbd >/dev/null ; then
397 echo "Once more with feeling.."
398 pkill -9 $CTDB_DIR/bin/ctdbd
402 local var_dir=$CTDB_DIR/tests/var
403 rm -rf $var_dir/test.db
408 local num_nodes="${1:-2}" # default is 2 nodes
410 local var_dir=$CTDB_DIR/tests/var
412 mkdir -p $var_dir/test.db/persistent
414 local nodes=$var_dir/nodes.txt
415 local public_addresses=$var_dir/public_addresses.txt
416 local no_public_addresses=$var_dir/no_public_addresses.txt
417 rm -f $nodes $public_addresses $no_public_addresses
419 # If there are (strictly) greater than 2 nodes then we'll randomly
420 # choose a node to have no public addresses.
421 local no_public_ips=-1
422 [ $num_nodes -gt 2 ] && no_public_ips=$(($RANDOM % $num_nodes))
423 echo "$no_public_ips" >$no_public_addresses
426 for i in $(seq 1 $num_nodes) ; do
427 if [ "${CTDB_USE_IPV6}x" != "x" ]; then
429 ip addr add ::$i/128 dev lo
431 echo 127.0.0.$i >> $nodes
432 # 2 public addresses on most nodes, just to make things interesting.
433 if [ $(($i - 1)) -ne $no_public_ips ] ; then
434 echo "192.0.2.$i/24 lo" >> $public_addresses
435 echo "192.0.2.$(($i + $num_nodes))/24 lo" >> $public_addresses
443 local num_nodes="${1:-2}" # default is 2 nodes
444 shift # "$@" gets passed to ctdbd
446 local var_dir=$CTDB_DIR/tests/var
448 local nodes=$var_dir/nodes.txt
449 local public_addresses=$var_dir/public_addresses.txt
450 local no_public_addresses=$var_dir/no_public_addresses.txt
452 local no_public_ips=-1
453 [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses
455 local ctdb_options="--reclock=$var_dir/rec.lock --nlist $nodes --nopublicipcheck --event-script-dir=tests/events.d --logfile=$var_dir/daemons.log -d 0 --dbdir=$var_dir/test.db --dbdir-persistent=$var_dir/test.db/persistent"
457 echo "Starting $num_nodes ctdb daemons..."
458 if [ "$no_public_ips" != -1 ] ; then
459 echo "Node $no_public_ips will have no public IPs."
462 for i in $(seq 0 $(($num_nodes - 1))) ; do
463 if [ $(id -u) -eq 0 ]; then
464 ctdb_options="$ctdb_options --public-interface=lo"
467 if [ $i -eq $no_public_ips ] ; then
468 ctdb_options="$ctdb_options --public-addresses=/dev/null"
470 ctdb_options="$ctdb_options --public-addresses=$public_addresses"
473 # Need the $PWD so we can use "pkill -f" to kill the daemons.
474 $VALGRIND $PWD/bin/ctdbd --socket=$var_dir/sock.$i $ctdb_options "$@" ||return 1
477 if [ -L /tmp/ctdb.socket -o ! -S /tmp/ctdb.socket ] ; then
478 ln -sf $var_dir/sock.0 /tmp/ctdb.socket || return 1
482 #######################################
486 if [ -e /etc/redhat-release ] ; then
489 /etc/init.d/ctdb restart
495 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
496 daemons_setup $CTDB_TEST_NUM_DAEMONS
502 if [ -n "$CTDB_NODES_SOCKETS" ] ; then
504 daemons_start $CTDB_TEST_NUM_DAEMONS
506 onnode -pq all $CTDB_TEST_WRAPPER _restart_ctdb
509 onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1
511 echo "Setting RerecoveryTimeout to 1"
512 onnode -pq all "ctdb setvar RerecoveryTimeout 1"
514 # In recent versions of CTDB, forcing a recovery like this blocks
515 # until the recovery is complete. Hopefully this will help the
516 # cluster to stabilise before a subsequent test.
517 echo "Forcing a recovery..."
518 onnode -q 0 ctdb recover
520 #echo "Sleeping to allow ctdb to settle..."
526 #######################################
528 install_eventscript ()
530 local script_name="$1"
531 local script_contents="$2"
533 if [ -n "$CTDB_TEST_REAL_CLUSTER" ] ; then
534 # The quoting here is *very* fragile. However, we do
535 # experience the joy of installing a short script using
536 # onnode, and without needing to know the IP addresses of the
538 onnode all "f=\"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\" ; echo \"Installing \$f\" ; echo '${script_contents}' > \"\$f\" ; chmod 755 \"\$f\""
540 f="${CTDB_DIR}/tests/events.d/${script_name}"
541 echo "$script_contents" >"$f"
546 uninstall_eventscript ()
548 local script_name="$1"
550 if [ -n "$CTDB_TEST_REAL_CLUSTER" ] ; then
551 onnode all "rm -vf \"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\""
553 rm -vf "${CTDB_DIR}/tests/events.d/${script_name}"