1 # If we're not running on a real cluster then we need a local copy of
2 # ctdb (and other stuff) in $PATH and we will use local daemons.
# NOTE(review): this chunk is an extract — original file line numbers are
# fused onto each line and interior lines (including the closing "fi"
# statements for the "if" blocks below) are missing from this view.
4 # Use in-tree binaries if running against local daemons.
5 # Otherwise CTDB needs to be installed on all nodes.
6 if [ -n "$ctdb_dir" -a -d "${ctdb_dir}/bin" ] ; then
7 # ctdbd_wrapper is in config/ directory
8 PATH="${ctdb_dir}/bin:${ctdb_dir}/config:${PATH}"
# Point each CTDB helper environment variable at an in-tree binary.
# NOTE(review): $hdir is assigned on a line not visible in this chunk —
# presumably "${ctdb_dir}/bin"; confirm against the full file.
10 export CTDB_EVENTD="${hdir}/ctdb_eventd"
11 export CTDB_EVENT_HELPER="${hdir}/ctdb_event"
12 export CTDB_LOCK_HELPER="${hdir}/ctdb_lock_helper"
13 export CTDB_RECOVERY_HELPER="${hdir}/ctdb_recovery_helper"
14 export CTDB_TAKEOVER_HELPER="${hdir}/ctdb_takeover_helper"
15 export CTDB_CLUSTER_MUTEX_HELPER="${hdir}/ctdb_mutex_fcntl_helper"
# Optional socket_wrapper support: preload the library so daemon sockets
# are redirected into a private directory under the test var dir.
18 if [ -n "$TEST_SOCKET_WRAPPER_SO_PATH" ] ; then
19 export LD_PRELOAD="$TEST_SOCKET_WRAPPER_SO_PATH"
20 export SOCKET_WRAPPER_DIR="${SIMPLE_TESTS_VAR_DIR}/sw"
21 mkdir -p "$SOCKET_WRAPPER_DIR"
# onnode normally uses ssh to reach cluster nodes; for local daemons it
# is pointed at a fake-ssh script instead.
24 # onnode will execute this, which fakes ssh against local daemons
25 export ONNODE_SSH="${TEST_SUBDIR}/scripts/ssh_local_daemons.sh"
27 #######################################
29 # If the given IP is hosted then print 2 items: maskbits and iface
# NOTE(review): fragment of a function — its header, argument handling
# and the lines deriving $addr/$bits are missing from this chunk.
# "ip addr show to ADDR/BITS" lists only interfaces hosting that address.
40 t=$(ip addr show to "${addr}/${bits}")
# Fragment: body of a helper that prints the per-node state directory for
# PNN $pnn (the enclosing function definition is not visible in this chunk).
48 echo "${SIMPLE_TESTS_VAR_DIR}/node.${pnn}"
# Fragment: print the path of node PNN's ctdbd.conf (function header is
# not visible in this chunk).
55 local node_dir=$(node_dir "$pnn")
56 echo "${node_dir}/ctdbd.conf"
# Fragment: print the path of node PNN's ctdbd PID file (function header
# is not visible in this chunk).
63 local node_dir=$(node_dir "$pnn")
64 echo "${node_dir}/ctdbd.pid"
# Fragment: print the path of node PNN's ctdbd Unix-domain socket
# (function header is not visible in this chunk).
71 local node_dir=$(node_dir "$pnn")
72 echo "${node_dir}/ctdbd.socket"
# Fragment: verify that every local-daemon node address is hosted on a
# local interface.  The loop end, the IPv4 branch and the surrounding
# function definition are missing from this chunk.
77 local have_all_ips=true
79 for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
80 if [ -n "$CTDB_USE_IPV6" ]; then
# Zero-padded hex counter gives each node a unique fd00::5357:5fXX
# (socket_wrapper-style) IPv6 address.
81 local j=$(printf "%02x" $i)
82 local node_ip="fd00::5357:5f${j}"
83 if have_ip "$node_ip" ; then
# NOTE(review): the line below looks like the interior of a heredoc or
# message printed when the address is missing — surrounding lines are not
# visible; do not treat it as a shell command.
87 ERROR: ${node_ip} not on an interface, please add it
97 # Fail if we don't have all of the IPv6 addresses assigned
101 setup_public_addresses ()
# Emit a public_addresses file on stdout: two addresses on "lo" per node,
# skipping the node whose PNN is given as $1.  The function's opening
# brace, else/fi/done lines and several interior lines are missing from
# this chunk.
103 local pnn_no_ips="$1"
106 for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
# PNNs are 0-based while the loop counter is 1-based, hence i-1.
107 if [ $((i - 1)) -eq $pnn_no_ips ] ; then
111 # 2 public addresses on most nodes, just to make
# Second address for this node is offset by the daemon count so the two
# ranges don't collide.
113 local j=$((i + TEST_LOCAL_DAEMONS))
114 if [ -n "$CTDB_USE_IPV6" ]; then
115 printf "fc00:10::1:%x/64 lo\n" "$i"
116 printf "fc00:10::1:%x/64 lo\n" "$j"
# NOTE(review): "%x" renders the counter in hex even in the dotted-quad
# IPv4 case — confirm against the full file whether this is intentional.
118 printf "192.168.234.%x/24 lo\n" "$i"
119 printf "192.168.234.%x/24 lo\n" "$j"
# Fragment: set up directories, nodes file, public addresses and per-node
# configuration for all local daemons.  The function header, the option
# parsing loop and several interior lines (else/fi/done, the code that
# writes $conf) are missing from this chunk.
126 local no_public_addresses=false
127 local no_event_scripts=false
129 --no-public-addresses) no_public_addresses=true ;;
130 --no-event-scripts) no_event_scripts=true ;;
133 nodes_file="${SIMPLE_TESTS_VAR_DIR}/nodes"
134 setup_nodes >"$nodes_file" || return 1
136 # If there are (strictly) greater than 2 nodes then we'll
137 # randomly choose a node to have no public addresses
139 if [ $TEST_LOCAL_DAEMONS -gt 2 ] ; then
140 pnn_no_ips=$((RANDOM % TEST_LOCAL_DAEMONS))
143 local public_addresses_all="${SIMPLE_TESTS_VAR_DIR}/public_addresses"
144 setup_public_addresses $pnn_no_ips >"$public_addresses_all"
# Per-node setup loop: base directory, nodes file, public addresses,
# database directories and daemon configuration.
147 for pnn in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
148 local node_dir=$(node_dir "$pnn")
150 setup_ctdb_base "$SIMPLE_TESTS_VAR_DIR" "node.${pnn}" \
# setup_ctdb_base is expected to leave CTDB_BASE pointing at node_dir;
# anything else indicates a broken test environment.
153 if [ "$node_dir" != "$CTDB_BASE" ] ; then
154 die "Inconsistent CTDB_BASE"
157 cp "$nodes_file" "${CTDB_BASE}/nodes"
159 local public_addresses="${node_dir}/public_addresses"
# The randomly chosen node (or every node, with --no-public-addresses)
# gets an empty public_addresses file.
161 if $no_public_addresses || [ $pnn_no_ips -eq $pnn ] ; then
162 echo "Node ${pnn} will have no public IPs."
163 : >"$public_addresses"
165 cp "$public_addresses_all" "$public_addresses"
# This node's own address is line pnn+1 of the nodes file (PNNs are
# 0-based, sed line addresses are 1-based).
168 local node_ip=$(sed -n -e "$(($pnn + 1))p" "$nodes_file")
170 local conf=$(node_conf "$pnn")
172 local db_dir="${node_dir}/db"
173 mkdir -p "${db_dir}/persistent"
175 if $no_event_scripts ; then
176 rm -vf "${CTDB_BASE}/events.d/"*
# Per-node daemon settings — presumably written into $conf by lines not
# visible in this chunk; confirm against the full file.
180 CTDB_RECOVERY_LOCK="${SIMPLE_TESTS_VAR_DIR}/rec.lock"
181 CTDB_NODE_ADDRESS="${node_ip}"
182 CTDB_LOGGING="file:${node_dir}/log.ctdb"
184 CTDB_DBDIR="${db_dir}"
185 CTDB_DBDIR_PERSISTENT="${db_dir}/persistent"
186 CTDB_DBDIR_STATE="${db_dir}/state"
# Fragment: start one ctdb daemon (the enclosing function header and the
# command the env-var overrides below are applied to — presumably a
# ctdbd_wrapper invocation — are missing from this chunk).
195 local node_dir=$(node_dir "$pnn")
196 local pidfile=$(node_pidfile "$pnn")
197 local conf=$(node_conf "$pnn")
198 local socket=$(node_socket "$pnn")
# Per-invocation environment: run ctdbd (optionally under valgrind) with
# this node's base dir, PID file and socket.
200 CTDBD="${VALGRIND} ctdbd --sloppy-start --nopublicipcheck" \
201 CTDB_BASE="$node_dir" \
203 CTDB_PIDFILE="$pidfile" \
204 CTDB_SOCKET="$socket" \
# Fragment: start every local daemon (loop body and terminator are
# missing from this chunk).
210 echo "Starting $TEST_LOCAL_DAEMONS ctdb daemons..."
213 for pnn in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
# Fragment: stop one ctdb daemon (the enclosing function header and the
# command the env-var overrides apply to are missing from this chunk).
221 local node_dir=$(node_dir "$pnn")
222 local pidfile=$(node_pidfile "$pnn")
223 local conf=$(node_conf "$pnn")
224 local socket=$(node_socket "$pnn")
# Same per-node environment as when starting, minus the CTDBD override.
226 CTDB_BASE="$node_dir" \
228 CTDB_PIDFILE="$pidfile" \
229 CTDB_SOCKET="$socket" \
# Fragment: stop every local daemon, then remove the shared test
# database directory (loop body/terminator missing from this chunk).
235 echo "Stopping $TEST_LOCAL_DAEMONS ctdb daemons..."
238 for pnn in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
242 rm -rf "${SIMPLE_TESTS_VAR_DIR}/test.db"
268 # If this fails to find processes then the test fails, so
269 # look at full command-line so this will work with valgrind.
270 # Note that the output could be generated with pgrep's -a
271 # option but it doesn't exist in older versions.
# pgrep emits one PID per line; xargs+sed fold them into the
# comma-separated list ps -p expects.  "-o args ww" prints the full,
# unlimited-width command line.
272 ps -p $(pgrep -f '\<ctdbd\>' | xargs | sed -e 's| |,|g') -o args ww
276 # onnode will use CTDB_BASES to help the ctdb tool connect to each
# daemon: accumulate every node's base directory into a space-separated
# list (some surrounding lines are missing from this chunk).
279 for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
280 node_dir=$(node_dir "$i")
# ${CTDB_BASES:+ } inserts the separating space only when the list is
# already non-empty, avoiding a leading space.
281 CTDB_BASES="${CTDB_BASES}${CTDB_BASES:+ }${node_dir}"
284 # Need a default CTDB_BASE for onnode (to find the functions file).
285 # Any node will do, so pick the 1st...
# %% strips the longest suffix matching " *", leaving node 0's directory.
286 export CTDB_BASE="${CTDB_BASES%% *}"