caused by the cluster filesystem being very slow to
arbitrate locks immediately after a node failure.
*/
- DEBUG(DEBUG_NOTICE,(__location__ " set_recmode timeout - allowing recmode set\n"));
+ DEBUG(DEBUG_ERR,(__location__ " set_recmode child process hung/timedout CFS slow to grant locks? (allowing recmode set anyway)\n"));
state->ctdb->recovery_mode = state->recmode;
ctdb_request_control_reply(state->ctdb, state->c, NULL, 0, NULL);
talloc_free(state);
state = talloc(ctdb, struct ctdb_set_recmode_state);
CTDB_NO_MEMORY(ctdb, state);
+
+ if (ctdb->tunable.verify_recovery_lock == 0) {
+ /* don't need to verify the reclock file */
+ ctdb->recovery_mode = recmode;
+ return 0;
+ }
+
/* For the rest of what needs to be done, we need to do this in
a child process since
1, the call to ctdb_recovery_lock() can block if the cluster
filesystem is in the process of recovery.
- 2, running of the script may take a while.
*/
ret = pipe(state->fd);
if (ret != 0) {
char cc = 0;
close(state->fd[0]);
- /* we should not be able to get the lock on the nodes list,
+ /* we should not be able to get the lock on the reclock file,
as it should be held by the recovery master
*/
if (ctdb_recovery_lock(ctdb, false)) {
/* make sure we die when our parent dies */
while (kill(parent, 0) == 0 || errno != ESRCH) {
sleep(5);
+ write(state->fd[1], &cc, 1);
}
_exit(0);
}
talloc_set_destructor(state, set_recmode_destructor);
- state->te = event_add_timed(ctdb->ev, state, timeval_current_ofs(3, 0),
+ state->te = event_add_timed(ctdb->ev, state, timeval_current_ofs(15, 0),
ctdb_set_recmode_timeout, state);
state->fde = event_add_fd(ctdb->ev, state, state->fd[0],
{ "RecdFailCount", 3, offsetof(struct ctdb_tunable, recd_ping_failcount) },
{ "LogLatencyMs", 0, offsetof(struct ctdb_tunable, log_latency_ms) },
{ "RecoveryDropAllIPs", 60, offsetof(struct ctdb_tunable, recovery_drop_all_ips) },
+ { "VerifyRecoveryLock", 1, offsetof(struct ctdb_tunable, verify_recovery_lock) },
};
/*