+
+ DEBUG(10, ("g_lock_trylock: Did not get lock, waiting...\n"));
+
+ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * !!! HACK ALERT --- FIX ME !!!
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * What we really want to do here is to react to
+ * MSG_DBWRAP_G_LOCK_RETRY messages that are either sent
+ * by a client doing g_lock_unlock or by ourselves when
+ * we receive a CTDB_SRVID_SAMBA_NOTIFY or
+ * CTDB_SRVID_RECONFIGURE message from ctdbd, i.e. when
+ * either a client holding a lock or a complete node
+ * has died.
+ *
+ * Doing this properly involves calling tevent_loop_once(),
+ * but doing this here with the main ctdbd messaging context
+ * creates a nested event loop when g_lock_lock() is called
+ * from the main event loop, e.g. in a tcon_and_X where the
+ * share_info.tdb needs to be initialized and is locked by
+ * another process, or when the remote registry is accessed
+ * for writing and some other process already holds a lock
+ * on the registry.tdb.
+ *
+ * So as a quick fix, we act a little coarsely here: we do
+ * a select on the ctdb connection fd and when it is readable
+ * or we get EINTR, then we retry without actually parsing
+ * any ctdb packets or dispatching messages. This means that
+ * we retry more often than intended by design, but this does
+ * not harm and it is unobtrusive. When we have finished,
+ * the main loop will pick up all the messages and ctdb
+ * packets. The only extra twist is that we cannot use timed
+ * events here but have to handcode a timeout.
+ */
+
+#ifdef CLUSTER_SUPPORT
+ if (lp_clustering()) {
+ struct ctdbd_connection *conn = messaging_ctdbd_connection();
+
+ r_fds = &_r_fds;
+ FD_ZERO(r_fds);
+ max_fd = ctdbd_conn_get_fd(conn);
+ FD_SET(max_fd, r_fds);