along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "includes.h"
-#include "tdb.h"
+#include "replace.h"
#include "system/network.h"
#include "system/filesys.h"
-#include "system/dir.h"
-#include "../include/ctdb_private.h"
+#include "system/time.h"
+
+#include <talloc.h>
+#include <tevent.h>
+
#include "lib/tdb_wrap/tdb_wrap.h"
#include "lib/util/dlinklist.h"
-#include "../include/ctdb_private.h"
-#include "../common/rb_tree.h"
+#include "lib/util/debug.h"
+#include "lib/util/samba_util.h"
+#include "lib/util/sys_rw.h"
+#include "lib/util/util_process.h"
+
+#include "ctdb_private.h"
+#include "ctdb_client.h"
+
+#include "common/rb_tree.h"
+#include "common/common.h"
+#include "common/logging.h"
#define TIMELIMIT() timeval_current_ofs(10, 0)
struct ctdb_context *ctdb;
struct ctdb_db_context *ctdb_db;
struct ctdb_ltdb_header hdr;
+ uint32_t remote_fail_count;
TDB_DATA key;
uint8_t keydata[1];
};
memcpy(dd->keydata, key.dptr, key.dsize);
dd->hdr = *hdr;
+ dd->remote_fail_count = 0;
hash = ctdb_hash(&key);
}
-static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
+static void ctdb_vacuum_event(struct tevent_context *ev,
+ struct tevent_timer *te,
struct timeval t, void *private_data);
static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
return 0;
}
-/**
- * Variant of delete_marshall_traverse() that bumps the
- * RSN of each traversed record in the database.
- *
- * This is needed to ensure that when rolling out our
- * empty record copy before remote deletion, we as the
- * record's dmaster keep a higher RSN than the non-dmaster
- * nodes. This is needed to prevent old copies from
- * resurrection in recoveries.
- */
-static int delete_marshall_traverse_first(void *param, void *data)
-{
- struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
- struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
- struct ctdb_db_context *ctdb_db = dd->ctdb_db;
- struct ctdb_context *ctdb = ctdb_db->ctdb;
- struct ctdb_ltdb_header header;
- uint32_t lmaster;
- uint32_t hash = ctdb_hash(&(dd->key));
- int res;
-
- res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
- if (res != 0) {
- DEBUG(DEBUG_ERR,
- (__location__ " Error getting chainlock on record with "
- "key hash [0x%08x] on database db[%s].\n",
- hash, ctdb_db->db_name));
- recs->vdata->count.delete_list.skipped++;
- recs->vdata->count.delete_list.left--;
- talloc_free(dd);
- return 0;
- }
-
- /*
- * Verify that the record is still empty, its RSN has not
- * changed and that we are still its lmaster and dmaster.
- */
-
- res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
- vacuum_record_parser, &header);
- if (res != 0) {
- goto skip;
- }
-
- if (header.flags & CTDB_REC_RO_FLAGS) {
- DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
- "on database db[%s] has read-only flags. "
- "skipping.\n",
- hash, ctdb_db->db_name));
- goto skip;
- }
-
- if (header.dmaster != ctdb->pnn) {
- DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
- "on database db[%s] has been migrated away. "
- "skipping.\n",
- hash, ctdb_db->db_name));
- goto skip;
- }
-
- if (header.rsn != dd->hdr.rsn) {
- DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
- "on database db[%s] seems to have been "
- "migrated away and back again (with empty "
- "data). skipping.\n",
- hash, ctdb_db->db_name));
- goto skip;
- }
-
- lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
-
- if (lmaster != ctdb->pnn) {
- DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
- "delete list (key hash [0x%08x], db[%s]). "
- "Strange! skipping.\n",
- hash, ctdb_db->db_name));
- goto skip;
- }
-
- /*
- * Increment the record's RSN to ensure the dmaster (i.e. the current
- * node) has the highest RSN of the record in the cluster.
- * This is to prevent old record copies from resurrecting in recoveries
- * if something should fail during the deletion process.
- * Note that ctdb_ltdb_store_server() increments the RSN if called
- * on the record's dmaster.
- */
-
- res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
- if (res != 0) {
- DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
- "key hash [0x%08x] on database db[%s].\n",
- hash, ctdb_db->db_name));
- goto skip;
- }
-
- tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
-
- goto done;
-
-skip:
- tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
-
- recs->vdata->count.delete_list.skipped++;
- recs->vdata->count.delete_list.left--;
- talloc_free(dd);
- dd = NULL;
-
-done:
- if (dd == NULL) {
- return 0;
- }
-
- return delete_marshall_traverse(param, data);
-}
-
/**
* traverse function for the traversal of the delete_queue,
* the fast-path vacuuming list.
uint32_t lmaster;
uint32_t hash = ctdb_hash(&(dd->key));
+ if (dd->remote_fail_count > 0) {
+ vdata->count.delete_list.remote_error++;
+ vdata->count.delete_list.left--;
+ talloc_free(dd);
+ return 0;
+ }
+
res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
if (res != 0) {
DEBUG(DEBUG_ERR,
goto skip;
}
- if (header.rsn != dd->hdr.rsn + 1) {
+ if (header.rsn != dd->hdr.rsn) {
/*
* The record has been migrated off the node and back again.
* But not requeued for deletion. Skip it.
- * (Note that the first marshall traverse has bumped the RSN
- * on disk.)
*/
DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
"on database db[%s] seems to have been "
* at least some of these records previously from the former dmasters
* with the vacuum fetch message.
*
- * This last step is implemented as a 3-phase process to protect from
- * races leading to data corruption:
- *
- * 1) Send the lmaster's copy to all other active nodes with the
- * RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
- * 2) Send the records that could successfully be stored remotely
- * in step #1 to all active nodes with the TRY_DELETE_RECORDS
+ * 1) Send the records to all active nodes with the TRY_DELETE_RECORDS
* control. The remote notes delete their local copy.
- * 3) The lmaster locally deletes its copies of all records that
+ * 2) The lmaster locally deletes its copies of all records that
* could successfully be deleted remotely in step #2.
*/
static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
struct ctdb_context *ctdb = ctdb_db->ctdb;
struct delete_records_list *recs;
TDB_DATA indata;
- struct ctdb_node_map *nodemap;
+ struct ctdb_node_map_old *nodemap;
uint32_t *active_nodes;
int num_active_nodes;
TALLOC_CTX *tmp_ctx;
num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);
/*
- * Now delete the records all active nodes in a three-phase process:
- * 1) send all active remote nodes the current empty copy with this
- * node as DMASTER
- * 2) if all nodes could store the new copy,
- * tell all the active remote nodes to delete all their copy
- * 3) if all remote nodes deleted their record copy, delete it locally
- */
-
- /*
- * Step 1:
- * Send currently empty record copy to all active nodes for storing.
+ * Now delete the records on all active nodes in a two-phase process:
+ * 1) tell all active remote nodes to delete all their copy
+ * 2) if all remote nodes deleted their record copy, delete it locally
*/
recs = talloc_zero(tmp_ctx, struct delete_records_list);
DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
goto done;
}
- recs->records = (struct ctdb_marshall_buffer *)
- talloc_zero_size(recs,
- offsetof(struct ctdb_marshall_buffer, data));
- if (recs->records == NULL) {
- DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
- goto done;
- }
- recs->records->db_id = ctdb_db->db_id;
- recs->vdata = vdata;
/*
- * traverse the tree of all records we want to delete and
- * create a blob we can send to the other nodes.
- *
- * We call delete_marshall_traverse_first() to bump the
- * records' RSNs in the database, to ensure we (as dmaster)
- * keep the highest RSN of the records in the cluster.
- */
- ret = trbt_traversearray32(vdata->delete_list, 1,
- delete_marshall_traverse_first, recs);
- if (ret != 0) {
- DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
- "delete list for first marshalling.\n"));
- goto done;
- }
-
- indata = ctdb_marshall_finish(recs->records);
-
- for (i = 0; i < num_active_nodes; i++) {
- struct ctdb_marshall_buffer *records;
- struct ctdb_rec_data *rec;
- int32_t res;
- TDB_DATA outdata;
-
- ret = ctdb_control(ctdb, active_nodes[i], 0,
- CTDB_CONTROL_RECEIVE_RECORDS, 0,
- indata, recs, &outdata, &res,
- NULL, NULL);
- if (ret != 0 || res != 0) {
- DEBUG(DEBUG_ERR, ("Error storing record copies on "
- "node %u: ret[%d] res[%d]\n",
- active_nodes[i], ret, res));
- goto done;
- }
-
- /*
- * outdata contains the list of records coming back
- * from the node: These are the records that the
- * remote node could not store. We remove these from
- * the list to process further.
- */
- records = (struct ctdb_marshall_buffer *)outdata.dptr;
- rec = (struct ctdb_rec_data *)&records->data[0];
- while (records->count-- > 1) {
- TDB_DATA reckey, recdata;
- struct ctdb_ltdb_header *rechdr;
- struct delete_record_data *dd;
-
- reckey.dptr = &rec->data[0];
- reckey.dsize = rec->keylen;
- recdata.dptr = &rec->data[reckey.dsize];
- recdata.dsize = rec->datalen;
-
- if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
- DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
- goto done;
- }
- rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
- recdata.dptr += sizeof(*rechdr);
- recdata.dsize -= sizeof(*rechdr);
-
- dd = (struct delete_record_data *)trbt_lookup32(
- vdata->delete_list,
- ctdb_hash(&reckey));
- if (dd != NULL) {
- /*
- * The other node could not store the record
- * copy and it is the first node that failed.
- * So we should remove it from the tree and
- * update statistics.
- */
- talloc_free(dd);
- vdata->count.delete_list.remote_error++;
- vdata->count.delete_list.left--;
- } else {
- DEBUG(DEBUG_ERR, (__location__ " Failed to "
- "find record with hash 0x%08x coming "
- "back from RECEIVE_RECORDS "
- "control in delete list.\n",
- ctdb_hash(&reckey)));
- vdata->count.delete_list.local_error++;
- vdata->count.delete_list.left--;
- }
-
- rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
- }
- }
-
- if (vdata->count.delete_list.left == 0) {
- goto success;
- }
-
- /*
- * Step 2:
- * Send the remaining records to all active nodes for deletion.
- *
- * The lmaster's (i.e. our) copies of these records have been stored
- * successfully on the other nodes.
+ * Step 1:
+ * Send all records to all active nodes for deletion.
*/
/*
* Create a marshall blob from the remaining list of records to delete.
*/
- talloc_free(recs->records);
-
recs->records = (struct ctdb_marshall_buffer *)
talloc_zero_size(recs,
offsetof(struct ctdb_marshall_buffer, data));
for (i = 0; i < num_active_nodes; i++) {
struct ctdb_marshall_buffer *records;
- struct ctdb_rec_data *rec;
+ struct ctdb_rec_data_old *rec;
int32_t res;
TDB_DATA outdata;
* the list to delete locally.
*/
records = (struct ctdb_marshall_buffer *)outdata.dptr;
- rec = (struct ctdb_rec_data *)&records->data[0];
+ rec = (struct ctdb_rec_data_old *)&records->data[0];
while (records->count-- > 1) {
TDB_DATA reckey, recdata;
struct ctdb_ltdb_header *rechdr;
ctdb_hash(&reckey));
if (dd != NULL) {
/*
- * The other node could not delete the
- * record and it is the first node that
- * failed. So we should remove it from
- * the tree and update statistics.
+ * The remote node could not delete the
+ * record. Since other remote nodes can
+ * also fail, we just mark the record.
*/
- talloc_free(dd);
- vdata->count.delete_list.remote_error++;
- vdata->count.delete_list.left--;
+ dd->remote_fail_count++;
} else {
DEBUG(DEBUG_ERR, (__location__ " Failed to "
"find record with hash 0x%08x coming "
"back from TRY_DELETE_RECORDS "
"control in delete list.\n",
ctdb_hash(&reckey)));
- vdata->count.delete_list.local_error++;
- vdata->count.delete_list.left--;
}
- rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
+ rec = (struct ctdb_rec_data_old *)(rec->length + (uint8_t *)rec);
}
}
- if (vdata->count.delete_list.left == 0) {
- goto success;
- }
-
/*
- * Step 3:
+ * Step 2:
* Delete the remaining records locally.
*
* These records have successfully been deleted on all
"delete list for deletion.\n"));
}
-success:
-
if (vdata->count.delete_list.left != 0) {
DEBUG(DEBUG_ERR, (__location__ " Vaccum db[%s] error: "
"there are %u records left for deletion after "
DLIST_REMOVE(ctdb->vacuumers, child_ctx);
- event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
- timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
- ctdb_vacuum_event, child_ctx->vacuum_handle);
+ tevent_add_timer(ctdb->ev, child_ctx->vacuum_handle,
+ timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
+ ctdb_vacuum_event, child_ctx->vacuum_handle);
return 0;
}
/*
* this event is generated when a vacuum child process times out
*/
-static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
- struct timeval t, void *private_data)
+static void vacuum_child_timeout(struct tevent_context *ev,
+ struct tevent_timer *te,
+ struct timeval t, void *private_data)
{
struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
/*
* this event is generated when a vacuum child process has completed
*/
-static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
- uint16_t flags, void *private_data)
+static void vacuum_child_handler(struct tevent_context *ev,
+ struct tevent_fd *fde,
+ uint16_t flags, void *private_data)
{
struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
char c = 0;
/*
* this event is called every time we need to start a new vacuum process
*/
-static void
-ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
- struct timeval t, void *private_data)
+static void ctdb_vacuum_event(struct tevent_context *ev,
+ struct tevent_timer *te,
+ struct timeval t, void *private_data)
{
struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
struct ctdb_context *ctdb = ctdb_db->ctdb;
struct ctdb_vacuum_child_context *child_ctx;
struct tevent_fd *fde;
+ bool full_vacuum_run = false;
int ret;
- /* we dont vacuum if we are in recovery mode, or db frozen */
+ /* we don't vacuum if we are in recovery mode, or db frozen */
if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
- ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
+ ctdb_db_frozen(ctdb_db)) {
DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
- ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
- : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
- ? "freeze pending"
- : "frozen"));
- event_add_timed(ctdb->ev, vacuum_handle,
- timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
- ctdb_vacuum_event, vacuum_handle);
+ ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ?
+ "in recovery" : "frozen"));
+ tevent_add_timer(ctdb->ev, vacuum_handle,
+ timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
+ ctdb_vacuum_event, vacuum_handle);
return;
}
* new vacuuming event to stagger vacuuming events.
*/
if (ctdb->vacuumers != NULL) {
- event_add_timed(ctdb->ev, vacuum_handle,
- timeval_current_ofs(0, 500*1000),
- ctdb_vacuum_event, vacuum_handle);
+ tevent_add_timer(ctdb->ev, vacuum_handle,
+ timeval_current_ofs(0, 500*1000),
+ ctdb_vacuum_event, vacuum_handle);
return;
}
if (ret != 0) {
talloc_free(child_ctx);
DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
- event_add_timed(ctdb->ev, vacuum_handle,
- timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
- ctdb_vacuum_event, vacuum_handle);
+ tevent_add_timer(ctdb->ev, vacuum_handle,
+ timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
+ ctdb_vacuum_event, vacuum_handle);
return;
}
- if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
+ if (vacuum_handle->fast_path_count >=
+ ctdb->tunable.vacuum_fast_path_count) {
+ if (ctdb->tunable.vacuum_fast_path_count > 0) {
+ full_vacuum_run = true;
+ }
vacuum_handle->fast_path_count = 0;
}
close(child_ctx->fd[1]);
talloc_free(child_ctx);
DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
- event_add_timed(ctdb->ev, vacuum_handle,
- timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
- ctdb_vacuum_event, vacuum_handle);
+ tevent_add_timer(ctdb->ev, vacuum_handle,
+ timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
+ ctdb_vacuum_event, vacuum_handle);
return;
}
if (child_ctx->child_pid == 0) {
char cc = 0;
- bool full_vacuum_run = false;
close(child_ctx->fd[0]);
DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
- ctdb_set_process_name("ctdb_vacuum");
- if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
+ prctl_set_comment("ctdb_vacuum");
+ if (switch_from_server_to_client(ctdb) != 0) {
DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
_exit(1);
}
- if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
- (vacuum_handle->fast_path_count == 0))
- {
- full_vacuum_run = true;
- }
cc = ctdb_vacuum_and_repack_db(ctdb_db, full_vacuum_run);
sys_write(child_ctx->fd[1], &cc, 1);
"in parent context. Shutting down\n");
}
- event_add_timed(ctdb->ev, child_ctx,
- timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
- vacuum_child_timeout, child_ctx);
+ tevent_add_timer(ctdb->ev, child_ctx,
+ timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
+ vacuum_child_timeout, child_ctx);
DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));
- fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
- EVENT_FD_READ, vacuum_child_handler, child_ctx);
+ fde = tevent_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
+ TEVENT_FD_READ, vacuum_child_handler, child_ctx);
tevent_fd_set_auto_close(fde);
vacuum_handle->child_ctx = child_ctx;
*/
int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
{
- if (ctdb_db->persistent != 0) {
- DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
+ if (! ctdb_db_volatile(ctdb_db)) {
+ DEBUG(DEBUG_ERR,
+ ("Vacuuming is disabled for non-volatile database %s\n",
+ ctdb_db->db_name));
return 0;
}
ctdb_db->vacuum_handle->ctdb_db = ctdb_db;
ctdb_db->vacuum_handle->fast_path_count = 0;
- event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
- timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
- ctdb_vacuum_event, ctdb_db->vacuum_handle);
+ tevent_add_timer(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
+ timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
+ ctdb_vacuum_event, ctdb_db->vacuum_handle);
return 0;
}
hash = (uint32_t)ctdb_hash(&key);
- DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
- "db_id[0x%08x] "
- "key_hash[0x%08x] "
- "lmaster[%u] "
- "migrated_with_data[%s]\n",
+ DEBUG(DEBUG_DEBUG, (__location__ " schedule for deletion: db[%s] "
+ "db_id[0x%08x] "
+ "key_hash[0x%08x] "
+ "lmaster[%u] "
+ "migrated_with_data[%s]\n",
ctdb_db->db_name, ctdb_db->db_id,
hash,
ctdb_lmaster(ctdb_db->ctdb, &key),
return ret;
}
- /* if we dont have a connection to the daemon we can not send
+ /* if we don't have a connection to the daemon we can not send
a control. For example sometimes from update_record control child
process.
*/