/*
 * ctdb/server/ctdb_vacuum.c
 * (scraped page header: "ctdb-daemon: Rename struct ctdb_rec_data to ctdb_rec_data_old")
 */
1 /*
2    ctdb vacuuming events
3
4    Copyright (C) Ronnie Sahlberg  2009
5    Copyright (C) Michael Adam 2010-2013
6    Copyright (C) Stefan Metzmacher 2010-2011
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include "replace.h"
23 #include "system/network.h"
24 #include "system/filesys.h"
25 #include "system/time.h"
26
27 #include <talloc.h>
28 #include <tevent.h>
29
30 #include "lib/tdb_wrap/tdb_wrap.h"
31 #include "lib/util/dlinklist.h"
32 #include "lib/util/debug.h"
33 #include "lib/util/samba_util.h"
34
35 #include "ctdb_private.h"
36 #include "ctdb_client.h"
37 #include "ctdb_logging.h"
38
39 #include "common/rb_tree.h"
40 #include "common/system.h"
41 #include "common/common.h"
42
43 #define TIMELIMIT() timeval_current_ofs(10, 0)
44
/* Progress/result states a vacuum child process reports to the daemon. */
enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};
46
/* State for one forked vacuuming child process. */
struct ctdb_vacuum_child_context {
	struct ctdb_vacuum_child_context *next, *prev; /* list linkage */
	struct ctdb_vacuum_handle *vacuum_handle;      /* owning per-db handle */
	/* fd child writes status to */
	int fd[2];
	pid_t child_pid;
	enum vacuum_child_status status;
	struct timeval start_time;                     /* when the child was started */
};
56
/* Per-database vacuuming state. */
struct ctdb_vacuum_handle {
	struct ctdb_db_context *ctdb_db;
	struct ctdb_vacuum_child_context *child_ctx;
	/* counts fast-path runs so the full db traverse is only done
	 * every VacuumFastPathCount runs (see ctdb_vacuum_traverse_db()) */
	uint32_t fast_path_count;
};
62
63
/*  a list of records to possibly delete */
struct vacuum_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct tdb_context *dest_db;
	trbt_tree_t *delete_list;      /* records to delete as lmaster, keyed by key hash */
	struct ctdb_marshall_buffer **vacuum_fetch_list; /* one buffer per node, indexed by lmaster pnn */
	struct timeval start;
	bool traverse_error;           /* set when building a fetch list fails */
	bool vacuum;
	/* statistics for the individual vacuuming phases */
	struct {
		/* fast-path processing of the per-db delete_queue */
		struct {
			uint32_t added_to_vacuum_fetch_list;
			uint32_t added_to_delete_list;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} delete_queue;
		/* full read-only traverse of the local tdb */
		struct {
			uint32_t scheduled;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} db_traverse;
		/* processing of the delete_list (lmaster deletion) */
		struct {
			uint32_t total;
			uint32_t remote_error;
			uint32_t local_error;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t left;   /* records still pending in the current phase */
		} delete_list;
		/* database repack statistics */
		struct {
			uint32_t vacuumed;
			uint32_t copied;
		} repack;
	} count;
};
103
/* this structure contains the information for one record to be deleted */
struct delete_record_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct ctdb_ltdb_header hdr;   /* header as seen when the record was queued */
	TDB_DATA key;                  /* key.dptr points into keydata below */
	/* variable-length key bytes; the struct is allocated with
	 * offsetof(struct delete_record_data, keydata) + key.dsize
	 * (see insert_delete_record_data_into_tree()) */
	uint8_t keydata[1];
};
112
/* Context for the delete-list marshall traverses: accumulates records
 * into a marshall buffer and gives access to the vacuum statistics. */
struct delete_records_list {
	struct ctdb_marshall_buffer *records;
	struct vacuum_data *vdata;
};
117
118 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
119                                            const struct ctdb_ltdb_header *hdr,
120                                            TDB_DATA key);
121
122 /**
123  * Store key and header in a tree, indexed by the key hash.
124  */
125 static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
126                                                struct ctdb_db_context *ctdb_db,
127                                                trbt_tree_t *tree,
128                                                const struct ctdb_ltdb_header *hdr,
129                                                TDB_DATA key)
130 {
131         struct delete_record_data *dd;
132         uint32_t hash;
133         size_t len;
134
135         len = offsetof(struct delete_record_data, keydata) + key.dsize;
136
137         dd = (struct delete_record_data *)talloc_size(tree, len);
138         if (dd == NULL) {
139                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
140                 return -1;
141         }
142         talloc_set_name_const(dd, "struct delete_record_data");
143
144         dd->ctdb      = ctdb;
145         dd->ctdb_db   = ctdb_db;
146         dd->key.dsize = key.dsize;
147         dd->key.dptr  = dd->keydata;
148         memcpy(dd->keydata, key.dptr, key.dsize);
149
150         dd->hdr = *hdr;
151
152         hash = ctdb_hash(&key);
153
154         trbt_insert32(tree, hash, dd);
155
156         return 0;
157 }
158
159 static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
160                                      struct ctdb_ltdb_header *hdr)
161 {
162         struct ctdb_context *ctdb = vdata->ctdb;
163         struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
164         uint32_t hash;
165         int ret;
166
167         hash = ctdb_hash(&key);
168
169         if (trbt_lookup32(vdata->delete_list, hash)) {
170                 DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
171                 return 0;
172         }
173
174         ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
175                                                   vdata->delete_list,
176                                                   hdr, key);
177         if (ret != 0) {
178                 return -1;
179         }
180
181         vdata->count.delete_list.total++;
182
183         return 0;
184 }
185
186 /**
187  * Add a record to the list of records to be sent
188  * to their lmaster with VACUUM_FETCH.
189  */
190 static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
191                                            TDB_DATA key)
192 {
193         struct ctdb_context *ctdb = vdata->ctdb;
194         uint32_t lmaster;
195         struct ctdb_marshall_buffer *vfl;
196
197         lmaster = ctdb_lmaster(ctdb, &key);
198
199         vfl = vdata->vacuum_fetch_list[lmaster];
200
201         vfl = ctdb_marshall_add(ctdb, vfl, vfl->db_id, ctdb->pnn,
202                                 key, NULL, tdb_null);
203         if (vfl == NULL) {
204                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
205                 vdata->traverse_error = true;
206                 return -1;
207         }
208
209         vdata->vacuum_fetch_list[lmaster] = vfl;
210
211         return 0;
212 }
213
214
215 static void ctdb_vacuum_event(struct tevent_context *ev,
216                               struct tevent_timer *te,
217                               struct timeval t, void *private_data);
218
219 static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
220 {
221         struct ctdb_ltdb_header *header =
222                 (struct ctdb_ltdb_header *)private_data;
223
224         if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
225                 return -1;
226         }
227
228         *header = *(struct ctdb_ltdb_header *)data.dptr;
229
230         return 0;
231 }
232
233 /*
234  * traverse function for gathering the records that can be deleted
235  */
236 static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
237                            void *private_data)
238 {
239         struct vacuum_data *vdata = talloc_get_type(private_data,
240                                                     struct vacuum_data);
241         struct ctdb_context *ctdb = vdata->ctdb;
242         struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
243         uint32_t lmaster;
244         struct ctdb_ltdb_header *hdr;
245         int res = 0;
246
247         vdata->count.db_traverse.total++;
248
249         lmaster = ctdb_lmaster(ctdb, &key);
250         if (lmaster >= ctdb->num_nodes) {
251                 vdata->count.db_traverse.error++;
252                 DEBUG(DEBUG_CRIT, (__location__
253                                    " lmaster[%u] >= ctdb->num_nodes[%u] for key"
254                                    " with hash[%u]!\n",
255                                    (unsigned)lmaster,
256                                    (unsigned)ctdb->num_nodes,
257                                    (unsigned)ctdb_hash(&key)));
258                 return -1;
259         }
260
261         if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
262                 /* it is not a deleted record */
263                 vdata->count.db_traverse.skipped++;
264                 return 0;
265         }
266
267         hdr = (struct ctdb_ltdb_header *)data.dptr;
268
269         if (hdr->dmaster != ctdb->pnn) {
270                 vdata->count.db_traverse.skipped++;
271                 return 0;
272         }
273
274         /*
275          * Add the record to this process's delete_queue for processing
276          * in the subsequent traverse in the fast vacuum run.
277          */
278         res = insert_record_into_delete_queue(ctdb_db, hdr, key);
279         if (res != 0) {
280                 vdata->count.db_traverse.error++;
281         } else {
282                 vdata->count.db_traverse.scheduled++;
283         }
284
285         return 0;
286 }
287
288 /*
289  * traverse the tree of records to delete and marshall them into
290  * a blob
291  */
292 static int delete_marshall_traverse(void *param, void *data)
293 {
294         struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
295         struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
296         struct ctdb_marshall_buffer *m;
297
298         m = ctdb_marshall_add(recs, recs->records, recs->records->db_id,
299                               recs->records->db_id,
300                               dd->key, &dd->hdr, tdb_null);
301         if (m == NULL) {
302                 DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
303                 return -1;
304         }
305
306         recs->records = m;
307         return 0;
308 }
309
/**
 * Variant of delete_marshall_traverse() that bumps the
 * RSN of each traversed record in the database.
 *
 * This is needed to ensure that when rolling out our
 * empty record copy before remote deletion, we as the
 * record's dmaster keep a higher RSN than the non-dmaster
 * nodes. This is needed to prevent old copies from
 * resurrection in recoveries.
 */
static int delete_marshall_traverse_first(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));
	int res;

	/* Non-blocking lock: a currently busy record is simply dropped
	 * from this vacuuming run and counted as skipped. */
	res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		recs->vdata->count.delete_list.skipped++;
		recs->vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	/* vacuum_record_parser fails if the record is no longer header-only */
	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	/*
	 * Increment the record's RSN to ensure the dmaster (i.e. the current
	 * node) has the highest RSN of the record in the cluster.
	 * This is to prevent old record copies from resurrecting in recoveries
	 * if something should fail during the deletion process.
	 * Note that ctdb_ltdb_store_server() increments the RSN if called
	 * on the record's dmaster.
	 */

	res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
	if (res != 0) {
		DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
				  "key hash [0x%08x] on database db[%s].\n",
				  hash, ctdb_db->db_name));
		goto skip;
	}

	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	goto done;

skip:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	recs->vdata->count.delete_list.skipped++;
	recs->vdata->count.delete_list.left--;
	talloc_free(dd);
	/* NULL marks this record as skipped so it is not marshalled below */
	dd = NULL;

done:
	if (dd == NULL) {
		return 0;
	}

	/* still a valid candidate: marshall it into the blob */
	return delete_marshall_traverse(param, data);
}
421
/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 *  - If the record has been migrated off the node
 *    or has been revived (filled with data) on the node,
 *    then skip the record.
 *
 *  - If the current node is the record's lmaster and it is
 *    a record that has never been migrated with data, then
 *    delete the record from the local tdb.
 *
 *  - If the current node is the record's lmaster and it has
 *    been migrated with data, then schedule it for the normal
 *    vacuuming procedure (i.e. add it to the delete_list).
 *
 *  - If the current node is NOT the record's lmaster then
 *    add it to the list of records that are to be sent to
 *    the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	vdata->count.delete_queue.total++;

	/* Non-blocking lock: a busy record is counted as an error and
	 * left for a later vacuuming run. */
	res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		vdata->count.delete_queue.error++;
		return 0;
	}

	/* vacuum_record_parser fails if the record is no longer
	 * header-only, i.e. it has been revived with data. */
	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skipped;
	}

	if (header.dmaster != ctdb->pnn) {
		/* The record has been migrated off the node. Skip. */
		goto skipped;
	}

	if (header.rsn != dd->hdr.rsn) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 */
		goto skipped;
	}

	/*
	 * We are dmaster, and the record has no data, and it has
	 * not been migrated after it has been queued for deletion.
	 *
	 * At this stage, the record could still have been revived locally
	 * and last been written with empty data. This can only be
	 * fixed with the addition of an active or delete flag. (TODO)
	 */

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		/* Not the lmaster: hand the record over via VACUUM_FETCH. */
		res = add_record_to_vacuum_fetch_list(vdata, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records to send to lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_vacuum_fetch_list++;
		}
		goto done;
	}

	/* use header->flags or dd->hdr.flags ?? */
	if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
		/* Migrated with data at some point: needs the full
		 * delete_list procedure on the lmaster. */
		res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records for deletion on lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_delete_list++;
		}
	} else {
		/* Never migrated with data: delete locally right away. */
		res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error deleting record with key "
			       "hash [0x%08x] from local data base db[%s].\n",
			       hash, ctdb_db->db_name));
			vdata->count.delete_queue.error++;
			goto done;
		}

		DEBUG(DEBUG_DEBUG,
		      (__location__ " Deleted record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_queue.deleted++;
	}

	goto done;

skipped:
	vdata->count.delete_queue.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	return 0;
}
547
/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
static int delete_record_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	/* Blocking lock here, unlike the earlier non-blocking passes. */
	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	/* vacuum_record_parser fails if the record is no longer header-only */
	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn + 1) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 * (Note that the first marshall traverse has bumped the RSN
		 *  on disk.)
		 */
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	/* All checks passed: perform the final local deletion. */
	res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error deleting record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		goto done;
	}

	DEBUG(DEBUG_DEBUG,
	      (__location__ " Deleted record with key hash [0x%08x] from "
	       "local data base db[%s].\n", hash, ctdb_db->db_name));

	vdata->count.delete_list.deleted++;
	goto done;

skip:
	vdata->count.delete_list.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	talloc_free(dd);
	vdata->count.delete_list.left--;

	return 0;
}
658
659 /**
660  * Traverse the delete_queue.
661  * Records are either deleted directly or filled
662  * into the delete list or the vacuum fetch lists
663  * for further processing.
664  */
665 static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
666                                       struct vacuum_data *vdata)
667 {
668         uint32_t sum;
669         int ret;
670
671         ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
672                                    delete_queue_traverse, vdata);
673
674         if (ret != 0) {
675                 DEBUG(DEBUG_ERR, (__location__ " Error traversing "
676                       "the delete queue.\n"));
677         }
678
679         sum = vdata->count.delete_queue.deleted
680             + vdata->count.delete_queue.skipped
681             + vdata->count.delete_queue.error
682             + vdata->count.delete_queue.added_to_delete_list
683             + vdata->count.delete_queue.added_to_vacuum_fetch_list;
684
685         if (vdata->count.delete_queue.total != sum) {
686                 DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
687                       "counts for db[%s]: total[%u] != sum[%u]\n",
688                       ctdb_db->db_name,
689                       (unsigned)vdata->count.delete_queue.total,
690                       (unsigned)sum));
691         }
692
693         if (vdata->count.delete_queue.total > 0) {
694                 DEBUG(DEBUG_INFO,
695                       (__location__
696                        " fast vacuuming delete_queue traverse statistics: "
697                        "db[%s] "
698                        "total[%u] "
699                        "del[%u] "
700                        "skp[%u] "
701                        "err[%u] "
702                        "adl[%u] "
703                        "avf[%u]\n",
704                        ctdb_db->db_name,
705                        (unsigned)vdata->count.delete_queue.total,
706                        (unsigned)vdata->count.delete_queue.deleted,
707                        (unsigned)vdata->count.delete_queue.skipped,
708                        (unsigned)vdata->count.delete_queue.error,
709                        (unsigned)vdata->count.delete_queue.added_to_delete_list,
710                        (unsigned)vdata->count.delete_queue.added_to_vacuum_fetch_list));
711         }
712
713         return;
714 }
715
716 /**
717  * read-only traverse of the database, looking for records that
718  * might be able to be vacuumed.
719  *
720  * This is not done each time but only every tunable
721  * VacuumFastPathCount times.
722  */
723 static void ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
724                                     struct vacuum_data *vdata)
725 {
726         int ret;
727
728         ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
729         if (ret == -1 || vdata->traverse_error) {
730                 DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
731                                   "'%s'\n", ctdb_db->db_name));
732                 return;
733         }
734
735         if (vdata->count.db_traverse.total > 0) {
736                 DEBUG(DEBUG_INFO,
737                       (__location__
738                        " full vacuuming db traverse statistics: "
739                        "db[%s] "
740                        "total[%u] "
741                        "skp[%u] "
742                        "err[%u] "
743                        "sched[%u]\n",
744                        ctdb_db->db_name,
745                        (unsigned)vdata->count.db_traverse.total,
746                        (unsigned)vdata->count.db_traverse.skipped,
747                        (unsigned)vdata->count.db_traverse.error,
748                        (unsigned)vdata->count.db_traverse.scheduled));
749         }
750
751         return;
752 }
753
754 /**
755  * Process the vacuum fetch lists:
756  * For records for which we are not the lmaster, tell the lmaster to
757  * fetch the record.
758  */
759 static void ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
760                                             struct vacuum_data *vdata)
761 {
762         int i;
763         struct ctdb_context *ctdb = ctdb_db->ctdb;
764
765         for (i = 0; i < ctdb->num_nodes; i++) {
766                 TDB_DATA data;
767                 struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];
768
769                 if (ctdb->nodes[i]->pnn == ctdb->pnn) {
770                         continue;
771                 }
772
773                 if (vfl->count == 0) {
774                         continue;
775                 }
776
777                 DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
778                                    vfl->count, ctdb->nodes[i]->pnn,
779                                    ctdb_db->db_name));
780
781                 data = ctdb_marshall_finish(vfl);
782                 if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
783                                              CTDB_SRVID_VACUUM_FETCH,
784                                              data) != 0)
785                 {
786                         DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
787                                           "fetch message to %u\n",
788                                           ctdb->nodes[i]->pnn));
789                 }
790         }
791
792         return;
793 }
794
795 /**
796  * Process the delete list:
797  *
798  * This is the last step of vacuuming that consistently deletes
799  * those records that have been migrated with data and can hence
800  * not be deleted when leaving a node.
801  *
802  * In this step, the lmaster does the final deletion of those empty
 803  * records that it is also dmaster for. It has usually received
804  * at least some of these records previously from the former dmasters
805  * with the vacuum fetch message.
806  *
807  * This last step is implemented as a 3-phase process to protect from
808  * races leading to data corruption:
809  *
810  *  1) Send the lmaster's copy to all other active nodes with the
811  *     RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
812  *  2) Send the records that could successfully be stored remotely
813  *     in step #1 to all active nodes with the TRY_DELETE_RECORDS
 814  *     control. The remote nodes delete their local copy.
815  *  3) The lmaster locally deletes its copies of all records that
816  *     could successfully be deleted remotely in step #2.
817  */
static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
				     struct vacuum_data *vdata)
{
	int ret, i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct delete_records_list *recs;
	TDB_DATA indata;
	struct ctdb_node_map_old *nodemap;
	uint32_t *active_nodes;
	int num_active_nodes;
	TALLOC_CTX *tmp_ctx;
	uint32_t sum;

	/* Nothing was collected on the delete list - nothing to do. */
	if (vdata->count.delete_list.total == 0) {
		return;
	}

	tmp_ctx = talloc_new(vdata);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return;
	}

	/* "left" tracks how many records still await deletion; it is
	 * decremented as records drop out in phases 1-3 below. */
	vdata->count.delete_list.left = vdata->count.delete_list.total;

	/*
	 * get the list of currently active nodes
	 */

	ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
				   CTDB_CURRENT_NODE,
				   tmp_ctx,
				   &nodemap);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
		goto done;
	}

	active_nodes = list_of_active_nodes(ctdb, nodemap,
					    nodemap, /* talloc context */
					    false /* include self */);
	/* yuck! ;-) */
	/* There is no explicit element count: derive it from the talloc
	 * size of the returned array. */
	num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

	/*
	 * Now delete the records all active nodes in a three-phase process:
	 * 1) send all active remote nodes the current empty copy with this
	 *    node as DMASTER
	 * 2) if all nodes could store the new copy,
	 *    tell all the active remote nodes to delete all their copy
	 * 3) if all remote nodes deleted their record copy, delete it locally
	 */

	/*
	 * Step 1:
	 * Send currently empty record copy to all active nodes for storing.
	 */

	recs = talloc_zero(tmp_ctx, struct delete_records_list);
	if (recs == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	/* Allocate an empty marshall buffer header; records are appended
	 * to it by the traverse callback below. */
	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;
	recs->vdata = vdata;

	/*
	 * traverse the tree of all records we want to delete and
	 * create a blob we can send to the other nodes.
	 *
	 * We call delete_marshall_traverse_first() to bump the
	 * records' RSNs in the database, to ensure we (as dmaster)
	 * keep the highest RSN of the records in the cluster.
	 */
	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse_first, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for first marshalling.\n"));
		goto done;
	}

	indata = ctdb_marshall_finish(recs->records);

	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data_old *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				CTDB_CONTROL_RECEIVE_RECORDS, 0,
				indata, recs, &outdata, &res,
				NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Error storing record copies on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not store. We remove these from
		 * the list to process further.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data_old *)&records->data[0];
		/* NOTE(review): the loop stops at count > 1, i.e. it walks
		 * count-1 records - presumably the reply buffer's count is
		 * one greater than the number of payload records; verify
		 * against the RECEIVE_RECORDS control implementation. */
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			/* Key and data are packed back to back in the
			 * variable-length tail of the record. */
			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			/* Strip the ltdb header to get at the user data. */
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not store the record
				 * copy and it is the first node that failed.
				 * So we should remove it from the tree and
				 * update statistics.
				 */
				talloc_free(dd);
				vdata->count.delete_list.remote_error++;
				vdata->count.delete_list.left--;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from RECEIVE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
				vdata->count.delete_list.local_error++;
				vdata->count.delete_list.left--;
			}

			/* Advance to the next packed record. */
			rec = (struct ctdb_rec_data_old *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}

	/*
	 * Step 2:
	 * Send the remaining records to all active nodes for deletion.
	 *
	 * The lmaster's (i.e. our) copies of these records have been stored
	 * successfully on the other nodes.
	 */

	/*
	 * Create a marshall blob from the remaining list of records to delete.
	 */

	talloc_free(recs->records);

	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for second marshalling.\n"));
		goto done;
	}

	indata = ctdb_marshall_finish(recs->records);

	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data_old *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
				indata, recs, &outdata, &res,
				NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Failed to delete records on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not delete. We remove these from
		 * the list to delete locally.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data_old *)&records->data[0];
		/* Same count-1 iteration pattern as in step 1 above. */
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not delete the
				 * record and it is the first node that
				 * failed. So we should remove it from
				 * the tree and update statistics.
				 */
				talloc_free(dd);
				vdata->count.delete_list.remote_error++;
				vdata->count.delete_list.left--;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from TRY_DELETE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
				vdata->count.delete_list.local_error++;
				vdata->count.delete_list.left--;
			}

			rec = (struct ctdb_rec_data_old *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}

	/*
	 * Step 3:
	 * Delete the remaining records locally.
	 *
	 * These records have successfully been deleted on all
	 * active remote nodes.
	 */

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_record_traverse, vdata);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for deletion.\n"));
	}

success:

	/* NOTE(review): "Vaccum" below is a typo in a runtime log string;
	 * left as-is here since this change only touches comments. */
	if (vdata->count.delete_list.left != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Vaccum db[%s] error: "
		      "there are %u records left for deletion after "
		      "processing delete list\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.left));
	}

	/* Sanity check: every record must be accounted for in exactly one
	 * of the outcome counters. */
	sum = vdata->count.delete_list.deleted
	    + vdata->count.delete_list.skipped
	    + vdata->count.delete_list.remote_error
	    + vdata->count.delete_list.local_error
	    + vdata->count.delete_list.left;

	if (vdata->count.delete_list.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
		      "delete list counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_list.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " vacuum delete list statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skip[%u] "
		       "rem.err[%u] "
		       "loc.err[%u] "
		       "left[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_list.total,
		       (unsigned)vdata->count.delete_list.deleted,
		       (unsigned)vdata->count.delete_list.skipped,
		       (unsigned)vdata->count.delete_list.remote_error,
		       (unsigned)vdata->count.delete_list.local_error,
		       (unsigned)vdata->count.delete_list.left));
	}

done:
	talloc_free(tmp_ctx);

	return;
}
1154
1155 /**
1156  * initialize the vacuum_data
1157  */
1158 static struct vacuum_data *ctdb_vacuum_init_vacuum_data(
1159                                         struct ctdb_db_context *ctdb_db,
1160                                         TALLOC_CTX *mem_ctx)
1161 {
1162         int i;
1163         struct ctdb_context *ctdb = ctdb_db->ctdb;
1164         struct vacuum_data *vdata;
1165
1166         vdata = talloc_zero(mem_ctx, struct vacuum_data);
1167         if (vdata == NULL) {
1168                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1169                 return NULL;
1170         }
1171
1172         vdata->ctdb = ctdb_db->ctdb;
1173         vdata->ctdb_db = ctdb_db;
1174         vdata->delete_list = trbt_create(vdata, 0);
1175         if (vdata->delete_list == NULL) {
1176                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1177                 goto fail;
1178         }
1179
1180         vdata->start = timeval_current();
1181
1182         vdata->count.delete_queue.added_to_delete_list = 0;
1183         vdata->count.delete_queue.added_to_vacuum_fetch_list = 0;
1184         vdata->count.delete_queue.deleted = 0;
1185         vdata->count.delete_queue.skipped = 0;
1186         vdata->count.delete_queue.error = 0;
1187         vdata->count.delete_queue.total = 0;
1188         vdata->count.db_traverse.scheduled = 0;
1189         vdata->count.db_traverse.skipped = 0;
1190         vdata->count.db_traverse.error = 0;
1191         vdata->count.db_traverse.total = 0;
1192         vdata->count.delete_list.total = 0;
1193         vdata->count.delete_list.left = 0;
1194         vdata->count.delete_list.remote_error = 0;
1195         vdata->count.delete_list.local_error = 0;
1196         vdata->count.delete_list.skipped = 0;
1197         vdata->count.delete_list.deleted = 0;
1198
1199         /* the list needs to be of length num_nodes */
1200         vdata->vacuum_fetch_list = talloc_zero_array(vdata,
1201                                                 struct ctdb_marshall_buffer *,
1202                                                 ctdb->num_nodes);
1203         if (vdata->vacuum_fetch_list == NULL) {
1204                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1205                 goto fail;
1206         }
1207         for (i = 0; i < ctdb->num_nodes; i++) {
1208                 vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
1209                         talloc_zero_size(vdata->vacuum_fetch_list,
1210                                          offsetof(struct ctdb_marshall_buffer, data));
1211                 if (vdata->vacuum_fetch_list[i] == NULL) {
1212                         DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1213                         talloc_free(vdata);
1214                         return NULL;
1215                 }
1216                 vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
1217         }
1218
1219         return vdata;
1220
1221 fail:
1222         talloc_free(vdata);
1223         return NULL;
1224 }
1225
1226 /**
1227  * Vacuum a DB:
1228  *  - Always do the fast vacuuming run, which traverses
1229  *    the in-memory delete queue: these records have been
1230  *    scheduled for deletion.
1231  *  - Only if explicitly requested, the database is traversed
1232  *    in order to use the traditional heuristics on empty records
1233  *    to trigger deletion.
1234  *    This is done only every VacuumFastPathCount'th vacuuming run.
1235  *
1236  * The traverse runs fill two lists:
1237  *
1238  * - The delete_list:
1239  *   This is the list of empty records the current
1240  *   node is lmaster and dmaster for. These records are later
1241  *   deleted first on other nodes and then locally.
1242  *
1243  *   The fast vacuuming run has a short cut for those records
1244  *   that have never been migrated with data: these records
1245  *   are immediately deleted locally, since they have left
1246  *   no trace on other nodes.
1247  *
1248  * - The vacuum_fetch lists
1249  *   (one for each other lmaster node):
1250  *   The records in this list are sent for deletion to
1251  *   their lmaster in a bulk VACUUM_FETCH message.
1252  *
 *   The lmaster then migrates all these records to itself
1254  *   so that they can be vacuumed there.
1255  *
1256  * This executes in the child context.
1257  */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
			  bool full_vacuum_run)
{
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret, pnn;
	struct vacuum_data *vdata;
	TALLOC_CTX *tmp_ctx;

	DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
			   "%s db_id[0x%08x]\n",
			   full_vacuum_run ? "full" : "fast",
			   ctdb_db->db_name, ctdb_db->db_id));

	/* Refresh the vnn map from the local daemon - lmaster decisions
	 * during vacuuming depend on an up-to-date map. */
	ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
		return ret;
	}

	pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
	if (pnn == -1) {
		DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
		return -1;
	}

	/* Cache our own node number for the traverse/delete helpers. */
	ctdb->pnn = pnn;

	tmp_ctx = talloc_new(ctdb_db);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR, ("Out of memory!\n"));
		return -1;
	}

	vdata = ctdb_vacuum_init_vacuum_data(ctdb_db, tmp_ctx);
	if (vdata == NULL) {
		talloc_free(tmp_ctx);
		return -1;
	}

	/* The full run additionally traverses the whole database to find
	 * vacuuming candidates; the fast path only drains the delete queue. */
	if (full_vacuum_run) {
		ctdb_vacuum_traverse_db(ctdb_db, vdata);
	}

	ctdb_process_delete_queue(ctdb_db, vdata);

	ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);

	ctdb_process_delete_list(ctdb_db, vdata);

	talloc_free(tmp_ctx);

	/* this ensures we run our event queue */
	ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

	return 0;
}
1314
1315 /*
 * repack and vacuum a db
1317  * called from the child context
1318  */
1319 static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
1320                                      bool full_vacuum_run)
1321 {
1322         uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
1323         const char *name = ctdb_db->db_name;
1324         int freelist_size = 0;
1325         int ret;
1326
1327         if (ctdb_vacuum_db(ctdb_db, full_vacuum_run) != 0) {
1328                 DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
1329         }
1330
1331         freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
1332         if (freelist_size == -1) {
1333                 DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
1334                 return -1;
1335         }
1336
1337         /*
1338          * decide if a repack is necessary
1339          */
1340         if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
1341         {
1342                 return 0;
1343         }
1344
1345         DEBUG(DEBUG_INFO, ("Repacking %s with %u freelist entries\n",
1346                            name, freelist_size));
1347
1348         ret = tdb_repack(ctdb_db->ltdb->tdb);
1349         if (ret != 0) {
1350                 DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
1351                 return -1;
1352         }
1353
1354         return 0;
1355 }
1356
1357 static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
1358 {
1359         uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;
1360
1361         return interval;
1362 }
1363
1364 static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
1365 {
1366         double l = timeval_elapsed(&child_ctx->start_time);
1367         struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
1368         struct ctdb_context *ctdb = ctdb_db->ctdb;
1369
1370         CTDB_UPDATE_DB_LATENCY(ctdb_db, "vacuum", vacuum.latency, l);
1371         DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));
1372
1373         if (child_ctx->child_pid != -1) {
1374                 ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
1375         } else {
1376                 /* Bump the number of successful fast-path runs. */
1377                 child_ctx->vacuum_handle->fast_path_count++;
1378         }
1379
1380         DLIST_REMOVE(ctdb->vacuumers, child_ctx);
1381
1382         tevent_add_timer(ctdb->ev, child_ctx->vacuum_handle,
1383                          timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1384                          ctdb_vacuum_event, child_ctx->vacuum_handle);
1385
1386         return 0;
1387 }
1388
1389 /*
1390  * this event is generated when a vacuum child process times out
1391  */
1392 static void vacuum_child_timeout(struct tevent_context *ev,
1393                                  struct tevent_timer *te,
1394                                  struct timeval t, void *private_data)
1395 {
1396         struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1397
1398         DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));
1399
1400         child_ctx->status = VACUUM_TIMEOUT;
1401
1402         talloc_free(child_ctx);
1403 }
1404
1405
1406 /*
1407  * this event is generated when a vacuum child process has completed
1408  */
1409 static void vacuum_child_handler(struct tevent_context *ev,
1410                                  struct tevent_fd *fde,
1411                                  uint16_t flags, void *private_data)
1412 {
1413         struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1414         char c = 0;
1415         int ret;
1416
1417         DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
1418         child_ctx->child_pid = -1;
1419
1420         ret = sys_read(child_ctx->fd[0], &c, 1);
1421         if (ret != 1 || c != 0) {
1422                 child_ctx->status = VACUUM_ERROR;
1423                 DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
1424         } else {
1425                 child_ctx->status = VACUUM_OK;
1426         }
1427
1428         talloc_free(child_ctx);
1429 }
1430
1431 /*
1432  * this event is called every time we need to start a new vacuum process
1433  */
static void ctdb_vacuum_event(struct tevent_context *ev,
			      struct tevent_timer *te,
			      struct timeval t, void *private_data)
{
	struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
	struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_vacuum_child_context *child_ctx;
	struct tevent_fd *fde;
	int ret;

	/* We don't vacuum if we are in recovery mode, or the db is frozen.
	 * In that case just try again after the normal interval. */
	if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
	    ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
		DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
				   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
				   : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
				   ? "freeze pending"
				   : "frozen"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	/* Do not allow multiple vacuuming child processes to be active at the
	 * same time.  If there is vacuuming child process active, delay
	 * new vacuuming event to stagger vacuuming events.
	 */
	if (ctdb->vacuumers != NULL) {
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(0, 500*1000),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
	if (child_ctx == NULL) {
		DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
		/* NOTE(review): "crating" is a typo in the runtime message
		 * below; left untouched in this comment-only change. */
		ctdb_fatal(ctdb, "Out of memory when crating vacuum child context. Shutting down\n");
	}


	/* fd[1] is written by the child, fd[0] read by the parent. */
	ret = pipe(child_ctx->fd);
	if (ret != 0) {
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	/* Wrap the fast-path counter: a count of 0 triggers a full
	 * vacuum run in the child (see below). */
	if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
		vacuum_handle->fast_path_count = 0;
	}

	child_ctx->child_pid = ctdb_fork(ctdb);
	if (child_ctx->child_pid == (pid_t)-1) {
		close(child_ctx->fd[0]);
		close(child_ctx->fd[1]);
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}


	if (child_ctx->child_pid == 0) {
		/* Child process: do the actual vacuuming and report the
		 * result as a single status byte through the pipe. */
		char cc = 0;
		bool full_vacuum_run = false;
		close(child_ctx->fd[0]);

		DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
		ctdb_set_process_name("ctdb_vacuum");
		if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
			DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
			_exit(1);
		}

		if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
		    (vacuum_handle->fast_path_count == 0))
		{
			full_vacuum_run = true;
		}
		cc = ctdb_vacuum_and_repack_db(ctdb_db, full_vacuum_run);

		sys_write(child_ctx->fd[1], &cc, 1);
		_exit(0);
	}

	/* Parent: keep the read end, close the child's write end. */
	set_close_on_exec(child_ctx->fd[0]);
	close(child_ctx->fd[1]);

	child_ctx->status = VACUUM_RUNNING;
	child_ctx->start_time = timeval_current();

	DLIST_ADD(ctdb->vacuumers, child_ctx);
	talloc_set_destructor(child_ctx, vacuum_child_destructor);

	/*
	 * Clear the fastpath vacuuming list in the parent.
	 */
	talloc_free(ctdb_db->delete_queue);
	ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
	if (ctdb_db->delete_queue == NULL) {
		/* fatal here? ... */
		ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
				 "in parent context. Shutting down\n");
	}

	/* Kill the child if it does not finish within the tunable limit. */
	tevent_add_timer(ctdb->ev, child_ctx,
			 timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
			 vacuum_child_timeout, child_ctx);

	DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

	/* Completion is signalled by the child writing its status byte. */
	fde = tevent_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
			    TEVENT_FD_READ, vacuum_child_handler, child_ctx);
	tevent_fd_set_auto_close(fde);

	vacuum_handle->child_ctx = child_ctx;
	child_ctx->vacuum_handle = vacuum_handle;
}
1560
1561 void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
1562 {
1563         /* Simply free them all. */
1564         while (ctdb->vacuumers) {
1565                 DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
1566                            ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
1567                            (int)ctdb->vacuumers->child_pid));
1568                 /* vacuum_child_destructor kills it, removes from list */
1569                 talloc_free(ctdb->vacuumers);
1570         }
1571 }
1572
/* this function initializes the vacuuming context for a database
 * and starts the vacuuming events
 */
1576 int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
1577 {
1578         if (ctdb_db->persistent != 0) {
1579                 DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
1580                 return 0;
1581         }
1582
1583         ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
1584         CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);
1585
1586         ctdb_db->vacuum_handle->ctdb_db         = ctdb_db;
1587         ctdb_db->vacuum_handle->fast_path_count = 0;
1588
1589         tevent_add_timer(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
1590                          timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1591                          ctdb_vacuum_event, ctdb_db->vacuum_handle);
1592
1593         return 0;
1594 }
1595
1596 static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
1597                                             const struct ctdb_ltdb_header *hdr,
1598                                             const TDB_DATA key)
1599 {
1600         struct delete_record_data *kd;
1601         uint32_t hash;
1602
1603         hash = (uint32_t)ctdb_hash(&key);
1604
1605         DEBUG(DEBUG_DEBUG, (__location__
1606                             " remove_record_from_delete_queue: "
1607                             "db[%s] "
1608                             "db_id[0x%08x] "
1609                             "key_hash[0x%08x] "
1610                             "lmaster[%u] "
1611                             "migrated_with_data[%s]\n",
1612                              ctdb_db->db_name, ctdb_db->db_id,
1613                              hash,
1614                              ctdb_lmaster(ctdb_db->ctdb, &key),
1615                              hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1616
1617         kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1618         if (kd == NULL) {
1619                 DEBUG(DEBUG_DEBUG, (__location__
1620                                     " remove_record_from_delete_queue: "
1621                                     "record not in queue (hash[0x%08x])\n.",
1622                                     hash));
1623                 return;
1624         }
1625
1626         if ((kd->key.dsize != key.dsize) ||
1627             (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1628         {
1629                 DEBUG(DEBUG_DEBUG, (__location__
1630                                     " remove_record_from_delete_queue: "
1631                                     "hash collision for key with hash[0x%08x] "
1632                                     "in db[%s] - skipping\n",
1633                                     hash, ctdb_db->db_name));
1634                 return;
1635         }
1636
1637         DEBUG(DEBUG_DEBUG, (__location__
1638                             " remove_record_from_delete_queue: "
1639                             "removing key with hash[0x%08x]\n",
1640                              hash));
1641
1642         talloc_free(kd);
1643
1644         return;
1645 }
1646
1647 /**
1648  * Insert a record into the ctdb_db context's delete queue,
1649  * handling hash collisions.
1650  */
1651 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
1652                                            const struct ctdb_ltdb_header *hdr,
1653                                            TDB_DATA key)
1654 {
1655         struct delete_record_data *kd;
1656         uint32_t hash;
1657         int ret;
1658
1659         hash = (uint32_t)ctdb_hash(&key);
1660
1661         DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
1662                            "db_id[0x%08x] "
1663                            "key_hash[0x%08x] "
1664                            "lmaster[%u] "
1665                            "migrated_with_data[%s]\n",
1666                             ctdb_db->db_name, ctdb_db->db_id,
1667                             hash,
1668                             ctdb_lmaster(ctdb_db->ctdb, &key),
1669                             hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1670
1671         kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1672         if (kd != NULL) {
1673                 if ((kd->key.dsize != key.dsize) ||
1674                     (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1675                 {
1676                         DEBUG(DEBUG_INFO,
1677                               (__location__ " schedule for deletion: "
1678                                "hash collision for key hash [0x%08x]. "
1679                                "Skipping the record.\n", hash));
1680                         return 0;
1681                 } else {
1682                         DEBUG(DEBUG_DEBUG,
1683                               (__location__ " schedule for deletion: "
1684                                "updating entry for key with hash [0x%08x].\n",
1685                                hash));
1686                 }
1687         }
1688
1689         ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
1690                                                   ctdb_db->delete_queue,
1691                                                   hdr, key);
1692         if (ret != 0) {
1693                 DEBUG(DEBUG_INFO,
1694                       (__location__ " schedule for deletion: error "
1695                        "inserting key with hash [0x%08x] into delete queue\n",
1696                        hash));
1697                 return -1;
1698         }
1699
1700         return 0;
1701 }
1702
1703 /**
1704  * Schedule a record for deletetion.
1705  * Called from the parent context.
1706  */
1707 int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
1708                                            TDB_DATA indata)
1709 {
1710         struct ctdb_control_schedule_for_deletion *dd;
1711         struct ctdb_db_context *ctdb_db;
1712         int ret;
1713         TDB_DATA key;
1714
1715         dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;
1716
1717         ctdb_db = find_ctdb_db(ctdb, dd->db_id);
1718         if (ctdb_db == NULL) {
1719                 DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
1720                                   dd->db_id));
1721                 return -1;
1722         }
1723
1724         key.dsize = dd->keylen;
1725         key.dptr = dd->key;
1726
1727         ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);
1728
1729         return ret;
1730 }
1731
1732 int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
1733                                          const struct ctdb_ltdb_header *hdr,
1734                                          TDB_DATA key)
1735 {
1736         int ret;
1737         struct ctdb_control_schedule_for_deletion *dd;
1738         TDB_DATA indata;
1739         int32_t status;
1740
1741         if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
1742                 /* main daemon - directly queue */
1743                 ret = insert_record_into_delete_queue(ctdb_db, hdr, key);
1744
1745                 return ret;
1746         }
1747
1748         /* if we dont have a connection to the daemon we can not send
1749            a control. For example sometimes from update_record control child
1750            process.
1751         */
1752         if (!ctdb_db->ctdb->can_send_controls) {
1753                 return -1;
1754         }
1755
1756
1757         /* child process: send the main daemon a control */
1758         indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
1759         indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
1760         if (indata.dptr == NULL) {
1761                 DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
1762                 return -1;
1763         }
1764         dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
1765         dd->db_id = ctdb_db->db_id;
1766         dd->hdr = *hdr;
1767         dd->keylen = key.dsize;
1768         memcpy(dd->key, key.dptr, key.dsize);
1769
1770         ret = ctdb_control(ctdb_db->ctdb,
1771                            CTDB_CURRENT_NODE,
1772                            ctdb_db->db_id,
1773                            CTDB_CONTROL_SCHEDULE_FOR_DELETION,
1774                            CTDB_CTRL_FLAG_NOREPLY, /* flags */
1775                            indata,
1776                            NULL, /* mem_ctx */
1777                            NULL, /* outdata */
1778                            &status,
1779                            NULL, /* timeout : NULL == wait forever */
1780                            NULL); /* error message */
1781
1782         talloc_free(indata.dptr);
1783
1784         if (ret != 0 || status != 0) {
1785                 DEBUG(DEBUG_ERR, (__location__ " Error sending "
1786                                   "SCHEDULE_FOR_DELETION "
1787                                   "control.\n"));
1788                 if (status != 0) {
1789                         ret = -1;
1790                 }
1791         }
1792
1793         return ret;
1794 }
1795
1796 void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
1797                                          const struct ctdb_ltdb_header *hdr,
1798                                          const TDB_DATA key)
1799 {
1800         if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
1801                 /*
1802                  * Only remove the record from the delete queue if called
1803                  * in the main daemon.
1804                  */
1805                 return;
1806         }
1807
1808         remove_record_from_delete_queue(ctdb_db, hdr, key);
1809
1810         return;
1811 }