39eed0b74e5d7b1d69a5f7b2402ca745efe06e57
[vlendec/samba-autobuild/.git] / ctdb / server / ctdb_vacuum.c
1 /*
2    ctdb vacuuming events
3
4    Copyright (C) Ronnie Sahlberg  2009
5    Copyright (C) Michael Adam 2010-2013
6    Copyright (C) Stefan Metzmacher 2010-2011
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17
18    You should have received a copy of the GNU General Public License
19    along with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include "includes.h"
23 #include "tdb.h"
24 #include "system/network.h"
25 #include "system/filesys.h"
26 #include "system/dir.h"
27 #include "../include/ctdb_private.h"
28 #include "lib/tdb_wrap/tdb_wrap.h"
29 #include "lib/util/dlinklist.h"
30 #include "../include/ctdb_private.h"
31 #include "../common/rb_tree.h"
32 #include "common/system.h"
33
/* Timeout used for the ctdb control calls issued by the vacuuming code. */
#define TIMELIMIT() timeval_current_ofs(10, 0)

/* State of the vacuuming child process as seen by the parent daemon. */
enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};
37
/* Tracking state for one vacuuming child process. */
struct ctdb_vacuum_child_context {
        /* linked-list pointers (dlinklist.h convention) */
        struct ctdb_vacuum_child_context *next, *prev;
        struct ctdb_vacuum_handle *vacuum_handle;
        /* fd child writes status to */
        int fd[2];
        pid_t child_pid;
        enum vacuum_child_status status;
        /* when the child was started; presumably used for timeout
         * accounting -- TODO confirm against the scheduling code */
        struct timeval start_time;
};
47
/* Per-database vacuuming handle. */
struct ctdb_vacuum_handle {
        struct ctdb_db_context *ctdb_db;
        struct ctdb_vacuum_child_context *child_ctx;
        /* counts fast-path runs; a full db traverse happens every
         * VacuumFastPathCount runs (see ctdb_vacuum_traverse_db()) */
        uint32_t fast_path_count;
};
53
54
/*  a list of records to possibly delete */
struct vacuum_data {
        struct ctdb_context *ctdb;
        struct ctdb_db_context *ctdb_db;
        /* NOTE(review): not referenced in this part of the file --
         * presumably the repack destination db; verify before relying on it */
        struct tdb_context *dest_db;
        /* empty records we are lmaster+dmaster for, keyed by key hash;
         * processed by the three-phase deletion in ctdb_process_delete_list() */
        trbt_tree_t *delete_list;
        /* one marshall buffer per node; records whose lmaster is remote are
         * sent there via CTDB_SRVID_VACUUM_FETCH */
        struct ctdb_marshall_buffer **vacuum_fetch_list;
        struct timeval start;
        /* set when marshalling fails during a traverse */
        bool traverse_error;
        bool vacuum;
        /* statistics; one sub-struct per vacuuming stage.  For each stage
         * the specific counters are expected to sum up to "total". */
        struct {
                /* fast-path: delete_queue_traverse() */
                struct {
                        uint32_t added_to_vacuum_fetch_list;
                        uint32_t added_to_delete_list;
                        uint32_t deleted;
                        uint32_t skipped;
                        uint32_t error;
                        uint32_t total;
                } delete_queue;
                /* full-path: vacuum_traverse() over the whole db */
                struct {
                        uint32_t scheduled;
                        uint32_t skipped;
                        uint32_t error;
                        uint32_t total;
                } db_traverse;
                /* final deletion: ctdb_process_delete_list() */
                struct {
                        uint32_t total;
                        uint32_t remote_error;
                        uint32_t local_error;
                        uint32_t deleted;
                        uint32_t skipped;
                        uint32_t left;
                } delete_list;
                struct {
                        uint32_t vacuumed;
                        uint32_t copied;
                } repack;
        } count;
};
94
/* this structure contains the information for one record to be deleted */
struct delete_record_data {
        struct ctdb_context *ctdb;
        struct ctdb_db_context *ctdb_db;
        /* copy of the record header at the time it was queued */
        struct ctdb_ltdb_header hdr;
        /* key.dptr points into keydata below */
        TDB_DATA key;
        /* trailing variable-length key bytes; the struct is allocated with
         * offsetof(..., keydata) + key.dsize (pre-C99 flexible-array idiom) */
        uint8_t keydata[1];
};
103
/* Context passed to the delete-list marshalling traverses: the marshall
 * buffer being built plus the vacuum data for statistics updates. */
struct delete_records_list {
        struct ctdb_marshall_buffer *records;
        struct vacuum_data *vdata;
};
108
109 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
110                                            const struct ctdb_ltdb_header *hdr,
111                                            TDB_DATA key);
112
113 /**
114  * Store key and header in a tree, indexed by the key hash.
115  */
116 static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
117                                                struct ctdb_db_context *ctdb_db,
118                                                trbt_tree_t *tree,
119                                                const struct ctdb_ltdb_header *hdr,
120                                                TDB_DATA key)
121 {
122         struct delete_record_data *dd;
123         uint32_t hash;
124         size_t len;
125
126         len = offsetof(struct delete_record_data, keydata) + key.dsize;
127
128         dd = (struct delete_record_data *)talloc_size(tree, len);
129         if (dd == NULL) {
130                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
131                 return -1;
132         }
133         talloc_set_name_const(dd, "struct delete_record_data");
134
135         dd->ctdb      = ctdb;
136         dd->ctdb_db   = ctdb_db;
137         dd->key.dsize = key.dsize;
138         dd->key.dptr  = dd->keydata;
139         memcpy(dd->keydata, key.dptr, key.dsize);
140
141         dd->hdr = *hdr;
142
143         hash = ctdb_hash(&key);
144
145         trbt_insert32(tree, hash, dd);
146
147         return 0;
148 }
149
150 static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
151                                      struct ctdb_ltdb_header *hdr)
152 {
153         struct ctdb_context *ctdb = vdata->ctdb;
154         struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
155         uint32_t hash;
156         int ret;
157
158         hash = ctdb_hash(&key);
159
160         if (trbt_lookup32(vdata->delete_list, hash)) {
161                 DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
162                 return 0;
163         }
164
165         ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
166                                                   vdata->delete_list,
167                                                   hdr, key);
168         if (ret != 0) {
169                 return -1;
170         }
171
172         vdata->count.delete_list.total++;
173
174         return 0;
175 }
176
177 /**
178  * Add a record to the list of records to be sent
179  * to their lmaster with VACUUM_FETCH.
180  */
181 static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
182                                            TDB_DATA key)
183 {
184         struct ctdb_context *ctdb = vdata->ctdb;
185         uint32_t lmaster;
186         struct ctdb_marshall_buffer *vfl;
187
188         lmaster = ctdb_lmaster(ctdb, &key);
189
190         vfl = vdata->vacuum_fetch_list[lmaster];
191
192         vfl = ctdb_marshall_add(ctdb, vfl, vfl->db_id, ctdb->pnn,
193                                 key, NULL, tdb_null);
194         if (vfl == NULL) {
195                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
196                 vdata->traverse_error = true;
197                 return -1;
198         }
199
200         vdata->vacuum_fetch_list[lmaster] = vfl;
201
202         return 0;
203 }
204
205
206 static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
207                               struct timeval t, void *private_data);
208
209 static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
210 {
211         struct ctdb_ltdb_header *header =
212                 (struct ctdb_ltdb_header *)private_data;
213
214         if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
215                 return -1;
216         }
217
218         *header = *(struct ctdb_ltdb_header *)data.dptr;
219
220         return 0;
221 }
222
223 /*
224  * traverse function for gathering the records that can be deleted
225  */
226 static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
227                            void *private_data)
228 {
229         struct vacuum_data *vdata = talloc_get_type(private_data,
230                                                     struct vacuum_data);
231         struct ctdb_context *ctdb = vdata->ctdb;
232         struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
233         uint32_t lmaster;
234         struct ctdb_ltdb_header *hdr;
235         int res = 0;
236
237         vdata->count.db_traverse.total++;
238
239         lmaster = ctdb_lmaster(ctdb, &key);
240         if (lmaster >= ctdb->num_nodes) {
241                 vdata->count.db_traverse.error++;
242                 DEBUG(DEBUG_CRIT, (__location__
243                                    " lmaster[%u] >= ctdb->num_nodes[%u] for key"
244                                    " with hash[%u]!\n",
245                                    (unsigned)lmaster,
246                                    (unsigned)ctdb->num_nodes,
247                                    (unsigned)ctdb_hash(&key)));
248                 return -1;
249         }
250
251         if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
252                 /* it is not a deleted record */
253                 vdata->count.db_traverse.skipped++;
254                 return 0;
255         }
256
257         hdr = (struct ctdb_ltdb_header *)data.dptr;
258
259         if (hdr->dmaster != ctdb->pnn) {
260                 vdata->count.db_traverse.skipped++;
261                 return 0;
262         }
263
264         /*
265          * Add the record to this process's delete_queue for processing
266          * in the subsequent traverse in the fast vacuum run.
267          */
268         res = insert_record_into_delete_queue(ctdb_db, hdr, key);
269         if (res != 0) {
270                 vdata->count.db_traverse.error++;
271         } else {
272                 vdata->count.db_traverse.scheduled++;
273         }
274
275         return 0;
276 }
277
278 /*
279  * traverse the tree of records to delete and marshall them into
280  * a blob
281  */
282 static int delete_marshall_traverse(void *param, void *data)
283 {
284         struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
285         struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
286         struct ctdb_marshall_buffer *m;
287
288         m = ctdb_marshall_add(recs, recs->records, recs->records->db_id,
289                               recs->records->db_id,
290                               dd->key, &dd->hdr, tdb_null);
291         if (m == NULL) {
292                 DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
293                 return -1;
294         }
295
296         recs->records = m;
297         return 0;
298 }
299
/**
 * Variant of delete_marshall_traverse() that bumps the
 * RSN of each traversed record in the database.
 *
 * This is needed to ensure that when rolling out our
 * empty record copy before remote deletion, we as the
 * record's dmaster keep a higher RSN than the non-dmaster
 * nodes. This is needed to prevent old copies from
 * resurrection in recoveries.
 */
static int delete_marshall_traverse_first(void *param, void *data)
{
        struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
        struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
        struct ctdb_db_context *ctdb_db = dd->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        struct ctdb_ltdb_header header;
        uint32_t lmaster;
        uint32_t hash = ctdb_hash(&(dd->key));
        int res;

        /* Non-blocking lock: if the chain is busy, drop the record from
         * this vacuuming run instead of stalling the daemon. */
        res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
        if (res != 0) {
                recs->vdata->count.delete_list.skipped++;
                recs->vdata->count.delete_list.left--;
                talloc_free(dd);
                return 0;
        }

        /*
         * Verify that the record is still empty, its RSN has not
         * changed and that we are still its lmaster and dmaster.
         */

        /* vacuum_record_parser() fails unless the stored data is exactly a
         * bare header, i.e. the record is still empty */
        res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
                               vacuum_record_parser, &header);
        if (res != 0) {
                goto skip;
        }

        if (header.flags & CTDB_REC_RO_FLAGS) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                                   "on database db[%s] has read-only flags. "
                                   "skipping.\n",
                                   hash, ctdb_db->db_name));
                goto skip;
        }

        if (header.dmaster != ctdb->pnn) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                                   "on database db[%s] has been migrated away. "
                                   "skipping.\n",
                                   hash, ctdb_db->db_name));
                goto skip;
        }

        if (header.rsn != dd->hdr.rsn) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                                   "on database db[%s] seems to have been "
                                   "migrated away and back again (with empty "
                                   "data). skipping.\n",
                                   hash, ctdb_db->db_name));
                goto skip;
        }

        lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

        if (lmaster != ctdb->pnn) {
                DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
                                   "delete list (key hash [0x%08x], db[%s]). "
                                   "Strange! skipping.\n",
                                   hash, ctdb_db->db_name));
                goto skip;
        }

        /*
         * Increment the record's RSN to ensure the dmaster (i.e. the current
         * node) has the highest RSN of the record in the cluster.
         * This is to prevent old record copies from resurrecting in recoveries
         * if something should fail during the deletion process.
         * Note that ctdb_ltdb_store_server() increments the RSN if called
         * on the record's dmaster.
         */

        res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
        if (res != 0) {
                DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
                                  "key hash [0x%08x] on database db[%s].\n",
                                  hash, ctdb_db->db_name));
                goto skip;
        }

        tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

        goto done;

skip:
        tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

        /* a skipped record is removed from the delete list entirely;
         * freeing dd also unlinks it from the tree's talloc hierarchy */
        recs->vdata->count.delete_list.skipped++;
        recs->vdata->count.delete_list.left--;
        talloc_free(dd);
        dd = NULL;

done:
        /* dd == NULL means the record was skipped above; otherwise fall
         * through to the plain marshalling step */
        if (dd == NULL) {
                return 0;
        }

        return delete_marshall_traverse(param, data);
}
411
/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 *  - If the record has been migrated off the node
 *    or has been revived (filled with data) on the node,
 *    then skip the record.
 *
 *  - If the current node is the record's lmaster and it is
 *    a record that has never been migrated with data, then
 *    delete the record from the local tdb.
 *
 *  - If the current node is the record's lmaster and it has
 *    been migrated with data, then schedule it for the normal
 *    vacuuming procedure (i.e. add it to the delete_list).
 *
 *  - If the current node is NOT the record's lmaster then
 *    add it to the list of records that are to be sent to
 *    the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
        struct delete_record_data *dd =
                talloc_get_type(data, struct delete_record_data);
        struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
        struct ctdb_db_context *ctdb_db = dd->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
        int res;
        struct ctdb_ltdb_header header;
        uint32_t lmaster;
        uint32_t hash = ctdb_hash(&(dd->key));

        vdata->count.delete_queue.total++;

        /* non-blocking lock so vacuuming never stalls on a busy chain;
         * a locked record simply counts as an error for this run */
        res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
        if (res != 0) {
                vdata->count.delete_queue.error++;
                return 0;
        }

        /* vacuum_record_parser() fails unless the record is still empty
         * (data is exactly a bare header) */
        res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
                               vacuum_record_parser, &header);
        if (res != 0) {
                goto skipped;
        }

        if (header.dmaster != ctdb->pnn) {
                /* The record has been migrated off the node. Skip. */
                goto skipped;
        }

        if (header.rsn != dd->hdr.rsn) {
                /*
                 * The record has been migrated off the node and back again.
                 * But not requeued for deletion. Skip it.
                 */
                goto skipped;
        }

        /*
         * We are dmaster, and the record has no data, and it has
         * not been migrated after it has been queued for deletion.
         *
         * At this stage, the record could still have been revived locally
         * and last been written with empty data. This can only be
         * fixed with the addition of an active or delete flag. (TODO)
         */

        lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

        if (lmaster != ctdb->pnn) {
                /* not lmaster: hand the record to its lmaster */
                res = add_record_to_vacuum_fetch_list(vdata, dd->key);

                if (res != 0) {
                        DEBUG(DEBUG_ERR,
                              (__location__ " Error adding record to list "
                               "of records to send to lmaster.\n"));
                        vdata->count.delete_queue.error++;
                } else {
                        vdata->count.delete_queue.added_to_vacuum_fetch_list++;
                }
                goto done;
        }

        /* use header->flags or dd->hdr.flags ?? */
        if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
                /* migrated with data: needs the full (slow-path) three-phase
                 * deletion via the delete_list */
                res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

                if (res != 0) {
                        DEBUG(DEBUG_ERR,
                              (__location__ " Error adding record to list "
                               "of records for deletion on lmaster.\n"));
                        vdata->count.delete_queue.error++;
                } else {
                        vdata->count.delete_queue.added_to_delete_list++;
                }
        } else {
                /* never migrated with data: safe to delete locally right away */
                res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

                if (res != 0) {
                        DEBUG(DEBUG_ERR,
                              (__location__ " Error deleting record with key "
                               "hash [0x%08x] from local data base db[%s].\n",
                               hash, ctdb_db->db_name));
                        vdata->count.delete_queue.error++;
                        goto done;
                }

                DEBUG(DEBUG_DEBUG,
                      (__location__ " Deleted record with key hash "
                       "[0x%08x] from local data base db[%s].\n",
                       hash, ctdb_db->db_name));
                vdata->count.delete_queue.deleted++;
        }

        goto done;

skipped:
        vdata->count.delete_queue.skipped++;

done:
        tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

        return 0;
}
537
/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
static int delete_record_traverse(void *param, void *data)
{
        struct delete_record_data *dd =
                talloc_get_type(data, struct delete_record_data);
        struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
        struct ctdb_db_context *ctdb_db = dd->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        int res;
        struct ctdb_ltdb_header header;
        uint32_t lmaster;
        uint32_t hash = ctdb_hash(&(dd->key));

        /* blocking lock here (unlike the earlier phases): this is the final
         * local deletion and the remote copies are already gone */
        res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
        if (res != 0) {
                DEBUG(DEBUG_ERR,
                      (__location__ " Error getting chainlock on record with "
                       "key hash [0x%08x] on database db[%s].\n",
                       hash, ctdb_db->db_name));
                vdata->count.delete_list.local_error++;
                vdata->count.delete_list.left--;
                talloc_free(dd);
                return 0;
        }

        /*
         * Verify that the record is still empty, its RSN has not
         * changed and that we are still its lmaster and dmaster.
         */

        res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
                               vacuum_record_parser, &header);
        if (res != 0) {
                goto skip;
        }

        if (header.flags & CTDB_REC_RO_FLAGS) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                                   "on database db[%s] has read-only flags. "
                                   "skipping.\n",
                                   hash, ctdb_db->db_name));
                goto skip;
        }

        if (header.dmaster != ctdb->pnn) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                                   "on database db[%s] has been migrated away. "
                                   "skipping.\n",
                                   hash, ctdb_db->db_name));
                goto skip;
        }

        if (header.rsn != dd->hdr.rsn + 1) {
                /*
                 * The record has been migrated off the node and back again.
                 * But not requeued for deletion. Skip it.
                 * (Note that the first marshall traverse has bumped the RSN
                 *  on disk.)
                 */
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                                   "on database db[%s] seems to have been "
                                   "migrated away and back again (with empty "
                                   "data). skipping.\n",
                                   hash, ctdb_db->db_name));
                goto skip;
        }

        lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

        if (lmaster != ctdb->pnn) {
                DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
                                   "delete list (key hash [0x%08x], db[%s]). "
                                   "Strange! skipping.\n",
                                   hash, ctdb_db->db_name));
                goto skip;
        }

        res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

        if (res != 0) {
                DEBUG(DEBUG_ERR,
                      (__location__ " Error deleting record with key hash "
                       "[0x%08x] from local data base db[%s].\n",
                       hash, ctdb_db->db_name));
                vdata->count.delete_list.local_error++;
                goto done;
        }

        DEBUG(DEBUG_DEBUG,
              (__location__ " Deleted record with key hash [0x%08x] from "
               "local data base db[%s].\n", hash, ctdb_db->db_name));

        vdata->count.delete_list.deleted++;
        goto done;

skip:
        vdata->count.delete_list.skipped++;

done:
        tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

        /* record is fully processed either way: free it and shrink the
         * outstanding count */
        talloc_free(dd);
        vdata->count.delete_list.left--;

        return 0;
}
648
649 /**
650  * Traverse the delete_queue.
651  * Records are either deleted directly or filled
652  * into the delete list or the vacuum fetch lists
653  * for further processing.
654  */
655 static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
656                                       struct vacuum_data *vdata)
657 {
658         uint32_t sum;
659         int ret;
660
661         ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
662                                    delete_queue_traverse, vdata);
663
664         if (ret != 0) {
665                 DEBUG(DEBUG_ERR, (__location__ " Error traversing "
666                       "the delete queue.\n"));
667         }
668
669         sum = vdata->count.delete_queue.deleted
670             + vdata->count.delete_queue.skipped
671             + vdata->count.delete_queue.error
672             + vdata->count.delete_queue.added_to_delete_list
673             + vdata->count.delete_queue.added_to_vacuum_fetch_list;
674
675         if (vdata->count.delete_queue.total != sum) {
676                 DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
677                       "counts for db[%s]: total[%u] != sum[%u]\n",
678                       ctdb_db->db_name,
679                       (unsigned)vdata->count.delete_queue.total,
680                       (unsigned)sum));
681         }
682
683         if (vdata->count.delete_queue.total > 0) {
684                 DEBUG(DEBUG_INFO,
685                       (__location__
686                        " fast vacuuming delete_queue traverse statistics: "
687                        "db[%s] "
688                        "total[%u] "
689                        "del[%u] "
690                        "skp[%u] "
691                        "err[%u] "
692                        "adl[%u] "
693                        "avf[%u]\n",
694                        ctdb_db->db_name,
695                        (unsigned)vdata->count.delete_queue.total,
696                        (unsigned)vdata->count.delete_queue.deleted,
697                        (unsigned)vdata->count.delete_queue.skipped,
698                        (unsigned)vdata->count.delete_queue.error,
699                        (unsigned)vdata->count.delete_queue.added_to_delete_list,
700                        (unsigned)vdata->count.delete_queue.added_to_vacuum_fetch_list));
701         }
702
703         return;
704 }
705
706 /**
707  * read-only traverse of the database, looking for records that
708  * might be able to be vacuumed.
709  *
710  * This is not done each time but only every tunable
711  * VacuumFastPathCount times.
712  */
713 static void ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
714                                     struct vacuum_data *vdata)
715 {
716         int ret;
717
718         ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
719         if (ret == -1 || vdata->traverse_error) {
720                 DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
721                                   "'%s'\n", ctdb_db->db_name));
722                 return;
723         }
724
725         if (vdata->count.db_traverse.total > 0) {
726                 DEBUG(DEBUG_INFO,
727                       (__location__
728                        " full vacuuming db traverse statistics: "
729                        "db[%s] "
730                        "total[%u] "
731                        "skp[%u] "
732                        "err[%u] "
733                        "sched[%u]\n",
734                        ctdb_db->db_name,
735                        (unsigned)vdata->count.db_traverse.total,
736                        (unsigned)vdata->count.db_traverse.skipped,
737                        (unsigned)vdata->count.db_traverse.error,
738                        (unsigned)vdata->count.db_traverse.scheduled));
739         }
740
741         return;
742 }
743
744 /**
745  * Process the vacuum fetch lists:
746  * For records for which we are not the lmaster, tell the lmaster to
747  * fetch the record.
748  */
749 static void ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
750                                             struct vacuum_data *vdata)
751 {
752         int i;
753         struct ctdb_context *ctdb = ctdb_db->ctdb;
754
755         for (i = 0; i < ctdb->num_nodes; i++) {
756                 TDB_DATA data;
757                 struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];
758
759                 if (ctdb->nodes[i]->pnn == ctdb->pnn) {
760                         continue;
761                 }
762
763                 if (vfl->count == 0) {
764                         continue;
765                 }
766
767                 DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
768                                    vfl->count, ctdb->nodes[i]->pnn,
769                                    ctdb_db->db_name));
770
771                 data = ctdb_marshall_finish(vfl);
772                 if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
773                                              CTDB_SRVID_VACUUM_FETCH,
774                                              data) != 0)
775                 {
776                         DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
777                                           "fetch message to %u\n",
778                                           ctdb->nodes[i]->pnn));
779                 }
780         }
781
782         return;
783 }
784
785 /**
786  * Process the delete list:
787  *
788  * This is the last step of vacuuming that consistently deletes
789  * those records that have been migrated with data and can hence
790  * not be deleted when leaving a node.
791  *
792  * In this step, the lmaster does the final deletion of those empty
793  * records that it is also dmaster for. It has ususally received
794  * at least some of these records previously from the former dmasters
795  * with the vacuum fetch message.
796  *
797  * This last step is implemented as a 3-phase process to protect from
798  * races leading to data corruption:
799  *
800  *  1) Send the lmaster's copy to all other active nodes with the
801  *     RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
802  *  2) Send the records that could successfully be stored remotely
803  *     in step #1 to all active nodes with the TRY_DELETE_RECORDS
804  *     control. The remote notes delete their local copy.
805  *  3) The lmaster locally deletes its copies of all records that
806  *     could successfully be deleted remotely in step #2.
807  */
static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
                                     struct vacuum_data *vdata)
{
        int ret, i;
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        struct delete_records_list *recs;
        TDB_DATA indata;
        struct ctdb_node_map *nodemap;
        uint32_t *active_nodes;
        int num_active_nodes;
        TALLOC_CTX *tmp_ctx;
        uint32_t sum;

        /* Fast exit: the traverse runs queued nothing for deletion. */
        if (vdata->count.delete_list.total == 0) {
                return;
        }

        tmp_ctx = talloc_new(vdata);
        if (tmp_ctx == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                return;
        }

        /* "left" tracks records still pending; it is decremented as
         * records are deleted or dropped from the list below. */
        vdata->count.delete_list.left = vdata->count.delete_list.total;

        /*
         * get the list of currently active nodes
         */

        ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
                                   CTDB_CURRENT_NODE,
                                   tmp_ctx,
                                   &nodemap);
        if (ret != 0) {
                DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
                goto done;
        }

        active_nodes = list_of_active_nodes(ctdb, nodemap,
                                            nodemap, /* talloc context */
                                            false /* include self */);
        /* yuck! ;-)
         * list_of_active_nodes() returns a talloc'd array, so the
         * element count is recovered from the allocation size. */
        num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

        /*
         * Now delete the records all active nodes in a three-phase process:
         * 1) send all active remote nodes the current empty copy with this
         *    node as DMASTER
         * 2) if all nodes could store the new copy,
         *    tell all the active remote nodes to delete all their copy
         * 3) if all remote nodes deleted their record copy, delete it locally
         */

        /*
         * Step 1:
         * Send currently empty record copy to all active nodes for storing.
         */

        recs = talloc_zero(tmp_ctx, struct delete_records_list);
        if (recs == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                goto done;
        }
        recs->records = (struct ctdb_marshall_buffer *)
                talloc_zero_size(recs,
                                 offsetof(struct ctdb_marshall_buffer, data));
        if (recs->records == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                goto done;
        }
        recs->records->db_id = ctdb_db->db_id;
        recs->vdata = vdata;

        /*
         * traverse the tree of all records we want to delete and
         * create a blob we can send to the other nodes.
         *
         * We call delete_marshall_traverse_first() to bump the
         * records' RSNs in the database, to ensure we (as dmaster)
         * keep the highest RSN of the records in the cluster.
         */
        ret = trbt_traversearray32(vdata->delete_list, 1,
                                   delete_marshall_traverse_first, recs);
        if (ret != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
                      "delete list for first marshalling.\n"));
                goto done;
        }

        indata = ctdb_marshall_finish(recs->records);

        for (i = 0; i < num_active_nodes; i++) {
                struct ctdb_marshall_buffer *records;
                struct ctdb_rec_data *rec;
                int32_t res;
                TDB_DATA outdata;

                ret = ctdb_control(ctdb, active_nodes[i], 0,
                                CTDB_CONTROL_RECEIVE_RECORDS, 0,
                                indata, recs, &outdata, &res,
                                NULL, NULL);
                if (ret != 0 || res != 0) {
                        DEBUG(DEBUG_ERR, ("Error storing record copies on "
                                          "node %u: ret[%d] res[%d]\n",
                                          active_nodes[i], ret, res));
                        goto done;
                }

                /*
                 * outdata contains the list of records coming back
                 * from the node: These are the records that the
                 * remote node could not store. We remove these from
                 * the list to process further.
                 */
                records = (struct ctdb_marshall_buffer *)outdata.dptr;
                rec = (struct ctdb_rec_data *)&records->data[0];
                /* NOTE(review): "count-- > 1" iterates count-1 times, i.e.
                 * one time fewer than there are entries if count is the
                 * plain record count — confirm the reply buffer's count
                 * convention (possible off-by-one). */
                while (records->count-- > 1) {
                        TDB_DATA reckey, recdata;
                        struct ctdb_ltdb_header *rechdr;
                        struct delete_record_data *dd;

                        reckey.dptr = &rec->data[0];
                        reckey.dsize = rec->keylen;
                        recdata.dptr = &rec->data[reckey.dsize];
                        recdata.dsize = rec->datalen;

                        if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
                                DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
                                goto done;
                        }
                        rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
                        recdata.dptr += sizeof(*rechdr);
                        recdata.dsize -= sizeof(*rechdr);

                        dd = (struct delete_record_data *)trbt_lookup32(
                                        vdata->delete_list,
                                        ctdb_hash(&reckey));
                        if (dd != NULL) {
                                /*
                                 * The other node could not store the record
                                 * copy and it is the first node that failed.
                                 * So we should remove it from the tree and
                                 * update statistics.
                                 */
                                talloc_free(dd);
                                vdata->count.delete_list.remote_error++;
                                vdata->count.delete_list.left--;
                        } else {
                                DEBUG(DEBUG_ERR, (__location__ " Failed to "
                                      "find record with hash 0x%08x coming "
                                      "back from RECEIVE_RECORDS "
                                      "control in delete list.\n",
                                      ctdb_hash(&reckey)));
                                vdata->count.delete_list.local_error++;
                                vdata->count.delete_list.left--;
                        }

                        /* advance to the next packed record in the blob */
                        rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
                }
        }

        if (vdata->count.delete_list.left == 0) {
                goto success;
        }

        /*
         * Step 2:
         * Send the remaining records to all active nodes for deletion.
         *
         * The lmaster's (i.e. our) copies of these records have been stored
         * successfully on the other nodes.
         */

        /*
         * Create a marshall blob from the remaining list of records to delete.
         */

        talloc_free(recs->records);

        recs->records = (struct ctdb_marshall_buffer *)
                talloc_zero_size(recs,
                                 offsetof(struct ctdb_marshall_buffer, data));
        if (recs->records == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                goto done;
        }
        recs->records->db_id = ctdb_db->db_id;

        ret = trbt_traversearray32(vdata->delete_list, 1,
                                   delete_marshall_traverse, recs);
        if (ret != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
                      "delete list for second marshalling.\n"));
                goto done;
        }

        indata = ctdb_marshall_finish(recs->records);

        for (i = 0; i < num_active_nodes; i++) {
                struct ctdb_marshall_buffer *records;
                struct ctdb_rec_data *rec;
                int32_t res;
                TDB_DATA outdata;

                ret = ctdb_control(ctdb, active_nodes[i], 0,
                                CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
                                indata, recs, &outdata, &res,
                                NULL, NULL);
                if (ret != 0 || res != 0) {
                        DEBUG(DEBUG_ERR, ("Failed to delete records on "
                                          "node %u: ret[%d] res[%d]\n",
                                          active_nodes[i], ret, res));
                        goto done;
                }

                /*
                 * outdata contains the list of records coming back
                 * from the node: These are the records that the
                 * remote node could not delete. We remove these from
                 * the list to delete locally.
                 */
                records = (struct ctdb_marshall_buffer *)outdata.dptr;
                rec = (struct ctdb_rec_data *)&records->data[0];
                /* NOTE(review): same "count-- > 1" loop as in step 1 above —
                 * verify the count convention of the reply buffer. */
                while (records->count-- > 1) {
                        TDB_DATA reckey, recdata;
                        struct ctdb_ltdb_header *rechdr;
                        struct delete_record_data *dd;

                        reckey.dptr = &rec->data[0];
                        reckey.dsize = rec->keylen;
                        recdata.dptr = &rec->data[reckey.dsize];
                        recdata.dsize = rec->datalen;

                        if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
                                DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
                                goto done;
                        }
                        rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
                        recdata.dptr += sizeof(*rechdr);
                        recdata.dsize -= sizeof(*rechdr);

                        dd = (struct delete_record_data *)trbt_lookup32(
                                        vdata->delete_list,
                                        ctdb_hash(&reckey));
                        if (dd != NULL) {
                                /*
                                 * The other node could not delete the
                                 * record and it is the first node that
                                 * failed. So we should remove it from
                                 * the tree and update statistics.
                                 */
                                talloc_free(dd);
                                vdata->count.delete_list.remote_error++;
                                vdata->count.delete_list.left--;
                        } else {
                                DEBUG(DEBUG_ERR, (__location__ " Failed to "
                                      "find record with hash 0x%08x coming "
                                      "back from TRY_DELETE_RECORDS "
                                      "control in delete list.\n",
                                      ctdb_hash(&reckey)));
                                vdata->count.delete_list.local_error++;
                                vdata->count.delete_list.left--;
                        }

                        /* advance to the next packed record in the blob */
                        rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
                }
        }

        if (vdata->count.delete_list.left == 0) {
                goto success;
        }

        /*
         * Step 3:
         * Delete the remaining records locally.
         *
         * These records have successfully been deleted on all
         * active remote nodes.
         */

        ret = trbt_traversearray32(vdata->delete_list, 1,
                                   delete_record_traverse, vdata);
        if (ret != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
                      "delete list for deletion.\n"));
        }

success:

        /* Sanity checks and statistics logging follow; "left" should be
         * zero here unless some records could not be processed. */
        if (vdata->count.delete_list.left != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Vaccum db[%s] error: "
                      "there are %u records left for deletion after "
                      "processing delete list\n",
                      ctdb_db->db_name,
                      (unsigned)vdata->count.delete_list.left));
        }

        sum = vdata->count.delete_list.deleted
            + vdata->count.delete_list.skipped
            + vdata->count.delete_list.remote_error
            + vdata->count.delete_list.local_error
            + vdata->count.delete_list.left;

        if (vdata->count.delete_list.total != sum) {
                DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
                      "delete list counts for db[%s]: total[%u] != sum[%u]\n",
                      ctdb_db->db_name,
                      (unsigned)vdata->count.delete_list.total,
                      (unsigned)sum));
        }

        if (vdata->count.delete_list.total > 0) {
                DEBUG(DEBUG_INFO,
                      (__location__
                       " vacuum delete list statistics: "
                       "db[%s] "
                       "total[%u] "
                       "del[%u] "
                       "skip[%u] "
                       "rem.err[%u] "
                       "loc.err[%u] "
                       "left[%u]\n",
                       ctdb_db->db_name,
                       (unsigned)vdata->count.delete_list.total,
                       (unsigned)vdata->count.delete_list.deleted,
                       (unsigned)vdata->count.delete_list.skipped,
                       (unsigned)vdata->count.delete_list.remote_error,
                       (unsigned)vdata->count.delete_list.local_error,
                       (unsigned)vdata->count.delete_list.left));
        }

done:
        talloc_free(tmp_ctx);

        return;
}
1144
1145 /**
1146  * initialize the vacuum_data
1147  */
1148 static struct vacuum_data *ctdb_vacuum_init_vacuum_data(
1149                                         struct ctdb_db_context *ctdb_db,
1150                                         TALLOC_CTX *mem_ctx)
1151 {
1152         int i;
1153         struct ctdb_context *ctdb = ctdb_db->ctdb;
1154         struct vacuum_data *vdata;
1155
1156         vdata = talloc_zero(mem_ctx, struct vacuum_data);
1157         if (vdata == NULL) {
1158                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1159                 return NULL;
1160         }
1161
1162         vdata->ctdb = ctdb_db->ctdb;
1163         vdata->ctdb_db = ctdb_db;
1164         vdata->delete_list = trbt_create(vdata, 0);
1165         if (vdata->delete_list == NULL) {
1166                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1167                 goto fail;
1168         }
1169
1170         vdata->start = timeval_current();
1171
1172         vdata->count.delete_queue.added_to_delete_list = 0;
1173         vdata->count.delete_queue.added_to_vacuum_fetch_list = 0;
1174         vdata->count.delete_queue.deleted = 0;
1175         vdata->count.delete_queue.skipped = 0;
1176         vdata->count.delete_queue.error = 0;
1177         vdata->count.delete_queue.total = 0;
1178         vdata->count.db_traverse.scheduled = 0;
1179         vdata->count.db_traverse.skipped = 0;
1180         vdata->count.db_traverse.error = 0;
1181         vdata->count.db_traverse.total = 0;
1182         vdata->count.delete_list.total = 0;
1183         vdata->count.delete_list.left = 0;
1184         vdata->count.delete_list.remote_error = 0;
1185         vdata->count.delete_list.local_error = 0;
1186         vdata->count.delete_list.skipped = 0;
1187         vdata->count.delete_list.deleted = 0;
1188
1189         /* the list needs to be of length num_nodes */
1190         vdata->vacuum_fetch_list = talloc_zero_array(vdata,
1191                                                 struct ctdb_marshall_buffer *,
1192                                                 ctdb->num_nodes);
1193         if (vdata->vacuum_fetch_list == NULL) {
1194                 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1195                 goto fail;
1196         }
1197         for (i = 0; i < ctdb->num_nodes; i++) {
1198                 vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
1199                         talloc_zero_size(vdata->vacuum_fetch_list,
1200                                          offsetof(struct ctdb_marshall_buffer, data));
1201                 if (vdata->vacuum_fetch_list[i] == NULL) {
1202                         DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1203                         talloc_free(vdata);
1204                         return NULL;
1205                 }
1206                 vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
1207         }
1208
1209         return vdata;
1210
1211 fail:
1212         talloc_free(vdata);
1213         return NULL;
1214 }
1215
1216 /**
1217  * Vacuum a DB:
1218  *  - Always do the fast vacuuming run, which traverses
1219  *    the in-memory delete queue: these records have been
1220  *    scheduled for deletion.
1221  *  - Only if explicitly requested, the database is traversed
1222  *    in order to use the traditional heuristics on empty records
1223  *    to trigger deletion.
1224  *    This is done only every VacuumFastPathCount'th vacuuming run.
1225  *
1226  * The traverse runs fill two lists:
1227  *
1228  * - The delete_list:
1229  *   This is the list of empty records the current
1230  *   node is lmaster and dmaster for. These records are later
1231  *   deleted first on other nodes and then locally.
1232  *
1233  *   The fast vacuuming run has a short cut for those records
1234  *   that have never been migrated with data: these records
1235  *   are immediately deleted locally, since they have left
1236  *   no trace on other nodes.
1237  *
1238  * - The vacuum_fetch lists
1239  *   (one for each other lmaster node):
1240  *   The records in this list are sent for deletion to
1241  *   their lmaster in a bulk VACUUM_FETCH message.
1242  *
 *   The lmaster then migrates all these records to itself
1244  *   so that they can be vacuumed there.
1245  *
1246  * This executes in the child context.
1247  */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
                          bool full_vacuum_run)
{
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        int ret, pnn;
        struct vacuum_data *vdata;
        TALLOC_CTX *tmp_ctx;

        DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
                           "%s db_id[0x%08x]\n",
                           full_vacuum_run ? "full" : "fast",
                           ctdb_db->db_name, ctdb_db->db_id));

        /* Refresh the vnn map from the daemon so lmaster decisions
         * made during this run use current cluster state. */
        ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
        if (ret != 0) {
                DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
                return ret;
        }

        pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
        if (pnn == -1) {
                DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
                return -1;
        }

        /* Cache our own pnn for the dmaster/lmaster checks below. */
        ctdb->pnn = pnn;

        tmp_ctx = talloc_new(ctdb_db);
        if (tmp_ctx == NULL) {
                DEBUG(DEBUG_ERR, ("Out of memory!\n"));
                return -1;
        }

        vdata = ctdb_vacuum_init_vacuum_data(ctdb_db, tmp_ctx);
        if (vdata == NULL) {
                talloc_free(tmp_ctx);
                return -1;
        }

        /* Full runs additionally traverse the whole database using the
         * traditional empty-record heuristics; fast runs only process
         * the in-memory delete queue. */
        if (full_vacuum_run) {
                ctdb_vacuum_traverse_db(ctdb_db, vdata);
        }

        ctdb_process_delete_queue(ctdb_db, vdata);

        ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);

        ctdb_process_delete_list(ctdb_db, vdata);

        talloc_free(tmp_ctx);

        /* this ensures we run our event queue */
        ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

        return 0;
}
1304
1305 /*
 * repack and vacuum a db
1307  * called from the child context
1308  */
1309 static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
1310                                      bool full_vacuum_run)
1311 {
1312         uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
1313         const char *name = ctdb_db->db_name;
1314         int freelist_size = 0;
1315         int ret;
1316
1317         if (ctdb_vacuum_db(ctdb_db, full_vacuum_run) != 0) {
1318                 DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
1319         }
1320
1321         freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
1322         if (freelist_size == -1) {
1323                 DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
1324                 return -1;
1325         }
1326
1327         /*
1328          * decide if a repack is necessary
1329          */
1330         if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
1331         {
1332                 return 0;
1333         }
1334
1335         DEBUG(DEBUG_INFO, ("Repacking %s with %u freelist entries\n",
1336                            name, freelist_size));
1337
1338         ret = tdb_repack(ctdb_db->ltdb->tdb);
1339         if (ret != 0) {
1340                 DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
1341                 return -1;
1342         }
1343
1344         return 0;
1345 }
1346
1347 static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
1348 {
1349         uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;
1350
1351         return interval;
1352 }
1353
/*
 * Destructor for a vacuuming child context: records the run's latency,
 * kills the child if it is still alive, unlinks the context from the
 * list of active vacuumers and schedules the next vacuuming event.
 */
static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
{
        double l = timeval_elapsed(&child_ctx->start_time);
        struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb;

        CTDB_UPDATE_DB_LATENCY(ctdb_db, "vacuum", vacuum.latency, l);
        DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));

        if (child_ctx->child_pid != -1) {
                /* Child still running (error/timeout path): kill it.
                 * child_pid is set to -1 by vacuum_child_handler on
                 * normal completion. */
                ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
        } else {
                /* Bump the number of successful fast-path runs. */
                child_ctx->vacuum_handle->fast_path_count++;
        }

        DLIST_REMOVE(ctdb->vacuumers, child_ctx);

        /* Schedule the next vacuuming run for this database. */
        event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
                        timeval_current_ofs(get_vacuum_interval(ctdb_db), 0), 
                        ctdb_vacuum_event, child_ctx->vacuum_handle);

        return 0;
}
1378
1379 /*
1380  * this event is generated when a vacuum child process times out
1381  */
1382 static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
1383                                          struct timeval t, void *private_data)
1384 {
1385         struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1386
1387         DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));
1388
1389         child_ctx->status = VACUUM_TIMEOUT;
1390
1391         talloc_free(child_ctx);
1392 }
1393
1394
1395 /*
1396  * this event is generated when a vacuum child process has completed
1397  */
static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
                             uint16_t flags, void *private_data)
{
        struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
        char c = 0;
        int ret;

        DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
        /* Mark the child as gone so vacuum_child_destructor does not
         * try to kill it. */
        child_ctx->child_pid = -1;

        /* The child writes a single status byte over the pipe: 0 means
         * the vacuum/repack run succeeded. */
        ret = sys_read(child_ctx->fd[0], &c, 1);
        if (ret != 1 || c != 0) {
                child_ctx->status = VACUUM_ERROR;
                DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
        } else {
                child_ctx->status = VACUUM_OK;
        }

        /* Runs the destructor, which schedules the next vacuuming event. */
        talloc_free(child_ctx);
}
1418
1419 /*
1420  * this event is called every time we need to start a new vacuum process
1421  */
static void
ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
                               struct timeval t, void *private_data)
{
        struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
        struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        struct ctdb_vacuum_child_context *child_ctx;
        struct tevent_fd *fde;
        int ret;

        /* we dont vacuum if we are in recovery mode, or db frozen */
        if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
            ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
                DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
                                   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
                                   : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
                                   ? "freeze pending"
                                   : "frozen"));
                /* retry after a full vacuum interval */
                event_add_timed(ctdb->ev, vacuum_handle,
                        timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
                        ctdb_vacuum_event, vacuum_handle);
                return;
        }

        /* Do not allow multiple vacuuming child processes to be active at the
         * same time.  If there is vacuuming child process active, delay
         * new vacuuming event to stagger vacuuming events.
         */
        if (ctdb->vacuumers != NULL) {
                /* retry shortly (500ms) instead of waiting a full interval */
                event_add_timed(ctdb->ev, vacuum_handle,
                                timeval_current_ofs(0, 500*1000),
                                ctdb_vacuum_event, vacuum_handle);
                return;
        }

        child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
        if (child_ctx == NULL) {
                DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
                ctdb_fatal(ctdb, "Out of memory when crating vacuum child context. Shutting down\n");
        }


        /* pipe for the child to report its status byte back to us */
        ret = pipe(child_ctx->fd);
        if (ret != 0) {
                talloc_free(child_ctx);
                DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
                event_add_timed(ctdb->ev, vacuum_handle,
                        timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
                        ctdb_vacuum_event, vacuum_handle);
                return;
        }

        /* A wrapped-around counter (0) selects a full vacuum run below. */
        if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
                vacuum_handle->fast_path_count = 0;
        }

        child_ctx->child_pid = ctdb_fork(ctdb);
        if (child_ctx->child_pid == (pid_t)-1) {
                close(child_ctx->fd[0]);
                close(child_ctx->fd[1]);
                talloc_free(child_ctx);
                DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
                event_add_timed(ctdb->ev, vacuum_handle,
                        timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
                        ctdb_vacuum_event, vacuum_handle);
                return;
        }


        if (child_ctx->child_pid == 0) {
                /* Child process: do the actual vacuuming and write a
                 * single status byte back through the pipe. */
                char cc = 0;
                bool full_vacuum_run = false;
                close(child_ctx->fd[0]);

                DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
                ctdb_set_process_name("ctdb_vacuum");
                if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
                        DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
                        _exit(1);
                }

                /* Every VacuumFastPathCount'th run is a full run. */
                if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
                    (vacuum_handle->fast_path_count == 0))
                {
                        full_vacuum_run = true;
                }
                cc = ctdb_vacuum_and_repack_db(ctdb_db, full_vacuum_run);

                sys_write(child_ctx->fd[1], &cc, 1);
                _exit(0);
        }

        /* Parent process: keep the read end, track the child. */
        set_close_on_exec(child_ctx->fd[0]);
        close(child_ctx->fd[1]);

        child_ctx->status = VACUUM_RUNNING;
        child_ctx->start_time = timeval_current();

        DLIST_ADD(ctdb->vacuumers, child_ctx);
        talloc_set_destructor(child_ctx, vacuum_child_destructor);

        /*
         * Clear the fastpath vacuuming list in the parent.
         */
        talloc_free(ctdb_db->delete_queue);
        ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
        if (ctdb_db->delete_queue == NULL) {
                /* fatal here? ... */
                ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
                                 "in parent context. Shutting down\n");
        }

        /* Kill the child if it exceeds VacuumMaxRunTime. */
        event_add_timed(ctdb->ev, child_ctx,
                timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
                vacuum_child_timeout, child_ctx);

        DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

        /* Fires when the child writes its status byte. */
        fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
                           EVENT_FD_READ, vacuum_child_handler, child_ctx);
        tevent_fd_set_auto_close(fde);

        vacuum_handle->child_ctx = child_ctx;
        child_ctx->vacuum_handle = vacuum_handle;
}
1548
1549 void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
1550 {
1551         /* Simply free them all. */
1552         while (ctdb->vacuumers) {
1553                 DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
1554                            ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
1555                            (int)ctdb->vacuumers->child_pid));
1556                 /* vacuum_child_destructor kills it, removes from list */
1557                 talloc_free(ctdb->vacuumers);
1558         }
1559 }
1560
1561 /* this function initializes the vacuuming context for a database
1562  * starts the vacuuming events
1563  */
1564 int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
1565 {
1566         if (ctdb_db->persistent != 0) {
1567                 DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
1568                 return 0;
1569         }
1570
1571         ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
1572         CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);
1573
1574         ctdb_db->vacuum_handle->ctdb_db         = ctdb_db;
1575         ctdb_db->vacuum_handle->fast_path_count = 0;
1576
1577         event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle, 
1578                         timeval_current_ofs(get_vacuum_interval(ctdb_db), 0), 
1579                         ctdb_vacuum_event, ctdb_db->vacuum_handle);
1580
1581         return 0;
1582 }
1583
1584 static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
1585                                             const struct ctdb_ltdb_header *hdr,
1586                                             const TDB_DATA key)
1587 {
1588         struct delete_record_data *kd;
1589         uint32_t hash;
1590
1591         hash = (uint32_t)ctdb_hash(&key);
1592
1593         DEBUG(DEBUG_DEBUG, (__location__
1594                             " remove_record_from_delete_queue: "
1595                             "db[%s] "
1596                             "db_id[0x%08x] "
1597                             "key_hash[0x%08x] "
1598                             "lmaster[%u] "
1599                             "migrated_with_data[%s]\n",
1600                              ctdb_db->db_name, ctdb_db->db_id,
1601                              hash,
1602                              ctdb_lmaster(ctdb_db->ctdb, &key),
1603                              hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1604
1605         kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1606         if (kd == NULL) {
1607                 DEBUG(DEBUG_DEBUG, (__location__
1608                                     " remove_record_from_delete_queue: "
1609                                     "record not in queue (hash[0x%08x])\n.",
1610                                     hash));
1611                 return;
1612         }
1613
1614         if ((kd->key.dsize != key.dsize) ||
1615             (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1616         {
1617                 DEBUG(DEBUG_DEBUG, (__location__
1618                                     " remove_record_from_delete_queue: "
1619                                     "hash collision for key with hash[0x%08x] "
1620                                     "in db[%s] - skipping\n",
1621                                     hash, ctdb_db->db_name));
1622                 return;
1623         }
1624
1625         DEBUG(DEBUG_DEBUG, (__location__
1626                             " remove_record_from_delete_queue: "
1627                             "removing key with hash[0x%08x]\n",
1628                              hash));
1629
1630         talloc_free(kd);
1631
1632         return;
1633 }
1634
1635 /**
1636  * Insert a record into the ctdb_db context's delete queue,
1637  * handling hash collisions.
1638  */
1639 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
1640                                            const struct ctdb_ltdb_header *hdr,
1641                                            TDB_DATA key)
1642 {
1643         struct delete_record_data *kd;
1644         uint32_t hash;
1645         int ret;
1646
1647         hash = (uint32_t)ctdb_hash(&key);
1648
1649         DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
1650                            "db_id[0x%08x] "
1651                            "key_hash[0x%08x] "
1652                            "lmaster[%u] "
1653                            "migrated_with_data[%s]\n",
1654                             ctdb_db->db_name, ctdb_db->db_id,
1655                             hash,
1656                             ctdb_lmaster(ctdb_db->ctdb, &key),
1657                             hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1658
1659         kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1660         if (kd != NULL) {
1661                 if ((kd->key.dsize != key.dsize) ||
1662                     (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1663                 {
1664                         DEBUG(DEBUG_INFO,
1665                               (__location__ " schedule for deletion: "
1666                                "hash collision for key hash [0x%08x]. "
1667                                "Skipping the record.\n", hash));
1668                         return 0;
1669                 } else {
1670                         DEBUG(DEBUG_DEBUG,
1671                               (__location__ " schedule for deletion: "
1672                                "updating entry for key with hash [0x%08x].\n",
1673                                hash));
1674                 }
1675         }
1676
1677         ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
1678                                                   ctdb_db->delete_queue,
1679                                                   hdr, key);
1680         if (ret != 0) {
1681                 DEBUG(DEBUG_INFO,
1682                       (__location__ " schedule for deletion: error "
1683                        "inserting key with hash [0x%08x] into delete queue\n",
1684                        hash));
1685                 return -1;
1686         }
1687
1688         return 0;
1689 }
1690
1691 /**
1692  * Schedule a record for deletetion.
1693  * Called from the parent context.
1694  */
1695 int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
1696                                            TDB_DATA indata)
1697 {
1698         struct ctdb_control_schedule_for_deletion *dd;
1699         struct ctdb_db_context *ctdb_db;
1700         int ret;
1701         TDB_DATA key;
1702
1703         dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;
1704
1705         ctdb_db = find_ctdb_db(ctdb, dd->db_id);
1706         if (ctdb_db == NULL) {
1707                 DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
1708                                   dd->db_id));
1709                 return -1;
1710         }
1711
1712         key.dsize = dd->keylen;
1713         key.dptr = dd->key;
1714
1715         ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);
1716
1717         return ret;
1718 }
1719
1720 int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
1721                                          const struct ctdb_ltdb_header *hdr,
1722                                          TDB_DATA key)
1723 {
1724         int ret;
1725         struct ctdb_control_schedule_for_deletion *dd;
1726         TDB_DATA indata;
1727         int32_t status;
1728
1729         if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
1730                 /* main daemon - directly queue */
1731                 ret = insert_record_into_delete_queue(ctdb_db, hdr, key);
1732
1733                 return ret;
1734         }
1735
1736         /* if we dont have a connection to the daemon we can not send
1737            a control. For example sometimes from update_record control child
1738            process.
1739         */
1740         if (!ctdb_db->ctdb->can_send_controls) {
1741                 return -1;
1742         }
1743
1744
1745         /* child process: send the main daemon a control */
1746         indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
1747         indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
1748         if (indata.dptr == NULL) {
1749                 DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
1750                 return -1;
1751         }
1752         dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
1753         dd->db_id = ctdb_db->db_id;
1754         dd->hdr = *hdr;
1755         dd->keylen = key.dsize;
1756         memcpy(dd->key, key.dptr, key.dsize);
1757
1758         ret = ctdb_control(ctdb_db->ctdb,
1759                            CTDB_CURRENT_NODE,
1760                            ctdb_db->db_id,
1761                            CTDB_CONTROL_SCHEDULE_FOR_DELETION,
1762                            CTDB_CTRL_FLAG_NOREPLY, /* flags */
1763                            indata,
1764                            NULL, /* mem_ctx */
1765                            NULL, /* outdata */
1766                            &status,
1767                            NULL, /* timeout : NULL == wait forever */
1768                            NULL); /* error message */
1769
1770         talloc_free(indata.dptr);
1771
1772         if (ret != 0 || status != 0) {
1773                 DEBUG(DEBUG_ERR, (__location__ " Error sending "
1774                                   "SCHEDULE_FOR_DELETION "
1775                                   "control.\n"));
1776                 if (status != 0) {
1777                         ret = -1;
1778                 }
1779         }
1780
1781         return ret;
1782 }
1783
1784 void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
1785                                          const struct ctdb_ltdb_header *hdr,
1786                                          const TDB_DATA key)
1787 {
1788         if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
1789                 /*
1790                  * Only remove the record from the delete queue if called
1791                  * in the main daemon.
1792                  */
1793                 return;
1794         }
1795
1796         remove_record_from_delete_queue(ctdb_db, hdr, key);
1797
1798         return;
1799 }