2 * Unix SMB/CIFS implementation.
4 * Copyright (C) Volker Lendecke 2014
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 3 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "librpc/gen_ndr/notify.h"
22 #include "librpc/gen_ndr/messaging.h"
23 #include "librpc/gen_ndr/server_id.h"
24 #include "lib/dbwrap/dbwrap.h"
25 #include "lib/dbwrap/dbwrap_rbt.h"
31 #include "lib/util/server_id_db.h"
32 #include "lib/util/tevent_unix.h"
33 #include "ctdbd_conn.h"
34 #include "ctdb_srvids.h"
35 #include "source3/smbd/proto.h"
36 #include "server_id_db_util.h"
37 #include "lib/util/iov_buf.h"
38 #include "messages_util.h"
40 #ifdef CLUSTER_SUPPORT
41 #include "ctdb_protocol.h"
/*
 * NOTE(review): this extraction is missing intermediate source lines; the
 * leading numbers embedded in each line are original line numbers and are
 * left untouched below.
 */
47 * All of notifyd's state
50 struct notifyd_state {
51 struct tevent_context *ev;
52 struct messaging_context *msg_ctx;
53 struct ctdbd_connection *ctdbd_conn;
56 * Database of everything clients show interest in. Indexed by
57 * absolute path. The database keys are not 0-terminated
58 * because the critical operation, notifyd_trigger, can walk
59 * the structure from the top without adding intermediate 0s.
60 * The database records contain an array of
62 * struct notifyd_instance
64 * to be maintained and parsed by notifyd_parse_entry()
66 struct db_context *entries;
69 * In the cluster case, this is the place where we store a log
70 * of all MSG_SMB_NOTIFY_REC_CHANGE messages. We just 1:1
71 * forward them to our peer notifyd's in the cluster once a
72 * second or when the log grows too large.
75 struct messaging_reclog *log;
78 * Array of companion notifyd's in a cluster. Every notifyd
79 * broadcasts its messaging_reclog to every other notifyd in
80 * the cluster. This is done by making ctdb send a message to
81 * srvid CTDB_SRVID_SAMBA_NOTIFY_PROXY with destination node
82 * number CTDB_BROADCAST_VNNMAP. Everybody in the cluster who
83 * had called register_with_ctdbd this srvid will receive the
86 * Database replication happens via these broadcasts. Also,
87 * they serve as liveness indication. If a notifyd receives a
88 * broadcast from an unknown peer, it will create one for this
89 * srvid. Also when we don't hear anything from a peer for a
90 * while, we will discard it.
93 struct notifyd_peer **peers;
/* Pluggable kernel-notification backend (inotify/fam/...); may be the
 * dummy backend — see sys_notify_watch_dummy below. */
96 sys_notify_watch_fn sys_notify_watch;
97 struct sys_notify_context *sys_notify_ctx;
101 * notifyd's representation of a notify instance
103 struct notifyd_instance {
/* Owning client smbd and the client-supplied registration data
 * (filter, subdir_filter, private_data). */
104 struct server_id client;
105 struct notify_instance instance;
107 void *sys_watch; /* inotify/fam/etc handle */
110 * Filters after sys_watch took responsibility of some bits
112 uint32_t internal_filter;
113 uint32_t internal_subdir_filter;
/*
 * A companion notifyd on another cluster node. "db" is our local replica
 * of the peer's entries database (NULL until the initial pull arrives);
 * last_broadcast feeds the liveness check in notifyd_clean_peers_next.
 */
116 struct notifyd_peer {
117 struct notifyd_state *state;
118 struct server_id pid;
120 struct db_context *db;
121 time_t last_broadcast;
124 static bool notifyd_rec_change(struct messaging_context *msg_ctx,
125 struct messaging_rec **prec,
127 static bool notifyd_trigger(struct messaging_context *msg_ctx,
128 struct messaging_rec **prec,
130 static bool notifyd_get_db(struct messaging_context *msg_ctx,
131 struct messaging_rec **prec,
133 static bool notifyd_got_db(struct messaging_context *msg_ctx,
134 struct messaging_rec **prec,
137 #ifdef CLUSTER_SUPPORT
138 static void notifyd_broadcast_reclog(struct ctdbd_connection *ctdbd_conn,
139 struct server_id src,
140 struct messaging_reclog *log);
142 static void notifyd_sys_callback(struct sys_notify_context *ctx,
143 void *private_data, struct notify_event *ev,
146 #ifdef CLUSTER_SUPPORT
147 static struct tevent_req *notifyd_broadcast_reclog_send(
148 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
149 struct ctdbd_connection *ctdbd_conn, struct server_id src,
150 struct messaging_reclog *log);
151 static int notifyd_broadcast_reclog_recv(struct tevent_req *req);
153 static struct tevent_req *notifyd_clean_peers_send(
154 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
155 struct notifyd_state *notifyd);
156 static int notifyd_clean_peers_recv(struct tevent_req *req);
/*
 * No-op backend used when no kernel notify mechanism is configured
 * (notifyd_send substitutes this when sys_notify_watch == NULL).
 * Body is fragmentary in this extraction; presumably it leaves the
 * filters untouched so everything stays handled internally — TODO confirm.
 */
159 static int sys_notify_watch_dummy(
161 struct sys_notify_context *ctx,
164 uint32_t *subdir_filter,
165 void (*callback)(struct sys_notify_context *ctx,
167 struct notify_event *ev,
172 void **handle = handle_p;
177 static void notifyd_handler_done(struct tevent_req *subreq);
179 #ifdef CLUSTER_SUPPORT
180 static void notifyd_broadcast_reclog_finished(struct tevent_req *subreq);
181 static void notifyd_clean_peers_finished(struct tevent_req *subreq);
182 static int notifyd_snoop_broadcast(uint32_t src_vnn, uint32_t dst_vnn,
184 const uint8_t *msg, size_t msglen,
/*
 * Start the notify daemon: set up the entries database, register the
 * four messaging handlers (REC_CHANGE, TRIGGER, GET_DB, GOT_DB), claim
 * the "notify-daemon" name exclusively, and — in the cluster case —
 * kick off the reclog broadcast and peer-cleanup loops and register for
 * CTDB_SRVID_SAMBA_NOTIFY_PROXY broadcasts. Returns a tevent_req that
 * only completes on error or shutdown.
 */
188 struct tevent_req *notifyd_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
189 struct messaging_context *msg_ctx,
190 struct ctdbd_connection *ctdbd_conn,
191 sys_notify_watch_fn sys_notify_watch,
192 struct sys_notify_context *sys_notify_ctx)
194 struct tevent_req *req, *subreq;
195 struct notifyd_state *state;
196 struct server_id_db *names_db;
199 req = tevent_req_create(mem_ctx, &state, struct notifyd_state);
204 state->msg_ctx = msg_ctx;
205 state->ctdbd_conn = ctdbd_conn;
207 if (sys_notify_watch == NULL) {
208 sys_notify_watch = sys_notify_watch_dummy;
211 state->sys_notify_watch = sys_notify_watch;
212 state->sys_notify_ctx = sys_notify_ctx;
/* In-memory rbt database; keys are paths without trailing NUL. */
214 state->entries = db_open_rbt(state);
215 if (tevent_req_nomem(state->entries, req)) {
216 return tevent_req_post(req, ev);
219 subreq = messaging_handler_send(state, ev, msg_ctx,
220 MSG_SMB_NOTIFY_REC_CHANGE,
221 notifyd_rec_change, state);
222 if (tevent_req_nomem(subreq, req)) {
223 return tevent_req_post(req, ev);
225 tevent_req_set_callback(subreq, notifyd_handler_done, req);
227 subreq = messaging_handler_send(state, ev, msg_ctx,
228 MSG_SMB_NOTIFY_TRIGGER,
229 notifyd_trigger, state);
230 if (tevent_req_nomem(subreq, req)) {
231 return tevent_req_post(req, ev);
233 tevent_req_set_callback(subreq, notifyd_handler_done, req);
235 subreq = messaging_handler_send(state, ev, msg_ctx,
236 MSG_SMB_NOTIFY_GET_DB,
237 notifyd_get_db, state);
238 if (tevent_req_nomem(subreq, req)) {
239 return tevent_req_post(req, ev);
241 tevent_req_set_callback(subreq, notifyd_handler_done, req);
243 subreq = messaging_handler_send(state, ev, msg_ctx,
245 notifyd_got_db, state);
246 if (tevent_req_nomem(subreq, req)) {
247 return tevent_req_post(req, ev);
249 tevent_req_set_callback(subreq, notifyd_handler_done, req);
251 names_db = messaging_names_db(msg_ctx);
/* NOTE(review): DEBUG text says "server_id_db_add" but the call is
 * server_id_db_set_exclusive — message is misleading. */
253 ret = server_id_db_set_exclusive(names_db, "notify-daemon");
255 DEBUG(10, ("%s: server_id_db_add failed: %s\n",
256 __func__, strerror(ret)));
257 tevent_req_error(req, ret);
258 return tevent_req_post(req, ev);
261 /* Block those signals that we are not handling */
262 BlockSignals(True, SIGHUP);
263 BlockSignals(True, SIGUSR1);
265 if (ctdbd_conn == NULL) {
267 * No cluster around, skip the database replication
273 #ifdef CLUSTER_SUPPORT
274 state->log = talloc_zero(state, struct messaging_reclog);
275 if (tevent_req_nomem(state->log, req)) {
276 return tevent_req_post(req, ev);
279 subreq = notifyd_broadcast_reclog_send(
280 state->log, ev, ctdbd_conn, messaging_server_id(msg_ctx),
282 if (tevent_req_nomem(subreq, req)) {
283 return tevent_req_post(req, ev);
285 tevent_req_set_callback(subreq, notifyd_broadcast_reclog_finished,
288 subreq = notifyd_clean_peers_send(state, ev, state);
289 if (tevent_req_nomem(subreq, req)) {
290 return tevent_req_post(req, ev);
292 tevent_req_set_callback(subreq, notifyd_clean_peers_finished,
295 ret = register_with_ctdbd(ctdbd_conn, CTDB_SRVID_SAMBA_NOTIFY_PROXY,
296 notifyd_snoop_broadcast, state);
298 tevent_req_error(req, ret);
299 return tevent_req_post(req, ev);
/*
 * Completion callback for the four messaging_handler_send() loops; a
 * handler finishing means an error, which is propagated to the main req.
 */
306 static void notifyd_handler_done(struct tevent_req *subreq)
308 struct tevent_req *req = tevent_req_callback_data(
309 subreq, struct tevent_req);
312 ret = messaging_handler_recv(subreq);
314 tevent_req_error(req, ret);
317 #ifdef CLUSTER_SUPPORT
/* Propagate failure of the periodic reclog-broadcast loop to the main req. */
319 static void notifyd_broadcast_reclog_finished(struct tevent_req *subreq)
321 struct tevent_req *req = tevent_req_callback_data(
322 subreq, struct tevent_req);
325 ret = notifyd_broadcast_reclog_recv(subreq);
327 tevent_req_error(req, ret);
/* Propagate failure of the periodic peer-cleanup loop to the main req. */
330 static void notifyd_clean_peers_finished(struct tevent_req *subreq)
332 struct tevent_req *req = tevent_req_callback_data(
333 subreq, struct tevent_req);
336 ret = notifyd_clean_peers_recv(subreq);
338 tevent_req_error(req, ret);
/* Receive side of notifyd_send(); returns 0 or an errno. */
343 int notifyd_recv(struct tevent_req *req)
345 return tevent_req_simple_recv_unix(req);
349 * Parse an entry in the notifyd_context->entries database
/*
 * A record's value is a packed array of struct notifyd_instance; reject
 * anything that is not an exact multiple of the struct size. instances /
 * num_instances are optional out-parameters pointing into the buffer
 * (no copy is made — the caller must respect the buffer's lifetime).
 */
352 static bool notifyd_parse_entry(uint8_t *buf, size_t buflen,
353 struct notifyd_instance **instances,
354 size_t *num_instances)
356 if ((buflen % sizeof(struct notifyd_instance)) != 0) {
357 DEBUG(1, ("%s: invalid buffer size: %u\n",
358 __func__, (unsigned)buflen));
362 if (instances != NULL) {
363 *instances = (struct notifyd_instance *)buf;
365 if (num_instances != NULL) {
366 *num_instances = buflen / sizeof(struct notifyd_instance);
/*
 * Apply one MSG_SMB_NOTIFY_REC_CHANGE to an entries database: add a new
 * (client, private_data) instance, update an existing one, or delete it
 * when both filters are 0. Re-arms the sys_notify watch as needed.
 * "path" must be NUL-terminated within pathlen; the db key is stored
 * WITHOUT the trailing NUL (see the entries db comment above).
 */
371 static bool notifyd_apply_rec_change(
372 const struct server_id *client,
373 const char *path, size_t pathlen,
374 const struct notify_instance *chg,
375 struct db_context *entries,
376 sys_notify_watch_fn sys_notify_watch,
377 struct sys_notify_context *sys_notify_ctx,
378 struct messaging_context *msg_ctx)
380 struct db_record *rec;
381 struct notifyd_instance *instances;
382 size_t num_instances;
384 struct notifyd_instance *instance;
390 DEBUG(1, ("%s: pathlen==0\n", __func__));
393 if (path[pathlen-1] != '\0') {
394 DEBUG(1, ("%s: path not 0-terminated\n", __func__));
398 DEBUG(10, ("%s: path=%s, filter=%u, subdir_filter=%u, "
399 "private_data=%p\n", __func__, path,
400 (unsigned)chg->filter, (unsigned)chg->subdir_filter,
/* Key excludes the trailing NUL (pathlen-1). */
403 rec = dbwrap_fetch_locked(
405 make_tdb_data((const uint8_t *)path, pathlen-1));
408 DEBUG(1, ("%s: dbwrap_fetch_locked failed\n", __func__));
413 value = dbwrap_record_get_value(rec);
415 if (value.dsize != 0) {
416 if (!notifyd_parse_entry(value.dptr, value.dsize, NULL,
423 * Overallocate by one instance to avoid a realloc when adding
425 instances = talloc_array(rec, struct notifyd_instance,
427 if (instances == NULL) {
428 DEBUG(1, ("%s: talloc failed\n", __func__));
432 if (value.dsize != 0) {
433 memcpy(instances, value.dptr, value.dsize);
/* Look for an existing instance for this client+private_data. */
436 for (i=0; i<num_instances; i++) {
437 instance = &instances[i];
439 if (server_id_equal(&instance->client, client) &&
440 (instance->instance.private_data == chg->private_data)) {
445 if (i < num_instances) {
446 instance->instance = *chg;
449 * We've overallocated for one instance
451 instance = &instances[num_instances];
453 *instance = (struct notifyd_instance) {
456 .internal_filter = chg->filter,
457 .internal_subdir_filter = chg->subdir_filter
/* Non-empty filter: (re)arm the kernel-level watch. The backend may
 * clear bits in internal_*_filter that it now covers itself. */
463 if ((instance->instance.filter != 0) ||
464 (instance->instance.subdir_filter != 0)) {
467 TALLOC_FREE(instance->sys_watch);
469 ret = sys_notify_watch(entries, sys_notify_ctx, path,
470 &instance->internal_filter,
471 &instance->internal_subdir_filter,
472 notifyd_sys_callback, msg_ctx,
473 &instance->sys_watch);
/* NOTE(review): message names "inotify_watch" but the call is the
 * generic sys_notify_watch backend. */
475 DEBUG(1, ("%s: inotify_watch returned %s\n",
476 __func__, strerror(errno)));
480 if ((instance->instance.filter == 0) &&
481 (instance->instance.subdir_filter == 0)) {
482 /* This is a delete request */
483 TALLOC_FREE(instance->sys_watch);
/* Swap-with-last removal from the packed array. */
484 *instance = instances[num_instances-1];
488 DEBUG(10, ("%s: %s has %u instances\n", __func__,
489 path, (unsigned)num_instances));
491 if (num_instances == 0) {
492 status = dbwrap_record_delete(rec);
493 if (!NT_STATUS_IS_OK(status)) {
494 DEBUG(1, ("%s: dbwrap_record_delete returned %s\n",
495 __func__, nt_errstr(status)));
499 value = make_tdb_data(
500 (uint8_t *)instances,
501 sizeof(struct notifyd_instance) * num_instances);
503 status = dbwrap_record_store(rec, value, 0);
504 if (!NT_STATUS_IS_OK(status)) {
505 DEBUG(1, ("%s: dbwrap_record_store returned %s\n",
506 __func__, nt_errstr(status)));
/*
 * Callback from the sys_notify backend: convert the kernel event into a
 * MSG_SMB_NOTIFY_TRIGGER message to ourselves, carrying
 * "<dir>/<path>\0" assembled from iovecs without an intermediate copy.
 */
517 static void notifyd_sys_callback(struct sys_notify_context *ctx,
518 void *private_data, struct notify_event *ev,
521 struct messaging_context *msg_ctx = talloc_get_type_abort(
522 private_data, struct messaging_context);
523 struct notify_trigger_msg msg;
527 msg = (struct notify_trigger_msg) {
528 .when = timespec_current(),
529 .action = ev->action,
533 iov[0].iov_base = &msg;
534 iov[0].iov_len = offsetof(struct notify_trigger_msg, path);
535 iov[1].iov_base = discard_const_p(char, ev->dir);
536 iov[1].iov_len = strlen(ev->dir);
537 iov[2].iov_base = &slash;
/* +1 includes the terminating NUL in the message payload. */
539 iov[3].iov_base = discard_const_p(char, ev->path);
540 iov[3].iov_len = strlen(ev->path)+1;
543 msg_ctx, messaging_server_id(msg_ctx),
544 MSG_SMB_NOTIFY_TRIGGER, iov, ARRAY_SIZE(iov), NULL, 0);
/*
 * Validate and decode a MSG_SMB_NOTIFY_REC_CHANGE payload in place:
 * *pmsg points into buf (no copy), *pathlen is the length of the
 * trailing path portion. Rejects buffers too short to hold the header
 * plus at least one path byte.
 */
547 static bool notifyd_parse_rec_change(uint8_t *buf, size_t bufsize,
548 struct notify_rec_change_msg **pmsg,
551 struct notify_rec_change_msg *msg;
553 if (bufsize < offsetof(struct notify_rec_change_msg, path) + 1) {
554 DEBUG(1, ("%s: message too short, ignoring: %u\n", __func__,
559 *pmsg = msg = (struct notify_rec_change_msg *)buf;
560 *pathlen = bufsize - offsetof(struct notify_rec_change_msg, path);
562 DEBUG(10, ("%s: Got rec_change_msg filter=%u, subdir_filter=%u, "
563 "private_data=%p, path=%.*s\n",
564 __func__, (unsigned)msg->instance.filter,
565 (unsigned)msg->instance.subdir_filter,
566 msg->instance.private_data, (int)(*pathlen), msg->path));
/*
 * Handler for MSG_SMB_NOTIFY_REC_CHANGE: apply the change to the local
 * entries db and, in the cluster case, append the record to the reclog
 * for broadcast to peers (flushing early once the log reaches 100
 * records).
 */
571 static bool notifyd_rec_change(struct messaging_context *msg_ctx,
572 struct messaging_rec **prec,
575 struct notifyd_state *state = talloc_get_type_abort(
576 private_data, struct notifyd_state);
577 struct server_id_buf idbuf;
578 struct messaging_rec *rec = *prec;
579 struct notify_rec_change_msg *msg;
583 DEBUG(10, ("%s: Got %d bytes from %s\n", __func__,
584 (unsigned)rec->buf.length,
585 server_id_str_buf(rec->src, &idbuf)));
587 ok = notifyd_parse_rec_change(rec->buf.data, rec->buf.length,
593 ok = notifyd_apply_rec_change(
594 &rec->src, msg->path, pathlen, &msg->instance,
595 state->entries, state->sys_notify_watch, state->sys_notify_ctx,
598 DEBUG(1, ("%s: notifyd_apply_rec_change failed, ignoring\n",
/* No cluster / no log: nothing to replicate. */
603 if ((state->log == NULL) || (state->ctdbd_conn == NULL)) {
607 #ifdef CLUSTER_SUPPORT
610 struct messaging_rec **tmp;
611 struct messaging_reclog *log;
615 tmp = talloc_realloc(log, log->recs, struct messaging_rec *,
618 DEBUG(1, ("%s: talloc_realloc failed, ignoring\n", __func__));
/* Take ownership of the record (*prec) into the log. */
623 log->recs[log->num_recs] = talloc_move(log->recs, prec);
626 if (log->num_recs >= 100) {
628 * Don't let the log grow too large
630 notifyd_broadcast_reclog(state->ctdbd_conn,
631 messaging_server_id(msg_ctx), log);
/* Per-trigger walk state handed to notifyd_trigger_parser via
 * dbwrap_parse_record. */
640 struct notifyd_trigger_state {
641 struct messaging_context *msg_ctx;
642 struct notify_trigger_msg *msg;
/* True when a sys_notify backend already delivered this event locally,
 * so internal_* filters apply instead of the full client filters. */
644 bool covered_by_sys_notify;
647 static void notifyd_trigger_parser(TDB_DATA key, TDB_DATA data,
/*
 * Handler for MSG_SMB_NOTIFY_TRIGGER: walk every parent directory of the
 * triggering path (keys are not NUL-terminated, which makes this prefix
 * walk possible) and fan events out to interested clients, both from the
 * local entries db and from each active peer replica.
 */
650 static bool notifyd_trigger(struct messaging_context *msg_ctx,
651 struct messaging_rec **prec,
654 struct notifyd_state *state = talloc_get_type_abort(
655 private_data, struct notifyd_state);
656 struct server_id my_id = messaging_server_id(msg_ctx);
657 struct messaging_rec *rec = *prec;
658 struct notifyd_trigger_state tstate;
660 const char *p, *next_p;
662 if (rec->buf.length < offsetof(struct notify_trigger_msg, path) + 1) {
663 DEBUG(1, ("message too short, ignoring: %u\n",
664 (unsigned)rec->buf.length));
667 if (rec->buf.data[rec->buf.length-1] != 0) {
668 DEBUG(1, ("%s: path not 0-terminated, ignoring\n", __func__));
672 tstate.msg_ctx = msg_ctx;
/* Same node but not notifyd itself => the event came via a local
 * sys_notify watch already. */
674 tstate.covered_by_sys_notify = (rec->src.vnn == my_id.vnn);
675 tstate.covered_by_sys_notify &= !server_id_equal(&rec->src, &my_id);
677 tstate.msg = (struct notify_trigger_msg *)rec->buf.data;
678 path = tstate.msg->path;
680 DEBUG(10, ("%s: Got trigger_msg action=%u, filter=%u, path=%s\n",
681 __func__, (unsigned)tstate.msg->action,
682 (unsigned)tstate.msg->filter, path));
684 if (path[0] != '/') {
685 DEBUG(1, ("%s: path %s does not start with /, ignoring\n",
/* Walk each '/'-separated prefix of path; "recursive" means the event
 * lies deeper than the current prefix. */
690 for (p = strchr(path+1, '/'); p != NULL; p = next_p) {
691 ptrdiff_t path_len = p - path;
695 next_p = strchr(p+1, '/');
696 tstate.recursive = (next_p != NULL);
698 DEBUG(10, ("%s: Trying path %.*s\n", __func__,
699 (int)path_len, path));
701 key = (TDB_DATA) { .dptr = discard_const_p(uint8_t, path),
704 dbwrap_parse_record(state->entries, key,
705 notifyd_trigger_parser, &tstate);
707 if (state->peers == NULL) {
711 if (rec->src.vnn != my_id.vnn) {
715 for (i=0; i<state->num_peers; i++) {
716 if (state->peers[i]->db == NULL) {
718 * Inactive peer, did not get a db yet
722 dbwrap_parse_record(state->peers[i]->db, key,
723 notifyd_trigger_parser, &tstate);
730 static void notifyd_send_delete(struct messaging_context *msg_ctx,
732 struct notifyd_instance *instance);
/*
 * dbwrap_parse_record callback for one directory entry: send a
 * MSG_PVFS_NOTIFY to every instance whose effective filter matches the
 * trigger. If a local client turns out to be dead, queue a delete for
 * its record via notifyd_send_delete.
 */
734 static void notifyd_trigger_parser(TDB_DATA key, TDB_DATA data,
738 struct notifyd_trigger_state *tstate = private_data;
739 struct notify_event_msg msg = { .action = tstate->msg->action };
741 size_t path_len = key.dsize;
742 struct notifyd_instance *instances = NULL;
743 size_t num_instances = 0;
746 if (!notifyd_parse_entry(data.dptr, data.dsize, &instances,
748 DEBUG(1, ("%s: Could not parse notifyd_entry\n", __func__));
752 DEBUG(10, ("%s: Found %u instances for %.*s\n", __func__,
753 (unsigned)num_instances, (int)key.dsize,
/* iov[1] is the path component below this directory (the db key is the
 * directory prefix, key.dsize == path_len). */
756 iov[0].iov_base = &msg;
757 iov[0].iov_len = offsetof(struct notify_event_msg, path);
758 iov[1].iov_base = tstate->msg->path + path_len + 1;
759 iov[1].iov_len = strlen((char *)(iov[1].iov_base)) + 1;
761 for (i=0; i<num_instances; i++) {
762 struct notifyd_instance *instance = &instances[i];
763 struct server_id_buf idbuf;
/* Pick the filter set: internal_* when sys_notify already covered the
 * event, full client filters otherwise; subdir variants when the event
 * is below an intermediate directory. */
767 if (tstate->covered_by_sys_notify) {
768 if (tstate->recursive) {
769 i_filter = instance->internal_subdir_filter;
771 i_filter = instance->internal_filter;
774 if (tstate->recursive) {
775 i_filter = instance->instance.subdir_filter;
777 i_filter = instance->instance.filter;
781 if ((i_filter & tstate->msg->filter) == 0) {
785 msg.private_data = instance->instance.private_data;
787 status = messaging_send_iov(
788 tstate->msg_ctx, instance->client,
789 MSG_PVFS_NOTIFY, iov, ARRAY_SIZE(iov), NULL, 0);
791 DEBUG(10, ("%s: messaging_send_iov to %s returned %s\n",
793 server_id_str_buf(instance->client, &idbuf),
796 if (NT_STATUS_EQUAL(status, NT_STATUS_OBJECT_NAME_NOT_FOUND) &&
797 procid_is_local(&instance->client)) {
799 * That process has died
801 notifyd_send_delete(tstate->msg_ctx, key, instance);
805 if (!NT_STATUS_IS_OK(status)) {
806 DEBUG(1, ("%s: messaging_send_iov returned %s\n",
807 __func__, nt_errstr(status)));
813 * Send a delete request to ourselves to properly discard a notify
814 * record for an smbd that has died.
/*
 * The message is sent *as if from* the dead client
 * (messaging_send_iov_from with the client as source) so that
 * notifyd_rec_change matches and removes the right instance; zero
 * filters in the header mean "delete". The key lacks a trailing NUL, so
 * iov[2] appends one.
 */
817 static void notifyd_send_delete(struct messaging_context *msg_ctx,
819 struct notifyd_instance *instance)
821 struct notify_rec_change_msg msg = {
822 .instance.private_data = instance->instance.private_data
829 * Send a rec_change to ourselves to delete a dead entry
832 iov[0] = (struct iovec) {
834 .iov_len = offsetof(struct notify_rec_change_msg, path) };
835 iov[1] = (struct iovec) { .iov_base = key.dptr, .iov_len = key.dsize };
836 iov[2] = (struct iovec) { .iov_base = &nul, .iov_len = sizeof(nul) };
838 ret = messaging_send_iov_from(
839 msg_ctx, instance->client, messaging_server_id(msg_ctx),
840 MSG_SMB_NOTIFY_REC_CHANGE, iov, ARRAY_SIZE(iov), NULL, 0);
843 DEBUG(10, ("%s: messaging_send_iov_from returned %s\n",
844 __func__, strerror(ret)));
/*
 * Handler for MSG_SMB_NOTIFY_GET_DB: marshall the whole entries db and
 * send it back as MSG_SMB_NOTIFY_DB, prefixed by an 8-byte little-endian
 * reclog index so the receiver can line up subsequent broadcasts.
 */
848 static bool notifyd_get_db(struct messaging_context *msg_ctx,
849 struct messaging_rec **prec,
852 struct notifyd_state *state = talloc_get_type_abort(
853 private_data, struct notifyd_state);
854 struct messaging_rec *rec = *prec;
855 struct server_id_buf id1, id2;
857 uint64_t rec_index = UINT64_MAX;
858 uint8_t index_buf[sizeof(uint64_t)];
/* First call sizes the marshall buffer, second fills it. */
863 dbsize = dbwrap_marshall(state->entries, NULL, 0);
865 buf = talloc_array(rec, uint8_t, dbsize);
867 DEBUG(1, ("%s: talloc_array(%ju) failed\n",
868 __func__, (uintmax_t)dbsize));
872 dbsize = dbwrap_marshall(state->entries, buf, dbsize);
874 if (dbsize != talloc_get_size(buf)) {
875 DEBUG(1, ("%s: dbsize changed: %ju->%ju\n", __func__,
876 (uintmax_t)talloc_get_size(buf),
/* Non-cluster build has no log; UINT64_MAX signals "no index". */
882 if (state->log != NULL) {
883 rec_index = state->log->rec_index;
885 SBVAL(index_buf, 0, rec_index);
887 iov[0] = (struct iovec) { .iov_base = index_buf,
888 .iov_len = sizeof(index_buf) };
889 iov[1] = (struct iovec) { .iov_base = buf,
892 DEBUG(10, ("%s: Sending %ju bytes to %s->%s\n", __func__,
893 (uintmax_t)iov_buflen(iov, ARRAY_SIZE(iov)),
894 server_id_str_buf(messaging_server_id(msg_ctx), &id1),
895 server_id_str_buf(rec->src, &id2)));
897 status = messaging_send_iov(msg_ctx, rec->src, MSG_SMB_NOTIFY_DB,
898 iov, ARRAY_SIZE(iov), NULL, 0);
900 if (!NT_STATUS_IS_OK(status)) {
901 DEBUG(1, ("%s: messaging_send_iov failed: %s\n",
902 __func__, nt_errstr(status)));
908 static int notifyd_add_proxy_syswatches(struct db_record *rec,
/*
 * Handler for MSG_SMB_NOTIFY_DB: a peer answered our GET_DB. Find the
 * matching (so far db-less) peer, unmarshall its database (payload is
 * 8-byte reclog index + marshalled db) and arm local sys_notify watches
 * for every entry via notifyd_add_proxy_syswatches.
 */
911 static bool notifyd_got_db(struct messaging_context *msg_ctx,
912 struct messaging_rec **prec,
915 struct notifyd_state *state = talloc_get_type_abort(
916 private_data, struct notifyd_state);
917 struct messaging_rec *rec = *prec;
918 struct notifyd_peer *p = NULL;
919 struct server_id_buf idbuf;
924 for (i=0; i<state->num_peers; i++) {
925 if (server_id_equal(&rec->src, &state->peers[i]->pid)) {
932 DEBUG(10, ("%s: Did not find peer for db from %s\n",
933 __func__, server_id_str_buf(rec->src, &idbuf)));
937 if (rec->buf.length < 8) {
938 DEBUG(10, ("%s: Got short db length %u from %s\n", __func__,
939 (unsigned)rec->buf.length,
940 server_id_str_buf(rec->src, &idbuf)));
945 p->rec_index = BVAL(rec->buf.data, 0);
947 p->db = db_open_rbt(p);
949 DEBUG(10, ("%s: db_open_rbt failed\n", __func__));
954 status = dbwrap_unmarshall(p->db, rec->buf.data + 8,
955 rec->buf.length - 8);
956 if (!NT_STATUS_IS_OK(status)) {
957 DEBUG(10, ("%s: dbwrap_unmarshall returned %s for db %s\n",
958 __func__, nt_errstr(status),
959 server_id_str_buf(rec->src, &idbuf)));
964 dbwrap_traverse_read(p->db, notifyd_add_proxy_syswatches, state,
967 DEBUG(10, ("%s: Database from %s contained %d records\n", __func__,
968 server_id_str_buf(rec->src, &idbuf), count));
973 #ifdef CLUSTER_SUPPORT
/*
 * Push the accumulated reclog to every node: NDR-push the log, prefix a
 * messaging header (MSG_SMB_NOTIFY_REC_CHANGES, dst id zero) and send
 * via ctdb to srvid CTDB_SRVID_SAMBA_NOTIFY_PROXY with
 * CTDB_BROADCAST_VNNMAP. On success the log index is bumped and the
 * record list is cleared for the next interval.
 */
975 static void notifyd_broadcast_reclog(struct ctdbd_connection *ctdbd_conn,
976 struct server_id src,
977 struct messaging_reclog *log)
979 enum ndr_err_code ndr_err;
980 uint8_t msghdr[MESSAGE_HDR_LENGTH];
989 DEBUG(10, ("%s: rec_index=%ju, num_recs=%u\n", __func__,
990 (uintmax_t)log->rec_index, (unsigned)log->num_recs));
992 message_hdr_put(msghdr, MSG_SMB_NOTIFY_REC_CHANGES, src,
993 (struct server_id) {0 });
994 iov[0] = (struct iovec) { .iov_base = msghdr,
995 .iov_len = sizeof(msghdr) };
997 ndr_err = ndr_push_struct_blob(
999 (ndr_push_flags_fn_t)ndr_push_messaging_reclog);
1000 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1001 DEBUG(1, ("%s: ndr_push_messaging_recs failed: %s\n",
1002 __func__, ndr_errstr(ndr_err)));
1005 iov[1] = (struct iovec) { .iov_base = blob.data,
1006 .iov_len = blob.length };
1008 ret = ctdbd_messaging_send_iov(
1009 ctdbd_conn, CTDB_BROADCAST_VNNMAP,
1010 CTDB_SRVID_SAMBA_NOTIFY_PROXY, iov, ARRAY_SIZE(iov));
1011 TALLOC_FREE(blob.data);
1013 DEBUG(1, ("%s: ctdbd_messaging_send failed: %s\n",
1014 __func__, strerror(ret)));
1018 log->rec_index += 1;
1022 TALLOC_FREE(log->recs);
/* State for the once-per-second reclog broadcast loop. */
1025 struct notifyd_broadcast_reclog_state {
1026 struct tevent_context *ev;
1027 struct ctdbd_connection *ctdbd_conn;
1028 struct server_id src;
1029 struct messaging_reclog *log;
1032 static void notifyd_broadcast_reclog_next(struct tevent_req *subreq);
/*
 * Kick off the periodic (1s wakeup) reclog broadcast loop; never
 * completes except on error.
 */
1034 static struct tevent_req *notifyd_broadcast_reclog_send(
1035 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
1036 struct ctdbd_connection *ctdbd_conn, struct server_id src,
1037 struct messaging_reclog *log)
1039 struct tevent_req *req, *subreq;
1040 struct notifyd_broadcast_reclog_state *state;
1042 req = tevent_req_create(mem_ctx, &state,
1043 struct notifyd_broadcast_reclog_state);
1048 state->ctdbd_conn = ctdbd_conn;
1052 subreq = tevent_wakeup_send(state, state->ev,
1053 timeval_current_ofs_msec(1000));
1054 if (tevent_req_nomem(subreq, req)) {
1055 return tevent_req_post(req, ev);
1057 tevent_req_set_callback(subreq, notifyd_broadcast_reclog_next, req);
/* One tick of the broadcast loop: flush the log, re-arm the 1s wakeup. */
1061 static void notifyd_broadcast_reclog_next(struct tevent_req *subreq)
1063 struct tevent_req *req = tevent_req_callback_data(
1064 subreq, struct tevent_req);
1065 struct notifyd_broadcast_reclog_state *state = tevent_req_data(
1066 req, struct notifyd_broadcast_reclog_state);
1069 ok = tevent_wakeup_recv(subreq);
1070 TALLOC_FREE(subreq);
1072 tevent_req_oom(req);
1076 notifyd_broadcast_reclog(state->ctdbd_conn, state->src, state->log);
1078 subreq = tevent_wakeup_send(state, state->ev,
1079 timeval_current_ofs_msec(1000));
1080 if (tevent_req_nomem(subreq, req)) {
1083 tevent_req_set_callback(subreq, notifyd_broadcast_reclog_next, req);
/* Receive side of the broadcast loop; returns 0 or an errno. */
1086 static int notifyd_broadcast_reclog_recv(struct tevent_req *req)
1088 return tevent_req_simple_recv_unix(req);
/* State for the 30s peer liveness sweep. */
1091 struct notifyd_clean_peers_state {
1092 struct tevent_context *ev;
1093 struct notifyd_state *notifyd;
1096 static void notifyd_clean_peers_next(struct tevent_req *subreq);
/*
 * Kick off the periodic (30s wakeup) stale-peer cleanup loop; never
 * completes except on error.
 */
1098 static struct tevent_req *notifyd_clean_peers_send(
1099 TALLOC_CTX *mem_ctx, struct tevent_context *ev,
1100 struct notifyd_state *notifyd)
1102 struct tevent_req *req, *subreq;
1103 struct notifyd_clean_peers_state *state;
1105 req = tevent_req_create(mem_ctx, &state,
1106 struct notifyd_clean_peers_state);
1111 state->notifyd = notifyd;
1113 subreq = tevent_wakeup_send(state, state->ev,
1114 timeval_current_ofs_msec(30000));
1115 if (tevent_req_nomem(subreq, req)) {
1116 return tevent_req_post(req, ev);
1118 tevent_req_set_callback(subreq, notifyd_clean_peers_next, req);
/*
 * One sweep: drop every peer not heard from for >60s (its destructor
 * compacts the peers array, hence the index handling), then re-arm the
 * 30s wakeup.
 */
1122 static void notifyd_clean_peers_next(struct tevent_req *subreq)
1124 struct tevent_req *req = tevent_req_callback_data(
1125 subreq, struct tevent_req);
1126 struct notifyd_clean_peers_state *state = tevent_req_data(
1127 req, struct notifyd_clean_peers_state);
1128 struct notifyd_state *notifyd = state->notifyd;
1131 time_t now = time(NULL);
1133 ok = tevent_wakeup_recv(subreq);
1134 TALLOC_FREE(subreq);
1136 tevent_req_oom(req);
1141 while (i < notifyd->num_peers) {
1142 struct notifyd_peer *p = notifyd->peers[i];
1144 if ((now - p->last_broadcast) > 60) {
1145 struct server_id_buf idbuf;
1148 * Haven't heard for more than 60 seconds. Call this
1152 DEBUG(10, ("%s: peer %s died\n", __func__,
1153 server_id_str_buf(p->pid, &idbuf)));
1155 * This implicitly decrements notifyd->num_peers
1163 subreq = tevent_wakeup_send(state, state->ev,
1164 timeval_current_ofs_msec(30000));
1165 if (tevent_req_nomem(subreq, req)) {
1168 tevent_req_set_callback(subreq, notifyd_clean_peers_next, req);
/* Receive side of the cleanup loop; returns 0 or an errno. */
1171 static int notifyd_clean_peers_recv(struct tevent_req *req)
1173 return tevent_req_simple_recv_unix(req);
/*
 * dbwrap_traverse_read callback for a freshly-unmarshalled peer db: arm
 * a local sys_notify watch for every instance, so kernel events on this
 * node reach clients registered on the peer node. The db key (path
 * without trailing NUL) is copied to a NUL-terminated VLA for the
 * backend call.
 */
1178 static int notifyd_add_proxy_syswatches(struct db_record *rec,
1181 struct notifyd_state *state = talloc_get_type_abort(
1182 private_data, struct notifyd_state);
1183 struct db_context *db = dbwrap_record_get_db(rec);
1184 TDB_DATA key = dbwrap_record_get_key(rec);
1185 TDB_DATA value = dbwrap_record_get_value(rec);
1186 struct notifyd_instance *instances = NULL;
1187 size_t num_instances = 0;
1189 char path[key.dsize+1];
1192 memcpy(path, key.dptr, key.dsize);
1193 path[key.dsize] = '\0';
1195 ok = notifyd_parse_entry(value.dptr, value.dsize, &instances,
1198 DEBUG(1, ("%s: Could not parse notifyd entry for %s\n",
1203 for (i=0; i<num_instances; i++) {
1204 struct notifyd_instance *instance = &instances[i];
/* Local copies: the backend may strip bits it covers, but the peer's
 * stored filters must stay untouched. */
1205 uint32_t filter = instance->instance.filter;
1206 uint32_t subdir_filter = instance->instance.subdir_filter;
1209 ret = state->sys_notify_watch(
1210 db, state->sys_notify_ctx, path,
1211 &filter, &subdir_filter,
1212 notifyd_sys_callback, state->msg_ctx,
1213 &instance->sys_watch);
/* NOTE(review): message names "inotify_watch" but the call is the
 * generic sys_notify_watch backend. */
1215 DEBUG(1, ("%s: inotify_watch returned %s\n",
1216 __func__, strerror(errno)));
1223 #ifdef CLUSTER_SUPPORT
/*
 * Traverse callback used when tearing down a peer: free every sys_watch
 * handle held by the peer's replica entries.
 */
1225 static int notifyd_db_del_syswatches(struct db_record *rec, void *private_data)
1227 TDB_DATA key = dbwrap_record_get_key(rec);
1228 TDB_DATA value = dbwrap_record_get_value(rec);
1229 struct notifyd_instance *instances = NULL;
1230 size_t num_instances = 0;
1234 ok = notifyd_parse_entry(value.dptr, value.dsize, &instances,
1237 DEBUG(1, ("%s: Could not parse notifyd entry for %.*s\n",
1238 __func__, (int)key.dsize, (char *)key.dptr));
1241 for (i=0; i<num_instances; i++) {
1242 TALLOC_FREE(instances[i].sys_watch);
/*
 * talloc destructor for a peer: drop its sys_watches and remove it from
 * state->peers by swapping in the last element (so callers iterating
 * the array must not advance their index after a free).
 */
1247 static int notifyd_peer_destructor(struct notifyd_peer *p)
1249 struct notifyd_state *state = p->state;
1252 if (p->db != NULL) {
1253 dbwrap_traverse_read(p->db, notifyd_db_del_syswatches,
1257 for (i = 0; i<state->num_peers; i++) {
1258 if (p == state->peers[i]) {
1259 state->peers[i] = state->peers[state->num_peers-1];
1260 state->num_peers -= 1;
/*
 * Allocate and register a new peer for "pid"; the destructor keeps the
 * peers array consistent. The db stays NULL until the GET_DB answer
 * arrives. Returns NULL on allocation failure.
 */
1267 static struct notifyd_peer *notifyd_peer_new(
1268 struct notifyd_state *state, struct server_id pid)
1270 struct notifyd_peer *p, **tmp;
1272 tmp = talloc_realloc(state, state->peers, struct notifyd_peer *,
1273 state->num_peers+1);
1279 p = talloc_zero(state->peers, struct notifyd_peer);
1286 state->peers[state->num_peers] = p;
1287 state->num_peers += 1;
1289 talloc_set_destructor(p, notifyd_peer_destructor);
/*
 * Apply one broadcast reclog from a peer to our replica of its db. The
 * log's rec_index must match the index we expect for this peer; on
 * mismatch (lost broadcast) the whole peer is dropped so the next
 * broadcast triggers a fresh full-db pull. Refreshes last_broadcast on
 * success.
 */
1294 static void notifyd_apply_reclog(struct notifyd_peer *peer,
1295 const uint8_t *msg, size_t msglen)
1297 struct notifyd_state *state = peer->state;
1298 DATA_BLOB blob = { .data = discard_const_p(uint8_t, msg),
1300 struct server_id_buf idbuf;
1301 struct messaging_reclog *log;
1302 enum ndr_err_code ndr_err;
/* Peer without a db yet: initial pull still outstanding, nothing to
 * apply against. */
1305 if (peer->db == NULL) {
1312 log = talloc(peer, struct messaging_reclog);
1314 DEBUG(10, ("%s: talloc failed\n", __func__));
1318 ndr_err = ndr_pull_struct_blob_all(
1320 (ndr_pull_flags_fn_t)ndr_pull_messaging_reclog);
1321 if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
1322 DEBUG(10, ("%s: ndr_pull_messaging_reclog failed: %s\n",
1323 __func__, ndr_errstr(ndr_err)));
1327 DEBUG(10, ("%s: Got %u recs index %ju from %s\n", __func__,
1328 (unsigned)log->num_recs, (uintmax_t)log->rec_index,
1329 server_id_str_buf(peer->pid, &idbuf)));
1331 if (log->rec_index != peer->rec_index) {
1332 DEBUG(3, ("%s: Got rec index %ju from %s, expected %ju\n",
1333 __func__, (uintmax_t)log->rec_index,
1334 server_id_str_buf(peer->pid, &idbuf),
1335 (uintmax_t)peer->rec_index));
1339 for (i=0; i<log->num_recs; i++) {
1340 struct messaging_rec *r = log->recs[i];
1341 struct notify_rec_change_msg *chg;
1345 ok = notifyd_parse_rec_change(r->buf.data, r->buf.length,
1348 DEBUG(3, ("%s: notifyd_parse_rec_change failed\n",
1353 ok = notifyd_apply_rec_change(&r->src, chg->path, pathlen,
1354 &chg->instance, peer->db,
1355 state->sys_notify_watch,
1356 state->sys_notify_ctx,
1359 DEBUG(3, ("%s: notifyd_apply_rec_change failed\n",
1365 peer->rec_index += 1;
1366 peer->last_broadcast = time(NULL);
1372 DEBUG(10, ("%s: Dropping peer %s\n", __func__,
1373 server_id_str_buf(peer->pid, &idbuf)));
1378 * Receive messaging_reclog (log of MSG_SMB_NOTIFY_REC_CHANGE
1379 * messages) broadcasts by other notifyds. Several cases:
1381 * We don't know the source. This creates a new peer. Creating a peer
1382 * involves asking the peer for its full database. We assume ordered
1383 * messages, so the new database will arrive before the next broadcast
1386 * We know the source and the log index matches. We will apply the log
1387 * locally to our peer's db as if we had received it from a local
1390 * We know the source but the log index does not match. This means we
1391 * lost a message. We just drop the whole peer and wait for the next
1392 * broadcast, which will then trigger a fresh database pull.
1395 static int notifyd_snoop_broadcast(uint32_t src_vnn, uint32_t dst_vnn,
1397 const uint8_t *msg, size_t msglen,
1400 struct notifyd_state *state = talloc_get_type_abort(
1401 private_data, struct notifyd_state);
1402 struct server_id my_id = messaging_server_id(state->msg_ctx);
1403 struct notifyd_peer *p;
1406 struct server_id src, dst;
1407 struct server_id_buf idbuf;
1410 if (msglen < MESSAGE_HDR_LENGTH) {
1411 DEBUG(10, ("%s: Got short broadcast\n", __func__));
1414 message_hdr_get(&msg_type, &src, &dst, msg);
1416 if (msg_type != MSG_SMB_NOTIFY_REC_CHANGES) {
1417 DEBUG(10, ("%s Got message %u, ignoring\n", __func__,
1418 (unsigned)msg_type));
/* Broadcasts go to everyone including ourselves; skip our own. */
1421 if (server_id_equal(&src, &my_id)) {
1422 DEBUG(10, ("%s: Ignoring my own broadcast\n", __func__));
1426 DEBUG(10, ("%s: Got MSG_SMB_NOTIFY_REC_CHANGES from %s\n",
1427 __func__, server_id_str_buf(src, &idbuf)));
1429 for (i=0; i<state->num_peers; i++) {
1430 if (server_id_equal(&state->peers[i]->pid, &src)) {
1432 DEBUG(10, ("%s: Applying changes to peer %u\n",
1433 __func__, (unsigned)i));
1435 notifyd_apply_reclog(state->peers[i],
1436 msg + MESSAGE_HDR_LENGTH,
1437 msglen - MESSAGE_HDR_LENGTH);
/* Unknown sender: create the peer and request its full database. */
1442 DEBUG(10, ("%s: Creating new peer for %s\n", __func__,
1443 server_id_str_buf(src, &idbuf)));
1445 p = notifyd_peer_new(state, src);
1447 DEBUG(10, ("%s: notifyd_peer_new failed\n", __func__));
1451 status = messaging_send_buf(state->msg_ctx, src, MSG_SMB_NOTIFY_GET_DB,
1453 if (!NT_STATUS_IS_OK(status)) {
1454 DEBUG(10, ("%s: messaging_send_buf failed: %s\n",
1455 __func__, nt_errstr(status)));
/* Callback plumbing for notifyd_parse_db(): fn is invoked once per
 * (path, instance) pair found in a marshalled db. */
1464 struct notifyd_parse_db_state {
1465 bool (*fn)(const char *path,
1466 struct server_id server,
1467 const struct notify_instance *instance,
1468 void *private_data);
/*
 * Per-record parser: NUL-terminate the key into a VLA, decode the
 * instance array and hand each instance to the user callback; the
 * callback returning false presumably stops the traversal — body
 * fragmentary here, TODO confirm.
 */
1472 static bool notifyd_parse_db_parser(TDB_DATA key, TDB_DATA value,
1475 struct notifyd_parse_db_state *state = private_data;
1476 char path[key.dsize+1];
1477 struct notifyd_instance *instances = NULL;
1478 size_t num_instances = 0;
1482 memcpy(path, key.dptr, key.dsize);
1483 path[key.dsize] = 0;
1485 ok = notifyd_parse_entry(value.dptr, value.dsize, &instances,
1488 DEBUG(10, ("%s: Could not parse entry for path %s\n",
1493 for (i=0; i<num_instances; i++) {
1494 ok = state->fn(path, instances[i].client,
1495 &instances[i].instance,
1496 state->private_data);
1505 int notifyd_parse_db(const uint8_t *buf, size_t buflen,
1506 uint64_t *log_index,
1507 bool (*fn)(const char *path,
1508 struct server_id server,
1509 const struct notify_instance *instance,
1510 void *private_data),
1513 struct notifyd_parse_db_state state = {
1514 .fn = fn, .private_data = private_data
1521 *log_index = BVAL(buf, 0);
1526 status = dbwrap_parse_marshall_buf(
1527 buf, buflen, notifyd_parse_db_parser, &state);
1528 if (!NT_STATUS_IS_OK(status)) {
1529 return map_errno_from_nt_status(status);