talloc_free(op);
s->ops.n_current = NULL;
- dreplsrv_notify_run_ops(s);
+ dreplsrv_run_pending_ops(s);
}
/*
op->uSN = uSN;
op->is_urgent = is_urgent;
op->replica_flags = replica_flags;
+ op->schedule_time = time(NULL);
DLIST_ADD_END(service->ops.notifies, op, struct dreplsrv_notify_operation *);
talloc_steal(service, op);
talloc_free(mem_ctx);
dreplsrv_run_pending_ops(service);
- dreplsrv_notify_run_ops(service);
}
op->fsmo_info = fsmo_info;
op->callback = callback;
op->cb_data = cb_data;
+ op->schedule_time = time(NULL);
DLIST_ADD_END(s->ops.pending, op, struct dreplsrv_out_operation *);
talloc_free(op);
s->ops.current = NULL;
dreplsrv_run_pending_ops(s);
- dreplsrv_notify_run_ops(s);
}
-void dreplsrv_run_pending_ops(struct dreplsrv_service *s)
+void dreplsrv_run_pull_ops(struct dreplsrv_service *s)
{
struct dreplsrv_out_operation *op;
time_t t;
NTTIME now;
struct tevent_req *subreq;
- if (s->ops.current || s->ops.n_current) {
+ if (s->ops.current) {
/* if there's still one running, we're done */
return;
}
dreplsrv_ridalloc_check_rid_pool(service);
dreplsrv_run_pending_ops(service);
- dreplsrv_notify_run_ops(service);
+}
+
+/*
+ run the next pending op, either a notify or a pull
+
+ Dispatcher: picks between the notify queue (s->ops.notifies) and the
+ pull queue (s->ops.pending) so the two kinds of operation are
+ interleaved in schedule order instead of one queue starving the other.
+ Each op's schedule_time is stamped when it is appended to its queue.
+ */
+void dreplsrv_run_pending_ops(struct dreplsrv_service *s)
+{
+	/* nothing queued on either list - nothing to do */
+	if (!s->ops.notifies && !s->ops.pending) {
+		return;
+	}
+	/*
+	 * Run a pull op when there are no notifies, or when the head
+	 * pull op was scheduled strictly earlier than the head notify.
+	 * On a schedule_time tie the notify side wins (else branch).
+	 */
+	if (!s->ops.notifies ||
+	(s->ops.pending &&
+	s->ops.notifies->schedule_time > s->ops.pending->schedule_time)) {
+		dreplsrv_run_pull_ops(s);
+	} else {
+		dreplsrv_notify_run_ops(s);
+	}
+}
struct dreplsrv_out_operation {
struct dreplsrv_out_operation *prev, *next;
+ time_t schedule_time;
struct dreplsrv_service *service;
struct dreplsrv_notify_operation {
struct dreplsrv_notify_operation *prev, *next;
+ time_t schedule_time;
struct dreplsrv_service *service;
uint64_t uSN;