2 * Socket and pipe I/O utilities used in rsync.
4 * Copyright (C) 1996-2001 Andrew Tridgell
5 * Copyright (C) 1996 Paul Mackerras
6 * Copyright (C) 2001, 2002 Martin Pool <mbp@samba.org>
7 * Copyright (C) 2003-2022 Wayne Davison
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 3 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, visit the http://fsf.org website.
23 /* Rsync provides its own multiplexing system, which is used to send
24 * stderr and stdout over a single socket.
26 * For historical reasons this is off during the start of the
27 * connection, but it's switched on quite early using
28 * io_start_multiplex_out() and io_start_multiplex_in(). */
34 /** If no timeout is specified then use a 60 second select timeout */
35 #define SELECT_TIMEOUT 60
38 extern size_t bwlimit_writemax;
39 extern int io_timeout;
42 extern int am_receiver;
43 extern int am_generator;
44 extern int msgs2stderr;
45 extern int inc_recurse;
50 extern int file_total;
51 extern int file_old_total;
53 extern int read_batch;
54 extern int compat_flags;
55 extern int protect_args;
56 extern int checksum_seed;
57 extern int daemon_connection;
58 extern int protocol_version;
59 extern int remove_source_files;
60 extern int preserve_hard_links;
61 extern BOOL extra_flist_sending_enabled;
62 extern BOOL flush_ok_after_signal;
63 extern struct stats stats;
64 extern time_t stop_at_utime;
65 extern struct file_list *cur_flist;
67 extern int filesfrom_convert;
68 extern iconv_t ic_send, ic_recv;
71 int csum_length = SHORT_SUM_LENGTH; /* initial value */
74 int forward_flist_data = 0;
75 BOOL flist_receiving_enabled = False;
77 /* Ignore an EOF error if non-zero. See whine_about_eof(). */
78 int kluge_around_eof = 0;
79 int got_kill_signal = -1; /* is set to 0 only after multiplexed I/O starts */
84 int64 total_data_read = 0;
85 int64 total_data_written = 0;
/* Tail of the iobuf state object (its opening lines are not visible in this
 * chunk).  It bundles the in/out/msg buffers, their fds, and the raw_* offsets
 * used when packaging MSG_DATA in the circular out buffer. */
90 int out_fd; /* Both "out" and "msg" go to this fd. */
92 unsigned out_empty_len;
93 size_t raw_data_header_pos; /* in the out xbuf */
94 size_t raw_flushing_ends_before; /* in the out xbuf */
95 size_t raw_input_ends_before; /* in the in xbuf */
96 } iobuf = { .in_fd = -1, .out_fd = -1 };
/* Timestamps of the most recent successful read/write; consulted by
 * check_timeout() to decide when io_timeout has expired. */
98 static time_t last_io_in;
99 static time_t last_io_out;
101 static int write_batch_monitor_in = -1;
102 static int write_batch_monitor_out = -1;
/* State for forwarding local --files-from names to the remote side; see
 * start_filesfrom_forwarding() and forward_filesfrom_data(). */
104 static int ff_forward_fd = -1;
105 static int ff_reenable_multiplex = -1;
106 static char ff_lastchar = '\0';
107 static xbuf ff_xb = EMPTY_XBUF;
109 static xbuf iconv_buf = EMPTY_XBUF;
111 static int select_timeout = SELECT_TIMEOUT;
112 static int active_filecnt = 0; /* transfers in progress; see increment_active_files() */
113 static OFF_T active_bytecnt = 0; /* byte total of the active transfers */
114 static int first_message = 1;
/* Per-leading-byte extra-byte counts, indexed by (byte value)/4 -- presumably
 * used by the variable-length integer reading code (not visible in this
 * chunk; TODO confirm). */
116 static char int_byte_extra[64] = {
117 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (00 - 3F)/4 */
118 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (40 - 7F)/4 */
119 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* (80 - BF)/4 */
120 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6, /* (C0 - FF)/4 */
123 /* Our I/O buffers are sized with no bits on in the lowest byte of the "size"
124 * (indeed, our rounding of sizes in 1024-byte units assures more than this).
125 * This allows the code that is storing bytes near the physical end of a
126 * circular buffer to temporarily reduce the buffer's size (in order to make
127 * some storing idioms easier), while also making it simple to restore the
128 * buffer's actual size when the buffer's "pos" wraps around to the start (we
129 * just round the buffer's size up again). */
/* See the comment above: buffer sizes never have low-byte bits on, so a
 * "reduced" size is detectable and restorable with these two macros. */
131 #define IOBUF_WAS_REDUCED(siz) ((siz) & 0xFF)
132 #define IOBUF_RESTORE_SIZE(siz) (((siz) | 0xFF) + 1)
134 #define IN_MULTIPLEXED (iobuf.in_multiplexed != 0)
135 #define IN_MULTIPLEXED_AND_READY (iobuf.in_multiplexed > 0)
136 #define OUT_MULTIPLEXED (iobuf.out_empty_len != 0)
/* Flag bits for perform_io(). */
138 #define PIO_NEED_INPUT (1<<0) /* The *_NEED_* flags are mutually exclusive. */
139 #define PIO_NEED_OUTROOM (1<<1)
140 #define PIO_NEED_MSGROOM (1<<2)
142 #define PIO_CONSUME_INPUT (1<<4) /* Must be combined with PIO_NEED_INPUT. */
144 #define PIO_INPUT_AND_CONSUME (PIO_NEED_INPUT | PIO_CONSUME_INPUT)
145 #define PIO_NEED_FLAGS (PIO_NEED_INPUT | PIO_NEED_OUTROOM | PIO_NEED_MSGROOM)
/* Prefix/suffix matched by check_for_d_option_error() against remote errors. */
147 #define REMOTE_OPTION_ERROR "rsync: on remote machine: -"
148 #define REMOTE_OPTION_ERROR2 ": unknown option"
150 #define FILESFROM_BUFLEN 2048
/* Status of a finished file-list entry; see got_flist_entry_status(). */
152 enum festatus { FES_SUCCESS, FES_REDO, FES_NO_SEND };
/* Index queues popped by get_redo_num() / get_hlink_num(). */
154 static flist_ndx_list redo_list, hlink_list;
156 static void read_a_msg(void);
157 static void drain_multiplex_messages(void);
158 static void sleep_for_bwlimit(int bytes_written);
/* Exit with RERR_TIMEOUT if no I/O (in either direction) has succeeded within
 * io_timeout seconds; optionally buffer a keep-alive message first. */
160 static void check_timeout(BOOL allow_keepalive, int keepalive_flags)
164 /* On the receiving side, the generator is now the one that decides
165 * when a timeout has occurred. When it is sifting through a lot of
166 * files looking for work, it will be sending keep-alive messages to
167 * the sender, and even though the receiver won't be sending/receiving
168 * anything (not even keep-alive messages), the successful writes to
169 * the sender will keep things going. If the receiver is actively
170 * receiving data, it will ensure that the generator knows that it is
171 * not idle by sending the generator keep-alive messages (since the
172 * generator might be blocked trying to send checksums, it needs to
173 * know that the receiver is active). Thus, as long as one or the
174 * other is successfully doing work, the generator will not timeout. */
180 if (allow_keepalive) {
181 /* This may put data into iobuf.msg w/o flushing. */
182 maybe_send_keepalive(t, keepalive_flags);
/* Compare "now" against the most recent I/O in either direction. */
191 chk = MAX(last_io_out, last_io_in);
192 if (t - chk >= io_timeout) {
195 rprintf(FERROR, "[%s] io timeout after %d seconds -- exiting\n",
196 who_am_i(), (int)(t-chk));
197 exit_cleanup(RERR_TIMEOUT);
201 /* It's almost always an error to get an EOF when we're trying to read from the
202 * network, because the protocol is (for the most part) self-terminating.
204 * There is one case for the receiver when it is at the end of the transfer
205 * (hanging around reading any keep-alive packets that might come its way): if
206 * the sender dies before the generator's kill-signal comes through, we can end
207 * up here needing to loop until the kill-signal arrives. In this situation,
208 * kluge_around_eof will be < 0.
210 * There is another case for older protocol versions (< 24) where the module
211 * listing was not terminated, so we must ignore an EOF error in that case and
212 * exit. In this situation, kluge_around_eof will be > 0. */
/* Report an unexpected EOF and exit (never returns). */
213 static NORETURN void whine_about_eof(BOOL allow_kluge)
215 if (kluge_around_eof && allow_kluge) {
217 if (kluge_around_eof > 0)
219 /* If we're still here after 10 seconds, exit with an error. */
220 for (i = 10*1000/20; i--; )
224 rprintf(FERROR, RSYNC_NAME ": connection unexpectedly closed "
225 "(%s bytes received so far) [%s]\n",
226 big_num(stats.total_read), who_am_i());
228 exit_cleanup(RERR_STREAMIO);
231 /* Do a safe read, handling any needed looping and error handling.
232 * Returns the count of the bytes read, which will only be different
233 * from "len" if we encountered an EOF. This routine is not used on
234 * the socket except very early in the transfer. */
235 static size_t safe_read(int fd, char *buf, size_t len)
/* The multiplexed input fd must go through perform_io(), never here. */
239 assert(fd != iobuf.in_fd);
/* Wait (with timeout) for the fd to become readable. */
250 tv.tv_sec = select_timeout;
253 cnt = select(fd+1, &r_fds, NULL, &e_fds, &tv);
255 if (cnt < 0 && errno == EBADF) {
256 rsyserr(FERROR, errno, "safe_read select failed");
257 exit_cleanup(RERR_FILEIO);
/* select() timed out (or was interrupted) -- enforce io_timeout. */
259 check_timeout(1, MSK_ALLOW_FLUSH);
263 /*if (FD_ISSET(fd, &e_fds))
264 rprintf(FINFO, "select exception on fd %d\n", fd); */
266 if (FD_ISSET(fd, &r_fds)) {
267 ssize_t n = read(fd, buf + got, len - got);
268 if (DEBUG_GTE(IO, 2)) {
269 rprintf(FINFO, "[%s] safe_read(%d)=%" SIZE_T_FMT_MOD "d\n",
270 who_am_i(), fd, (SIZE_T_FMT_CAST)n);
277 rsyserr(FERROR, errno, "safe_read failed to read %" SIZE_T_FMT_MOD "d bytes",
278 (SIZE_T_FMT_CAST)len);
279 exit_cleanup(RERR_STREAMIO);
/* Accumulate partial reads until the full request is satisfied. */
281 if ((got += (size_t)n) == len)
/* Return a short, human-readable description of the given fd for error
 * messages.  Falls through to a static "fd N" string for unknown fds.
 * NOTE(review): the returned strings for the socket/out/batch cases are on
 * lines elided from this chunk -- confirm against the full source. */
289 static const char *what_fd_is(int fd)
293 if (fd == sock_f_out)
295 else if (fd == iobuf.out_fd)
297 else if (fd == batch_fd)
300 snprintf(buf, sizeof buf, "fd %d", fd);
305 /* Do a safe write, handling any needed looping and error handling.
306 * Returns only if everything was successfully written. This routine
307 * is not used on the socket except very early in the transfer. */
308 static void safe_write(int fd, const char *buf, size_t len)
/* The multiplexed output fd must go through perform_io(), never here. */
312 assert(fd != iobuf.out_fd);
/* Optimistic first write: done if it all went out in one call. */
314 n = write(fd, buf, len);
315 if ((size_t)n == len)
318 if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
320 rsyserr(FERROR, errno,
321 "safe_write failed to write %" SIZE_T_FMT_MOD "d bytes to %s",
322 (SIZE_T_FMT_CAST)len, what_fd_is(fd));
323 exit_cleanup(RERR_STREAMIO);
/* Retryable error: wait (with timeout) for the fd to become writable. */
337 tv.tv_sec = select_timeout;
340 cnt = select(fd + 1, NULL, &w_fds, NULL, &tv);
342 if (cnt < 0 && errno == EBADF) {
343 rsyserr(FERROR, errno, "safe_write select failed on %s", what_fd_is(fd));
344 exit_cleanup(RERR_FILEIO);
/* select() timed out -- keep the peer alive while we wait. */
347 maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
351 if (FD_ISSET(fd, &w_fds)) {
352 n = write(fd, buf, len);
364 /* This is only called when files-from data is known to be available. We read
365 * a chunk of data and put it into the output buffer. */
366 static void forward_filesfrom_data(void)
/* Append new bytes after whatever is already pending in ff_xb. */
370 len = read(ff_forward_fd, ff_xb.buf + ff_xb.len, ff_xb.size - ff_xb.len);
372 if (len == 0 || errno != EINTR) {
373 /* Send end-of-file marker */
375 write_buf(iobuf.out_fd, "\0\0", ff_lastchar ? 2 : 1);
/* Re-enable multiplexed output if we turned it off for old protocols. */
377 if (ff_reenable_multiplex >= 0)
378 io_start_multiplex_out(ff_reenable_multiplex);
383 if (DEBUG_GTE(IO, 2)) {
384 rprintf(FINFO, "[%s] files-from read=%" SIZE_T_FMT_MOD "d\n",
385 who_am_i(), (SIZE_T_FMT_CAST)len);
393 char *s = ff_xb.buf + len;
394 /* Transform CR and/or LF into '\0' */
395 while (s-- > ff_xb.buf) {
396 if (*s == '\n' || *s == '\r')
405 /* Last buf ended with a '\0', so don't let this buf start with one. */
406 while (len && *s == '\0')
408 ff_xb.pos = s - ff_xb.buf;
/* Charset-conversion path: iconv each name before sending it. */
412 if (filesfrom_convert && len) {
413 char *sob = ff_xb.buf + ff_xb.pos, *s = sob;
414 char *eob = sob + len;
415 int flags = ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT;
416 if (ff_lastchar == '\0')
418 /* Convert/send each null-terminated string separately, skipping empties. */
421 ff_xb.len = s - sob - 1;
422 add_implied_include(sob);
423 if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0)
424 exit_cleanup(RERR_PROTOCOL); /* impossible? */
425 write_buf(iobuf.out_fd, s-1, 1); /* Send the '\0'. */
426 while (s != eob && *s == '\0')
429 ff_xb.pos = sob - ff_xb.buf;
434 if ((ff_xb.len = s - sob) == 0)
437 /* Handle a partial string specially, saving any incomplete chars. */
438 flags &= ~ICB_INCLUDE_INCOMPLETE;
439 if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0) {
441 exit_cleanup(RERR_PROTOCOL); /* impossible? */
/* Shift the unconverted remainder to the front of the buffer. */
443 memmove(ff_xb.buf, ff_xb.buf + ff_xb.pos, ff_xb.len);
445 ff_lastchar = 'x'; /* Anything non-zero. */
/* Non-conversion path: send names as-is after squeezing '\0' runs. */
451 char *f = ff_xb.buf + ff_xb.pos;
455 /* Eliminate any multi-'\0' runs. */
457 if (!(*t++ = *f++)) {
458 add_implied_include(cur);
460 while (f != eob && *f == '\0')
465 if ((len = t - ff_xb.buf) != 0) {
466 /* This will not circle back to perform_io() because we only get
467 * called when there is plenty of room in the output buffer. */
468 write_buf(iobuf.out_fd, ff_xb.buf, len);
/* Temporarily shrink an iobuf's recorded size (e.g. so a 4-byte multiplex
 * header never straddles the physical end of the circular buffer).  The
 * reduction is undone by restore_iobuf_size() when the buffer's pos wraps;
 * see the IOBUF_WAS_REDUCED/IOBUF_RESTORE_SIZE comment above. */
473 void reduce_iobuf_size(xbuf *out, size_t new_size)
475 if (new_size < out->size) {
476 /* Avoid weird buffer interactions by only outputting this to stderr. */
477 if (msgs2stderr == 1 && DEBUG_GTE(IO, 4)) {
478 const char *name = out == &iobuf.out ? "iobuf.out"
479 : out == &iobuf.msg ? "iobuf.msg"
482 rprintf(FINFO, "[%s] reduced size of %s (-%d)\n",
483 who_am_i(), name, (int)(out->size - new_size));
486 out->size = new_size;
/* Undo a prior reduce_iobuf_size(): if the recorded size has low-byte bits
 * off-pattern (IOBUF_WAS_REDUCED), round it back up to the true size. */
490 void restore_iobuf_size(xbuf *out)
492 if (IOBUF_WAS_REDUCED(out->size)) {
493 size_t new_size = IOBUF_RESTORE_SIZE(out->size);
494 /* Avoid weird buffer interactions by only outputting this to stderr. */
495 if (msgs2stderr == 1 && DEBUG_GTE(IO, 4)) {
496 const char *name = out == &iobuf.out ? "iobuf.out"
497 : out == &iobuf.msg ? "iobuf.msg"
500 rprintf(FINFO, "[%s] restored size of %s (+%d)\n",
501 who_am_i(), name, (int)(new_size - out->size));
504 out->size = new_size;
508 static void handle_kill_signal(BOOL flush_ok)
510 got_kill_signal = -1;
511 flush_ok_after_signal = flush_ok;
512 exit_cleanup(RERR_SIGNAL);
515 /* Perform buffered input and/or output until specified conditions are met.
516 * When given a "needed" read or write request, this returns without doing any
517 * I/O if the needed input bytes or write space is already available. Once I/O
518 * is needed, this will try to do whatever reading and/or writing is currently
519 * possible, up to the maximum buffer allowances, no matter if this is a read
520 * or write request. However, the I/O stops as soon as the required input
521 * bytes or output space is available. If this is not a read request, the
522 * routine may also do some advantageous reading of messages from a multiplexed
523 * input source (which ensures that we don't jam up with everyone in their
524 * "need to write" code and nobody reading the accumulated data that would make
527 * The iobuf.in, .out and .msg buffers are all circular. Callers need to be
528 * aware that some data copies will need to be split when the bytes wrap around
529 * from the end to the start. In order to help make writing into the output
530 * buffers easier for some operations (such as the use of SIVAL() into the
531 * buffer) a buffer may be temporarily shortened by a small amount, but the
532 * original size will be automatically restored when the .pos wraps to the
533 * start. See also the 3 raw_* iobuf vars that are used in the handling of
534 * MSG_DATA bytes as they are read-from/written-into the buffers.
536 * When writing, we flush data in the following priority order:
538 * 1. Finish writing any in-progress MSG_DATA sequence from iobuf.out.
540 * 2. Write out all the messages from the message buf (if iobuf.msg is active).
541 * Yes, this means that a PIO_NEED_OUTROOM call will completely flush any
542 * messages before getting to the iobuf.out flushing (except for rule 1).
544 * 3. Write out the raw data from iobuf.out, possibly filling in the multiplexed
545 * MSG_DATA header that was pre-allocated (when output is multiplexed).
547 * TODO: items for possible future work:
549 * - Make this routine able to read the generator-to-receiver batch flow?
551 * Unlike the old routines that this replaces, it is OK to read ahead as far as
552 * we can because the read_a_msg() routine now reads its bytes out of the input
553 * buffer. In the old days, only raw data was in the input buffer, and any
554 * unused raw data in the buf would prevent the reading of socket data. */
/* Core buffered-I/O engine (see the large comment above).  Returns a pointer
 * into iobuf.in at the position of the needed bytes when input was requested
 * (and, with PIO_CONSUME_INPUT, advances past them). */
555 static char *perform_io(size_t needed, int flags)
557 fd_set r_fds, e_fds, w_fds;
560 size_t empty_buf_len = 0;
/* An empty in-buffer can be rewound to pos 0 to simplify the math below. */
564 if (iobuf.in.len == 0 && iobuf.in.pos != 0) {
565 if (iobuf.raw_input_ends_before)
566 iobuf.raw_input_ends_before -= iobuf.in.pos;
/* Sanity-check the request against the (never-resized) buffer sizes. */
570 switch (flags & PIO_NEED_FLAGS) {
572 /* We never resize the circular input buffer. */
573 if (iobuf.in.size < needed) {
574 rprintf(FERROR, "need to read %" SIZE_T_FMT_MOD "d bytes,"
575 " iobuf.in.buf is only %" SIZE_T_FMT_MOD "d bytes.\n",
576 (SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)iobuf.in.size);
577 exit_cleanup(RERR_PROTOCOL);
580 if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) {
581 rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d, %sinput)\n",
582 who_am_i(), (SIZE_T_FMT_CAST)needed, flags & PIO_CONSUME_INPUT ? "consume&" : "");
586 case PIO_NEED_OUTROOM:
587 /* We never resize the circular output buffer. */
588 if (iobuf.out.size - iobuf.out_empty_len < needed) {
589 fprintf(stderr, "need to write %" SIZE_T_FMT_MOD "d bytes,"
590 " iobuf.out.buf is only %" SIZE_T_FMT_MOD "d bytes.\n",
591 (SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)(iobuf.out.size - iobuf.out_empty_len));
592 exit_cleanup(RERR_PROTOCOL);
595 if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) {
596 rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d,"
597 " outroom) needs to flush %" SIZE_T_FMT_MOD "d\n",
598 who_am_i(), (SIZE_T_FMT_CAST)needed,
599 iobuf.out.len + needed > iobuf.out.size
600 ? (SIZE_T_FMT_CAST)(iobuf.out.len + needed - iobuf.out.size) : (SIZE_T_FMT_CAST)0);
604 case PIO_NEED_MSGROOM:
605 /* We never resize the circular message buffer. */
606 if (iobuf.msg.size < needed) {
607 fprintf(stderr, "need to write %" SIZE_T_FMT_MOD "d bytes,"
608 " iobuf.msg.buf is only %" SIZE_T_FMT_MOD "d bytes.\n",
609 (SIZE_T_FMT_CAST)needed, (SIZE_T_FMT_CAST)iobuf.msg.size);
610 exit_cleanup(RERR_PROTOCOL);
613 if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) {
614 rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d,"
615 " msgroom) needs to flush %" SIZE_T_FMT_MOD "d\n",
616 who_am_i(), (SIZE_T_FMT_CAST)needed,
617 iobuf.msg.len + needed > iobuf.msg.size
618 ? (SIZE_T_FMT_CAST)(iobuf.msg.len + needed - iobuf.msg.size) : (SIZE_T_FMT_CAST)0);
623 if (msgs2stderr == 1 && DEBUG_GTE(IO, 3)) {
624 rprintf(FINFO, "[%s] perform_io(%" SIZE_T_FMT_MOD "d, %d)\n",
625 who_am_i(), (SIZE_T_FMT_CAST)needed, flags);
630 exit_cleanup(RERR_UNSUPPORTED);
/* Main loop: first check if the need is already satisfied. */
634 switch (flags & PIO_NEED_FLAGS) {
636 if (iobuf.in.len >= needed)
639 case PIO_NEED_OUTROOM:
640 /* Note that iobuf.out_empty_len doesn't factor into this check
641 * because iobuf.out.len already holds any needed header len. */
642 if (iobuf.out.len + needed <= iobuf.out.size)
645 case PIO_NEED_MSGROOM:
646 if (iobuf.msg.len + needed <= iobuf.msg.size)
/* Build the select() fd sets: input fd (if room), files-from fd, output fd. */
655 if (iobuf.in_fd >= 0 && iobuf.in.size - iobuf.in.len) {
656 if (!read_batch || batch_fd >= 0) {
657 FD_SET(iobuf.in_fd, &r_fds);
658 FD_SET(iobuf.in_fd, &e_fds);
660 if (iobuf.in_fd > max_fd)
661 max_fd = iobuf.in_fd;
664 /* Only do more filesfrom processing if there is enough room in the out buffer. */
665 if (ff_forward_fd >= 0 && iobuf.out.size - iobuf.out.len > FILESFROM_BUFLEN*2) {
666 FD_SET(ff_forward_fd, &r_fds);
667 if (ff_forward_fd > max_fd)
668 max_fd = ff_forward_fd;
672 if (iobuf.out_fd >= 0) {
/* Raw data has flush priority over messages (see the comment above). */
673 if (iobuf.raw_flushing_ends_before
674 || (!iobuf.msg.len && iobuf.out.len > iobuf.out_empty_len && !(flags & PIO_NEED_MSGROOM))) {
675 if (OUT_MULTIPLEXED && !iobuf.raw_flushing_ends_before) {
676 /* The iobuf.raw_flushing_ends_before value can point off the end
677 * of the iobuf.out buffer for a while, for easier subtracting. */
678 iobuf.raw_flushing_ends_before = iobuf.out.pos + iobuf.out.len;
/* Fill in the pre-allocated MSG_DATA header for the pending raw bytes. */
680 SIVAL(iobuf.out.buf + iobuf.raw_data_header_pos, 0,
681 ((MPLEX_BASE + (int)MSG_DATA)<<24) + iobuf.out.len - 4);
683 if (msgs2stderr == 1 && DEBUG_GTE(IO, 1)) {
684 rprintf(FINFO, "[%s] send_msg(%d, %" SIZE_T_FMT_MOD "d)\n",
685 who_am_i(), (int)MSG_DATA, (SIZE_T_FMT_CAST)iobuf.out.len - 4);
688 /* reserve room for the next MSG_DATA header */
689 iobuf.raw_data_header_pos = iobuf.raw_flushing_ends_before;
690 if (iobuf.raw_data_header_pos >= iobuf.out.size)
691 iobuf.raw_data_header_pos -= iobuf.out.size;
692 else if (iobuf.raw_data_header_pos + 4 > iobuf.out.size) {
693 /* The 4-byte header won't fit at the end of the buffer,
694 * so we'll temporarily reduce the output buffer's size
695 * and put the header at the start of the buffer. */
696 reduce_iobuf_size(&iobuf.out, iobuf.raw_data_header_pos);
697 iobuf.raw_data_header_pos = 0;
699 /* Yes, it is possible for this to make len > size for a while. */
703 empty_buf_len = iobuf.out_empty_len;
705 } else if (iobuf.msg.len) {
711 FD_SET(iobuf.out_fd, &w_fds);
712 if (iobuf.out_fd > max_fd)
713 max_fd = iobuf.out_fd;
/* No usable fds at all: report why we cannot make progress. */
719 switch (flags & PIO_NEED_FLAGS) {
722 if (kluge_around_eof == 2)
724 if (iobuf.in_fd == -2)
725 whine_about_eof(True);
726 rprintf(FERROR, "error in perform_io: no fd for input.\n");
727 exit_cleanup(RERR_PROTOCOL);
728 case PIO_NEED_OUTROOM:
729 case PIO_NEED_MSGROOM:
731 drain_multiplex_messages();
732 if (iobuf.out_fd == -2)
733 whine_about_eof(True);
734 rprintf(FERROR, "error in perform_io: no fd for output.\n");
735 exit_cleanup(RERR_PROTOCOL);
737 /* No stated needs, so I guess this is OK. */
743 if (got_kill_signal > 0)
744 handle_kill_signal(True);
/* Use any idle time to push out more of an incremental file list. */
746 if (extra_flist_sending_enabled) {
747 if (file_total - file_old_total < MAX_FILECNT_LOOKAHEAD && IN_MULTIPLEXED_AND_READY)
750 extra_flist_sending_enabled = False;
751 tv.tv_sec = select_timeout;
754 tv.tv_sec = select_timeout;
757 cnt = select(max_fd + 1, &r_fds, &w_fds, &e_fds, &tv);
760 if (cnt < 0 && errno == EBADF) {
762 exit_cleanup(RERR_SOCKETIO);
764 if (extra_flist_sending_enabled) {
765 extra_flist_sending_enabled = False;
766 send_extra_file_list(sock_f_out, -1);
767 extra_flist_sending_enabled = !flist_eof;
/* select() produced nothing ready: enforce the io timeout. */
769 check_timeout((flags & PIO_NEED_INPUT) != 0, 0);
770 FD_ZERO(&r_fds); /* Just in case... */
/* Read whatever input is available into the circular in-buffer. */
774 if (iobuf.in_fd >= 0 && FD_ISSET(iobuf.in_fd, &r_fds)) {
775 size_t len, pos = iobuf.in.pos + iobuf.in.len;
777 if (pos >= iobuf.in.size) {
778 pos -= iobuf.in.size;
779 len = iobuf.in.size - iobuf.in.len;
781 len = iobuf.in.size - pos;
782 if ((n = read(iobuf.in_fd, iobuf.in.buf + pos, len)) <= 0) {
784 /* Signal that input has become invalid. */
785 if (!read_batch || batch_fd < 0 || am_generator)
790 if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN)
793 /* Don't write errors on a dead socket. */
794 if (iobuf.in_fd == sock_f_in) {
797 rsyserr(FERROR_SOCKET, errno, "read error");
799 rsyserr(FERROR, errno, "read error");
800 exit_cleanup(RERR_SOCKETIO);
803 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) {
804 rprintf(FINFO, "[%s] recv=%" SIZE_T_FMT_MOD "d\n",
805 who_am_i(), (SIZE_T_FMT_CAST)n);
809 last_io_in = time(NULL);
810 if (io_timeout && flags & PIO_NEED_INPUT)
811 maybe_send_keepalive(last_io_in, 0);
813 stats.total_read += n;
/* Honor a --stop-at/--stop-after deadline. */
818 if (stop_at_utime && time(NULL) >= stop_at_utime) {
819 rprintf(FERROR, "stopping at requested limit\n");
820 exit_cleanup(RERR_TIMEOUT);
/* Write out pending data from whichever buffer "out" points at. */
823 if (out && FD_ISSET(iobuf.out_fd, &w_fds)) {
824 size_t len = iobuf.raw_flushing_ends_before ? iobuf.raw_flushing_ends_before - out->pos : out->len;
827 if (bwlimit_writemax && len > bwlimit_writemax)
828 len = bwlimit_writemax;
830 if (out->pos + len > out->size)
831 len = out->size - out->pos;
832 if ((n = write(iobuf.out_fd, out->buf + out->pos, len)) <= 0) {
833 if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN)
836 /* Don't write errors on a dead socket. */
839 iobuf.out.len = iobuf.msg.len = iobuf.raw_flushing_ends_before = 0;
840 rsyserr(FERROR_SOCKET, errno, "write error");
841 drain_multiplex_messages();
842 exit_cleanup(RERR_SOCKETIO);
845 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) {
846 rprintf(FINFO, "[%s] %s sent=%" SIZE_T_FMT_MOD "d\n",
847 who_am_i(), out == &iobuf.out ? "out" : "msg", (SIZE_T_FMT_CAST)n);
851 last_io_out = time(NULL);
852 stats.total_written += n;
854 if (bwlimit_writemax)
855 sleep_for_bwlimit(n);
/* Advance pos/len, handling buffer wrap and size restoration. */
857 if ((out->pos += n) == out->size) {
858 if (iobuf.raw_flushing_ends_before)
859 iobuf.raw_flushing_ends_before -= out->size;
861 restore_iobuf_size(out);
862 } else if (out->pos == iobuf.raw_flushing_ends_before)
863 iobuf.raw_flushing_ends_before = 0;
864 if ((out->len -= n) == empty_buf_len) {
866 restore_iobuf_size(out);
868 iobuf.raw_data_header_pos = 0;
872 if (got_kill_signal > 0)
873 handle_kill_signal(True);
875 /* We need to help prevent deadlock by doing what reading
876 * we can whenever we are here trying to write. */
877 if (IN_MULTIPLEXED_AND_READY && !(flags & PIO_NEED_INPUT)) {
878 while (!iobuf.raw_input_ends_before && iobuf.in.len > 512)
880 if (flist_receiving_enabled && iobuf.in.len > 512)
881 wait_for_receiver(); /* generator only */
884 if (ff_forward_fd >= 0 && FD_ISSET(ff_forward_fd, &r_fds)) {
885 /* This can potentially flush all output and enable
886 * multiplexed output, so keep this last in the loop
887 * and be sure to not cache anything that would break
889 forward_filesfrom_data();
894 if (got_kill_signal > 0)
895 handle_kill_signal(True);
/* The needed bytes start here; optionally consume them for the caller. */
897 data = iobuf.in.buf + iobuf.in.pos;
899 if (flags & PIO_CONSUME_INPUT) {
900 iobuf.in.len -= needed;
901 iobuf.in.pos += needed;
902 if (iobuf.in.pos == iobuf.raw_input_ends_before)
903 iobuf.raw_input_ends_before = 0;
904 if (iobuf.in.pos >= iobuf.in.size) {
905 iobuf.in.pos -= iobuf.in.size;
906 if (iobuf.raw_input_ends_before)
907 iobuf.raw_input_ends_before -= iobuf.in.size;
/* Copy "len" bytes from the input stream into "buf", splitting the memcpy
 * when the data wraps around the end of the circular iobuf.in buffer. */
914 static void raw_read_buf(char *buf, size_t len)
916 size_t pos = iobuf.in.pos;
917 char *data = perform_io(len, PIO_INPUT_AND_CONSUME);
/* pos wrapped past the buffer end: copy the tail piece, then the head. */
918 if (iobuf.in.pos <= pos && len) {
919 size_t siz = len - iobuf.in.pos;
920 memcpy(buf, data, siz);
921 memcpy(buf + siz, iobuf.in.buf, iobuf.in.pos);
923 memcpy(buf, data, len);
/* Read a 4-byte integer from the input stream (decoded via IVAL), using the
 * in-buffer bytes directly when they don't wrap around the circular buffer. */
926 static int32 raw_read_int(void)
929 if (iobuf.in.size - iobuf.in.pos >= 4)
930 data = perform_io(4, PIO_INPUT_AND_CONSUME)
932 raw_read_buf(data = buf, 4);
933 return IVAL(data, 0);
/* Keep reading (and discarding) input until the peer kills us -- used at the
 * end of a transfer while waiting for the generator's kill signal. */
936 void noop_io_until_death(void)
/* Nothing to do if I/O is not fully set up, or we're already kluging EOF. */
940 if (!iobuf.in.buf || !iobuf.out.buf || iobuf.in_fd < 0 || iobuf.out_fd < 0 || kluge_around_eof)
943 /* If we're talking to a daemon over a socket, don't short-circuit this logic */
944 if (msgs2stderr && daemon_connection >= 0)
/* Mark EOF as expected from here on (see whine_about_eof()). */
947 kluge_around_eof = 2;
948 /* Setting an I/O timeout ensures that if something inexplicably weird
949 * happens, we won't hang around forever. */
954 read_buf(iobuf.in_fd, buf, sizeof buf);
957 /* Buffer a message for the multiplexed output stream. Is not used for (normal) MSG_DATA. */
958 int send_msg(enum msgcode code, const char *buf, size_t len, int convert)
962 BOOL want_debug = DEBUG_GTE(IO, 1) && convert >= 0 && (msgs2stderr == 1 || code != MSG_INFO);
964 if (!OUT_MULTIPLEXED)
968 rprintf(FINFO, "[%s] send_msg(%d, %" SIZE_T_FMT_MOD "d)\n",
969 who_am_i(), (int)code, (SIZE_T_FMT_CAST)len);
972 /* When checking for enough free space for this message, we need to
973 * make sure that there is space for the 4-byte header, plus we'll
974 * assume that we may waste up to 3 bytes (if the header doesn't fit
975 * at the physical end of the buffer). */
977 if (convert > 0 && ic_send == (iconv_t)-1)
980 /* Ensuring double-size room leaves space for maximal conversion expansion. */
981 needed = len*2 + 4 + 3;
984 needed = len + 4 + 3;
/* Make room: either flush via perform_io() or grow the msg buffer. */
985 if (iobuf.msg.len + needed > iobuf.msg.size) {
987 perform_io(needed, PIO_NEED_MSGROOM);
988 else { /* We sometimes allow the iobuf.msg size to increase to avoid a deadlock. */
989 size_t old_size = iobuf.msg.size;
990 restore_iobuf_size(&iobuf.msg);
991 realloc_xbuf(&iobuf.msg, iobuf.msg.size * 2);
/* Un-wrap any bytes that had wrapped past the old physical end. */
992 if (iobuf.msg.pos + iobuf.msg.len > old_size)
993 memcpy(iobuf.msg.buf + old_size, iobuf.msg.buf, iobuf.msg.pos + iobuf.msg.len - old_size);
997 pos = iobuf.msg.pos + iobuf.msg.len; /* Must be set after any flushing. */
998 if (pos >= iobuf.msg.size)
999 pos -= iobuf.msg.size;
1000 else if (pos + 4 > iobuf.msg.size) {
1001 /* The 4-byte header won't fit at the end of the buffer,
1002 * so we'll temporarily reduce the message buffer's size
1003 * and put the header at the start of the buffer. */
1004 reduce_iobuf_size(&iobuf.msg, pos);
1007 hdr = iobuf.msg.buf + pos;
1009 iobuf.msg.len += 4; /* Allocate room for the coming header bytes. */
/* Conversion path: iconv the payload straight into the circular msg buf. */
1015 INIT_XBUF(inbuf, (char*)buf, len, (size_t)-1);
1017 len = iobuf.msg.len;
1018 iconvbufs(ic_send, &inbuf, &iobuf.msg,
1019 ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT | ICB_INIT);
1020 if (inbuf.len > 0) {
1021 rprintf(FERROR, "overflowed iobuf.msg buffer in send_msg");
1022 exit_cleanup(RERR_UNSUPPORTED);
/* Recompute len as the (possibly expanded) converted byte count. */
1024 len = iobuf.msg.len - len;
1030 if ((pos += 4) == iobuf.msg.size)
1033 /* Handle a split copy if we wrap around the end of the circular buffer. */
1034 if (pos >= iobuf.msg.pos && (siz = iobuf.msg.size - pos) < len) {
1035 memcpy(iobuf.msg.buf + pos, buf, siz);
1036 memcpy(iobuf.msg.buf, buf + siz, len - siz);
1038 memcpy(iobuf.msg.buf + pos, buf, len);
1040 iobuf.msg.len += len;
/* Now that the final length is known, fill in the 4-byte multiplex header. */
1043 SIVAL(hdr, 0, ((MPLEX_BASE + (int)code)<<24) + len);
1045 if (want_debug && convert > 0) {
1046 rprintf(FINFO, "[%s] converted msg len=%" SIZE_T_FMT_MOD "d\n",
1047 who_am_i(), (SIZE_T_FMT_CAST)len);
1053 void send_msg_int(enum msgcode code, int num)
1057 if (DEBUG_GTE(IO, 1))
1058 rprintf(FINFO, "[%s] send_msg_int(%d, %d)\n", who_am_i(), (int)code, num);
1060 SIVAL(numbuf, 0, num);
1061 send_msg(code, numbuf, 4, -1);
/* Account for a file-list entry that the receiver has finished with: update
 * the active counts, notify the sender (for --remove-source-files), queue any
 * hard-link follow-up work, or queue a redo. */
1064 static void got_flist_entry_status(enum festatus status, int ndx)
1066 struct file_list *flist = flist_for_ndx(ndx, "got_flist_entry_status");
1068 if (remove_source_files) {
1070 active_bytecnt -= F_LENGTH(flist->files[ndx - flist->ndx_start]);
1074 flist->in_progress--;
/* Tell the sender it may remove its copy of this file. */
1078 if (remove_source_files)
1079 send_msg_int(MSG_SUCCESS, ndx);
1082 #ifdef SUPPORT_HARD_LINKS
1083 if (preserve_hard_links) {
1084 struct file_struct *file = flist->files[ndx - flist->ndx_start];
1085 if (F_IS_HLINKED(file)) {
1086 if (status == FES_NO_SEND)
1087 flist_ndx_push(&hlink_list, -2); /* indicates a failure follows */
1088 flist_ndx_push(&hlink_list, ndx);
1090 flist->in_progress++;
1098 flist->in_progress++;
/* FES_REDO: queue the index for another transfer attempt. */
1103 flist_ndx_push(&redo_list, ndx);
1108 /* Note the fds used for the main socket (which might really be a pipe
1109 * for a local transfer, but we can ignore that). */
1110 void io_set_sock_fds(int f_in, int f_out)
/* Set the I/O timeout to "secs" and derive select_timeout from it: half the
 * timeout (rounded up), capped at SELECT_TIMEOUT, or SELECT_TIMEOUT when no
 * timeout is in effect.  NOTE(review): the assignment of io_timeout itself
 * is on a line elided from this chunk. */
1116 void set_io_timeout(int secs)
1119 allowed_lull = (io_timeout + 1) / 2;
1121 if (!io_timeout || allowed_lull > SELECT_TIMEOUT)
1122 select_timeout = SELECT_TIMEOUT;
1124 select_timeout = allowed_lull;
/* If a remote "unknown option" error looks like it came from a pre-2.6.4
 * rsync choking on an option set that includes -d, suggest --old-d. */
1130 static void check_for_d_option_error(const char *msg)
1132 static char rsync263_opts[] = "BCDHIKLPRSTWabceghlnopqrtuvxz";
/* Bail unless the message matches the REMOTE_OPTION_ERROR pattern. */
1137 || strncmp(msg, REMOTE_OPTION_ERROR, sizeof REMOTE_OPTION_ERROR - 1) != 0)
1140 msg += sizeof REMOTE_OPTION_ERROR - 1;
1141 if (*msg == '-' || (colon = strchr(msg, ':')) == NULL
1142 || strncmp(colon, REMOTE_OPTION_ERROR2, sizeof REMOTE_OPTION_ERROR2 - 1) != 0)
/* Every short option before the colon must be one rsync 2.6.3 knew. */
1145 for ( ; *msg != ':'; msg++) {
1148 else if (*msg == 'e')
1150 else if (strchr(rsync263_opts, *msg) == NULL)
1155 rprintf(FWARNING, "*** Try using \"--old-d\" if remote rsync is <= 2.6.3 ***\n");
1159 /* This is used by the generator to limit how many file transfers can
1160 * be active at once when --remove-source-files is specified. Without
1161 * this, sender-side deletions were mostly happening at the end. */
1162 void increment_active_files(int ndx, int itemizing, enum logcode code)
1165 /* TODO: tune these limits? */
/* Allow fewer concurrent files once a lot of bytes are in flight. */
1166 int limit = active_bytecnt >= 128*1024 ? 10 : 50;
1167 if (active_filecnt < limit)
1169 check_for_finished_files(itemizing, code, 0);
1170 if (active_filecnt < limit)
/* Still at the limit: block until the receiver reports progress. */
1172 wait_for_receiver();
1176 active_bytecnt += F_LENGTH(cur_flist->files[ndx - cur_flist->ndx_start]);
1179 int get_redo_num(void)
1181 return flist_ndx_pop(&redo_list);
1184 int get_hlink_num(void)
1186 return flist_ndx_pop(&hlink_list);
1189 /* When we're the receiver and we have a local --files-from list of names
1190 * that needs to be sent over the socket to the sender, we have to do two
1191 * things at the same time: send the sender a list of what files we're
1192 * processing and read the incoming file+info list from the sender. We do
1193 * this by making recv_file_list() call forward_filesfrom_data(), which
1194 * will ensure that we forward data to the sender until we get some data
1195 * for recv_file_list() to use. */
1196 void start_filesfrom_forwarding(int fd)
1198 if (protocol_version < 31 && OUT_MULTIPLEXED) {
1199 /* Older protocols send the files-from data w/o packaging
1200 * it in multiplexed I/O packets, so temporarily switch
1201 * to buffered I/O to match this behavior. */
1202 iobuf.msg.pos = iobuf.msg.len = 0; /* Be extra sure no messages go out. */
1203 ff_reenable_multiplex = io_end_multiplex_out(MPLX_TO_BUFFERED);
/* Allocate the staging buffer used by forward_filesfrom_data(). */
1207 alloc_xbuf(&ff_xb, FILESFROM_BUFLEN);
1210 /* Read a line into the "buf" buffer. */
/* Read one line from fd into buf (at most bufsiz-1 chars + NUL).
 * Flags: RL_EOL_NULLS treats '\0' as the line terminator instead of
 * CR/LF; RL_DUMP_COMMENTS skips blank lines and '#'/';' comment lines;
 * RL_CONVERT runs the line through the ic_recv iconv conversion.
 * NOTE(review): return-value semantics are on lines elided from this
 * view — presumably non-zero on success, 0 on EOF; confirm. */
1211 int read_line(int fd, char *buf, size_t bufsiz, int flags)
1216 if (flags & RL_CONVERT && iconv_buf.size < bufsiz)
1217 realloc_xbuf(&iconv_buf, ROUND_UP_1024(bufsiz) + 1024);
/* When converting, accumulate raw bytes in iconv_buf first. */
1222 s = flags & RL_CONVERT ? iconv_buf.buf : buf;
1226 eob = s + bufsiz - 1;
1228 /* We avoid read_byte() for files because files can return an EOF. */
1229 if (fd == iobuf.in_fd)
1231 else if (safe_read(fd, &ch, 1) == 0)
1233 if (flags & RL_EOL_NULLS ? ch == '\0' : (ch == '\r' || ch == '\n')) {
1234 /* Skip empty lines if dumping comments. */
1235 if (flags & RL_DUMP_COMMENTS && s == buf)
1244 if (flags & RL_DUMP_COMMENTS && (*buf == '#' || *buf == ';'))
1248 if (flags & RL_CONVERT) {
1250 INIT_XBUF(outbuf, buf, 0, bufsiz);
1252 iconv_buf.len = s - iconv_buf.buf;
1253 iconvbufs(ic_recv, &iconv_buf, &outbuf,
1254 ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_INIT);
1255 outbuf.buf[outbuf.len] = '\0';
/* Read a remote command's argument list from f_in, one arg per line (or
 * NUL-terminated when rl_nulls is set), building *argv_p/*argc_p.  In
 * daemon mode (mod_name set) args are glob-expanded per-module.  When
 * request_p is non-NULL, the first ~1K of the args are also joined into
 * a space-separated request string for logging. */
1263 void read_args(int f_in, char *mod_name, char *buf, size_t bufsiz, int rl_nulls,
1264 char ***argv_p, int *argc_p, char **request_p)
1266 int maxargs = MAX_ARGS;
1267 int dot_pos = 0, argc = 0, request_len = 0;
1269 int rl_flags = (rl_nulls ? RL_EOL_NULLS : 0);
/* Convert incoming args via iconv only for protected-args daemon use. */
1272 rl_flags |= (protect_args && ic_recv != (iconv_t)-1 ? RL_CONVERT : 0);
1275 argv = new_array(char *, maxargs);
1276 if (mod_name && !protect_args)
1277 argv[argc++] = "rsyncd";
/* An empty line/NUL terminates the argument list. */
1283 if (read_line(f_in, buf, bufsiz, rl_flags) == 0)
1286 if (argc == maxargs-1) {
1287 maxargs += MAX_ARGS;
1288 argv = realloc_array(argv, char *, maxargs);
/* Cap the logged request string at 1024 chars. */
1292 if (request_p && request_len < 1024) {
1293 int len = strlen(buf);
1295 request_p[0][request_len++] = ' ';
1296 *request_p = realloc_array(*request_p, char, request_len + len + 1);
1297 memcpy(*request_p + request_len, buf, len + 1);
1301 glob_expand_module(mod_name, buf, &argv, &argc, &maxargs);
1303 glob_expand(buf, &argv, &argc, &maxargs);
/* Remember a lone "." arg position — TODO(review) confirm use; the
 * consuming code is on lines not visible here. */
1307 if (*p == '.' && p[1] == '\0')
/* Tell the glob code to free its cached state. */
1313 glob_expand(NULL, NULL, NULL, NULL);
/* Enable buffered output on f_out, allocating iobuf.out if needed.  If a
 * buffer already exists it must be bound to this same fd (or to none). */
1319 BOOL io_start_buffering_out(int f_out)
1321 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
1322 rprintf(FINFO, "[%s] io_start_buffering_out(%d)\n", who_am_i(), f_out);
1324 if (iobuf.out.buf) {
1325 if (iobuf.out_fd == -1)
1326 iobuf.out_fd = f_out;
1328 assert(f_out == iobuf.out_fd);
/* Output buffer is double-sized relative to the input buffer. */
1332 alloc_xbuf(&iobuf.out, ROUND_UP_1024(IO_BUFFER_SIZE * 2));
1333 iobuf.out_fd = f_out;
/* Enable buffered input on f_in, allocating iobuf.in if needed.  Mirrors
 * io_start_buffering_out() for the read side. */
1338 BOOL io_start_buffering_in(int f_in)
1340 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
1341 rprintf(FINFO, "[%s] io_start_buffering_in(%d)\n", who_am_i(), f_in);
1344 if (iobuf.in_fd == -1)
1347 assert(f_in == iobuf.in_fd);
1351 alloc_xbuf(&iobuf.in, ROUND_UP_1024(IO_BUFFER_SIZE));
/* Stop buffered input: free the input buffer when free_buffers is set,
 * otherwise just reset its position and length for reuse. */
1357 void io_end_buffering_in(BOOL free_buffers)
1359 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) {
1360 rprintf(FINFO, "[%s] io_end_buffering_in(IOBUF_%s_BUFS)\n",
1361 who_am_i(), free_buffers ? "FREE" : "KEEP");
1365 free_xbuf(&iobuf.in);
1367 iobuf.in.pos = iobuf.in.len = 0;
/* Stop buffered output: flush everything pending, then (when free_buffers
 * is set) release both the data and message buffers. */
1372 void io_end_buffering_out(BOOL free_buffers)
1374 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2)) {
1375 rprintf(FINFO, "[%s] io_end_buffering_out(IOBUF_%s_BUFS)\n",
1376 who_am_i(), free_buffers ? "FREE" : "KEEP");
1379 io_flush(FULL_FLUSH);
1382 free_xbuf(&iobuf.out);
1383 free_xbuf(&iobuf.msg);
/* After the file list has been fully sent, flush pending output either when
 * it's important or when nothing has been written for 5+ seconds. */
1389 void maybe_flush_socket(int important)
1391 if (flist_eof && iobuf.out.buf && iobuf.out.len > iobuf.out_empty_len
1392 && (important || time(NULL) - last_io_out >= 5))
1393 io_flush(NORMAL_FLUSH);
1396 /* Older rsync versions used to send either a MSG_NOOP (protocol 30) or a
1397 * raw-data-based keep-alive (protocol 29), both of which implied forwarding of
1398 * the message through the sender. Since the new timeout method does not need
1399 * any forwarding, we just send an empty MSG_DATA message, which works with all
1400 * rsync versions. This avoids any message forwarding, and leaves the raw-data
1401 * stream alone (since we can never be quite sure if that stream is in the
1402 * right state for a keep-alive message). */
/* Possibly send a keep-alive (an empty MSG_DATA — see comment above) when
 * the output side has been idle for allowed_lull seconds.  MSK_ACTIVE_RECEIVER
 * fudges last_io_in for a busy receiver; MSK_ALLOW_FLUSH lets this routine
 * push the data out itself instead of leaving it buffered for the caller. */
1403 void maybe_send_keepalive(time_t now, int flags)
1405 if (flags & MSK_ACTIVE_RECEIVER)
1406 last_io_in = now; /* Fudge things when we're working hard on the files. */
1408 /* Early in the transfer (before the receiver forks) the receiving side doesn't
1409 * care if it hasn't sent data in a while as long as it is receiving data (in
1410 * fact, a pre-3.1.0 rsync would die if we tried to send it a keep alive during
1411 * this time). So, if we're an early-receiving proc, just return and let the
1412 * incoming data determine if we timeout. */
1413 if (!am_sender && !am_receiver && !am_generator)
1416 if (now - last_io_out >= allowed_lull) {
1417 /* The receiver is special: it only sends keep-alive messages if it is
1418 * actively receiving data. Otherwise, it lets the generator timeout. */
1419 if (am_receiver && now - last_io_in >= io_timeout)
/* Only queue a keep-alive when no other output is already pending. */
1422 if (!iobuf.msg.len && iobuf.out.len == iobuf.out_empty_len)
1423 send_msg(MSG_DATA, "", 0, 0);
1424 if (!(flags & MSK_ALLOW_FLUSH)) {
1425 /* Let the caller worry about writing out the data. */
1426 } else if (iobuf.msg.len)
1427 perform_io(iobuf.msg.size - iobuf.msg.len + 1, PIO_NEED_MSGROOM);
1428 else if (iobuf.out.len > iobuf.out_empty_len)
1429 io_flush(NORMAL_FLUSH);
/* Begin forwarding incoming file-list data to iobuf.out_fd (see the
 * forward_flist_data checks in read_buf()), announcing the dir index. */
1433 void start_flist_forward(int ndx)
1435 write_int(iobuf.out_fd, ndx);
1436 forward_flist_data = 1;
/* Stop forwarding file-list data that start_flist_forward() enabled. */
1439 void stop_flist_forward(void)
1441 forward_flist_data = 0;
1444 /* Read a message from a multiplexed source. */
1444 /* Read a message from a multiplexed source. */
/* Reads one tag+length header, then dispatches on the MSG_* tag.  Each
 * branch is careful to restore iobuf.in_multiplexed to 1 only after all
 * of the message's payload has been consumed (see the comment below). */
1445 static void read_a_msg(void)
1447 char data[BIGPATHBUFLEN];
1451 /* This ensures that perform_io() does not try to do any message reading
1452 * until we've read all of the data for this message. We should also
1453 * try to avoid calling things that will cause data to be written via
1454 * perform_io() prior to this being reset to 1. */
1455 iobuf.in_multiplexed = -1;
/* Header: low 24 bits are the payload length, high byte is MPLEX_BASE+tag. */
1457 tag = raw_read_int();
1459 msg_bytes = tag & 0xFFFFFF;
1460 tag = (tag >> 24) - MPLEX_BASE;
1462 if (msgs2stderr == 1 && DEBUG_GTE(IO, 1)) {
1463 rprintf(FINFO, "[%s] got msg=%d, len=%" SIZE_T_FMT_MOD "d\n",
1464 who_am_i(), (int)tag, (SIZE_T_FMT_CAST)msg_bytes);
/* MSG_DATA (presumably — the case label is on an elided line): mark where
 * the raw data ends; the payload itself is consumed by normal reads. */
1469 assert(iobuf.raw_input_ends_before == 0);
1470 /* Though this does not yet read the data, we do mark where in
1471 * the buffer the msg data will end once it is read. It is
1472 * possible that this points off the end of the buffer, in
1473 * which case the gradual reading of the input stream will
1474 * cause this value to wrap around and eventually become real. */
1476 iobuf.raw_input_ends_before = iobuf.in.pos + msg_bytes;
1477 iobuf.in_multiplexed = 1;
/* Stats message: only valid for the generator, sized as total_read. */
1480 if (msg_bytes != sizeof stats.total_read || !am_generator)
1482 raw_read_buf((char*)&stats.total_read, sizeof stats.total_read);
1483 iobuf.in_multiplexed = 1;
/* Redo request: a 4-byte index, generator only. */
1486 if (msg_bytes != 4 || !am_generator)
1488 val = raw_read_int();
1489 iobuf.in_multiplexed = 1;
1490 got_flist_entry_status(FES_REDO, val);
/* IO-error count: read and relay onward. */
1495 val = raw_read_int();
1496 iobuf.in_multiplexed = 1;
1499 send_msg_int(MSG_IO_ERROR, val);
1501 case MSG_IO_TIMEOUT:
1502 if (msg_bytes != 4 || am_server || am_generator)
1504 val = raw_read_int();
1505 iobuf.in_multiplexed = 1;
/* Adopt the server's (smaller) timeout if we have none or a larger one. */
1506 if (!io_timeout || io_timeout > val) {
1507 if (INFO_GTE(MISC, 2))
1508 rprintf(FINFO, "Setting --timeout=%d to match server\n", val);
1509 set_io_timeout(val);
1513 /* Support protocol-30 keep-alive method. */
1516 iobuf.in_multiplexed = 1;
1518 maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
/* Deletion notice: bounded copy of the name, relayed onward. */
1521 if (msg_bytes >= sizeof data)
1524 raw_read_buf(data, msg_bytes);
1525 iobuf.in_multiplexed = 1;
1526 send_msg(MSG_DELETED, data, msg_bytes, 1);
/* With charset conversion active, recode the deleted name from the
 * receive charset to the send charset as it passes through. */
1530 if (ic_recv != (iconv_t)-1) {
1534 int flags = ICB_INCLUDE_BAD | ICB_INIT;
1536 INIT_CONST_XBUF(outbuf, data);
1537 INIT_XBUF(inbuf, ibuf, 0, (size_t)-1);
1540 size_t len = msg_bytes > sizeof ibuf - inbuf.len ? sizeof ibuf - inbuf.len : msg_bytes;
1541 raw_read_buf(ibuf + inbuf.len, len);
/* Strip a trailing NUL before converting; re-add it afterwards. */
1544 if (!(msg_bytes -= len) && !ibuf[inbuf.len-1])
1545 inbuf.len--, add_null = 1;
1546 if (iconvbufs(ic_send, &inbuf, &outbuf, flags) < 0) {
1549 /* Buffer ended with an incomplete char, so move the
1550 * bytes to the start of the buffer and continue. */
1551 memmove(ibuf, ibuf + inbuf.pos, inbuf.len);
1556 if (outbuf.len == outbuf.size)
1558 outbuf.buf[outbuf.len++] = '\0'
1560 msg_bytes = outbuf.len;
1563 raw_read_buf(data, msg_bytes);
1564 iobuf.in_multiplexed = 1;
1565 /* A directory name was sent with the trailing null */
1566 if (msg_bytes > 0 && !data[msg_bytes-1])
1567 log_delete(data, S_IFDIR);
1569 data[msg_bytes] = '\0';
1570 log_delete(data, S_IFREG);
/* Success/no-send notices: 4-byte index, validated strictly. */
1574 if (msg_bytes != 4) {
1576 rprintf(FERROR, "invalid multi-message %d:%lu [%s%s]\n",
1577 tag, (unsigned long)msg_bytes, who_am_i(),
1578 inc_recurse ? "/inc" : "");
1579 exit_cleanup(RERR_STREAMIO);
1581 val = raw_read_int();
1582 iobuf.in_multiplexed = 1;
1584 got_flist_entry_status(FES_SUCCESS, val);
1586 successful_send(val);
1591 val = raw_read_int();
1592 iobuf.in_multiplexed = 1;
1594 got_flist_entry_status(FES_NO_SEND, val);
1596 send_msg_int(MSG_NO_SEND, val);
1598 case MSG_ERROR_SOCKET:
1599 case MSG_ERROR_UTF8:
1604 if (tag == MSG_ERROR_SOCKET)
1609 case MSG_ERROR_XFER:
/* Error/log text: must fit in the local buffer. */
1611 if (msg_bytes >= sizeof data) {
1614 "multiplexing overflow %d:%lu [%s%s]\n",
1615 tag, (unsigned long)msg_bytes, who_am_i(),
1616 inc_recurse ? "/inc" : "");
1617 exit_cleanup(RERR_STREAMIO);
1619 raw_read_buf(data, msg_bytes);
1620 /* We don't set in_multiplexed value back to 1 before writing this message
1621 * because the write might loop back and read yet another message, over and
1622 * over again, while waiting for room to put the message in the msg buffer. */
1623 rwrite((enum logcode)tag, data, msg_bytes, !am_generator);
1624 iobuf.in_multiplexed = 1;
1625 if (first_message) {
1626 if (list_only && !am_sender && tag == 1 && msg_bytes < sizeof data) {
1627 data[msg_bytes] = '\0';
1628 check_for_d_option_error(data);
1633 case MSG_ERROR_EXIT:
1635 val = raw_read_int();
1636 else if (msg_bytes == 0)
1640 iobuf.in_multiplexed = 1;
1641 if (DEBUG_GTE(EXIT, 3)) {
1642 rprintf(FINFO, "[%s] got MSG_ERROR_EXIT with %" SIZE_T_FMT_MOD "d bytes\n",
1643 who_am_i(), (SIZE_T_FMT_CAST)msg_bytes);
/* A zero-length exit message is relayed onward by intermediate procs;
 * a 4-byte one carries the exit code (protocol >= 31). */
1645 if (msg_bytes == 0) {
1646 if (!am_sender && !am_generator) {
1647 if (DEBUG_GTE(EXIT, 3)) {
1648 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
1651 send_msg(MSG_ERROR_EXIT, "", 0, 0);
1652 io_flush(FULL_FLUSH);
1654 } else if (protocol_version >= 31) {
1655 if (am_generator || am_receiver) {
1656 if (DEBUG_GTE(EXIT, 3)) {
1657 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT with exit_code %d\n",
1660 send_msg_int(MSG_ERROR_EXIT, val);
1662 if (DEBUG_GTE(EXIT, 3)) {
1663 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
1666 send_msg(MSG_ERROR_EXIT, "", 0, 0);
1669 /* Send a negative linenum so that we don't end up
1670 * with a duplicate exit message. */
1671 _exit_cleanup(val, __FILE__, 0 - __LINE__);
1673 rprintf(FERROR, "unexpected tag %d [%s%s]\n",
1674 tag, who_am_i(), inc_recurse ? "/inc" : "");
1675 exit_cleanup(RERR_STREAMIO);
1678 assert(iobuf.in_multiplexed > 0);
/* Consume any already-buffered multiplexed input, discarding pending raw
 * data (advancing pos around the circular buffer) so the remaining bytes
 * can be parsed as messages. */
1681 static void drain_multiplex_messages(void)
1683 while (IN_MULTIPLEXED_AND_READY && iobuf.in.len) {
1684 if (iobuf.raw_input_ends_before) {
1685 size_t raw_len = iobuf.raw_input_ends_before - iobuf.in.pos;
1686 iobuf.raw_input_ends_before = 0;
1687 if (raw_len >= iobuf.in.len) {
1691 iobuf.in.len -= raw_len;
/* Wrap pos around the circular input buffer. */
1692 if ((iobuf.in.pos += raw_len) >= iobuf.in.size)
1693 iobuf.in.pos -= iobuf.in.size;
/* Generator-side wait: block until the receiver sends something, then
 * handle it — either raw data (a dir index followed by an incremental
 * file list) or out-of-band messages.  NOTE(review): the surrounding
 * control flow includes lines elided from this view. */
1699 void wait_for_receiver(void)
1701 if (!iobuf.raw_input_ends_before)
1704 if (iobuf.raw_input_ends_before) {
1705 int ndx = read_int(iobuf.in_fd);
/* A negative index presumably flags flist EOF — confirm against full source. */
1710 if (DEBUG_GTE(FLIST, 3))
1711 rprintf(FINFO, "[%s] flist_eof=1\n", who_am_i());
1717 exit_cleanup(RERR_STREAMIO);
1720 struct file_list *flist;
/* Disable recursive flist receiving while recv_file_list() runs. */
1721 flist_receiving_enabled = False;
1722 if (DEBUG_GTE(FLIST, 2)) {
1723 rprintf(FINFO, "[%s] receiving flist for dir %d\n",
1726 flist = recv_file_list(iobuf.in_fd, ndx);
1727 flist->parent_ndx = ndx;
1728 #ifdef SUPPORT_HARD_LINKS
1729 if (preserve_hard_links)
1730 match_hard_links(flist);
1732 flist_receiving_enabled = True;
/* Read a 16-bit unsigned value sent in little-endian byte order. */
1737 unsigned short read_shortint(int f)
1741 return (UVAL(b, 1) << 8) + UVAL(b, 0);
/* Read a little-endian 32-bit int, sign-extending it on platforms where
 * int32 is wider than 4 bytes. */
1744 int32 read_int(int f)
1751 #if SIZEOF_INT32 > 4
1752 if (num & (int32)0x80000000)
1753 num |= ~(int32)0xffffffff;
/* Read a variable-length int: the first byte's high bits (via the
 * int_byte_extra table) say how many extra bytes follow, and its low bits
 * contribute the most-significant payload bits. */
1758 int32 read_varint(int f)
1769 extra = int_byte_extra[ch / 4];
1771 uchar bit = ((uchar)1<<(8-extra));
1772 if (extra >= (int)sizeof u.b) {
1773 rprintf(FERROR, "Overflow in read_varint()\n");
1774 exit_cleanup(RERR_STREAMIO);
1776 read_buf(f, u.b, extra);
/* Mask off the length-marker bits to recover the payload from byte 0. */
1777 u.b[extra] = ch & (bit-1);
1780 #if CAREFUL_ALIGNMENT
1783 #if SIZEOF_INT32 > 4
1784 if (u.x & (int32)0x80000000)
1785 u.x |= ~(int32)0xffffffff;
/* Read a variable-length int64: min_bytes are always present; the first
 * byte's high bits (int_byte_extra) indicate how many extra bytes follow.
 * On platforms without a real 64-bit type, values needing more than 32
 * bits abort with RERR_UNSUPPORTED. */
1790 int64 read_varlong(int f, uchar min_bytes)
1799 #if SIZEOF_INT64 < 8
1804 read_buf(f, b2, min_bytes);
1805 memcpy(u.b, b2+1, min_bytes-1);
1806 extra = int_byte_extra[CVAL(b2, 0) / 4];
1808 uchar bit = ((uchar)1<<(8-extra));
1809 if (min_bytes + extra > (int)sizeof u.b) {
1810 rprintf(FERROR, "Overflow in read_varlong()\n");
1811 exit_cleanup(RERR_STREAMIO);
1813 read_buf(f, u.b + min_bytes - 1, extra);
/* Recover the payload bits from the first byte, minus the length marker. */
1814 u.b[min_bytes + extra - 1] = CVAL(b2, 0) & (bit-1);
1815 #if SIZEOF_INT64 < 8
1816 if (min_bytes + extra > 5 || u.b[4] || CVAL(u.b,3) & 0x80) {
1817 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
1818 exit_cleanup(RERR_UNSUPPORTED);
1822 u.b[min_bytes + extra - 1] = CVAL(b2, 0);
1823 #if SIZEOF_INT64 < 8
1825 #elif CAREFUL_ALIGNMENT
1826 u.x = IVAL64(u.b,0);
/* Read the old-style long int encoding: a 32-bit value, with 0xffffffff
 * escaping to a full 8-byte little-endian value (unsupported when int64
 * is only 32 bits wide). */
1831 int64 read_longint(int f)
1833 #if SIZEOF_INT64 >= 8
1836 int32 num = read_int(f);
1838 if (num != (int32)0xffffffff)
1841 #if SIZEOF_INT64 < 8
1842 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
1843 exit_cleanup(RERR_UNSUPPORTED);
1846 return IVAL(b,0) | (((int64)IVAL(b,4))<<32);
1850 /* Debugging note: this will be named read_buf_() when using an external zlib. */
/* Read len bytes from f into buf.  Non-iobuf fds use safe_read() directly;
 * the multiplexed input stream consumes data in chunks bounded by the
 * current message's raw_input_ends_before marker.  Data may be forwarded
 * to the sender (flist forwarding) and/or captured to a batch file. */
1851 void read_buf(int f, char *buf, size_t len)
1853 if (f != iobuf.in_fd) {
1854 if (safe_read(f, buf, len) != len)
1855 whine_about_eof(False); /* Doesn't return. */
1859 if (!IN_MULTIPLEXED) {
1860 raw_read_buf(buf, len);
1861 total_data_read += len;
1862 if (forward_flist_data)
1863 write_buf(iobuf.out_fd, buf, len);
1865 if (f == write_batch_monitor_in)
1866 safe_write(batch_fd, buf, len);
/* Wait until a data message makes raw input available. */
1873 while (!iobuf.raw_input_ends_before)
1876 siz = MIN(len, iobuf.raw_input_ends_before - iobuf.in.pos);
1877 if (siz >= iobuf.in.size)
1878 siz = iobuf.in.size;
1879 raw_read_buf(buf, siz);
1880 total_data_read += siz;
1882 if (forward_flist_data)
1883 write_buf(iobuf.out_fd, buf, siz);
1885 if (f == write_batch_monitor_in)
1886 safe_write(batch_fd, buf, siz);
1888 if ((len -= siz) == 0)
/* Read len bytes as a string (the s-variant presumably NUL-terminates
 * buf[len] on a line elided from this view — confirm). */
1894 void read_sbuf(int f, char *buf, size_t len)
1896 read_buf(f, buf, len);
/* Read and return a single unsigned byte. */
1900 uchar read_byte(int f)
1903 read_buf(f, (char*)&c, 1);
/* Read a length-prefixed string: a 1-byte length, or 2 bytes when the
 * first byte has the high bit set (len = low7bits*256 + next byte).
 * Over-long strings (len >= bufsize) are a fatal protocol error. */
1907 int read_vstring(int f, char *buf, int bufsize)
1909 int len = read_byte(f);
1912 len = (len & ~0x80) * 0x100 + read_byte(f);
1914 if (len >= bufsize) {
1915 rprintf(FERROR, "over-long vstring received (%d > %d)\n",
1921 read_buf(f, buf, len);
1926 /* Populate a sum_struct with values from the socket. This is
1927 * called by both the sender and the receiver. */
1928 void read_sum_head(int f, struct sum_struct *sum)
/* Every field is range-checked; a bad value is a fatal protocol error
 * (these limits guard against a hostile/corrupt peer). */
1930 int32 max_blength = protocol_version < 30 ? OLD_MAX_BLOCK_SIZE : MAX_BLOCK_SIZE;
1931 sum->count = read_int(f);
1932 if (sum->count < 0) {
1933 rprintf(FERROR, "Invalid checksum count %ld [%s]\n",
1934 (long)sum->count, who_am_i());
1935 exit_cleanup(RERR_PROTOCOL);
1937 sum->blength = read_int(f);
1938 if (sum->blength < 0 || sum->blength > max_blength) {
1939 rprintf(FERROR, "Invalid block length %ld [%s]\n",
1940 (long)sum->blength, who_am_i());
1941 exit_cleanup(RERR_PROTOCOL);
/* s2length was fixed at csum_length before protocol 27. */
1943 sum->s2length = protocol_version < 27 ? csum_length : (int)read_int(f);
1944 if (sum->s2length < 0 || sum->s2length > MAX_DIGEST_LEN) {
1945 rprintf(FERROR, "Invalid checksum length %d [%s]\n",
1946 sum->s2length, who_am_i());
1947 exit_cleanup(RERR_PROTOCOL);
1949 sum->remainder = read_int(f);
1950 if (sum->remainder < 0 || sum->remainder > sum->blength) {
1951 rprintf(FERROR, "Invalid remainder length %ld [%s]\n",
1952 (long)sum->remainder, who_am_i());
1953 exit_cleanup(RERR_PROTOCOL);
1957 /* Send the values from a sum_struct over the socket. Set sum to
1958 * NULL if there are no checksums to send. This is called by both
1959 * the generator and the sender. */
1960 void write_sum_head(int f, struct sum_struct *sum)
/* A NULL sum sends the all-zero null_sum header instead. */
1962 static struct sum_struct null_sum;
1967 write_int(f, sum->count);
1968 write_int(f, sum->blength);
/* s2length is only transmitted from protocol 27 on (see read_sum_head). */
1969 if (protocol_version >= 27)
1970 write_int(f, sum->s2length);
1971 write_int(f, sum->remainder);
1974 /* Sleep after writing to limit I/O bandwidth usage.
1976 * @todo Rather than sleeping after each write, it might be better to
1977 * use some kind of averaging. The current algorithm seems to always
1978 * use a bit less bandwidth than specified, because it doesn't make up
1979 * for slow periods. But arguably this is a feature. In addition, we
1980 * ought to take the time used to write the data into account.
1982 * During some phases of big transfers (file FOO is uptodate) this is
1983 * called with a small bytes_written every time. As the kernel has to
1984 * round small waits up to guarantee that we actually wait at least the
1985 * requested number of microseconds, this can become grossly inaccurate.
1986 * We therefore keep track of the bytes we've written over time and only
1987 * sleep when the accumulated delay is at least 1 tenth of a second. */
1988 static void sleep_for_bwlimit(int bytes_written)
1990 static struct timeval prior_tv;
1991 static long total_written = 0;
1992 struct timeval tv, start_tv;
1993 long elapsed_usec, sleep_usec;
1995 #define ONE_SEC 1000000L /* # of microseconds in a second */
1997 total_written += bytes_written;
1999 gettimeofday(&start_tv, NULL);
/* Credit back the bytes allowed by bwlimit over the elapsed interval. */
2000 if (prior_tv.tv_sec) {
2001 elapsed_usec = (start_tv.tv_sec - prior_tv.tv_sec) * ONE_SEC
2002 + (start_tv.tv_usec - prior_tv.tv_usec);
2003 total_written -= (int64)elapsed_usec * bwlimit / (ONE_SEC/1024);
2004 if (total_written < 0)
2008 sleep_usec = total_written * (ONE_SEC/1024) / bwlimit;
/* Don't bother sleeping for less than 1/10 second (see comment above). */
2009 if (sleep_usec < ONE_SEC / 10) {
2010 prior_tv = start_tv;
2014 tv.tv_sec = sleep_usec / ONE_SEC;
2015 tv.tv_usec = sleep_usec % ONE_SEC;
2016 select(0, NULL, NULL, NULL, &tv);
/* Carry over any over/under-sleep into the next call's accounting. */
2018 gettimeofday(&prior_tv, NULL);
2019 elapsed_usec = (prior_tv.tv_sec - start_tv.tv_sec) * ONE_SEC
2020 + (prior_tv.tv_usec - start_tv.tv_usec);
2021 total_written = (sleep_usec - elapsed_usec) * bwlimit / (ONE_SEC/1024);
/* Flush buffered output.  FULL_FLUSH empties the output buffer entirely;
 * NORMAL_FLUSH guarantees at least one byte goes out; otherwise only the
 * message buffer is flushed (MSG_FLUSH). */
2024 void io_flush(int flush_type)
2026 if (iobuf.out.len > iobuf.out_empty_len) {
2027 if (flush_type == FULL_FLUSH) /* flush everything in the output buffers */
2028 perform_io(iobuf.out.size - iobuf.out_empty_len, PIO_NEED_OUTROOM);
2029 else if (flush_type == NORMAL_FLUSH) /* flush at least 1 byte */
2030 perform_io(iobuf.out.size - iobuf.out.len + 1, PIO_NEED_OUTROOM);
2031 /* MSG_FLUSH: flush iobuf.msg only */
2034 perform_io(iobuf.msg.size, PIO_NEED_MSGROOM);
/* Write a 16-bit value in little-endian byte order (high byte in b[1]). */
2037 void write_shortint(int f, unsigned short x)
2041 b[1] = (char)(x >> 8);
/* Write a 32-bit int to the connection (little-endian, matching read_int —
 * the body is on lines elided from this view). */
2045 void write_int(int f, int32 x)
/* Write a 32-bit value in the variable-length encoding read_varint()
 * decodes: trailing zero bytes are dropped and the byte count is encoded
 * in the high bits of the first byte. */
2052 void write_varint(int f, int32 x)
2060 for (cnt = 4; cnt > 1 && b[cnt] == 0; cnt--) {}
2061 bit = ((uchar)1<<(7-cnt+1));
/* If the top byte collides with the length-marker bits, emit one more byte. */
2063 if (CVAL(b, cnt) >= bit) {
2067 *b = b[cnt] | ~(bit*2-1);
2071 write_buf(f, b, cnt);
/* Write a 64-bit value in variable-length form with at least min_bytes
 * bytes (decoded by read_varlong()).  On 32-bit-int64 builds, values that
 * don't fit are a fatal RERR_UNSUPPORTED error. */
2074 void write_varlong(int f, int64 x, uchar min_bytes)
2080 #if SIZEOF_INT64 >= 8
2084 if (x <= 0x7FFFFFFF && x >= 0)
2085 memset(b + 5, 0, 4)
2087 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
2088 exit_cleanup(RERR_UNSUPPORTED);
2092 while (cnt > min_bytes && b[cnt] == 0)
2094 bit = ((uchar)1<<(7-cnt+min_bytes));
/* If the top byte collides with the length-marker bits, emit one more byte. */
2095 if (CVAL(b, cnt) >= bit) {
2098 } else if (cnt > min_bytes)
2099 *b = b[cnt] | ~(bit*2-1);
2103 write_buf(f, b, cnt);
2107 * Note: int64 may actually be a 32-bit type if ./configure couldn't find any
2108 * 64-bit types on this platform.
2110 void write_longint(int f, int64 x)
2112 char b[12], * const s = b+4;
/* Values fitting in 31 bits go out as a plain int; larger values are
 * escaped (0xffffffff marker, per read_longint) followed by 8 bytes. */
2115 if (x <= 0x7FFFFFFF && x >= 0) {
2120 #if SIZEOF_INT64 < 8
2121 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
2122 exit_cleanup(RERR_UNSUPPORTED);
2125 SIVAL(s, 4, x >> 32);
2126 write_buf(f, b, 12);
/* Write a large buffer in half-output-buffer chunks so that no single
 * write_buf() call can monopolize the output buffer. */
2130 void write_bigbuf(int f, const char *buf, size_t len)
2132 size_t half_max = (iobuf.out.size - iobuf.out_empty_len) / 2;
2134 while (len > half_max + 1024) {
2135 write_buf(f, buf, half_max);
2140 write_buf(f, buf, len);
/* Queue len bytes for output.  Non-iobuf fds are written directly via
 * safe_write(); the iobuf fd appends into the circular output buffer,
 * flushing via perform_io() first if the data wouldn't fit.  Also mirrors
 * the data to the batch file when batch monitoring is active. */
2143 void write_buf(int f, const char *buf, size_t len)
2147 if (f != iobuf.out_fd) {
2148 safe_write(f, buf, len);
2152 if (iobuf.out.len + len > iobuf.out.size)
2153 perform_io(len, PIO_NEED_OUTROOM);
2155 pos = iobuf.out.pos + iobuf.out.len; /* Must be set after any flushing. */
2156 if (pos >= iobuf.out.size)
2157 pos -= iobuf.out.size;
2159 /* Handle a split copy if we wrap around the end of the circular buffer. */
2160 if (pos >= iobuf.out.pos && (siz = iobuf.out.size - pos) < len) {
2161 memcpy(iobuf.out.buf + pos, buf, siz);
2162 memcpy(iobuf.out.buf, buf + siz, len - siz);
2164 memcpy(iobuf.out.buf + pos, buf, len);
2166 iobuf.out.len += len;
2167 total_data_written += len;
2170 if (f == write_batch_monitor_out)
2171 safe_write(batch_fd, buf, len);
2174 /* Write a string to the connection */
/* Write a NUL-terminated string (without the NUL) to the connection. */
2175 void write_sbuf(int f, const char *buf)
2177 write_buf(f, buf, strlen(buf));
/* Write a single byte to the connection. */
2180 void write_byte(int f, uchar c)
2182 write_buf(f, (char *)&c, 1);
/* Write a length-prefixed string: 1-byte length, or 2 bytes with the high
 * bit set on the first for lengths >= 0x80 (decoded by read_vstring()).
 * Over-long strings are a fatal protocol error. */
2185 void write_vstring(int f, const char *str, int len)
2187 uchar lenbuf[3], *lb = lenbuf;
2192 "attempting to send over-long vstring (%d > %d)\n",
2194 exit_cleanup(RERR_PROTOCOL);
2196 *lb++ = len / 0x100 + 0x80;
2200 write_buf(f, (char*)lenbuf, lb - lenbuf + 1);
2202 write_buf(f, str, len);
2205 /* Send a file-list index using a byte-reduction method. */
2206 void write_ndx(int f, int32 ndx)
/* Deltas are computed against the previous positive (or negative) index
 * so that sequential indexes usually fit in a single byte. */
2208 static int32 prev_positive = -1, prev_negative = 1;
2209 int32 diff, cnt = 0;
/* Old protocols (and batch replay) use a plain 4-byte int. */
2212 if (protocol_version < 30 || read_batch) {
2217 /* Send NDX_DONE as a single-byte 0 with no side effects. Send
2218 * negative nums as a positive after sending a leading 0xFF. */
2220 diff = ndx - prev_positive;
2221 prev_positive = ndx;
2222 } else if (ndx == NDX_DONE) {
2227 b[cnt++] = (char)0xFF;
2229 diff = ndx - prev_negative;
2230 prev_negative = ndx;
2233 /* A diff of 1 - 253 is sent as a one-byte diff; a diff of 254 - 32767
2234 * or 0 is sent as a 0xFE + a two-byte diff; otherwise we send 0xFE
2235 * & all 4 bytes of the (non-negative) num with the high-bit set. */
2236 if (diff < 0xFE && diff > 0)
2237 b[cnt++] = (char)diff;
2238 else if (diff < 0 || diff > 0x7FFF) {
2239 b[cnt++] = (char)0xFE;
2240 b[cnt++] = (char)((ndx >> 24) | 0x80);
2241 b[cnt++] = (char)ndx;
2242 b[cnt++] = (char)(ndx >> 8);
2243 b[cnt++] = (char)(ndx >> 16);
2245 b[cnt++] = (char)0xFE;
2246 b[cnt++] = (char)(diff >> 8);
2247 b[cnt++] = (char)diff;
2249 write_buf(f, b, cnt);
2252 /* Receive a file-list index using a byte-reduction method. */
2253 int32 read_ndx(int f)
/* Mirrors write_ndx(): deltas are applied to the previous positive or
 * negative index; a leading 0xFF selects the negative sequence. */
2255 static int32 prev_positive = -1, prev_negative = 1;
2256 int32 *prev_ptr, num;
/* Old protocols use a plain 4-byte int. */
2259 if (protocol_version < 30)
2263 if (CVAL(b, 0) == 0xFF) {
2265 prev_ptr = &prev_negative;
2266 } else if (CVAL(b, 0) == 0)
2269 prev_ptr = &prev_positive;
/* 0xFE escapes to either a 2-byte diff or (high bit set) a full 4-byte num. */
2270 if (CVAL(b, 0) == 0xFE) {
2272 if (CVAL(b, 0) & 0x80) {
2273 b[3] = CVAL(b, 0) & ~0x80;
2275 read_buf(f, b+1, 2);
2278 num = (UVAL(b,0)<<8) + UVAL(b,1) + *prev_ptr;
2280 num = UVAL(b, 0) + *prev_ptr;
/* Negative-sequence values are returned negated. */
2282 if (prev_ptr == &prev_negative)
2287 /* Read a line of up to bufsiz-1 characters into buf. Strips
2288 * the (required) trailing newline and all carriage returns.
2289 * Returns 1 for success; 0 for I/O error or truncation. */
2290 int read_line_old(int fd, char *buf, size_t bufsiz, int eof_ok)
/* Only usable on non-multiplexed fds, hence the assertion. */
2292 assert(fd != iobuf.in_fd);
2293 bufsiz--; /* leave room for the null */
2294 while (bufsiz > 0) {
/* NOTE(review): EOF/eof_ok handling continues on lines elided from this view. */
2295 if (safe_read(fd, buf, 1) == 0) {
/* printf-style write to fd: format into a local buffer and send it as a
 * string.  Formatting failure or truncation is a fatal protocol error. */
2313 void io_printf(int fd, const char *format, ...)
2316 char buf[BIGPATHBUFLEN];
2319 va_start(ap, format);
2320 len = vsnprintf(buf, sizeof buf, format, ap);
2324 exit_cleanup(RERR_PROTOCOL);
2326 if (len >= (int)sizeof buf) {
2327 rprintf(FERROR, "io_printf() was too long for the buffer.\n");
2328 exit_cleanup(RERR_PROTOCOL);
2331 write_sbuf(fd, buf);
2334 /* Setup for multiplexing a MSG_* stream with the data stream. */
2335 void io_start_multiplex_out(int fd)
/* Flush any pre-multiplex output before switching modes. */
2337 io_flush(FULL_FLUSH);
2339 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
2340 rprintf(FINFO, "[%s] io_start_multiplex_out(%d)\n", who_am_i(), fd);
2343 alloc_xbuf(&iobuf.msg, ROUND_UP_1024(IO_BUFFER_SIZE));
/* Reserve 4 bytes at the front of the out buffer for the data header. */
2345 iobuf.out_empty_len = 4; /* See also OUT_MULTIPLEXED */
2346 io_start_buffering_out(fd);
2347 got_kill_signal = 0;
2349 iobuf.raw_data_header_pos = iobuf.out.pos + iobuf.out.len;
2353 /* Setup for multiplexing a MSG_* stream with the data stream. */
2354 void io_start_multiplex_in(int fd)
2356 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
2357 rprintf(FINFO, "[%s] io_start_multiplex_in(%d)\n", who_am_i(), fd);
2359 iobuf.in_multiplexed = 1; /* See also IN_MULTIPLEXED */
2360 io_start_buffering_in(fd);
/* Turn off multiplexed input.  Returns the fd that was multiplexed, or -1
 * if multiplexing wasn't active.  MPLX_SWITCHING discards any pending raw
 * marker; MPLX_TO_BUFFERED keeps the input buffering in place. */
2363 int io_end_multiplex_in(int mode)
2365 int ret = iobuf.in_multiplexed ? iobuf.in_fd : -1;
2367 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
2368 rprintf(FINFO, "[%s] io_end_multiplex_in(mode=%d)\n", who_am_i(), mode);
2370 iobuf.in_multiplexed = 0;
2371 if (mode == MPLX_SWITCHING)
2372 iobuf.raw_input_ends_before = 0;
2374 assert(iobuf.raw_input_ends_before == 0);
2375 if (mode != MPLX_TO_BUFFERED)
2376 io_end_buffering_in(mode);
/* Turn off multiplexed output.  Returns the fd that was multiplexed, or -1
 * if multiplexing wasn't active.  Flushes everything first, then disarms
 * the kill-signal deferral (handling a signal that arrived meanwhile). */
2381 int io_end_multiplex_out(int mode)
2383 int ret = iobuf.out_empty_len ? iobuf.out_fd : -1;
2385 if (msgs2stderr == 1 && DEBUG_GTE(IO, 2))
2386 rprintf(FINFO, "[%s] io_end_multiplex_out(mode=%d)\n", who_am_i(), mode);
2388 if (mode != MPLX_TO_BUFFERED)
2389 io_end_buffering_out(mode);
2391 io_flush(FULL_FLUSH);
2394 iobuf.out_empty_len = 0;
2395 if (got_kill_signal > 0) /* Just in case... */
2396 handle_kill_signal(False);
2397 got_kill_signal = -1;
2402 void start_write_batch(int fd)
2404 /* Some communication has already taken place, but we don't
2405 * enable batch writing until here so that we can write a
2406 * canonical record of the communication even though the
2407 * actual communication so far depends on whether a daemon
/* Record the negotiated session parameters at the head of the batch file. */
2409 write_int(batch_fd, protocol_version);
2410 if (protocol_version >= 30)
2411 write_varint(batch_fd, compat_flags);
2412 write_int(batch_fd, checksum_seed);
/* Mirror subsequent traffic on fd into the batch (out vs in monitor —
 * the selecting condition is on a line elided from this view). */
2415 write_batch_monitor_out = fd;
2417 write_batch_monitor_in = fd;
/* Stop mirroring traffic into the batch file. */
2420 void stop_write_batch(void)
2422 write_batch_monitor_out = -1;
2423 write_batch_monitor_in = -1;