2 * Socket and pipe I/O utilities used in rsync.
4 * Copyright (C) 1996-2001 Andrew Tridgell
5 * Copyright (C) 1996 Paul Mackerras
6 * Copyright (C) 2001, 2002 Martin Pool <mbp@samba.org>
7 * Copyright (C) 2003-2013 Wayne Davison
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 3 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, visit the http://fsf.org website.
23 /* Rsync provides its own multiplexing system, which is used to send
24 * stderr and stdout over a single socket.
26 * For historical reasons this is off during the start of the
27 * connection, but it's switched on quite early using
28 * io_start_multiplex_out() and io_start_multiplex_in(). */
34 /** If no timeout is specified then use a 60 second select timeout */
35 #define SELECT_TIMEOUT 60
38 extern size_t bwlimit_writemax;
39 extern int io_timeout;
42 extern int am_receiver;
43 extern int am_generator;
44 extern int msgs2stderr;
45 extern int inc_recurse;
49 extern int file_total;
50 extern int file_old_total;
52 extern int read_batch;
53 extern int compat_flags;
54 extern int protect_args;
55 extern int checksum_seed;
56 extern int protocol_version;
57 extern int remove_source_files;
58 extern int preserve_hard_links;
59 extern BOOL extra_flist_sending_enabled;
60 extern BOOL flush_ok_after_signal;
61 extern struct stats stats;
62 extern struct file_list *cur_flist;
64 extern int filesfrom_convert;
65 extern iconv_t ic_send, ic_recv;
68 int csum_length = SHORT_SUM_LENGTH; /* initial value */
72 int forward_flist_data = 0;
73 BOOL flist_receiving_enabled = False;
75 /* Ignore an EOF error if non-zero. See whine_about_eof(). */
76 int kluge_around_eof = 0;
77 int got_kill_signal = -1; /* is set to 0 only after multiplexed I/O starts */
82 int64 total_data_read = 0;
83 int64 total_data_written = 0;
88 int out_fd; /* Both "out" and "msg" go to this fd. */
90 unsigned out_empty_len;
91 size_t raw_data_header_pos; /* in the out xbuf */
92 size_t raw_flushing_ends_before; /* in the out xbuf */
93 size_t raw_input_ends_before; /* in the in xbuf */
94 } iobuf = { .in_fd = -1, .out_fd = -1 };
96 static time_t last_io_in;
97 static time_t last_io_out;
99 static int write_batch_monitor_in = -1;
100 static int write_batch_monitor_out = -1;
102 static int ff_forward_fd = -1;
103 static int ff_reenable_multiplex = -1;
104 static char ff_lastchar = '\0';
105 static xbuf ff_xb = EMPTY_XBUF;
107 static xbuf iconv_buf = EMPTY_XBUF;
109 static int select_timeout = SELECT_TIMEOUT;
110 static int active_filecnt = 0;
111 static OFF_T active_bytecnt = 0;
112 static int first_message = 1;
114 static char int_byte_extra[64] = {
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (00 - 3F)/4 */
116 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (40 - 7F)/4 */
117 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* (80 - BF)/4 */
118 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6, /* (C0 - FF)/4 */
121 /* Our I/O buffers are sized with no bits on in the lowest byte of the "size"
122 * (indeed, our rounding of sizes in 1024-byte units assures more than this).
123 * This allows the code that is storing bytes near the physical end of a
124 * circular buffer to temporarily reduce the buffer's size (in order to make
125 * some storing idioms easier), while also making it simple to restore the
126 * buffer's actual size when the buffer's "pos" wraps around to the start (we
127 * just round the buffer's size up again). */
129 #define IOBUF_WAS_REDUCED(siz) ((siz) & 0xFF)
130 #define IOBUF_RESTORE_SIZE(siz) (((siz) | 0xFF) + 1)
132 #define IN_MULTIPLEXED (iobuf.in_multiplexed != 0)
133 #define IN_MULTIPLEXED_AND_READY (iobuf.in_multiplexed > 0)
134 #define OUT_MULTIPLEXED (iobuf.out_empty_len != 0)
136 #define PIO_NEED_INPUT (1<<0) /* The *_NEED_* flags are mutually exclusive. */
137 #define PIO_NEED_OUTROOM (1<<1)
138 #define PIO_NEED_MSGROOM (1<<2)
#define PIO_CONSUME_INPUT (1<<4) /* Must be combined with PIO_NEED_INPUT. */
142 #define PIO_INPUT_AND_CONSUME (PIO_NEED_INPUT | PIO_CONSUME_INPUT)
143 #define PIO_NEED_FLAGS (PIO_NEED_INPUT | PIO_NEED_OUTROOM | PIO_NEED_MSGROOM)
145 #define REMOTE_OPTION_ERROR "rsync: on remote machine: -"
146 #define REMOTE_OPTION_ERROR2 ": unknown option"
148 #define FILESFROM_BUFLEN 2048
150 enum festatus { FES_SUCCESS, FES_REDO, FES_NO_SEND };
152 static flist_ndx_list redo_list, hlink_list;
154 static void read_a_msg(void);
155 static void drain_multiplex_messages(void);
156 static void sleep_for_bwlimit(int bytes_written);
/* Check whether the I/O has been idle long enough to constitute a timeout,
 * and if so, log the condition and exit with RERR_TIMEOUT.  When
 * allow_keepalive is set, a keep-alive message may be buffered first.
 * NOTE(review): interior lines (braces, the declarations of t & chk) are
 * not visible in this extraction — presumably t = time(NULL). */
static void check_timeout(BOOL allow_keepalive)
	/* On the receiving side, the generator is now the one that decides
	 * when a timeout has occurred. When it is sifting through a lot of
	 * files looking for work, it will be sending keep-alive messages to
	 * the sender, and even though the receiver won't be sending/receiving
	 * anything (not even keep-alive messages), the successful writes to
	 * the sender will keep things going. If the receiver is actively
	 * receiving data, it will ensure that the generator knows that it is
	 * not idle by sending the generator keep-alive messages (since the
	 * generator might be blocked trying to send checksums, it needs to
	 * know that the receiver is active). Thus, as long as one or the
	 * other is successfully doing work, the generator will not timeout. */
	if (allow_keepalive) {
		/* This may put data into iobuf.msg w/o flushing. */
		maybe_send_keepalive(t, 0);

	/* Idle time is measured from the most recent I/O in either direction. */
	chk = MAX(last_io_out, last_io_in);
	if (t - chk >= io_timeout) {
		rprintf(FERROR, "[%s] io timeout after %d seconds -- exiting\n",
			who_am_i(), (int)(t-chk));
		exit_cleanup(RERR_TIMEOUT);
/* It's almost always an error to get an EOF when we're trying to read from the
 * network, because the protocol is (for the most part) self-terminating.
 *
 * There is one case for the receiver when it is at the end of the transfer
 * (hanging around reading any keep-alive packets that might come its way): if
 * the sender dies before the generator's kill-signal comes through, we can end
 * up here needing to loop until the kill-signal arrives. In this situation,
 * kluge_around_eof will be < 0.
 *
 * There is another case for older protocol versions (< 24) where the module
 * listing was not terminated, so we must ignore an EOF error in that case and
 * exit. In this situation, kluge_around_eof will be > 0.
 * NOTE(review): interior lines of this function are missing from this
 * extraction (the loop body and several exits are not visible). */
static NORETURN void whine_about_eof(BOOL allow_kluge)
	if (kluge_around_eof && allow_kluge) {
		if (kluge_around_eof > 0)
			/* If we're still here after 10 seconds, exit with an error. */
			for (i = 10*1000/20; i--; )  /* ~10 seconds of 20ms intervals — body not visible */

	/* Normal case: report the unexpected close and die. */
	rprintf(FERROR, RSYNC_NAME ": connection unexpectedly closed "
		"(%s bytes received so far) [%s]\n",
		big_num(stats.total_read), who_am_i());

	exit_cleanup(RERR_STREAMIO);
/* Do a safe read, handling any needed looping and error handling.
 * Returns the count of the bytes read, which will only be different
 * from "len" if we encountered an EOF. This routine is not used on
 * the socket except very early in the transfer.
 * NOTE(review): interior lines (select setup, got accounting, error
 * branches) are missing from this extraction. */
static size_t safe_read(int fd, char *buf, size_t len)
	/* The buffered-I/O path must be used for iobuf.in_fd. */
	assert(fd != iobuf.in_fd);

	/* Optimistic first read: done if we got everything (or EOF). */
	n = read(fd, buf, len);
	if ((size_t)n == len || n == 0) {
		if (DEBUG_GTE(IO, 2))
			rprintf(FINFO, "[%s] safe_read(%d)=%ld\n", who_am_i(), fd, (long)n);

	/* Only transient errors are retried; anything else is fatal. */
	if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
		rsyserr(FERROR, errno, "safe_read failed to read %ld bytes [%s]",
			(long)len, who_am_i());
		exit_cleanup(RERR_STREAMIO);

	/* Wait (with the select timeout) for the fd to become readable. */
	tv.tv_sec = select_timeout;

	cnt = select(fd+1, &r_fds, NULL, &e_fds, &tv);

	if (cnt < 0 && errno == EBADF) {
		rsyserr(FERROR, errno, "safe_read select failed [%s]",
		exit_cleanup(RERR_FILEIO);

	/* On a select timeout/spin, keep the other side alive. */
	maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);

	/*if (FD_ISSET(fd, &e_fds))
		rprintf(FINFO, "select exception on fd %d\n", fd); */

	if (FD_ISSET(fd, &r_fds)) {
		n = read(fd, buf + got, len - got);
		if (DEBUG_GTE(IO, 2))
			rprintf(FINFO, "[%s] safe_read(%d)=%ld\n", who_am_i(), fd, (long)n);

		/* Loop until the full len has been accumulated. */
		if ((got += (size_t)n) == len)
/* Return a short human-readable description of the given fd for error
 * messages (e.g. which well-known stream it is, else "fd N").
 * NOTE(review): the returned-string lines are missing from this extraction;
 * buf is presumably a static buffer since it is returned indirectly. */
static const char *what_fd_is(int fd)
	if (fd == sock_f_out)
	else if (fd == iobuf.out_fd)
	else if (fd == batch_fd)
		/* Fallback: just report the numeric fd. */
		snprintf(buf, sizeof buf, "fd %d", fd);
/* Do a safe write, handling any needed looping and error handling.
 * Returns only if everything was successfully written. This routine
 * is not used on the socket except very early in the transfer.
 * NOTE(review): interior lines (select setup, partial-write accounting)
 * are missing from this extraction. */
static void safe_write(int fd, const char *buf, size_t len)
	/* The buffered-I/O path must be used for iobuf.out_fd. */
	assert(fd != iobuf.out_fd);

	/* Optimistic first write: done if everything went out. */
	n = write(fd, buf, len);
	if ((size_t)n == len)

	/* Only transient errors are retried; anything else is fatal. */
	if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
		rsyserr(FERROR, errno,
			"safe_write failed to write %ld bytes to %s [%s]",
			(long)len, what_fd_is(fd), who_am_i());
		exit_cleanup(RERR_STREAMIO);

	/* Wait (with the select timeout) for the fd to become writable. */
	tv.tv_sec = select_timeout;

	cnt = select(fd + 1, NULL, &w_fds, NULL, &tv);

	if (cnt < 0 && errno == EBADF) {
		rsyserr(FERROR, errno, "safe_write select failed on %s [%s]",
			what_fd_is(fd), who_am_i());
		exit_cleanup(RERR_FILEIO);

	/* On a select timeout/spin, keep the other side alive. */
	maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);

	if (FD_ISSET(fd, &w_fds)) {
		n = write(fd, buf, len);
/* This is only called when files-from data is known to be available. We read
 * a chunk of data and put it into the output buffer.
 * Names are normalized to '\0' termination (CR/LF -> '\0', multi-'\0' runs
 * collapsed) and optionally iconv-converted before being forwarded.
 * NOTE(review): many interior lines (EOF handling, ff_xb length updates,
 * loop scaffolding) are missing from this extraction. */
static void forward_filesfrom_data(void)
	len = read(ff_forward_fd, ff_xb.buf + ff_xb.len, ff_xb.size - ff_xb.len);

	if (len == 0 || errno != EINTR) {
		/* Send end-of-file marker */
		write_buf(iobuf.out_fd, "\0\0", ff_lastchar ? 2 : 1);

		/* Re-enable multiplexed output if it was temporarily disabled
		 * for older-protocol files-from forwarding. */
		if (ff_reenable_multiplex >= 0)
			io_start_multiplex_out(ff_reenable_multiplex);

	if (DEBUG_GTE(IO, 2))
		rprintf(FINFO, "[%s] files-from read=%ld\n", who_am_i(), (long)len);

	char *s = ff_xb.buf + len;
	/* Transform CR and/or LF into '\0' */
	while (s-- > ff_xb.buf) {
		if (*s == '\n' || *s == '\r')

	/* Last buf ended with a '\0', so don't let this buf start with one. */
	while (len && *s == '\0')
	ff_xb.pos = s - ff_xb.buf;

	if (filesfrom_convert && len) {
		char *sob = ff_xb.buf + ff_xb.pos, *s = sob;
		char *eob = sob + len;
		int flags = ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT;
		if (ff_lastchar == '\0')
		/* Convert/send each null-terminated string separately, skipping empties. */
			ff_xb.len = s - sob - 1;
			if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0)
				exit_cleanup(RERR_PROTOCOL); /* impossible? */
			write_buf(iobuf.out_fd, s-1, 1); /* Send the '\0'. */
			while (s != eob && *s == '\0')
			ff_xb.pos = sob - ff_xb.buf;

		if ((ff_xb.len = s - sob) == 0)

		/* Handle a partial string specially, saving any incomplete chars. */
		flags &= ~ICB_INCLUDE_INCOMPLETE;
		if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0) {
			exit_cleanup(RERR_PROTOCOL); /* impossible? */
		/* Shift any leftover partial-string bytes to the buffer start. */
		memmove(ff_xb.buf, ff_xb.buf + ff_xb.pos, ff_xb.len);
		ff_lastchar = 'x'; /* Anything non-zero. */

	char *f = ff_xb.buf + ff_xb.pos;
	/* Eliminate any multi-'\0' runs. */
		if (!(*t++ = *f++)) {
			while (f != eob && *f == '\0')

	if ((len = t - ff_xb.buf) != 0) {
		/* This will not circle back to perform_io() because we only get
		 * called when there is plenty of room in the output buffer. */
		write_buf(iobuf.out_fd, ff_xb.buf, len);
/* Temporarily shrink a circular I/O buffer's size (used to make storing
 * idioms near the physical end of the buffer easier — see the comment on
 * IOBUF_WAS_REDUCED above).  Only shrinks; growing is never done here. */
void reduce_iobuf_size(xbuf *out, size_t new_size)
	if (new_size < out->size) {
		/* Avoid weird buffer interactions by only outputting this to stderr. */
		if (msgs2stderr && DEBUG_GTE(IO, 4)) {
			const char *name = out == &iobuf.out ? "iobuf.out"
					 : out == &iobuf.msg ? "iobuf.msg"
			rprintf(FINFO, "[%s] reduced size of %s (-%d)\n",
				who_am_i(), name, (int)(out->size - new_size));
		out->size = new_size;
/* Undo a prior reduce_iobuf_size(): if the size has low bits set (see
 * IOBUF_WAS_REDUCED), round it back up to the original full size. */
void restore_iobuf_size(xbuf *out)
	if (IOBUF_WAS_REDUCED(out->size)) {
		size_t new_size = IOBUF_RESTORE_SIZE(out->size);
		/* Avoid weird buffer interactions by only outputting this to stderr. */
		if (msgs2stderr && DEBUG_GTE(IO, 4)) {
			const char *name = out == &iobuf.out ? "iobuf.out"
					 : out == &iobuf.msg ? "iobuf.msg"
			rprintf(FINFO, "[%s] restored size of %s (+%d)\n",
				who_am_i(), name, (int)(new_size - out->size));
		out->size = new_size;
/* Perform buffered input and/or output until specified conditions are met.
 * When given a "needed" read or write request, this returns without doing any
 * I/O if the needed input bytes or write space is already available. Once I/O
 * is needed, this will try to do whatever reading and/or writing is currently
 * possible, up to the maximum buffer allowances, no matter if this is a read
 * or write request. However, the I/O stops as soon as the required input
 * bytes or output space is available. If this is not a read request, the
 * routine may also do some advantageous reading of messages from a multiplexed
 * input source (which ensures that we don't jam up with everyone in their
 * "need to write" code and nobody reading the accumulated data that would make
 *
 * The iobuf.in, .out and .msg buffers are all circular. Callers need to be
 * aware that some data copies will need to be split when the bytes wrap around
 * from the end to the start. In order to help make writing into the output
 * buffers easier for some operations (such as the use of SIVAL() into the
 * buffer) a buffer may be temporarily shortened by a small amount, but the
 * original size will be automatically restored when the .pos wraps to the
 * start. See also the 3 raw_* iobuf vars that are used in the handling of
 * MSG_DATA bytes as they are read-from/written-into the buffers.
 *
 * When writing, we flush data in the following priority order:
 *
 * 1. Finish writing any in-progress MSG_DATA sequence from iobuf.out.
 *
 * 2. Write out all the messages from the message buf (if iobuf.msg is active).
 *    Yes, this means that a PIO_NEED_OUTROOM call will completely flush any
 *    messages before getting to the iobuf.out flushing (except for rule 1).
 *
 * 3. Write out the raw data from iobuf.out, possibly filling in the multiplexed
 *    MSG_DATA header that was pre-allocated (when output is multiplexed).
 *
 * TODO: items for possible future work:
 *
 *    - Make this routine able to read the generator-to-receiver batch flow?
 *
 * Unlike the old routines that this replaces, it is OK to read ahead as far as
 * we can because the read_a_msg() routine now reads its bytes out of the input
 * buffer. In the old days, only raw data was in the input buffer, and any
 * unused raw data in the buf would prevent the reading of socket data.
 *
 * NOTE(review): this extraction is missing many interior lines of this
 * function (loop scaffolding, several variable declarations, break/continue
 * statements, and some case labels); comments below describe only what the
 * visible lines establish. */
static char *perform_io(size_t needed, int flags)
	fd_set r_fds, e_fds, w_fds;
	size_t empty_buf_len = 0;

	/* When the input buffer is fully drained, reset its position so new
	 * reads start at the physical beginning again. */
	if (iobuf.in.len == 0 && iobuf.in.pos != 0) {
		if (iobuf.raw_input_ends_before)
			iobuf.raw_input_ends_before -= iobuf.in.pos;

	/* Sanity-check the request against the (fixed) buffer sizes, with
	 * optional debug output describing the request. */
	switch (flags & PIO_NEED_FLAGS) {
		/* We never resize the circular input buffer. */
		if (iobuf.in.size < needed) {
			rprintf(FERROR, "need to read %ld bytes, iobuf.in.buf is only %ld bytes.\n",
				(long)needed, (long)iobuf.in.size);
			exit_cleanup(RERR_PROTOCOL);
		if (msgs2stderr && DEBUG_GTE(IO, 3)) {
			rprintf(FINFO, "[%s] perform_io(%ld, %sinput)\n",
				who_am_i(), (long)needed, flags & PIO_CONSUME_INPUT ? "consume&" : "");

	case PIO_NEED_OUTROOM:
		/* We never resize the circular output buffer. */
		if (iobuf.out.size - iobuf.out_empty_len < needed) {
			fprintf(stderr, "need to write %ld bytes, iobuf.out.buf is only %ld bytes.\n",
				(long)needed, (long)(iobuf.out.size - iobuf.out_empty_len));
			exit_cleanup(RERR_PROTOCOL);
		if (msgs2stderr && DEBUG_GTE(IO, 3)) {
			rprintf(FINFO, "[%s] perform_io(%ld, outroom) needs to flush %ld\n",
				who_am_i(), (long)needed,
				iobuf.out.len + needed > iobuf.out.size
				? (long)(iobuf.out.len + needed - iobuf.out.size) : 0L);

	case PIO_NEED_MSGROOM:
		/* We never resize the circular message buffer. */
		if (iobuf.msg.size < needed) {
			fprintf(stderr, "need to write %ld bytes, iobuf.msg.buf is only %ld bytes.\n",
				(long)needed, (long)iobuf.msg.size);
			exit_cleanup(RERR_PROTOCOL);
		if (msgs2stderr && DEBUG_GTE(IO, 3)) {
			rprintf(FINFO, "[%s] perform_io(%ld, msgroom) needs to flush %ld\n",
				who_am_i(), (long)needed,
				iobuf.msg.len + needed > iobuf.msg.size
				? (long)(iobuf.msg.len + needed - iobuf.msg.size) : 0L);

		if (msgs2stderr && DEBUG_GTE(IO, 3))
			rprintf(FINFO, "[%s] perform_io(%ld, %d)\n", who_am_i(), (long)needed, flags);
		exit_cleanup(RERR_UNSUPPORTED);

	/* Main loop: check if the need is already satisfied, else do I/O. */
	switch (flags & PIO_NEED_FLAGS) {
		if (iobuf.in.len >= needed)
	case PIO_NEED_OUTROOM:
		/* Note that iobuf.out_empty_len doesn't factor into this check
		 * because iobuf.out.len already holds any needed header len. */
		if (iobuf.out.len + needed <= iobuf.out.size)
	case PIO_NEED_MSGROOM:
		if (iobuf.msg.len + needed <= iobuf.msg.size)

	/* Build the fd sets: read from the input fd if there is buffer room
	 * (and, for read-batch, only while batch_fd is open). */
	if (iobuf.in_fd >= 0 && iobuf.in.size - iobuf.in.len) {
		if (!read_batch || batch_fd >= 0) {
			FD_SET(iobuf.in_fd, &r_fds);
			FD_SET(iobuf.in_fd, &e_fds);
		if (iobuf.in_fd > max_fd)
			max_fd = iobuf.in_fd;

	/* Only do more filesfrom processing if there is enough room in the out buffer. */
	if (ff_forward_fd >= 0 && iobuf.out.size - iobuf.out.len > FILESFROM_BUFLEN*2) {
		FD_SET(ff_forward_fd, &r_fds);
		if (ff_forward_fd > max_fd)
			max_fd = ff_forward_fd;

	/* Decide what to write: rule 1 (in-progress MSG_DATA flush) beats
	 * rule 2 (iobuf.msg) beats rule 3 (raw iobuf.out). */
	if (iobuf.out_fd >= 0) {
		if (iobuf.raw_flushing_ends_before
		 || (!iobuf.msg.len && iobuf.out.len > iobuf.out_empty_len && !(flags & PIO_NEED_MSGROOM))) {
			if (OUT_MULTIPLEXED && !iobuf.raw_flushing_ends_before) {
				/* The iobuf.raw_flushing_ends_before value can point off the end
				 * of the iobuf.out buffer for a while, for easier subtracting. */
				iobuf.raw_flushing_ends_before = iobuf.out.pos + iobuf.out.len;

				/* Fill in the pre-allocated MSG_DATA header now that
				 * the full data length is known. */
				SIVAL(iobuf.out.buf + iobuf.raw_data_header_pos, 0,
				      ((MPLEX_BASE + (int)MSG_DATA)<<24) + iobuf.out.len - 4);

				if (msgs2stderr && DEBUG_GTE(IO, 1)) {
					rprintf(FINFO, "[%s] send_msg(%d, %ld)\n",
						who_am_i(), (int)MSG_DATA, (long)iobuf.out.len - 4);

				/* reserve room for the next MSG_DATA header */
				iobuf.raw_data_header_pos = iobuf.raw_flushing_ends_before;
				if (iobuf.raw_data_header_pos >= iobuf.out.size)
					iobuf.raw_data_header_pos -= iobuf.out.size;
				else if (iobuf.raw_data_header_pos + 4 > iobuf.out.size) {
					/* The 4-byte header won't fit at the end of the buffer,
					 * so we'll temporarily reduce the output buffer's size
					 * and put the header at the start of the buffer. */
					reduce_iobuf_size(&iobuf.out, iobuf.raw_data_header_pos);
					iobuf.raw_data_header_pos = 0;

				/* Yes, it is possible for this to make len > size for a while. */

			empty_buf_len = iobuf.out_empty_len;
		} else if (iobuf.msg.len) {

		FD_SET(iobuf.out_fd, &w_fds);
		if (iobuf.out_fd > max_fd)
			max_fd = iobuf.out_fd;

	/* Nothing selectable: report why we cannot satisfy the request. */
	switch (flags & PIO_NEED_FLAGS) {
		if (kluge_around_eof == 2)
		if (iobuf.in_fd == -2)
			whine_about_eof(True);
		rprintf(FERROR, "error in perform_io: no fd for input.\n");
		exit_cleanup(RERR_PROTOCOL);
	case PIO_NEED_OUTROOM:
	case PIO_NEED_MSGROOM:
		drain_multiplex_messages();
		if (iobuf.out_fd == -2)
			whine_about_eof(True);
		rprintf(FERROR, "error in perform_io: no fd for output.\n");
		exit_cleanup(RERR_PROTOCOL);
	/* No stated needs, so I guess this is OK. */

	/* Use a short(er) select timeout when extra flist sending is pending. */
	if (extra_flist_sending_enabled) {
		if (file_total - file_old_total < MAX_FILECNT_LOOKAHEAD && IN_MULTIPLEXED_AND_READY)
		extra_flist_sending_enabled = False;
		tv.tv_sec = select_timeout;
		tv.tv_sec = select_timeout;

	cnt = select(max_fd + 1, &r_fds, &w_fds, &e_fds, &tv);

	if (cnt < 0 && errno == EBADF) {
		exit_cleanup(RERR_SOCKETIO);
	/* A select timeout is used as an opportunity to send any extra
	 * file-list data and to check the I/O timeout. */
	if (extra_flist_sending_enabled) {
		extra_flist_sending_enabled = False;
		send_extra_file_list(sock_f_out, -1);
		extra_flist_sending_enabled = !flist_eof;
	check_timeout((flags & PIO_NEED_INPUT) != 0);
	FD_ZERO(&r_fds); /* Just in case... */

	/* Readable input fd: append into the circular input buffer. */
	if (iobuf.in_fd >= 0 && FD_ISSET(iobuf.in_fd, &r_fds)) {
		size_t len, pos = iobuf.in.pos + iobuf.in.len;
		if (pos >= iobuf.in.size) {
			pos -= iobuf.in.size;
			len = iobuf.in.size - iobuf.in.len;
			len = iobuf.in.size - pos;
		if ((n = read(iobuf.in_fd, iobuf.in.buf + pos, len)) <= 0) {
			/* Signal that input has become invalid. */
			if (!read_batch || batch_fd < 0 || am_generator)
			if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN)
			/* Don't write errors on a dead socket. */
			if (iobuf.in_fd == sock_f_in) {
				rsyserr(FERROR_SOCKET, errno, "read error");
				rsyserr(FERROR, errno, "read error");
			exit_cleanup(RERR_SOCKETIO);
		if (msgs2stderr && DEBUG_GTE(IO, 2))
			rprintf(FINFO, "[%s] recv=%ld\n", who_am_i(), (long)n);

		last_io_in = time(NULL);
		if (flags & PIO_NEED_INPUT)
			maybe_send_keepalive(last_io_in, 0);
		stats.total_read += n;

	/* Writable output fd: flush from whichever buffer "out" points at
	 * (iobuf.out or iobuf.msg), honoring the bwlimit and buffer wrap. */
	if (out && FD_ISSET(iobuf.out_fd, &w_fds)) {
		size_t len = iobuf.raw_flushing_ends_before ? iobuf.raw_flushing_ends_before - out->pos : out->len;

		if (bwlimit_writemax && len > bwlimit_writemax)
			len = bwlimit_writemax;
		if (out->pos + len > out->size)
			len = out->size - out->pos;
		if ((n = write(iobuf.out_fd, out->buf + out->pos, len)) <= 0) {
			if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN)
			/* Don't write errors on a dead socket. */
			iobuf.out.len = iobuf.msg.len = iobuf.raw_flushing_ends_before = 0;
			rsyserr(FERROR_SOCKET, errno, "[%s] write error", who_am_i());
			drain_multiplex_messages();
			exit_cleanup(RERR_SOCKETIO);
		if (msgs2stderr && DEBUG_GTE(IO, 2)) {
			rprintf(FINFO, "[%s] %s sent=%ld\n",
				who_am_i(), out == &iobuf.out ? "out" : "msg", (long)n);

		last_io_out = time(NULL);
		stats.total_written += n;

		if (bwlimit_writemax)
			sleep_for_bwlimit(n);

		/* Advance pos, wrapping (and restoring a reduced size) at the
		 * physical end of the circular buffer. */
		if ((out->pos += n) == out->size) {
			if (iobuf.raw_flushing_ends_before)
				iobuf.raw_flushing_ends_before -= out->size;
			restore_iobuf_size(out);
		} else if (out->pos == iobuf.raw_flushing_ends_before)
			iobuf.raw_flushing_ends_before = 0;
		if ((out->len -= n) == empty_buf_len) {
			restore_iobuf_size(out);
			iobuf.raw_data_header_pos = 0;

	/* A deferred kill-signal is honored once the buffers are flushed. */
	if (got_kill_signal > 0) {
		got_kill_signal = -1;
		flush_ok_after_signal = True;
		exit_cleanup(RERR_SIGNAL);

	/* We need to help prevent deadlock by doing what reading
	 * we can whenever we are here trying to write. */
	if (IN_MULTIPLEXED_AND_READY && !(flags & PIO_NEED_INPUT)) {
		while (!iobuf.raw_input_ends_before && iobuf.in.len > 512)
		if (flist_receiving_enabled && iobuf.in.len > 512)
			wait_for_receiver(); /* generator only */

	if (ff_forward_fd >= 0 && FD_ISSET(ff_forward_fd, &r_fds)) {
		/* This can potentially flush all output and enable
		 * multiplexed output, so keep this last in the loop
		 * and be sure to not cache anything that would break
		forward_filesfrom_data();

	/* Return a pointer to the start of the needed bytes, optionally
	 * consuming them from the input buffer. */
	data = iobuf.in.buf + iobuf.in.pos;

	if (flags & PIO_CONSUME_INPUT) {
		iobuf.in.len -= needed;
		iobuf.in.pos += needed;
		if (iobuf.in.pos == iobuf.raw_input_ends_before)
			iobuf.raw_input_ends_before = 0;
		if (iobuf.in.pos >= iobuf.in.size) {
			iobuf.in.pos -= iobuf.in.size;
			if (iobuf.raw_input_ends_before)
				iobuf.raw_input_ends_before -= iobuf.in.size;
/* Copy "len" raw bytes out of the circular input buffer into "buf",
 * splitting the memcpy when the data wraps around the buffer's end.
 * perform_io() supplies (and consumes) the bytes. */
static void raw_read_buf(char *buf, size_t len)
	size_t pos = iobuf.in.pos;
	char *data = perform_io(len, PIO_INPUT_AND_CONSUME);
	/* If the post-consume pos moved backwards, the data wrapped. */
	if (iobuf.in.pos <= pos && len) {
		size_t siz = len - iobuf.in.pos;
		memcpy(buf, data, siz);
		memcpy(buf + siz, iobuf.in.buf, iobuf.in.pos);
		memcpy(buf, data, len);
/* Read a 4-byte little-endian int from the raw input stream.  Uses the
 * in-buffer bytes directly when they are contiguous, else falls back to
 * raw_read_buf() into a local buf (declared on a line not visible here). */
static int32 raw_read_int(void)
	if (iobuf.in.size - iobuf.in.pos >= 4)
		data = perform_io(4, PIO_INPUT_AND_CONSUME);
		raw_read_buf(data = buf, 4);
	return IVAL(data, 0);
/* Loop reading (and discarding) I/O until the generator's kill-signal
 * arrives (see whine_about_eof's kluge_around_eof == 2 handling).
 * Bails out early if buffered I/O isn't fully set up. */
void noop_io_until_death(void)
	if (!iobuf.in.buf || !iobuf.out.buf || iobuf.in_fd < 0 || iobuf.out_fd < 0 || kluge_around_eof)

	kluge_around_eof = 2;
	/* Setting an I/O timeout ensures that if something inexplicably weird
	 * happens, we won't hang around forever. */

	read_buf(iobuf.in_fd, buf, sizeof buf);
/* Buffer a message for the multiplexed output stream. Is not used for (normal) MSG_DATA.
 * The message is stored in iobuf.msg as a 4-byte header (code + length)
 * followed by the payload, optionally iconv-converted when convert > 0.
 * NOTE(review): some interior lines (return paths, a couple of branch
 * headers) are missing from this extraction. */
int send_msg(enum msgcode code, const char *buf, size_t len, int convert)
	BOOL want_debug = DEBUG_GTE(IO, 1) && convert >= 0 && (msgs2stderr || code != MSG_INFO);

	if (!OUT_MULTIPLEXED)

		rprintf(FINFO, "[%s] send_msg(%d, %ld)\n", who_am_i(), (int)code, (long)len);

	/* When checking for enough free space for this message, we need to
	 * make sure that there is space for the 4-byte header, plus we'll
	 * assume that we may waste up to 3 bytes (if the header doesn't fit
	 * at the physical end of the buffer). */
	if (convert > 0 && ic_send == (iconv_t)-1)
		/* Ensuring double-size room leaves space for maximal conversion expansion. */
		needed = len*2 + 4 + 3;
		needed = len + 4 + 3;
	if (iobuf.msg.len + needed > iobuf.msg.size)
		perform_io(needed, PIO_NEED_MSGROOM);

	pos = iobuf.msg.pos + iobuf.msg.len; /* Must be set after any flushing. */
	if (pos >= iobuf.msg.size)
		pos -= iobuf.msg.size;
	else if (pos + 4 > iobuf.msg.size) {
		/* The 4-byte header won't fit at the end of the buffer,
		 * so we'll temporarily reduce the message buffer's size
		 * and put the header at the start of the buffer. */
		reduce_iobuf_size(&iobuf.msg, pos);

	hdr = iobuf.msg.buf + pos;

	iobuf.msg.len += 4; /* Allocate room for the coming header bytes. */

	/* Converted path: iconv straight into the circular msg buffer. */
	INIT_XBUF(inbuf, (char*)buf, len, (size_t)-1);

	iconvbufs(ic_send, &inbuf, &iobuf.msg,
		  ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT | ICB_INIT);
		rprintf(FERROR, "overflowed iobuf.msg buffer in send_msg");
		exit_cleanup(RERR_UNSUPPORTED);
	/* The converted length is however much iconv appended. */
	len = iobuf.msg.len - len;

	/* Unconverted path: copy the payload after the header, splitting
	 * the copy if it wraps around the end of the circular buffer. */
	if ((pos += 4) == iobuf.msg.size)

	/* Handle a split copy if we wrap around the end of the circular buffer. */
	if (pos >= iobuf.msg.pos && (siz = iobuf.msg.size - pos) < len) {
		memcpy(iobuf.msg.buf + pos, buf, siz);
		memcpy(iobuf.msg.buf, buf + siz, len - siz);
		memcpy(iobuf.msg.buf + pos, buf, len);
	iobuf.msg.len += len;

	/* Now that the final length is known, fill in the header. */
	SIVAL(hdr, 0, ((MPLEX_BASE + (int)code)<<24) + len);

	if (want_debug && convert > 0)
		rprintf(FINFO, "[%s] converted msg len=%ld\n", who_am_i(), (long)len);
/* Send a message whose payload is a single 4-byte little-endian int. */
void send_msg_int(enum msgcode code, int num)
	if (DEBUG_GTE(IO, 1))
		rprintf(FINFO, "[%s] send_msg_int(%d, %d)\n", who_am_i(), (int)code, num);

	SIVAL(numbuf, 0, num);
	send_msg(code, numbuf, 4, -1);
/* Process the final status of a file-list entry: update the active
 * counts, forward MSG_SUCCESS for --remove-source-files, queue hard-link
 * follow-ups, and queue redos for failed transfers.
 * NOTE(review): the switch scaffolding for the status cases is missing
 * from this extraction. */
static void got_flist_entry_status(enum festatus status, int ndx)
	struct file_list *flist = flist_for_ndx(ndx, "got_flist_entry_status");

	if (remove_source_files) {
		active_bytecnt -= F_LENGTH(flist->files[ndx - flist->ndx_start]);

	flist->in_progress--;

	if (remove_source_files)
		/* Tell the sender it may now delete its copy. */
		send_msg_int(MSG_SUCCESS, ndx);

#ifdef SUPPORT_HARD_LINKS
	if (preserve_hard_links) {
		struct file_struct *file = flist->files[ndx - flist->ndx_start];
		if (F_IS_HLINKED(file)) {
			if (status == FES_NO_SEND)
				flist_ndx_push(&hlink_list, -2); /* indicates a failure follows */
			flist_ndx_push(&hlink_list, ndx);
			flist->in_progress++;

	flist->in_progress++;
	/* FES_REDO: schedule the file for another transfer attempt. */
	flist_ndx_push(&redo_list, ndx);
/* Note the fds used for the main socket (which might really be a pipe
 * for a local transfer, but we can ignore that).
 * NOTE(review): the body (presumably assignments to sock_f_in/sock_f_out)
 * is not visible in this extraction. */
void io_set_sock_fds(int f_in, int f_out)
/* Set io_timeout (assignment line not visible here) and derive the
 * select timeout and the allowed keep-alive lull (half the timeout,
 * capped at SELECT_TIMEOUT). */
void set_io_timeout(int secs)
	allowed_lull = (io_timeout + 1) / 2;

	if (!io_timeout || allowed_lull > SELECT_TIMEOUT)
		select_timeout = SELECT_TIMEOUT;
		select_timeout = allowed_lull;
/* Inspect a remote "unknown option" error message; if it looks like an
 * rsync <= 2.6.3 rejecting an option string that only it wouldn't know
 * (e.g. -d), suggest using "--old-d".
 * NOTE(review): several interior lines of the parse are missing here. */
static void check_for_d_option_error(const char *msg)
	static char rsync263_opts[] = "BCDHIKLPRSTWabceghlnopqrtuvxz";

	 || strncmp(msg, REMOTE_OPTION_ERROR, sizeof REMOTE_OPTION_ERROR - 1) != 0)

	msg += sizeof REMOTE_OPTION_ERROR - 1;
	if (*msg == '-' || (colon = strchr(msg, ':')) == NULL
	 || strncmp(colon, REMOTE_OPTION_ERROR2, sizeof REMOTE_OPTION_ERROR2 - 1) != 0)

	/* Every option char before the ':' must be one that 2.6.3 knew. */
	for ( ; *msg != ':'; msg++) {
		else if (*msg == 'e')
		else if (strchr(rsync263_opts, *msg) == NULL)

	"*** Try using \"--old-d\" if remote rsync is <= 2.6.3 ***\n");
/* This is used by the generator to limit how many file transfers can
 * be active at once when --remove-source-files is specified. Without
 * this, sender-side deletions were mostly happening at the end.
 * Blocks (processing finished files / waiting on the receiver) until the
 * active count drops below the limit, then accounts the new file. */
void increment_active_files(int ndx, int itemizing, enum logcode code)
	/* TODO: tune these limits? */
	int limit = active_bytecnt >= 128*1024 ? 10 : 50;
	if (active_filecnt < limit)
	check_for_finished_files(itemizing, code, 0);
	if (active_filecnt < limit)
	wait_for_receiver();

	/* Account for the newly-active file (filecnt increment not visible). */
	active_bytecnt += F_LENGTH(cur_flist->files[ndx - cur_flist->ndx_start]);
/* Pop the next file index queued for a redo (-1 if the list is empty,
 * per flist_ndx_pop's contract — confirm in flist code). */
int get_redo_num(void)
	return flist_ndx_pop(&redo_list);
/* Pop the next file index queued for hard-link finishing. */
int get_hlink_num(void)
	return flist_ndx_pop(&hlink_list);
/* When we're the receiver and we have a local --files-from list of names
 * that needs to be sent over the socket to the sender, we have to do two
 * things at the same time: send the sender a list of what files we're
 * processing and read the incoming file+info list from the sender. We do
 * this by making recv_file_list() call forward_filesfrom_data(), which
 * will ensure that we forward data to the sender until we get some data
 * for recv_file_list() to use. */
void start_filesfrom_forwarding(int fd)
	if (protocol_version < 31 && OUT_MULTIPLEXED) {
		/* Older protocols send the files-from data w/o packaging
		 * it in multiplexed I/O packets, so temporarily switch
		 * to buffered I/O to match this behavior. */
		iobuf.msg.pos = iobuf.msg.len = 0; /* Be extra sure no messages go out. */
		ff_reenable_multiplex = io_end_multiplex_out(MPLX_TO_BUFFERED);

	/* The forwarding fd is stored in ff_forward_fd (assignment line not
	 * visible in this extraction). */
	alloc_xbuf(&ff_xb, FILESFROM_BUFLEN);
/* Read a line into the "buf" buffer.
 * Lines are terminated by '\0' (RL_EOL_NULLS) or CR/LF; RL_DUMP_COMMENTS
 * skips blank lines and '#'/';' comments; RL_CONVERT iconv-converts the
 * line through iconv_buf.  Returns 0 at EOF (per the safe_read use below).
 * NOTE(review): the read loop scaffolding and return statements are not
 * visible in this extraction. */
int read_line(int fd, char *buf, size_t bufsiz, int flags)
	/* Make sure the conversion staging buffer is large enough. */
	if (flags & RL_CONVERT && iconv_buf.size < bufsiz)
		realloc_xbuf(&iconv_buf, bufsiz + 1024);

	/* Accumulate into iconv_buf when converting, else straight into buf. */
	s = flags & RL_CONVERT ? iconv_buf.buf : buf;

	eob = s + bufsiz - 1;

	/* We avoid read_byte() for files because files can return an EOF. */
	if (fd == iobuf.in_fd)
	else if (safe_read(fd, &ch, 1) == 0)

	if (flags & RL_EOL_NULLS ? ch == '\0' : (ch == '\r' || ch == '\n')) {
		/* Skip empty lines if dumping comments. */
		if (flags & RL_DUMP_COMMENTS && s == buf)

	if (flags & RL_DUMP_COMMENTS && (*buf == '#' || *buf == ';'))

	if (flags & RL_CONVERT) {
		INIT_XBUF(outbuf, buf, 0, bufsiz);

		iconv_buf.len = s - iconv_buf.buf;
		iconvbufs(ic_recv, &iconv_buf, &outbuf,
			  ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_INIT);
		outbuf.buf[outbuf.len] = '\0';
/* Read the remote command-line args (newline- or null-terminated per
 * rl_nulls) from f_in into a newly-allocated argv.  In daemon mode
 * (mod_name set) a leading "rsyncd" argv[0] is inserted and args are
 * glob-expanded; optionally accumulates a space-joined copy of the
 * request into *request_p (capped near 1024 bytes).
 * NOTE(review): the read loop scaffolding and the final assignments to
 * *argv_p/*argc_p are not visible in this extraction. */
void read_args(int f_in, char *mod_name, char *buf, size_t bufsiz, int rl_nulls,
	       char ***argv_p, int *argc_p, char **request_p)
	int maxargs = MAX_ARGS;
	int dot_pos = 0, argc = 0, request_len = 0;
	int rl_flags = (rl_nulls ? RL_EOL_NULLS : 0);

	rl_flags |= (protect_args && ic_recv != (iconv_t)-1 ? RL_CONVERT : 0);

	if (!(argv = new_array(char *, maxargs)))
		out_of_memory("read_args");
	if (mod_name && !protect_args)
		argv[argc++] = "rsyncd";

	/* An empty line terminates the arg list. */
	if (read_line(f_in, buf, bufsiz, rl_flags) == 0)

	/* Grow argv as needed, keeping one slot for the NULL terminator. */
	if (argc == maxargs-1) {
		maxargs += MAX_ARGS;
		if (!(argv = realloc_array(argv, char *, maxargs)))
			out_of_memory("read_args");

	if (request_p && request_len < 1024) {
		int len = strlen(buf);
		request_p[0][request_len++] = ' ';
		if (!(*request_p = realloc_array(*request_p, char, request_len + len + 1)))
			out_of_memory("read_args");
		memcpy(*request_p + request_len, buf, len + 1);

	glob_expand_module(mod_name, buf, &argv, &argc, &maxargs);
	glob_expand(buf, &argv, &argc, &maxargs);
	if (!(p = strdup(buf)))
		out_of_memory("read_args");

	/* Remember a bare "." arg (dot_pos bookkeeping not fully visible). */
	if (*p == '.' && p[1] == '\0')

	/* Tell glob_expand to release its internal state. */
	glob_expand(NULL, NULL, NULL, NULL);
/* Attach the circular output buffer to f_out, allocating it on first
 * use.  If a buffer already exists it must belong to this same fd (or
 * to no fd yet).  Return value elided here — TODO confirm. */
1292 BOOL io_start_buffering_out(int f_out)
1294 if (msgs2stderr && DEBUG_GTE(IO, 2))
1295 rprintf(FINFO, "[%s] io_start_buffering_out(%d)\n", who_am_i(), f_out);
1297 if (iobuf.out.buf) {
1298 if (iobuf.out_fd == -1)
1299 iobuf.out_fd = f_out;
1301 assert(f_out == iobuf.out_fd);
/* Output gets double the input size so msgs can be packaged around data. */
1305 alloc_xbuf(&iobuf.out, ROUND_UP_1024(IO_BUFFER_SIZE * 2));
1306 iobuf.out_fd = f_out;
/* Attach the input buffer to f_in, allocating it on first use; the
 * buffer may only ever serve a single fd. */
1311 BOOL io_start_buffering_in(int f_in)
1313 if (msgs2stderr && DEBUG_GTE(IO, 2))
1314 rprintf(FINFO, "[%s] io_start_buffering_in(%d)\n", who_am_i(), f_in);
1317 if (iobuf.in_fd == -1)
1320 assert(f_in == iobuf.in_fd);
1324 alloc_xbuf(&iobuf.in, ROUND_UP_1024(IO_BUFFER_SIZE));
/* Tear down input buffering: free the buffer when free_buffers is set,
 * otherwise just reset its position/length for reuse. */
1330 void io_end_buffering_in(BOOL free_buffers)
1332 if (msgs2stderr && DEBUG_GTE(IO, 2)) {
1333 rprintf(FINFO, "[%s] io_end_buffering_in(IOBUF_%s_BUFS)\n",
1334 who_am_i(), free_buffers ? "FREE" : "KEEP");
1338 free_xbuf(&iobuf.in);
1340 iobuf.in.pos = iobuf.in.len = 0;
/* Flush all pending output, then free the out and msg buffers when
 * free_buffers is set. */
1345 void io_end_buffering_out(BOOL free_buffers)
1347 if (msgs2stderr && DEBUG_GTE(IO, 2)) {
1348 rprintf(FINFO, "[%s] io_end_buffering_out(IOBUF_%s_BUFS)\n",
1349 who_am_i(), free_buffers ? "FREE" : "KEEP");
/* Everything must hit the wire before the buffers go away. */
1352 io_flush(FULL_FLUSH);
1355 free_xbuf(&iobuf.out);
1356 free_xbuf(&iobuf.msg);
/* Flush buffered output once the file-list is done (flist_eof) if the
 * caller says it's important, or if 5+ seconds have passed since the
 * last outbound I/O. */
1362 void maybe_flush_socket(int important)
1364 if (flist_eof && iobuf.out.buf && iobuf.out.len > iobuf.out_empty_len
1365 && (important || time(NULL) - last_io_out >= 5))
1366 io_flush(NORMAL_FLUSH);
1369 /* Older rsync versions used to send either a MSG_NOOP (protocol 30) or a
1370 * raw-data-based keep-alive (protocol 29), both of which implied forwarding of
1371 * the message through the sender. Since the new timeout method does not need
1372 * any forwarding, we just send an empty MSG_DATA message, which works with all
1373 * rsync versions. This avoids any message forwarding, and leaves the raw-data
1374 * stream alone (since we can never be quite sure if that stream is in the
1375 * right state for a keep-alive message). */
1376 void maybe_send_keepalive(time_t now, int flags)
1378 if (flags & MSK_ACTIVE_RECEIVER)
1379 last_io_in = now; /* Fudge things when we're working hard on the files. */
/* Only consider a keep-alive after we've been quiet for the lull period. */
1381 if (now - last_io_out >= allowed_lull) {
1382 /* The receiver is special: it only sends keep-alive messages if it is
1383 * actively receiving data. Otherwise, it lets the generator timeout. */
1384 if (am_receiver && now - last_io_in >= io_timeout)
/* Queue the empty MSG_DATA only when nothing else is already pending. */
1387 if (!iobuf.msg.len && iobuf.out.len == iobuf.out_empty_len)
1388 send_msg(MSG_DATA, "", 0, 0);
1389 if (!(flags & MSK_ALLOW_FLUSH)) {
1390 /* Let the caller worry about writing out the data. */
1391 } else if (iobuf.msg.len)
1392 perform_io(iobuf.msg.size - iobuf.msg.len + 1, PIO_NEED_MSGROOM);
1393 else if (iobuf.out.len > iobuf.out_empty_len)
1394 io_flush(NORMAL_FLUSH);
/* Begin forwarding raw file-list data to our output fd, announcing the
 * given directory index first. */
1398 void start_flist_forward(int ndx)
1400 write_int(iobuf.out_fd, ndx);
1401 forward_flist_data = 1;
/* Stop the raw file-list forwarding enabled above. */
1404 void stop_flist_forward(void)
1406 forward_flist_data = 0;
1409 /* Read a message from a multiplexed source. */
/* Many of the switch's case labels are elided in this view; the
 * "presumably MSG_*" notes below are inferred from the handler bodies
 * and should be confirmed against the full source. */
1410 static void read_a_msg(void)
1412 char data[BIGPATHBUFLEN];
1416 /* This ensures that perform_io() does not try to do any message reading
1417 * until we've read all of the data for this message. We should also
1418 * try to avoid calling things that will cause data to be written via
1419 * perform_io() prior to this being reset to 1. */
1420 iobuf.in_multiplexed = -1;
/* Header: tag in the top byte (biased by MPLEX_BASE), length in the low 3. */
1422 tag = raw_read_int();
1424 msg_bytes = tag & 0xFFFFFF;
1425 tag = (tag >> 24) - MPLEX_BASE;
1427 if (DEBUG_GTE(IO, 1) && msgs2stderr)
1428 rprintf(FINFO, "[%s] got msg=%d, len=%ld\n", who_am_i(), (int)tag, (long)msg_bytes);
/* (presumably MSG_DATA) raw stream data follows this header. */
1432 assert(iobuf.raw_input_ends_before == 0);
1433 /* Though this does not yet read the data, we do mark where in
1434 * the buffer the msg data will end once it is read. It is
1435 * possible that this points off the end of the buffer, in
1436 * which case the gradual reading of the input stream will
1437 * cause this value to wrap around and eventually become real. */
1439 iobuf.raw_input_ends_before = iobuf.in.pos + msg_bytes;
1440 iobuf.in_multiplexed = 1;
/* (presumably MSG_STATS) the generator absorbs the sender's read total. */
1443 if (msg_bytes != sizeof stats.total_read || !am_generator)
1445 raw_read_buf((char*)&stats.total_read, sizeof stats.total_read);
1446 iobuf.in_multiplexed = 1;
/* (presumably MSG_REDO) mark the indexed file for a re-transfer pass. */
1449 if (msg_bytes != 4 || !am_generator)
1451 val = raw_read_int();
1452 iobuf.in_multiplexed = 1;
1453 got_flist_entry_status(FES_REDO, val);
/* (presumably MSG_IO_ERROR) record and/or forward the I/O error flags. */
1458 val = raw_read_int();
1459 iobuf.in_multiplexed = 1;
1462 send_msg_int(MSG_IO_ERROR, val);
1464 case MSG_IO_TIMEOUT:
1465 if (msg_bytes != 4 || am_server || am_generator)
1467 val = raw_read_int();
1468 iobuf.in_multiplexed = 1;
/* Shrink our timeout to match a stricter server-side value. */
1469 if (!io_timeout || io_timeout > val) {
1470 if (INFO_GTE(MISC, 2))
1471 rprintf(FINFO, "Setting --timeout=%d to match server\n", val);
1472 set_io_timeout(val);
1476 /* Support protocol-30 keep-alive method. */
1479 iobuf.in_multiplexed = 1;
1481 maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
/* (presumably MSG_DELETED) forward the deletion notice onward. */
1484 if (msg_bytes >= sizeof data)
1487 raw_read_buf(data, msg_bytes);
1488 iobuf.in_multiplexed = 1;
1489 send_msg(MSG_DELETED, data, msg_bytes, 1);
/* Charset-convert the deleted name before logging when iconv is active. */
1493 if (ic_recv != (iconv_t)-1) {
1497 int flags = ICB_INCLUDE_BAD | ICB_INIT;
1499 INIT_CONST_XBUF(outbuf, data);
1500 INIT_XBUF(inbuf, ibuf, 0, (size_t)-1);
/* Feed the message through iconv in ibuf-sized chunks. */
1503 size_t len = msg_bytes > sizeof ibuf - inbuf.len ? sizeof ibuf - inbuf.len : msg_bytes;
1504 raw_read_buf(ibuf + inbuf.len, len);
1507 if (!(msg_bytes -= len) && !ibuf[inbuf.len-1])
1508 inbuf.len--, add_null = 1;
1509 if (iconvbufs(ic_send, &inbuf, &outbuf, flags) < 0) {
1512 /* Buffer ended with an incomplete char, so move the
1513 * bytes to the start of the buffer and continue. */
1514 memmove(ibuf, ibuf + inbuf.pos, inbuf.len);
1519 if (outbuf.len == outbuf.size)
1521 outbuf.buf[outbuf.len++] = '\0';
1523 msg_bytes = outbuf.len;
1526 raw_read_buf(data, msg_bytes);
1527 iobuf.in_multiplexed = 1;
1528 /* A directory name was sent with the trailing null */
1529 if (msg_bytes > 0 && !data[msg_bytes-1])
1530 log_delete(data, S_IFDIR);
1532 data[msg_bytes] = '\0';
1533 log_delete(data, S_IFREG);
/* (presumably MSG_SUCCESS) a 4-byte file index is required. */
1537 if (msg_bytes != 4) {
1539 rprintf(FERROR, "invalid multi-message %d:%lu [%s%s]\n",
1540 tag, (unsigned long)msg_bytes, who_am_i(),
1541 inc_recurse ? "/inc" : "");
1542 exit_cleanup(RERR_STREAMIO);
1544 val = raw_read_int();
1545 iobuf.in_multiplexed = 1;
1547 got_flist_entry_status(FES_SUCCESS, val);
/* --remove-source-files / --remove-sent-files handling on the sender. */
1549 successful_send(val);
/* (presumably MSG_NO_SEND) the indexed file was not transferred. */
1554 val = raw_read_int();
1555 iobuf.in_multiplexed = 1;
1557 got_flist_entry_status(FES_NO_SEND, val);
1559 send_msg_int(MSG_NO_SEND, val);
1561 case MSG_ERROR_SOCKET:
1562 case MSG_ERROR_UTF8:
1567 if (tag == MSG_ERROR_SOCKET)
1572 case MSG_ERROR_XFER:
/* Error/info text messages: bounded by the local data[] buffer. */
1574 if (msg_bytes >= sizeof data) {
1577 "multiplexing overflow %d:%lu [%s%s]\n",
1578 tag, (unsigned long)msg_bytes, who_am_i(),
1579 inc_recurse ? "/inc" : "");
1580 exit_cleanup(RERR_STREAMIO);
1582 raw_read_buf(data, msg_bytes);
1583 /* We don't set in_multiplexed value back to 1 before writing this message
1584 * because the write might loop back and read yet another message, over and
1585 * over again, while waiting for room to put the message in the msg buffer. */
1586 rwrite((enum logcode)tag, data, msg_bytes, !am_generator);
1587 iobuf.in_multiplexed = 1;
1588 if (first_message) {
1589 if (list_only && !am_sender && tag == 1 && msg_bytes < sizeof data) {
1590 data[msg_bytes] = '\0';
1591 check_for_d_option_error(data);
1596 case MSG_ERROR_EXIT:
1598 val = raw_read_int();
1599 else if (msg_bytes == 0)
1603 iobuf.in_multiplexed = 1;
1604 if (DEBUG_GTE(EXIT, 3))
1605 rprintf(FINFO, "[%s] got MSG_ERROR_EXIT with %ld bytes\n", who_am_i(), (long)msg_bytes);
/* Relay the exit notice along the process chain before exiting. */
1606 if (msg_bytes == 0) {
1607 if (!am_sender && !am_generator) {
1608 if (DEBUG_GTE(EXIT, 3)) {
1609 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
1612 send_msg(MSG_ERROR_EXIT, "", 0, 0);
1613 io_flush(FULL_FLUSH);
1615 } else if (protocol_version >= 31) {
1616 if (am_generator || am_receiver) {
1617 if (DEBUG_GTE(EXIT, 3)) {
1618 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT with exit_code %d\n",
1621 send_msg_int(MSG_ERROR_EXIT, val);
1623 if (DEBUG_GTE(EXIT, 3)) {
1624 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
1627 send_msg(MSG_ERROR_EXIT, "", 0, 0);
1630 /* Send a negative linenum so that we don't end up
1631 * with a duplicate exit message. */
1632 _exit_cleanup(val, __FILE__, 0 - __LINE__);
1634 rprintf(FERROR, "unexpected tag %d [%s%s]\n",
1635 tag, who_am_i(), inc_recurse ? "/inc" : "");
1636 exit_cleanup(RERR_STREAMIO);
/* Every handler must have restored normal multiplexed reading. */
1639 assert(iobuf.in_multiplexed > 0);
/* Consume and discard any buffered multiplexed input, skipping over
 * pending raw-data regions, until the input buffer is empty. */
1642 static void drain_multiplex_messages(void)
1644 while (IN_MULTIPLEXED_AND_READY && iobuf.in.len) {
1645 if (iobuf.raw_input_ends_before) {
1646 size_t raw_len = iobuf.raw_input_ends_before - iobuf.in.pos;
1647 iobuf.raw_input_ends_before = 0;
1648 if (raw_len >= iobuf.in.len) {
/* Skip past the raw data, wrapping around the circular buffer. */
1652 iobuf.in.len -= raw_len;
1653 if ((iobuf.in.pos += raw_len) >= iobuf.in.size)
1654 iobuf.in.pos -= iobuf.in.size;
/* Generator-side wait loop: block until the receiver sends something,
 * then handle it — either a raw-data region holding an index (NDX_*
 * or an incremental-recursion dir index followed by a file list), with
 * some handling elided in this view. */
1660 void wait_for_receiver(void)
1662 if (!iobuf.raw_input_ends_before)
1665 if (iobuf.raw_input_ends_before) {
1666 int ndx = read_int(iobuf.in_fd);
1671 if (DEBUG_GTE(FLIST, 3))
1672 rprintf(FINFO, "[%s] flist_eof=1\n", who_am_i());
1678 exit_cleanup(RERR_STREAMIO);
1681 struct file_list *flist;
/* Disable recursive flist receiving while we read this one. */
1682 flist_receiving_enabled = False;
1683 if (DEBUG_GTE(FLIST, 2)) {
1684 rprintf(FINFO, "[%s] receiving flist for dir %d\n",
1687 flist = recv_file_list(iobuf.in_fd);
1688 flist->parent_ndx = ndx;
1689 #ifdef SUPPORT_HARD_LINKS
1690 if (preserve_hard_links)
1691 match_hard_links(flist);
1693 flist_receiving_enabled = True;
/* Read a 16-bit unsigned value sent in little-endian byte order. */
1698 unsigned short read_shortint(int f)
1702 return (UVAL(b, 1) << 8) + UVAL(b, 0);
/* Read a 32-bit little-endian int, sign-extending on platforms where
 * int32 is wider than 4 bytes. */
1705 int32 read_int(int f)
1712 #if SIZEOF_INT32 > 4
1713 if (num & (int32)0x80000000)
1714 num |= ~(int32)0xffffffff;
/* Read a variable-length int32: the first byte's high bits indicate
 * how many extra bytes follow (via the int_byte_extra[] table), and
 * its remaining low bits supply the value's top byte. */
1719 int32 read_varint(int f)
1730 extra = int_byte_extra[ch / 4];
1732 uchar bit = ((uchar)1<<(8-extra));
1733 if (extra >= (int)sizeof u.b) {
1734 rprintf(FERROR, "Overflow in read_varint()\n");
1735 exit_cleanup(RERR_STREAMIO);
1737 read_buf(f, u.b, extra);
/* Mask off the length-marker bits to recover the high-order byte. */
1738 u.b[extra] = ch & (bit-1);
1741 #if CAREFUL_ALIGNMENT
1744 #if SIZEOF_INT32 > 4
1745 if (u.x & (int32)0x80000000)
1746 u.x |= ~(int32)0xffffffff;
/* Read a variable-length int64 that was sent with at least min_bytes
 * bytes; the first byte's high bits encode how many extra bytes follow
 * beyond that minimum.  On hosts without a real 64-bit type, values
 * that need more than 32 bits abort the transfer. */
1751 int64 read_varlong(int f, uchar min_bytes)
1760 #if SIZEOF_INT64 < 8
1765 read_buf(f, b2, min_bytes);
1766 memcpy(u.b, b2+1, min_bytes-1);
1767 extra = int_byte_extra[CVAL(b2, 0) / 4];
1769 uchar bit = ((uchar)1<<(8-extra));
1770 if (min_bytes + extra > (int)sizeof u.b) {
1771 rprintf(FERROR, "Overflow in read_varlong()\n");
1772 exit_cleanup(RERR_STREAMIO);
1774 read_buf(f, u.b + min_bytes - 1, extra);
/* Mask off the length-marker bits to recover the top byte. */
1775 u.b[min_bytes + extra - 1] = CVAL(b2, 0) & (bit-1);
1776 #if SIZEOF_INT64 < 8
1777 if (min_bytes + extra > 5 || u.b[4] || CVAL(u.b,3) & 0x80) {
1778 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
1779 exit_cleanup(RERR_UNSUPPORTED);
1783 u.b[min_bytes + extra - 1] = CVAL(b2, 0);
1784 #if SIZEOF_INT64 < 8
1786 #elif CAREFUL_ALIGNMENT
1787 u.x = IVAL(u.b,0) | (((int64)IVAL(u.b,4))<<32);
/* Read the older (pre-varlong) 64-bit encoding: a plain int32, with
 * the sentinel 0xffffffff indicating that 8 little-endian bytes of the
 * real value follow. */
1792 int64 read_longint(int f)
1794 #if SIZEOF_INT64 >= 8
1797 int32 num = read_int(f);
1799 if (num != (int32)0xffffffff)
1802 #if SIZEOF_INT64 < 8
1803 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
1804 exit_cleanup(RERR_UNSUPPORTED);
1807 return IVAL(b,0) | (((int64)IVAL(b,4))<<32);
/* Read len bytes from f.  Non-iobuf fds are read directly; otherwise
 * the data comes out of the (possibly multiplexed) input buffer, with
 * optional forwarding to the flist stream and the batch file. */
1811 void read_buf(int f, char *buf, size_t len)
1813 if (f != iobuf.in_fd) {
1814 if (safe_read(f, buf, len) != len)
1815 whine_about_eof(False); /* Doesn't return. */
1819 if (!IN_MULTIPLEXED) {
1820 raw_read_buf(buf, len);
1821 total_data_read += len;
1822 if (forward_flist_data)
1823 write_buf(iobuf.out_fd, buf, len);
1825 if (f == write_batch_monitor_in)
1826 safe_write(batch_fd, buf, len);
/* Multiplexed: wait until a raw-data region is available. */
1833 while (!iobuf.raw_input_ends_before)
/* Take no more than the current raw region (and buffer size) allows. */
1836 siz = MIN(len, iobuf.raw_input_ends_before - iobuf.in.pos);
1837 if (siz >= iobuf.in.size)
1838 siz = iobuf.in.size;
1839 raw_read_buf(buf, siz);
1840 total_data_read += siz;
1842 if (forward_flist_data)
1843 write_buf(iobuf.out_fd, buf, siz);
1845 if (f == write_batch_monitor_in)
1846 safe_write(batch_fd, buf, siz);
1848 if ((len -= siz) == 0)
/* Read len bytes and null-terminate them (buf must hold len+1 bytes). */
1854 void read_sbuf(int f, char *buf, size_t len)
1856 read_buf(f, buf, len);
/* Read a single byte from the stream. */
1860 uchar read_byte(int f)
1863 read_buf(f, (char*)&c, 1);
/* Read a length-prefixed string: a 1-byte length, or 2 bytes when the
 * first byte has its high bit set.  Rejects strings that would not fit
 * (with terminator) in bufsize. */
1867 int read_vstring(int f, char *buf, int bufsize)
1869 int len = read_byte(f);
1872 len = (len & ~0x80) * 0x100 + read_byte(f);
1874 if (len >= bufsize) {
1875 rprintf(FERROR, "over-long vstring received (%d > %d)\n",
1881 read_buf(f, buf, len);
1886 /* Populate a sum_struct with values from the socket. This is
1887 * called by both the sender and the receiver. */
/* Each field is range-checked; a bad value is treated as a protocol
 * error rather than trusted. */
1888 void read_sum_head(int f, struct sum_struct *sum)
1890 int32 max_blength = protocol_version < 30 ? OLD_MAX_BLOCK_SIZE : MAX_BLOCK_SIZE;
1891 sum->count = read_int(f);
1892 if (sum->count < 0) {
1893 rprintf(FERROR, "Invalid checksum count %ld [%s]\n",
1894 (long)sum->count, who_am_i());
1895 exit_cleanup(RERR_PROTOCOL);
1897 sum->blength = read_int(f);
1898 if (sum->blength < 0 || sum->blength > max_blength) {
1899 rprintf(FERROR, "Invalid block length %ld [%s]\n",
1900 (long)sum->blength, who_am_i());
1901 exit_cleanup(RERR_PROTOCOL);
/* Protocols older than 27 use the fixed csum_length instead of
 * transmitting s2length. */
1903 sum->s2length = protocol_version < 27 ? csum_length : (int)read_int(f);
1904 if (sum->s2length < 0 || sum->s2length > MAX_DIGEST_LEN) {
1905 rprintf(FERROR, "Invalid checksum length %d [%s]\n",
1906 sum->s2length, who_am_i());
1907 exit_cleanup(RERR_PROTOCOL);
1909 sum->remainder = read_int(f);
1910 if (sum->remainder < 0 || sum->remainder > sum->blength) {
1911 rprintf(FERROR, "Invalid remainder length %ld [%s]\n",
1912 (long)sum->remainder, who_am_i());
1913 exit_cleanup(RERR_PROTOCOL);
1917 /* Send the values from a sum_struct over the socket. Set sum to
1918 * NULL if there are no checksums to send. This is called by both
1919 * the generator and the sender. */
1920 void write_sum_head(int f, struct sum_struct *sum)
/* A static all-zero struct stands in for a NULL sum argument. */
1922 static struct sum_struct null_sum;
1927 write_int(f, sum->count);
1928 write_int(f, sum->blength);
/* s2length is only on the wire for protocol 27 and later (see
 * read_sum_head). */
1929 if (protocol_version >= 27)
1930 write_int(f, sum->s2length);
1931 write_int(f, sum->remainder);
1934 /* Sleep after writing to limit I/O bandwidth usage.
1936 * @todo Rather than sleeping after each write, it might be better to
1937 * use some kind of averaging. The current algorithm seems to always
1938 * use a bit less bandwidth than specified, because it doesn't make up
1939 * for slow periods. But arguably this is a feature. In addition, we
1940 * ought to take the time used to write the data into account.
1942 * During some phases of big transfers (file FOO is uptodate) this is
1943 * called with a small bytes_written every time. As the kernel has to
1944 * round small waits up to guarantee that we actually wait at least the
1945 * requested number of microseconds, this can become grossly inaccurate.
1946 * We therefore keep track of the bytes we've written over time and only
1947 * sleep when the accumulated delay is at least 1 tenth of a second. */
1948 static void sleep_for_bwlimit(int bytes_written)
1950 static struct timeval prior_tv;
1951 static long total_written = 0;
1952 struct timeval tv, start_tv;
1953 long elapsed_usec, sleep_usec;
1955 #define ONE_SEC 1000000L /* # of microseconds in a second */
1957 total_written += bytes_written;
1959 gettimeofday(&start_tv, NULL);
/* Credit back the bytes "earned" during the elapsed time at bwlimit
 * KB/s, so only the excess accumulates toward a sleep. */
1960 if (prior_tv.tv_sec) {
1961 elapsed_usec = (start_tv.tv_sec - prior_tv.tv_sec) * ONE_SEC
1962 + (start_tv.tv_usec - prior_tv.tv_usec);
1963 total_written -= (int64)elapsed_usec * bwlimit / (ONE_SEC/1024);
1964 if (total_written < 0)
1968 sleep_usec = total_written * (ONE_SEC/1024) / bwlimit;
1969 if (sleep_usec < ONE_SEC / 10) {
1970 prior_tv = start_tv;
/* select() with no fds is used as a portable sub-second sleep. */
1974 tv.tv_sec = sleep_usec / ONE_SEC;
1975 tv.tv_usec = sleep_usec % ONE_SEC;
1976 select(0, NULL, NULL, NULL, &tv);
/* Carry forward any over- or under-sleep into the next call. */
1978 gettimeofday(&prior_tv, NULL);
1979 elapsed_usec = (prior_tv.tv_sec - start_tv.tv_sec) * ONE_SEC
1980 + (prior_tv.tv_usec - start_tv.tv_usec);
1981 total_written = (sleep_usec - elapsed_usec) * bwlimit / (ONE_SEC/1024);
/* Push buffered output toward the socket: FULL_FLUSH drains the whole
 * out buffer, NORMAL_FLUSH frees at least one byte of room; pending
 * msg-buffer data is also flushed (final line below). */
1984 void io_flush(int flush_it_all)
1986 if (iobuf.out.len > iobuf.out_empty_len) {
1987 if (flush_it_all) /* FULL_FLUSH: flush everything in the output buffers */
1988 perform_io(iobuf.out.size - iobuf.out_empty_len, PIO_NEED_OUTROOM)
1990 perform_io(iobuf.out.size - iobuf.out.len + 1, PIO_NEED_OUTROOM);
1993 perform_io(iobuf.msg.size, PIO_NEED_MSGROOM);
/* Write x as a 16-bit little-endian value (the b[0] line is elided
 * from this view). */
1996 void write_shortint(int f, unsigned short x)
2000 b[1] = (char)(x >> 8);
/* Write a 32-bit little-endian int (body elided from this view). */
2004 void write_int(int f, int32 x)
/* Write x in the variable-length encoding that read_varint() decodes:
 * trailing zero bytes are trimmed, and the first byte carries both the
 * length marker and the value's top bits when they fit. */
2011 void write_varint(int f, int32 x)
2019 while (cnt > 1 && b[cnt] == 0)
2021 bit = ((uchar)1<<(7-cnt+1));
/* Top byte won't fit alongside the marker: emit it as an extra byte. */
2022 if (CVAL(b, cnt) >= bit) {
2026 *b = b[cnt] | ~(bit*2-1);
2030 write_buf(f, b, cnt);
/* Write x in the variable-length encoding that read_varlong() decodes,
 * always sending at least min_bytes bytes.  Hosts without a 64-bit
 * type reject values that need more than 32 bits. */
2033 void write_varlong(int f, int64 x, uchar min_bytes)
2040 #if SIZEOF_INT64 >= 8
2041 SIVAL(b, 5, x >> 32);
2043 if (x <= 0x7FFFFFFF && x >= 0)
2044 memset(b + 5, 0, 4);
2046 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
2047 exit_cleanup(RERR_UNSUPPORTED);
/* Trim trailing zero bytes down to the min_bytes floor. */
2051 while (cnt > min_bytes && b[cnt] == 0)
2053 bit = ((uchar)1<<(7-cnt+min_bytes));
2054 if (CVAL(b, cnt) >= bit) {
2057 } else if (cnt > min_bytes)
2058 *b = b[cnt] | ~(bit*2-1);
2062 write_buf(f, b, cnt);
2066 * Note: int64 may actually be a 32-bit type if ./configure couldn't find any
2067 * 64-bit types on this platform.
2069 void write_longint(int f, int64 x)
2071 char b[12], * const s = b+4;
/* Small non-negative values fit the plain 4-byte form; larger ones are
 * sent as the 0xffffffff sentinel plus 8 bytes (see read_longint). */
2074 if (x <= 0x7FFFFFFF && x >= 0) {
2079 #if SIZEOF_INT64 < 8
2080 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
2081 exit_cleanup(RERR_UNSUPPORTED);
2084 SIVAL(s, 4, x >> 32);
2085 write_buf(f, b, 12);
/* Write len bytes to f.  Non-iobuf fds are written directly; otherwise
 * the bytes are appended to the circular output buffer (flushing first
 * if there isn't room), and optionally mirrored to the batch file. */
2089 void write_buf(int f, const char *buf, size_t len)
2093 if (f != iobuf.out_fd) {
2094 safe_write(f, buf, len);
2098 if (iobuf.out.len + len > iobuf.out.size)
2099 perform_io(len, PIO_NEED_OUTROOM);
2101 pos = iobuf.out.pos + iobuf.out.len; /* Must be set after any flushing. */
2102 if (pos >= iobuf.out.size)
2103 pos -= iobuf.out.size;
2105 /* Handle a split copy if we wrap around the end of the circular buffer. */
2106 if (pos >= iobuf.out.pos && (siz = iobuf.out.size - pos) < len) {
2107 memcpy(iobuf.out.buf + pos, buf, siz);
2108 memcpy(iobuf.out.buf, buf + siz, len - siz);
2110 memcpy(iobuf.out.buf + pos, buf, len);
2112 iobuf.out.len += len;
2113 total_data_written += len;
2116 if (f == write_batch_monitor_out)
2117 safe_write(batch_fd, buf, len);
2120 /* Write a string to the connection */
2121 void write_sbuf(int f, const char *buf)
2123 write_buf(f, buf, strlen(buf));
/* Write a single byte to the connection. */
2126 void write_byte(int f, uchar c)
2128 write_buf(f, (char *)&c, 1);
/* Write a length-prefixed string (the counterpart of read_vstring):
 * a 1-byte length, or 2 bytes with the first byte's high bit set. */
2131 void write_vstring(int f, const char *str, int len)
2133 uchar lenbuf[3], *lb = lenbuf;
2138 "attempting to send over-long vstring (%d > %d)\n",
2140 exit_cleanup(RERR_PROTOCOL);
/* Two-byte length form: high byte first, flagged with 0x80. */
2142 *lb++ = len / 0x100 + 0x80;
2146 write_buf(f, (char*)lenbuf, lb - lenbuf + 1);
2148 write_buf(f, str, len);
2151 /* Send a file-list index using a byte-reduction method. */
/* Deltas are taken against the previous positive or negative index so
 * most values fit in a single byte; read_ndx() mirrors this state. */
2152 void write_ndx(int f, int32 ndx)
2154 static int32 prev_positive = -1, prev_negative = 1;
2155 int32 diff, cnt = 0;
/* Older protocols (and batch files) use a plain 4-byte int. */
2158 if (protocol_version < 30 || read_batch) {
2163 /* Send NDX_DONE as a single-byte 0 with no side effects. Send
2164 * negative nums as a positive after sending a leading 0xFF. */
2166 diff = ndx - prev_positive;
2167 prev_positive = ndx;
2168 } else if (ndx == NDX_DONE) {
2173 b[cnt++] = (char)0xFF;
2175 diff = ndx - prev_negative;
2176 prev_negative = ndx;
2179 /* A diff of 1 - 253 is sent as a one-byte diff; a diff of 254 - 32767
2180 * or 0 is sent as a 0xFE + a two-byte diff; otherwise we send 0xFE
2181 * & all 4 bytes of the (non-negative) num with the high-bit set. */
2182 if (diff < 0xFE && diff > 0)
2183 b[cnt++] = (char)diff;
2184 else if (diff < 0 || diff > 0x7FFF) {
2185 b[cnt++] = (char)0xFE;
2186 b[cnt++] = (char)((ndx >> 24) | 0x80);
2187 b[cnt++] = (char)ndx;
2188 b[cnt++] = (char)(ndx >> 8);
2189 b[cnt++] = (char)(ndx >> 16);
2191 b[cnt++] = (char)0xFE;
2192 b[cnt++] = (char)(diff >> 8);
2193 b[cnt++] = (char)diff;
2195 write_buf(f, b, cnt);
2198 /* Receive a file-list index using a byte-reduction method. */
/* Mirrors write_ndx(): a leading 0xFF flips to the negative-index
 * delta state, 0xFE introduces a 2- or 4-byte form, and anything else
 * is a one-byte delta against the previous index. */
2199 int32 read_ndx(int f)
2201 static int32 prev_positive = -1, prev_negative = 1;
2202 int32 *prev_ptr, num;
/* Older protocols use a plain 4-byte int. */
2205 if (protocol_version < 30)
2209 if (CVAL(b, 0) == 0xFF) {
2211 prev_ptr = &prev_negative;
2212 } else if (CVAL(b, 0) == 0)
2215 prev_ptr = &prev_positive;
2216 if (CVAL(b, 0) == 0xFE) {
2218 if (CVAL(b, 0) & 0x80) {
/* 4-byte absolute form: strip the high-bit flag from the top byte. */
2219 b[3] = CVAL(b, 0) & ~0x80;
2221 read_buf(f, b+1, 2);
2224 num = (UVAL(b,0)<<8) + UVAL(b,1) + *prev_ptr;
2226 num = UVAL(b, 0) + *prev_ptr;
2228 if (prev_ptr == &prev_negative)
2233 /* Read a line of up to bufsiz-1 characters into buf. Strips
2234 * the (required) trailing newline and all carriage returns.
2235 * Returns 1 for success; 0 for I/O error or truncation. */
/* Only for plain (non-iobuf) fds; the per-character loop body is
 * elided from this view. */
2236 int read_line_old(int fd, char *buf, size_t bufsiz, int eof_ok)
2238 assert(fd != iobuf.in_fd);
2239 bufsiz--; /* leave room for the null */
2240 while (bufsiz > 0) {
2241 if (safe_read(fd, buf, 1) == 0) {
/* printf-style formatting into a local buffer, then send the result
 * with write_sbuf().  Formatting errors and overflow of the local
 * buffer are fatal protocol errors. */
2259 void io_printf(int fd, const char *format, ...)
2262 char buf[BIGPATHBUFLEN];
2265 va_start(ap, format);
2266 len = vsnprintf(buf, sizeof buf, format, ap);
2270 exit_cleanup(RERR_PROTOCOL);
2272 if (len > (int)sizeof buf) {
2273 rprintf(FERROR, "io_printf() was too long for the buffer.\n");
2274 exit_cleanup(RERR_PROTOCOL);
2277 write_sbuf(fd, buf);
2280 /* Setup for multiplexing a MSG_* stream with the data stream. */
2281 void io_start_multiplex_out(int fd)
/* Drain anything already buffered before switching framing modes. */
2283 io_flush(FULL_FLUSH);
2285 if (msgs2stderr && DEBUG_GTE(IO, 2))
2286 rprintf(FINFO, "[%s] io_start_multiplex_out(%d)\n", who_am_i(), fd);
2289 alloc_xbuf(&iobuf.msg, ROUND_UP_1024(IO_BUFFER_SIZE));
/* The 4-byte data-msg header now always occupies the buffer's start. */
2291 iobuf.out_empty_len = 4; /* See also OUT_MULTIPLEXED */
2292 io_start_buffering_out(fd);
2293 got_kill_signal = 0;
2295 iobuf.raw_data_header_pos = iobuf.out.pos + iobuf.out.len;
2299 /* Setup for multiplexing a MSG_* stream with the data stream. */
2300 void io_start_multiplex_in(int fd)
2302 if (msgs2stderr && DEBUG_GTE(IO, 2))
2303 rprintf(FINFO, "[%s] io_start_multiplex_in(%d)\n", who_am_i(), fd);
2305 iobuf.in_multiplexed = 1; /* See also IN_MULTIPLEXED */
2306 io_start_buffering_in(fd);
/* Stop multiplexed input handling.  Returns the fd that was being
 * de-multiplexed, or -1 if multiplexing wasn't active.  Mode controls
 * whether pending raw data is discarded (MPLX_SWITCHING) and whether
 * input buffering is torn down too. */
2309 int io_end_multiplex_in(int mode)
2311 int ret = iobuf.in_multiplexed ? iobuf.in_fd : -1;
2313 if (msgs2stderr && DEBUG_GTE(IO, 2))
2314 rprintf(FINFO, "[%s] io_end_multiplex_in(mode=%d)\n", who_am_i(), mode);
2316 iobuf.in_multiplexed = 0;
2317 if (mode == MPLX_SWITCHING)
2318 iobuf.raw_input_ends_before = 0;
2320 assert(iobuf.raw_input_ends_before == 0);
2321 if (mode != MPLX_TO_BUFFERED)
2322 io_end_buffering_in(mode);
/* Stop multiplexed output handling.  Returns the fd that was being
 * multiplexed, or -1 if multiplexing wasn't active.  MPLX_TO_BUFFERED
 * keeps plain output buffering in place (used by the old files-from
 * forwarding path). */
2327 int io_end_multiplex_out(int mode)
2329 int ret = iobuf.out_empty_len ? iobuf.out_fd : -1;
2331 if (msgs2stderr && DEBUG_GTE(IO, 2))
2332 rprintf(FINFO, "[%s] io_end_multiplex_out(mode=%d)\n", who_am_i(), mode);
2334 if (mode != MPLX_TO_BUFFERED)
2335 io_end_buffering_out(mode);
2337 io_flush(FULL_FLUSH);
2340 iobuf.out_empty_len = 0;
/* A pending kill signal could no longer be delivered as a message. */
2341 if (got_kill_signal > 0) /* Just in case... */
2342 exit_cleanup(RERR_SIGNAL);
2343 got_kill_signal = -1;
/* Begin recording the session to the batch file: write the header
 * (protocol version, compat flags for protocol 30+, checksum seed),
 * then start mirroring traffic via the monitor fds. */
2348 void start_write_batch(int fd)
2350 /* Some communication has already taken place, but we don't
2351 * enable batch writing until here so that we can write a
2352 * canonical record of the communication even though the
2353 * actual communication so far depends on whether a daemon
2355 write_int(batch_fd, protocol_version);
2356 if (protocol_version >= 30)
2357 write_byte(batch_fd, compat_flags);
2358 write_int(batch_fd, checksum_seed);
2361 write_batch_monitor_out = fd;
2363 write_batch_monitor_in = fd;
/* Stop mirroring traffic to the batch file. */
2366 void stop_write_batch(void)
2368 write_batch_monitor_out = -1;
2369 write_batch_monitor_in = -1;