/* Socket and pipe I/O utilities used in rsync.
4 * Copyright (C) 1996-2001 Andrew Tridgell
5 * Copyright (C) 1996 Paul Mackerras
6 * Copyright (C) 2001, 2002 Martin Pool <mbp@samba.org>
7 * Copyright (C) 2003-2009 Wayne Davison
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 3 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License along
 * with this program; if not, visit the http://fsf.org website. */
23 /* Rsync provides its own multiplexing system, which is used to send
24 * stderr and stdout over a single socket.
26 * For historical reasons this is off during the start of the
27 * connection, but it's switched on quite early using
28 * io_start_multiplex_out() and io_start_multiplex_in(). */
34 /** If no timeout is specified then use a 60 second select timeout */
35 #define SELECT_TIMEOUT 60
38 extern size_t bwlimit_writemax;
39 extern int io_timeout;
42 extern int am_receiver;
43 extern int am_generator;
44 extern int msgs2stderr;
45 extern int inc_recurse;
49 extern int file_total;
50 extern int file_old_total;
52 extern int read_batch;
53 extern int compat_flags;
54 extern int protect_args;
55 extern int checksum_seed;
56 extern int protocol_version;
57 extern int remove_source_files;
58 extern int preserve_hard_links;
59 extern BOOL extra_flist_sending_enabled;
60 extern struct stats stats;
61 extern struct file_list *cur_flist;
63 extern int filesfrom_convert;
64 extern iconv_t ic_send, ic_recv;
67 int csum_length = SHORT_SUM_LENGTH; /* initial value */
71 int forward_flist_data = 0;
72 BOOL flist_receiving_enabled = False;
74 /* Ignore an EOF error if non-zero. See whine_about_eof(). */
75 int kluge_around_eof = 0;
80 int64 total_data_read = 0;
81 int64 total_data_written = 0;
86 int out_fd; /* Both "out" and "msg" go to this fd. */
88 unsigned out_empty_len;
89 size_t raw_data_header_pos; /* in the out xbuf */
90 size_t raw_flushing_ends_before; /* in the out xbuf */
91 size_t raw_input_ends_before; /* in the in xbuf */
92 } iobuf = { .in_fd = -1, .out_fd = -1 };
94 static time_t last_io_in;
95 static time_t last_io_out;
97 static int write_batch_monitor_in = -1;
98 static int write_batch_monitor_out = -1;
100 static int ff_forward_fd = -1;
101 static int ff_reenable_multiplex = -1;
102 static char ff_lastchar = '\0';
103 static xbuf ff_xb = EMPTY_XBUF;
105 static xbuf iconv_buf = EMPTY_XBUF;
107 static int select_timeout = SELECT_TIMEOUT;
108 static int active_filecnt = 0;
109 static OFF_T active_bytecnt = 0;
110 static int first_message = 1;
112 static char int_byte_extra[64] = {
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (00 - 3F)/4 */
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* (40 - 7F)/4 */
115 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* (80 - BF)/4 */
116 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6, /* (C0 - FF)/4 */
119 /* Our I/O buffers are sized with no bits on in the lowest byte of the "size"
120 * (indeed, our rounding of sizes in 1024-byte units assures more than this).
121 * This allows the code that is storing bytes near the physical end of a
122 * circular buffer to temporarily reduce the buffer's size (in order to make
123 * some storing idioms easier), while also making it simple to restore the
124 * buffer's actual size when the buffer's "pos" wraps around to the start (we
125 * just round the buffer's size up again). */
127 #define IOBUF_WAS_REDUCED(siz) ((siz) & 0xFF)
128 #define IOBUF_RESTORE_SIZE(siz) (((siz) | 0xFF) + 1)
130 #define IN_MULTIPLEXED (iobuf.in_multiplexed != 0)
131 #define IN_MULTIPLEXED_AND_READY (iobuf.in_multiplexed > 0)
132 #define OUT_MULTIPLEXED (iobuf.out_empty_len != 0)
134 #define PIO_NEED_INPUT (1<<0) /* The *_NEED_* flags are mutually exclusive. */
135 #define PIO_NEED_OUTROOM (1<<1)
136 #define PIO_NEED_MSGROOM (1<<2)
138 #define PIO_CONSUME_INPUT (1<<4) /* Must becombined with PIO_NEED_INPUT. */
140 #define PIO_INPUT_AND_CONSUME (PIO_NEED_INPUT | PIO_CONSUME_INPUT)
141 #define PIO_NEED_FLAGS (PIO_NEED_INPUT | PIO_NEED_OUTROOM | PIO_NEED_MSGROOM)
143 #define REMOTE_OPTION_ERROR "rsync: on remote machine: -"
144 #define REMOTE_OPTION_ERROR2 ": unknown option"
146 #define FILESFROM_BUFLEN 2048
148 enum festatus { FES_SUCCESS, FES_REDO, FES_NO_SEND };
150 static flist_ndx_list redo_list, hlink_list;
152 static void read_a_msg(void);
153 static void drain_multiplex_messages(void);
154 static void sleep_for_bwlimit(int bytes_written);
156 static void check_timeout(BOOL allow_keepalive)
160 /* On the receiving side, the generator is now the one that decides
161 * when a timeout has occurred. When it is sifting through a lot of
162 * files looking for work, it will be sending keep-alive messages to
163 * the sender, and even though the receiver won't be sending/receiving
164 * anything (not even keep-alive messages), the successful writes to
165 * the sender will keep things going. If the receiver is actively
166 * receiving data, it will ensure that the generator knows that it is
167 * not idle by sending the generator keep-alive messages (since the
168 * generator might be blocked trying to send checksums, it needs to
169 * know that the receiver is active). Thus, as long as one or the
170 * other is successfully doing work, the generator will not timeout. */
176 if (allow_keepalive) {
177 /* This may put data into iobuf.msg w/o flushing. */
178 maybe_send_keepalive(t, 0);
187 chk = MAX(last_io_out, last_io_in);
188 if (t - chk >= io_timeout) {
191 rprintf(FERROR, "[%s] io timeout after %d seconds -- exiting\n",
192 who_am_i(), (int)(t-chk));
193 exit_cleanup(RERR_TIMEOUT);
197 /* It's almost always an error to get an EOF when we're trying to read from the
198 * network, because the protocol is (for the most part) self-terminating.
200 * There is one case for the receiver when it is at the end of the transfer
201 * (hanging around reading any keep-alive packets that might come its way): if
202 * the sender dies before the generator's kill-signal comes through, we can end
203 * up here needing to loop until the kill-signal arrives. In this situation,
204 * kluge_around_eof will be < 0.
206 * There is another case for older protocol versions (< 24) where the module
207 * listing was not terminated, so we must ignore an EOF error in that case and
208 * exit. In this situation, kluge_around_eof will be > 0. */
209 static NORETURN void whine_about_eof(BOOL allow_kluge)
211 if (kluge_around_eof && allow_kluge) {
213 if (kluge_around_eof > 0)
215 /* If we're still here after 10 seconds, exit with an error. */
216 for (i = 10*1000/20; i--; )
220 rprintf(FERROR, RSYNC_NAME ": connection unexpectedly closed "
221 "(%s bytes received so far) [%s]\n",
222 big_num(stats.total_read), who_am_i());
224 exit_cleanup(RERR_STREAMIO);
227 /* Do a safe read, handling any needed looping and error handling.
228 * Returns the count of the bytes read, which will only be different
229 * from "len" if we encountered an EOF. This routine is not used on
230 * the socket except very early in the transfer. */
231 static size_t safe_read(int fd, char *buf, size_t len)
236 assert(fd != iobuf.in_fd);
238 n = read(fd, buf, len);
239 if ((size_t)n == len || n == 0) {
240 if (DEBUG_GTE(IO, 2))
241 rprintf(FINFO, "[%s] safe_read(%d)=%ld\n", who_am_i(), fd, (long)n);
245 if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
247 rsyserr(FERROR, errno, "safe_read failed to read %ld bytes [%s]",
248 (long)len, who_am_i());
249 exit_cleanup(RERR_STREAMIO);
264 tv.tv_sec = select_timeout;
267 cnt = select(fd+1, &r_fds, NULL, &e_fds, &tv);
269 if (cnt < 0 && errno == EBADF) {
270 rsyserr(FERROR, errno, "safe_read select failed [%s]",
272 exit_cleanup(RERR_FILEIO);
275 maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
279 /*if (FD_ISSET(fd, &e_fds))
280 rprintf(FINFO, "select exception on fd %d\n", fd); */
282 if (FD_ISSET(fd, &r_fds)) {
283 n = read(fd, buf + got, len - got);
284 if (DEBUG_GTE(IO, 2))
285 rprintf(FINFO, "[%s] safe_read(%d)=%ld\n", who_am_i(), fd, (long)n);
293 if ((got += (size_t)n) == len)
301 static const char *what_fd_is(int fd)
305 if (fd == sock_f_out)
307 else if (fd == iobuf.out_fd)
309 else if (fd == batch_fd)
312 snprintf(buf, sizeof buf, "fd %d", fd);
317 /* Do a safe write, handling any needed looping and error handling.
318 * Returns only if everything was successfully written. This routine
319 * is not used on the socket except very early in the transfer. */
320 static void safe_write(int fd, const char *buf, size_t len)
324 assert(fd != iobuf.out_fd);
326 n = write(fd, buf, len);
327 if ((size_t)n == len)
330 if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
332 rsyserr(FERROR, errno,
333 "safe_write failed to write %ld bytes to %s [%s]",
334 (long)len, what_fd_is(fd), who_am_i());
335 exit_cleanup(RERR_STREAMIO);
349 tv.tv_sec = select_timeout;
352 cnt = select(fd + 1, NULL, &w_fds, NULL, &tv);
354 if (cnt < 0 && errno == EBADF) {
355 rsyserr(FERROR, errno, "safe_write select failed on %s [%s]",
356 what_fd_is(fd), who_am_i());
357 exit_cleanup(RERR_FILEIO);
360 maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
364 if (FD_ISSET(fd, &w_fds)) {
365 n = write(fd, buf, len);
377 /* This is only called when files-from data is known to be available. We read
378 * a chunk of data and put it into the output buffer. */
379 static void forward_filesfrom_data(void)
383 len = read(ff_forward_fd, ff_xb.buf + ff_xb.len, ff_xb.size - ff_xb.len);
385 if (len == 0 || errno != EINTR) {
386 /* Send end-of-file marker */
388 write_buf(iobuf.out_fd, "\0\0", ff_lastchar ? 2 : 1);
390 if (ff_reenable_multiplex >= 0)
391 io_start_multiplex_out(ff_reenable_multiplex);
396 if (DEBUG_GTE(IO, 2))
397 rprintf(FINFO, "[%s] files-from read=%ld\n", who_am_i(), (long)len);
404 char *s = ff_xb.buf + len;
405 /* Transform CR and/or LF into '\0' */
406 while (s-- > ff_xb.buf) {
407 if (*s == '\n' || *s == '\r')
416 /* Last buf ended with a '\0', so don't let this buf start with one. */
417 while (len && *s == '\0')
419 ff_xb.pos = s - ff_xb.buf;
423 if (filesfrom_convert && len) {
424 char *sob = ff_xb.buf + ff_xb.pos, *s = sob;
425 char *eob = sob + len;
426 int flags = ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT;
427 if (ff_lastchar == '\0')
429 /* Convert/send each null-terminated string separately, skipping empties. */
432 ff_xb.len = s - sob - 1;
433 if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0)
434 exit_cleanup(RERR_PROTOCOL); /* impossible? */
435 write_buf(iobuf.out_fd, s-1, 1); /* Send the '\0'. */
436 while (s != eob && *s == '\0')
439 ff_xb.pos = sob - ff_xb.buf;
444 if ((ff_xb.len = s - sob) == 0)
447 /* Handle a partial string specially, saving any incomplete chars. */
448 flags &= ~ICB_INCLUDE_INCOMPLETE;
449 if (iconvbufs(ic_send, &ff_xb, &iobuf.out, flags) < 0) {
451 exit_cleanup(RERR_PROTOCOL); /* impossible? */
453 memmove(ff_xb.buf, ff_xb.buf + ff_xb.pos, ff_xb.len);
455 ff_lastchar = 'x'; /* Anything non-zero. */
461 char *f = ff_xb.buf + ff_xb.pos;
464 /* Eliminate any multi-'\0' runs. */
466 if (!(*t++ = *f++)) {
467 while (f != eob && *f == '\0')
472 if ((len = t - ff_xb.buf) != 0) {
473 /* This will not circle back to perform_io() because we only get
474 * called when there is plenty of room in the output buffer. */
475 write_buf(iobuf.out_fd, ff_xb.buf, len);
480 void reduce_iobuf_size(xbuf *out, size_t new_size)
482 if (new_size < out->size) {
483 /* Avoid weird buffer interactions by only outputting this to stderr. */
484 if (msgs2stderr && DEBUG_GTE(IO, 4)) {
485 const char *name = out == &iobuf.out ? "iobuf.out"
486 : out == &iobuf.msg ? "iobuf.msg"
489 rprintf(FINFO, "[%s] reduced size of %s (-%d)\n",
490 who_am_i(), name, (int)(out->size - new_size));
493 out->size = new_size;
497 void restore_iobuf_size(xbuf *out)
499 if (IOBUF_WAS_REDUCED(out->size)) {
500 size_t new_size = IOBUF_RESTORE_SIZE(out->size);
501 /* Avoid weird buffer interactions by only outputting this to stderr. */
502 if (msgs2stderr && DEBUG_GTE(IO, 4)) {
503 const char *name = out == &iobuf.out ? "iobuf.out"
504 : out == &iobuf.msg ? "iobuf.msg"
507 rprintf(FINFO, "[%s] restored size of %s (+%d)\n",
508 who_am_i(), name, (int)(new_size - out->size));
511 out->size = new_size;
515 /* Perform buffered input and/or output until specified conditions are met.
516 * When given a "needed" read or write request, this returns without doing any
517 * I/O if the needed input bytes or write space is already available. Once I/O
518 * is needed, this will try to do whatever reading and/or writing is currently
519 * possible, up to the maximum buffer allowances, no matter if this is a read
520 * or write request. However, the I/O stops as soon as the required input
521 * bytes or output space is available. If this is not a read request, the
522 * routine may also do some advantageous reading of messages from a multiplexed
523 * input source (which ensures that we don't jam up with everyone in their
524 * "need to write" code and nobody reading the accumulated data that would make
527 * The iobuf.in, .out and .msg buffers are all circular. Callers need to be
528 * aware that some data copies will need to be split when the bytes wrap around
529 * from the end to the start. In order to help make writing into the output
530 * buffers easier for some operations (such as the use of SIVAL() into the
531 * buffer) a buffer may be temporarily shortened by a small amount, but the
532 * original size will be automatically restored when the .pos wraps to the
533 * start. See also the 3 raw_* iobuf vars that are used in the handling of
534 * MSG_DATA bytes as they are read-from/written-into the buffers.
536 * When writing, we flush data in the following priority order:
538 * 1. Finish writing any in-progress MSG_DATA sequence from iobuf.out.
540 * 2. Write out all the messages from the message buf (if iobuf.msg is active).
541 * Yes, this means that a PIO_NEED_OUTROOM call will completely flush any
542 * messages before getting to the iobuf.out flushing (except for rule 1).
544 * 3. Write out the raw data from iobuf.out, possibly filling in the multiplexed
545 * MSG_DATA header that was pre-allocated (when output is multiplexed).
547 * TODO: items for possible future work:
549 * - Make this routine able to read the generator-to-receiver batch flow?
551 * Unlike the old routines that this replaces, it is OK to read ahead as far as
552 * we can because the read_a_msg() routine now reads its bytes out of the input
553 * buffer. In the old days, only raw data was in the input buffer, and any
554 * unused raw data in the buf would prevent the reading of socket data. */
555 static char *perform_io(size_t needed, int flags)
557 fd_set r_fds, e_fds, w_fds;
560 size_t empty_buf_len = 0;
564 if (iobuf.in.len == 0 && iobuf.in.pos != 0) {
565 if (iobuf.raw_input_ends_before)
566 iobuf.raw_input_ends_before -= iobuf.in.pos;
570 switch (flags & PIO_NEED_FLAGS) {
572 /* We never resize the circular input buffer. */
573 if (iobuf.in.size < needed) {
574 rprintf(FERROR, "need to read %ld bytes, iobuf.in.buf is only %ld bytes.\n",
575 (long)needed, (long)iobuf.in.size);
576 exit_cleanup(RERR_PROTOCOL);
579 if (msgs2stderr && DEBUG_GTE(IO, 3)) {
580 rprintf(FINFO, "[%s] perform_io(%ld, %sinput)\n",
581 who_am_i(), (long)needed, flags & PIO_CONSUME_INPUT ? "consume&" : "");
585 case PIO_NEED_OUTROOM:
586 /* We never resize the circular output buffer. */
587 if (iobuf.out.size - iobuf.out_empty_len < needed) {
588 fprintf(stderr, "need to write %ld bytes, iobuf.out.buf is only %ld bytes.\n",
589 (long)needed, (long)(iobuf.out.size - iobuf.out_empty_len));
590 exit_cleanup(RERR_PROTOCOL);
593 if (msgs2stderr && DEBUG_GTE(IO, 3)) {
594 rprintf(FINFO, "[%s] perform_io(%ld, outroom) needs to flush %ld\n",
595 who_am_i(), (long)needed,
596 iobuf.out.len + needed > iobuf.out.size
597 ? (long)(iobuf.out.len + needed - iobuf.out.size) : 0L);
601 case PIO_NEED_MSGROOM:
602 /* We never resize the circular message buffer. */
603 if (iobuf.msg.size < needed) {
604 fprintf(stderr, "need to write %ld bytes, iobuf.msg.buf is only %ld bytes.\n",
605 (long)needed, (long)iobuf.msg.size);
606 exit_cleanup(RERR_PROTOCOL);
609 if (msgs2stderr && DEBUG_GTE(IO, 3)) {
610 rprintf(FINFO, "[%s] perform_io(%ld, msgroom) needs to flush %ld\n",
611 who_am_i(), (long)needed,
612 iobuf.msg.len + needed > iobuf.msg.size
613 ? (long)(iobuf.msg.len + needed - iobuf.msg.size) : 0L);
618 if (msgs2stderr && DEBUG_GTE(IO, 3))
619 rprintf(FINFO, "[%s] perform_io(%ld, %d)\n", who_am_i(), (long)needed, flags);
623 exit_cleanup(RERR_UNSUPPORTED);
627 switch (flags & PIO_NEED_FLAGS) {
629 if (iobuf.in.len >= needed)
632 case PIO_NEED_OUTROOM:
633 /* Note that iobuf.out_empty_len doesn't factor into this check
634 * because iobuf.out.len already holds any needed header len. */
635 if (iobuf.out.len + needed <= iobuf.out.size)
638 case PIO_NEED_MSGROOM:
639 if (iobuf.msg.len + needed <= iobuf.msg.size)
648 if (iobuf.in_fd >= 0 && iobuf.in.size - iobuf.in.len) {
649 if (!read_batch || batch_fd >= 0) {
650 FD_SET(iobuf.in_fd, &r_fds);
651 FD_SET(iobuf.in_fd, &e_fds);
653 if (iobuf.in_fd > max_fd)
654 max_fd = iobuf.in_fd;
657 /* Only do more filesfrom processing if there is enough room in the out buffer. */
658 if (ff_forward_fd >= 0 && iobuf.out.size - iobuf.out.len > FILESFROM_BUFLEN*2) {
659 FD_SET(ff_forward_fd, &r_fds);
660 if (ff_forward_fd > max_fd)
661 max_fd = ff_forward_fd;
665 if (iobuf.out_fd >= 0) {
666 if (iobuf.raw_flushing_ends_before
667 || (!iobuf.msg.len && iobuf.out.len > iobuf.out_empty_len && !(flags & PIO_NEED_MSGROOM))) {
668 if (OUT_MULTIPLEXED && !iobuf.raw_flushing_ends_before) {
669 /* The iobuf.raw_flushing_ends_before value can point off the end
670 * of the iobuf.out buffer for a while, for easier subtracting. */
671 iobuf.raw_flushing_ends_before = iobuf.out.pos + iobuf.out.len;
673 SIVAL(iobuf.out.buf + iobuf.raw_data_header_pos, 0,
674 ((MPLEX_BASE + (int)MSG_DATA)<<24) + iobuf.out.len - 4);
676 if (msgs2stderr && DEBUG_GTE(IO, 1)) {
677 rprintf(FINFO, "[%s] send_msg(%d, %ld)\n",
678 who_am_i(), (int)MSG_DATA, (long)iobuf.out.len - 4);
681 /* reserve room for the next MSG_DATA header */
682 iobuf.raw_data_header_pos = iobuf.raw_flushing_ends_before;
683 if (iobuf.raw_data_header_pos >= iobuf.out.size)
684 iobuf.raw_data_header_pos -= iobuf.out.size;
685 else if (iobuf.raw_data_header_pos + 4 > iobuf.out.size) {
686 /* The 4-byte header won't fit at the end of the buffer,
687 * so we'll temporarily reduce the output buffer's size
688 * and put the header at the start of the buffer. */
689 reduce_iobuf_size(&iobuf.out, iobuf.raw_data_header_pos);
690 iobuf.raw_data_header_pos = 0;
692 /* Yes, it is possible for this to make len > size for a while. */
696 empty_buf_len = iobuf.out_empty_len;
698 } else if (iobuf.msg.len) {
704 FD_SET(iobuf.out_fd, &w_fds);
705 if (iobuf.out_fd > max_fd)
706 max_fd = iobuf.out_fd;
712 switch (flags & PIO_NEED_FLAGS) {
715 if (kluge_around_eof == 2)
717 if (iobuf.in_fd == -2)
718 whine_about_eof(True);
719 rprintf(FERROR, "error in perform_io: no fd for input.\n");
720 exit_cleanup(RERR_PROTOCOL);
721 case PIO_NEED_OUTROOM:
722 case PIO_NEED_MSGROOM:
724 drain_multiplex_messages();
725 if (iobuf.out_fd == -2)
726 whine_about_eof(True);
727 rprintf(FERROR, "error in perform_io: no fd for output.\n");
728 exit_cleanup(RERR_PROTOCOL);
730 /* No stated needs, so I guess this is OK. */
736 if (extra_flist_sending_enabled) {
737 if (file_total - file_old_total < MAX_FILECNT_LOOKAHEAD && IN_MULTIPLEXED_AND_READY)
740 extra_flist_sending_enabled = False;
741 tv.tv_sec = select_timeout;
744 tv.tv_sec = select_timeout;
747 cnt = select(max_fd + 1, &r_fds, &w_fds, &e_fds, &tv);
750 if (cnt < 0 && errno == EBADF) {
752 exit_cleanup(RERR_SOCKETIO);
754 if (extra_flist_sending_enabled) {
755 extra_flist_sending_enabled = False;
756 send_extra_file_list(sock_f_out, -1);
757 extra_flist_sending_enabled = !flist_eof;
759 check_timeout((flags & PIO_NEED_INPUT) != 0);
760 FD_ZERO(&r_fds); /* Just in case... */
764 if (iobuf.in_fd >= 0 && FD_ISSET(iobuf.in_fd, &r_fds)) {
765 size_t len, pos = iobuf.in.pos + iobuf.in.len;
767 if (pos >= iobuf.in.size) {
768 pos -= iobuf.in.size;
769 len = iobuf.in.size - iobuf.in.len;
771 len = iobuf.in.size - pos;
772 if ((n = read(iobuf.in_fd, iobuf.in.buf + pos, len)) <= 0) {
774 /* Signal that input has become invalid. */
775 if (!read_batch || batch_fd < 0 || am_generator)
780 if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN)
783 /* Don't write errors on a dead socket. */
784 if (iobuf.in_fd == sock_f_in) {
787 rsyserr(FERROR_SOCKET, errno, "read error");
789 rsyserr(FERROR, errno, "read error");
790 exit_cleanup(RERR_SOCKETIO);
793 if (msgs2stderr && DEBUG_GTE(IO, 2))
794 rprintf(FINFO, "[%s] recv=%ld\n", who_am_i(), (long)n);
797 last_io_in = time(NULL);
798 if (flags & PIO_NEED_INPUT)
799 maybe_send_keepalive(last_io_in, 0);
801 stats.total_read += n;
806 if (out && FD_ISSET(iobuf.out_fd, &w_fds)) {
807 size_t len = iobuf.raw_flushing_ends_before ? iobuf.raw_flushing_ends_before - out->pos : out->len;
810 if (bwlimit_writemax && len > bwlimit_writemax)
811 len = bwlimit_writemax;
813 if (out->pos + len > out->size)
814 len = out->size - out->pos;
815 if ((n = write(iobuf.out_fd, out->buf + out->pos, len)) <= 0) {
816 if (errno == EINTR || errno == EWOULDBLOCK || errno == EAGAIN)
819 /* Don't write errors on a dead socket. */
822 iobuf.out.len = iobuf.msg.len = iobuf.raw_flushing_ends_before = 0;
823 rsyserr(FERROR_SOCKET, errno, "[%s] write error", who_am_i());
824 drain_multiplex_messages();
825 exit_cleanup(RERR_SOCKETIO);
828 if (msgs2stderr && DEBUG_GTE(IO, 2)) {
829 rprintf(FINFO, "[%s] %s sent=%ld\n",
830 who_am_i(), out == &iobuf.out ? "out" : "msg", (long)n);
834 last_io_out = time(NULL);
835 stats.total_written += n;
837 if (bwlimit_writemax)
838 sleep_for_bwlimit(n);
840 if ((out->pos += n) == out->size) {
841 if (iobuf.raw_flushing_ends_before)
842 iobuf.raw_flushing_ends_before -= out->size;
844 restore_iobuf_size(out);
845 } else if (out->pos == iobuf.raw_flushing_ends_before)
846 iobuf.raw_flushing_ends_before = 0;
847 if ((out->len -= n) == empty_buf_len) {
849 restore_iobuf_size(out);
851 iobuf.raw_data_header_pos = 0;
855 /* We need to help prevent deadlock by doing what reading
856 * we can whenever we are here trying to write. */
857 if (IN_MULTIPLEXED_AND_READY && !(flags & PIO_NEED_INPUT)) {
858 while (!iobuf.raw_input_ends_before && iobuf.in.len > 512)
860 if (flist_receiving_enabled && iobuf.in.len > 512)
861 wait_for_receiver(); /* generator only */
864 if (ff_forward_fd >= 0 && FD_ISSET(ff_forward_fd, &r_fds)) {
865 /* This can potentially flush all output and enable
866 * multiplexed output, so keep this last in the loop
867 * and be sure to not cache anything that would break
869 forward_filesfrom_data();
874 data = iobuf.in.buf + iobuf.in.pos;
876 if (flags & PIO_CONSUME_INPUT) {
877 iobuf.in.len -= needed;
878 iobuf.in.pos += needed;
879 if (iobuf.in.pos == iobuf.raw_input_ends_before)
880 iobuf.raw_input_ends_before = 0;
881 if (iobuf.in.pos >= iobuf.in.size) {
882 iobuf.in.pos -= iobuf.in.size;
883 if (iobuf.raw_input_ends_before)
884 iobuf.raw_input_ends_before -= iobuf.in.size;
891 static void raw_read_buf(char *buf, size_t len)
893 size_t pos = iobuf.in.pos;
894 char *data = perform_io(len, PIO_INPUT_AND_CONSUME);
895 if (iobuf.in.pos <= pos && len) {
896 size_t siz = len - iobuf.in.pos;
897 memcpy(buf, data, siz);
898 memcpy(buf + siz, iobuf.in.buf, iobuf.in.pos);
900 memcpy(buf, data, len);
903 static int32 raw_read_int(void)
906 if (iobuf.in.size - iobuf.in.pos >= 4)
907 data = perform_io(4, PIO_INPUT_AND_CONSUME);
909 raw_read_buf(data = buf, 4);
910 return IVAL(data, 0);
913 void noop_io_until_death(void)
917 if (!iobuf.in.buf || !iobuf.out.buf || iobuf.in_fd < 0 || iobuf.out_fd < 0 || kluge_around_eof)
920 kluge_around_eof = 2;
921 /* Setting an I/O timeout ensures that if something inexplicably weird
922 * happens, we won't hang around forever. */
927 read_buf(iobuf.in_fd, buf, sizeof buf);
930 /* Buffer a message for the multiplexed output stream. Is not used for (normal) MSG_DATA. */
931 int send_msg(enum msgcode code, const char *buf, size_t len, int convert)
935 BOOL want_debug = DEBUG_GTE(IO, 1) && convert >= 0 && (msgs2stderr || code != MSG_INFO);
937 if (!OUT_MULTIPLEXED)
941 rprintf(FINFO, "[%s] send_msg(%d, %ld)\n", who_am_i(), (int)code, (long)len);
943 /* When checking for enough free space for this message, we need to
944 * make sure that there is space for the 4-byte header, plus we'll
945 * assume that we may waste up to 3 bytes (if the header doesn't fit
946 * at the physical end of the buffer). */
948 if (convert > 0 && ic_send == (iconv_t)-1)
951 /* Ensuring double-size room leaves space for maximal conversion expansion. */
952 needed = len*2 + 4 + 3;
955 needed = len + 4 + 3;
956 if (iobuf.msg.len + needed > iobuf.msg.size)
957 perform_io(needed, PIO_NEED_MSGROOM);
959 pos = iobuf.msg.pos + iobuf.msg.len; /* Must be set after any flushing. */
960 if (pos >= iobuf.msg.size)
961 pos -= iobuf.msg.size;
962 else if (pos + 4 > iobuf.msg.size) {
963 /* The 4-byte header won't fit at the end of the buffer,
964 * so we'll temporarily reduce the message buffer's size
965 * and put the header at the start of the buffer. */
966 reduce_iobuf_size(&iobuf.msg, pos);
969 hdr = iobuf.msg.buf + pos;
971 iobuf.msg.len += 4; /* Allocate room for the coming header bytes. */
977 INIT_XBUF(inbuf, (char*)buf, len, (size_t)-1);
980 iconvbufs(ic_send, &inbuf, &iobuf.msg,
981 ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_CIRCULAR_OUT | ICB_INIT);
983 rprintf(FERROR, "overflowed iobuf.msg buffer in send_msg");
984 exit_cleanup(RERR_UNSUPPORTED);
986 len = iobuf.msg.len - len;
992 if ((pos += 4) == iobuf.msg.size)
995 /* Handle a split copy if we wrap around the end of the circular buffer. */
996 if (pos >= iobuf.msg.pos && (siz = iobuf.msg.size - pos) < len) {
997 memcpy(iobuf.msg.buf + pos, buf, siz);
998 memcpy(iobuf.msg.buf, buf + siz, len - siz);
1000 memcpy(iobuf.msg.buf + pos, buf, len);
1002 iobuf.msg.len += len;
1005 SIVAL(hdr, 0, ((MPLEX_BASE + (int)code)<<24) + len);
1007 if (want_debug && convert > 0)
1008 rprintf(FINFO, "[%s] converted msg len=%ld\n", who_am_i(), (long)len);
1013 void send_msg_int(enum msgcode code, int num)
1017 if (DEBUG_GTE(IO, 1))
1018 rprintf(FINFO, "[%s] send_msg_int(%d, %d)\n", who_am_i(), (int)code, num);
1020 SIVAL(numbuf, 0, num);
1021 send_msg(code, numbuf, 4, -1);
1024 static void got_flist_entry_status(enum festatus status, int ndx)
1026 struct file_list *flist = flist_for_ndx(ndx, "got_flist_entry_status");
1028 if (remove_source_files) {
1030 active_bytecnt -= F_LENGTH(flist->files[ndx - flist->ndx_start]);
1034 flist->in_progress--;
1038 if (remove_source_files)
1039 send_msg_int(MSG_SUCCESS, ndx);
1042 #ifdef SUPPORT_HARD_LINKS
1043 if (preserve_hard_links) {
1044 struct file_struct *file = flist->files[ndx - flist->ndx_start];
1045 if (F_IS_HLINKED(file)) {
1046 if (status == FES_NO_SEND)
1047 flist_ndx_push(&hlink_list, -2); /* indicates a failure follows */
1048 flist_ndx_push(&hlink_list, ndx);
1050 flist->in_progress++;
1058 flist->in_progress++;
1063 flist_ndx_push(&redo_list, ndx);
1068 /* Note the fds used for the main socket (which might really be a pipe
1069 * for a local transfer, but we can ignore that). */
1070 void io_set_sock_fds(int f_in, int f_out)
1076 void set_io_timeout(int secs)
1079 allowed_lull = (io_timeout + 1) / 2;
1081 if (!io_timeout || allowed_lull > SELECT_TIMEOUT)
1082 select_timeout = SELECT_TIMEOUT;
1084 select_timeout = allowed_lull;
1090 static void check_for_d_option_error(const char *msg)
1092 static char rsync263_opts[] = "BCDHIKLPRSTWabceghlnopqrtuvxz";
1097 || strncmp(msg, REMOTE_OPTION_ERROR, sizeof REMOTE_OPTION_ERROR - 1) != 0)
1100 msg += sizeof REMOTE_OPTION_ERROR - 1;
1101 if (*msg == '-' || (colon = strchr(msg, ':')) == NULL
1102 || strncmp(colon, REMOTE_OPTION_ERROR2, sizeof REMOTE_OPTION_ERROR2 - 1) != 0)
1105 for ( ; *msg != ':'; msg++) {
1108 else if (*msg == 'e')
1110 else if (strchr(rsync263_opts, *msg) == NULL)
1116 "*** Try using \"--old-d\" if remote rsync is <= 2.6.3 ***\n");
1120 /* This is used by the generator to limit how many file transfers can
1121 * be active at once when --remove-source-files is specified. Without
1122 * this, sender-side deletions were mostly happening at the end. */
1123 void increment_active_files(int ndx, int itemizing, enum logcode code)
1126 /* TODO: tune these limits? */
1127 int limit = active_bytecnt >= 128*1024 ? 10 : 50;
1128 if (active_filecnt < limit)
1130 check_for_finished_files(itemizing, code, 0);
1131 if (active_filecnt < limit)
1133 wait_for_receiver();
1137 active_bytecnt += F_LENGTH(cur_flist->files[ndx - cur_flist->ndx_start]);
1140 int get_redo_num(void)
1142 return flist_ndx_pop(&redo_list);
1145 int get_hlink_num(void)
1147 return flist_ndx_pop(&hlink_list);
1150 /* When we're the receiver and we have a local --files-from list of names
1151 * that needs to be sent over the socket to the sender, we have to do two
1152 * things at the same time: send the sender a list of what files we're
1153 * processing and read the incoming file+info list from the sender. We do
1154 * this by making recv_file_list() call forward_filesfrom_data(), which
1155 * will ensure that we forward data to the sender until we get some data
1156 * for recv_file_list() to use. */
1157 void start_filesfrom_forwarding(int fd)
1159 if (protocol_version < 31 && OUT_MULTIPLEXED) {
1160 /* Older protocols send the files-from data w/o packaging
1161 * it in multiplexed I/O packets, so temporarily switch
1162 * to buffered I/O to match this behavior. */
1163 iobuf.msg.pos = iobuf.msg.len = 0; /* Be extra sure no messages go out. */
1164 ff_reenable_multiplex = io_end_multiplex_out(MPLX_TO_BUFFERED);
1168 alloc_xbuf(&ff_xb, FILESFROM_BUFLEN);
1171 /* Read a line into the "buf" buffer. */
1172 int read_line(int fd, char *buf, size_t bufsiz, int flags)
1177 if (flags & RL_CONVERT && iconv_buf.size < bufsiz)
1178 realloc_xbuf(&iconv_buf, bufsiz + 1024);
1183 s = flags & RL_CONVERT ? iconv_buf.buf : buf;
1187 eob = s + bufsiz - 1;
1189 /* We avoid read_byte() for files because files can return an EOF. */
1190 if (fd == iobuf.in_fd)
1192 else if (safe_read(fd, &ch, 1) == 0)
1194 if (flags & RL_EOL_NULLS ? ch == '\0' : (ch == '\r' || ch == '\n')) {
1195 /* Skip empty lines if dumping comments. */
1196 if (flags & RL_DUMP_COMMENTS && s == buf)
1205 if (flags & RL_DUMP_COMMENTS && (*buf == '#' || *buf == ';'))
1209 if (flags & RL_CONVERT) {
1211 INIT_XBUF(outbuf, buf, 0, bufsiz);
1213 iconv_buf.len = s - iconv_buf.buf;
1214 iconvbufs(ic_recv, &iconv_buf, &outbuf,
1215 ICB_INCLUDE_BAD | ICB_INCLUDE_INCOMPLETE | ICB_INIT);
1216 outbuf.buf[outbuf.len] = '\0';
/* Read the remote args from f_in, one per line, building up *argv_p/*argc_p.
 * If request_p is non-NULL, a space-joined copy of the (first ~1KB of the)
 * args is accumulated there for logging.  Module args are glob-expanded. */
1224 void read_args(int f_in, char *mod_name, char *buf, size_t bufsiz, int rl_nulls,
1225 char ***argv_p, int *argc_p, char **request_p)
1227 int maxargs = MAX_ARGS;
1228 int dot_pos = 0, argc = 0, request_len = 0;
1230 int rl_flags = (rl_nulls ? RL_EOL_NULLS : 0);
/* Only convert the charset when arg-protection keeps args one-per-line. */
1233 rl_flags |= (protect_args && ic_recv != (iconv_t)-1 ? RL_CONVERT : 0);
1236 if (!(argv = new_array(char *, maxargs)))
1237 out_of_memory("read_args");
1238 if (mod_name && !protect_args)
1239 argv[argc++] = "rsyncd";
/* An empty line terminates the arg list. */
1245 if (read_line(f_in, buf, bufsiz, rl_flags) == 0)
/* Grow argv in MAX_ARGS increments as needed. */
1248 if (argc == maxargs-1) {
1249 maxargs += MAX_ARGS;
1250 if (!(argv = realloc_array(argv, char *, maxargs)))
1251 out_of_memory("read_args");
/* Cap the logged request string at ~1KB. */
1255 if (request_p && request_len < 1024) {
1256 int len = strlen(buf);
1258 request_p[0][request_len++] = ' ';
1259 if (!(*request_p = realloc_array(*request_p, char, request_len + len + 1)))
1260 out_of_memory("read_args");
1261 memcpy(*request_p + request_len, buf, len + 1);
1265 glob_expand_module(mod_name, buf, &argv, &argc, &maxargs);
1267 glob_expand(buf, &argv, &argc, &maxargs);
1269 if (!(p = strdup(buf)))
1270 out_of_memory("read_args");
/* Remember a lone "." arg (dot_pos bookkeeping presumably — elided here). */
1272 if (*p == '.' && p[1] == '\0')
/* Tell glob_expand to release its internal state. */
1278 glob_expand(NULL, NULL, NULL, NULL);
/* Enable buffered output on f_out, allocating iobuf.out if needed.
 * If a buffer already exists, just (re)bind the fd and sanity-check it. */
1284 BOOL io_start_buffering_out(int f_out)
1286 if (msgs2stderr && DEBUG_GTE(IO, 2))
1287 rprintf(FINFO, "[%s] io_start_buffering_out(%d)\n", who_am_i(), f_out);
1289 if (iobuf.out.buf) {
1290 if (iobuf.out_fd == -1)
1291 iobuf.out_fd = f_out;
1293 assert(f_out == iobuf.out_fd);
/* Double-sized output buffer (see IO_BUFFER_SIZE), rounded up to 1K. */
1297 alloc_xbuf(&iobuf.out, ROUND_UP_1024(IO_BUFFER_SIZE * 2));
1298 iobuf.out_fd = f_out;
/* Enable buffered input on f_in, allocating iobuf.in if needed. */
1303 BOOL io_start_buffering_in(int f_in)
1305 if (msgs2stderr && DEBUG_GTE(IO, 2))
1306 rprintf(FINFO, "[%s] io_start_buffering_in(%d)\n", who_am_i(), f_in);
1309 if (iobuf.in_fd == -1)
1312 assert(f_in == iobuf.in_fd);
1316 alloc_xbuf(&iobuf.in, ROUND_UP_1024(IO_BUFFER_SIZE));
/* Stop buffered input; either free the input buffer or just reset it
 * for reuse, depending on free_buffers. */
1322 void io_end_buffering_in(BOOL free_buffers)
1324 if (msgs2stderr && DEBUG_GTE(IO, 2)) {
1325 rprintf(FINFO, "[%s] io_end_buffering_in(IOBUF_%s_BUFS)\n",
1326 who_am_i(), free_buffers ? "FREE" : "KEEP");
1330 free_xbuf(&iobuf.in);
1332 iobuf.in.pos = iobuf.in.len = 0;
/* Stop buffered output: flush everything pending, then optionally free
 * the output and message buffers. */
1337 void io_end_buffering_out(BOOL free_buffers)
1339 if (msgs2stderr && DEBUG_GTE(IO, 2)) {
1340 rprintf(FINFO, "[%s] io_end_buffering_out(IOBUF_%s_BUFS)\n",
1341 who_am_i(), free_buffers ? "FREE" : "KEEP");
/* Flush before releasing the buffers. */
1344 io_flush(FULL_FLUSH);
1347 free_xbuf(&iobuf.out);
1348 free_xbuf(&iobuf.msg);
/* Flush pending output if it is "important" or if at least 5 seconds have
 * passed since the last output I/O (only once the flist has been fully sent). */
1354 void maybe_flush_socket(int important)
1356 if (flist_eof && iobuf.out.buf && iobuf.out.len > iobuf.out_empty_len
1357 && (important || time(NULL) - last_io_out >= 5))
1358 io_flush(NORMAL_FLUSH);
1361 /* Older rsync versions used to send either a MSG_NOOP (protocol 30) or a
1362 * raw-data-based keep-alive (protocol 29), both of which implied forwarding of
1363 * the message through the sender. Since the new timeout method does not need
1364 * any forwarding, we just send an empty MSG_DATA message, which works with all
1365 * rsync versions. This avoids any message forwarding, and leaves the raw-data
1366 * stream alone (since we can never be quite sure if that stream is in the
1367 * right state for a keep-alive message). */
1368 void maybe_send_keepalive(time_t now, int flags)
1370 if (flags & MSK_ACTIVE_RECEIVER)
1371 last_io_in = now; /* Fudge things when we're working hard on the files. */
/* Only bother when we've been output-idle for the allowed lull period. */
1373 if (now - last_io_out >= allowed_lull) {
1374 /* The receiver is special: it only sends keep-alive messages if it is
1375 * actively receiving data. Otherwise, it lets the generator timeout. */
1376 if (am_receiver && now - last_io_in >= io_timeout)
/* Queue an empty MSG_DATA only if nothing is already pending. */
1379 if (!iobuf.msg.len && iobuf.out.len == iobuf.out_empty_len)
1380 send_msg(MSG_DATA, "", 0, 0);
1381 if (!(flags & MSK_ALLOW_FLUSH)) {
1382 /* Let the caller worry about writing out the data. */
1383 } else if (iobuf.msg.len)
1384 perform_io(iobuf.msg.size - iobuf.msg.len + 1, PIO_NEED_MSGROOM);
1385 else if (iobuf.out.len > iobuf.out_empty_len)
1386 io_flush(NORMAL_FLUSH);
/* Begin forwarding incoming file-list data for dir index "ndx" to out_fd
 * (read_buf() echoes the raw data while forward_flist_data is set). */
1390 void start_flist_forward(int ndx)
1392 write_int(iobuf.out_fd, ndx);
1393 forward_flist_data = 1;
/* Stop the file-list forwarding started by start_flist_forward(). */
1396 void stop_flist_forward(void)
1398 forward_flist_data = 0;
1401 /* Read a message from a multiplexed source. */
1402 static void read_a_msg(void)
1404 char data[BIGPATHBUFLEN];
1408 /* This ensures that perform_io() does not try to do any message reading
1409 * until we've read all of the data for this message. We should also
1410 * try to avoid calling things that will cause data to be written via
1411 * perform_io() prior to this being reset to 1. */
1412 iobuf.in_multiplexed = -1;
/* Header word: low 24 bits = payload length, high byte = MPLEX_BASE + tag. */
1414 tag = raw_read_int();
1416 msg_bytes = tag & 0xFFFFFF;
1417 tag = (tag >> 24) - MPLEX_BASE;
1419 if (DEBUG_GTE(IO, 1) && msgs2stderr)
1420 rprintf(FINFO, "[%s] got msg=%d, len=%ld\n", who_am_i(), (int)tag, (long)msg_bytes);
/* (MSG_DATA case — label elided in this view.) */
1424 assert(iobuf.raw_input_ends_before == 0);
1425 /* Though this does not yet read the data, we do mark where in
1426 * the buffer the msg data will end once it is read. It is
1427 * possible that this points off the end of the buffer, in
1428 * which case the gradual reading of the input stream will
1429 * cause this value to wrap around and eventually become real. */
1431 iobuf.raw_input_ends_before = iobuf.in.pos + msg_bytes;
1432 iobuf.in_multiplexed = 1;
/* (MSG_STATS case, generator-only.) */
1435 if (msg_bytes != sizeof stats.total_read || !am_generator)
1437 raw_read_buf((char*)&stats.total_read, sizeof stats.total_read);
1438 iobuf.in_multiplexed = 1;
/* (MSG_REDO case, generator-only; payload is one int index.) */
1441 if (msg_bytes != 4 || !am_generator)
1443 val = raw_read_int();
1444 iobuf.in_multiplexed = 1;
1445 got_flist_entry_status(FES_REDO, val);
/* (MSG_IO_ERROR case — relayed onward via send_msg_int.) */
1450 val = raw_read_int();
1451 iobuf.in_multiplexed = 1;
1454 send_msg_int(MSG_IO_ERROR, val);
1456 case MSG_IO_TIMEOUT:
1457 if (msg_bytes != 4 || am_server || am_generator)
1459 val = raw_read_int();
1460 iobuf.in_multiplexed = 1;
/* Adopt the server's (smaller) timeout so both sides agree. */
1461 if (!io_timeout || io_timeout > val) {
1462 if (INFO_GTE(MISC, 2))
1463 rprintf(FINFO, "Setting --timeout=%d to match server\n", val);
1464 set_io_timeout(val);
1468 /* Support protocol-30 keep-alive method. */
1471 iobuf.in_multiplexed = 1;
1473 maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
/* (MSG_DELETED case.)  Reject payloads that would overflow "data". */
1476 if (msg_bytes >= sizeof data)
1479 raw_read_buf(data, msg_bytes);
1480 iobuf.in_multiplexed = 1;
1481 send_msg(MSG_DELETED, data, msg_bytes, 1);
/* With iconv active, re-encode the deleted name before logging it. */
1485 if (ic_recv != (iconv_t)-1) {
1489 int flags = ICB_INCLUDE_BAD | ICB_INIT;
1491 INIT_CONST_XBUF(outbuf, data);
1492 INIT_XBUF(inbuf, ibuf, 0, (size_t)-1);
/* Pull the payload in ibuf-sized chunks and convert incrementally. */
1495 size_t len = msg_bytes > sizeof ibuf - inbuf.len ? sizeof ibuf - inbuf.len : msg_bytes;
1496 raw_read_buf(ibuf + inbuf.len, len);
/* On the final chunk, peel off a trailing NUL so it isn't converted. */
1499 if (!(msg_bytes -= len) && !ibuf[inbuf.len-1])
1500 inbuf.len--, add_null = 1;
1501 if (iconvbufs(ic_send, &inbuf, &outbuf, flags) < 0) {
1504 /* Buffer ended with an incomplete char, so move the
1505 * bytes to the start of the buffer and continue. */
1506 memmove(ibuf, ibuf + inbuf.pos, inbuf.len);
1511 if (outbuf.len == outbuf.size)
1513 outbuf.buf[outbuf.len++] = '\0';
1515 msg_bytes = outbuf.len;
1518 raw_read_buf(data, msg_bytes);
1519 iobuf.in_multiplexed = 1;
1520 /* A directory name was sent with the trailing null */
1521 if (msg_bytes > 0 && !data[msg_bytes-1])
1522 log_delete(data, S_IFDIR);
1524 data[msg_bytes] = '\0';
1525 log_delete(data, S_IFREG);
/* (MSG_SUCCESS case; payload must be exactly one int.) */
1529 if (msg_bytes != 4) {
1531 rprintf(FERROR, "invalid multi-message %d:%lu [%s%s]\n",
1532 tag, (unsigned long)msg_bytes, who_am_i(),
1533 inc_recurse ? "/inc" : "");
1534 exit_cleanup(RERR_STREAMIO);
1536 val = raw_read_int();
1537 iobuf.in_multiplexed = 1;
1539 got_flist_entry_status(FES_SUCCESS, val);
1541 successful_send(val);
/* (MSG_NO_SEND case.) */
1546 val = raw_read_int();
1547 iobuf.in_multiplexed = 1;
1549 got_flist_entry_status(FES_NO_SEND, val);
1551 send_msg_int(MSG_NO_SEND, val);
1553 case MSG_ERROR_SOCKET:
1554 case MSG_ERROR_UTF8:
1559 if (tag == MSG_ERROR_SOCKET)
1564 case MSG_ERROR_XFER:
/* Error/info text messages: bounded copy then rwrite() to the log. */
1566 if (msg_bytes >= sizeof data) {
1569 "multiplexing overflow %d:%lu [%s%s]\n",
1570 tag, (unsigned long)msg_bytes, who_am_i(),
1571 inc_recurse ? "/inc" : "");
1572 exit_cleanup(RERR_STREAMIO);
1574 raw_read_buf(data, msg_bytes);
1575 /* We don't set in_multiplexed value back to 1 before writing this message
1576 * because the write might loop back and read yet another message, over and
1577 * over again, while waiting for room to put the message in the msg buffer. */
1578 rwrite((enum logcode)tag, data, msg_bytes, !am_generator);
1579 iobuf.in_multiplexed = 1;
/* The very first message may be the server complaining about -d. */
1580 if (first_message) {
1581 if (list_only && !am_sender && tag == 1 && msg_bytes < sizeof data) {
1582 data[msg_bytes] = '\0';
1583 check_for_d_option_error(data);
1588 case MSG_ERROR_EXIT:
1590 val = raw_read_int();
1591 else if (msg_bytes == 0)
1595 iobuf.in_multiplexed = 1;
1596 if (DEBUG_GTE(EXIT, 3))
1597 rprintf(FINFO, "[%s] got MSG_ERROR_EXIT with %ld bytes\n", who_am_i(), (long)msg_bytes);
/* Relay the exit notification to the next process in the chain. */
1598 if (msg_bytes == 0) {
1599 if (!am_sender && !am_generator) {
1600 if (DEBUG_GTE(EXIT, 3)) {
1601 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
1604 send_msg(MSG_ERROR_EXIT, "", 0, 0);
1605 io_flush(FULL_FLUSH);
1607 } else if (protocol_version >= 31) {
1609 if (DEBUG_GTE(EXIT, 3)) {
1610 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT with exit_code %d\n",
1613 send_msg_int(MSG_ERROR_EXIT, val);
1615 if (DEBUG_GTE(EXIT, 3)) {
1616 rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
1619 send_msg(MSG_ERROR_EXIT, "", 0, 0);
1622 /* Send a negative linenum so that we don't end up
1623 * with a duplicate exit message. */
1624 _exit_cleanup(val, __FILE__, 0 - __LINE__);
/* (default case: unknown tag is a fatal protocol error.) */
1626 rprintf(FERROR, "unexpected tag %d [%s%s]\n",
1627 tag, who_am_i(), inc_recurse ? "/inc" : "");
1628 exit_cleanup(RERR_STREAMIO);
/* Every case must have restored normal multiplexed-reading mode. */
1631 assert(iobuf.in_multiplexed > 0);
/* Consume any buffered multiplexed messages, discarding pending raw data
 * so that each message header can be processed in turn. */
1634 static void drain_multiplex_messages(void)
1636 while (IN_MULTIPLEXED_AND_READY && iobuf.in.len) {
1637 if (iobuf.raw_input_ends_before) {
/* Skip over (discard) the remaining raw data of the current message. */
1638 size_t raw_len = iobuf.raw_input_ends_before - iobuf.in.pos;
1639 iobuf.raw_input_ends_before = 0;
1640 if (raw_len >= iobuf.in.len) {
1644 iobuf.in.len -= raw_len;
/* Advance pos with wrap-around in the circular input buffer. */
1645 if ((iobuf.in.pos += raw_len) >= iobuf.in.size)
1646 iobuf.in.pos -= iobuf.in.size;
/* Generator-side wait loop: block until the receiver sends something, then
 * process it — either a raw-data item (e.g. an incremental flist for a dir,
 * or an NDX) or queued messages. */
1652 void wait_for_receiver(void)
1654 if (!iobuf.raw_input_ends_before)
1657 if (iobuf.raw_input_ends_before) {
/* Raw data pending: the first item is an index value. */
1658 int ndx = read_int(iobuf.in_fd)
/* (NDX_FLIST_EOF handling — surrounding lines elided in this view.) */
1663 if (DEBUG_GTE(FLIST, 3))
1664 rprintf(FINFO, "[%s] flist_eof=1\n", who_am_i());
1670 exit_cleanup(RERR_STREAMIO);
/* A dir index: receive that directory's file list. */
1673 struct file_list *flist;
1674 flist_receiving_enabled = False;
1675 if (DEBUG_GTE(FLIST, 2)) {
1676 rprintf(FINFO, "[%s] receiving flist for dir %d\n",
1679 flist = recv_file_list(iobuf.in_fd);
1680 flist->parent_ndx = ndx;
1681 #ifdef SUPPORT_HARD_LINKS
1682 if (preserve_hard_links)
1683 match_hard_links(flist);
1685 flist_receiving_enabled = True;
/* Read a 16-bit unsigned value from f (little-endian on the wire). */
1690 unsigned short read_shortint(int f)
1694 return (UVAL(b, 1) << 8) + UVAL(b, 0);
/* Read a 32-bit signed value from f, sign-extending when int32 is wider
 * than 4 bytes on this platform. */
1697 int32 read_int(int f)
1704 #if SIZEOF_INT32 > 4
1705 if (num & (int32)0x80000000)
1706 num |= ~(int32)0xffffffff;
/* Read a variable-length int32: the first byte's high bits encode how many
 * extra bytes follow (via the int_byte_extra[] lookup table). */
1711 int32 read_varint(int f)
1722 extra = int_byte_extra[ch / 4];
/* "bit" marks the boundary between length-prefix bits and data bits. */
1724 uchar bit = ((uchar)1<<(8-extra));
1725 if (extra >= (int)sizeof u.b) {
1726 rprintf(FERROR, "Overflow in read_varint()\n");
1727 exit_cleanup(RERR_STREAMIO);
1729 read_buf(f, u.b, extra);
/* The leading byte's remaining data bits become the most-significant byte. */
1730 u.b[extra] = ch & (bit-1);
1733 #if CAREFUL_ALIGNMENT
1736 #if SIZEOF_INT32 > 4
1737 if (u.x & (int32)0x80000000)
1738 u.x |= ~(int32)0xffffffff;
/* Read a variable-length int64 that was sent with at least min_bytes bytes;
 * the first byte's high bits say how many extra bytes follow. */
1743 int64 read_varlong(int f, uchar min_bytes)
1752 #if SIZEOF_INT64 < 8
1757 read_buf(f, b2, min_bytes);
1758 memcpy(u.b, b2+1, min_bytes-1);
1759 extra = int_byte_extra[CVAL(b2, 0) / 4];
1761 uchar bit = ((uchar)1<<(8-extra));
1762 if (min_bytes + extra > (int)sizeof u.b) {
1763 rprintf(FERROR, "Overflow in read_varlong()\n");
1764 exit_cleanup(RERR_STREAMIO);
1766 read_buf(f, u.b + min_bytes - 1, extra);
/* Mask off the length-prefix bits from the leading byte. */
1767 u.b[min_bytes + extra - 1] = CVAL(b2, 0) & (bit-1);
1768 #if SIZEOF_INT64 < 8
/* Without a real 64-bit type we cannot represent values >= 2^31. */
1769 if (min_bytes + extra > 5 || u.b[4] || CVAL(u.b,3) & 0x80) {
1770 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
1771 exit_cleanup(RERR_UNSUPPORTED);
1775 u.b[min_bytes + extra - 1] = CVAL(b2, 0);
1776 #if SIZEOF_INT64 < 8
1778 #elif CAREFUL_ALIGNMENT
/* Assemble the value from the little-endian byte array. */
1779 u.x = IVAL(u.b,0) | (((int64)IVAL(u.b,4))<<32);
/* Read a 64-bit value in the old (pre-varlong) format: an int32, or the
 * 0xFFFFFFFF marker followed by 8 little-endian bytes. */
1784 int64 read_longint(int f)
1786 #if SIZEOF_INT64 >= 8
1789 int32 num = read_int(f);
1791 if (num != (int32)0xffffffff)
1794 #if SIZEOF_INT64 < 8
1795 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
1796 exit_cleanup(RERR_UNSUPPORTED);
1799 return IVAL(b,0) | (((int64)IVAL(b,4))<<32);
/* Read "len" bytes into "buf" from fd "f", handling non-iobuf fds, plain
 * buffered input, and demultiplexed raw-data extraction.  Also echoes the
 * data for flist forwarding and batch writing when those are active. */
1803 void read_buf(int f, char *buf, size_t len)
1805 if (f != iobuf.in_fd) {
/* Not our buffered input fd: do a plain blocking read. */
1806 if (safe_read(f, buf, len) != len)
1807 whine_about_eof(False); /* Doesn't return. */
1811 if (!IN_MULTIPLEXED) {
1812 raw_read_buf(buf, len);
1813 total_data_read += len;
1814 if (forward_flist_data)
1815 write_buf(iobuf.out_fd, buf, len);
1817 if (f == write_batch_monitor_in)
1818 safe_write(batch_fd, buf, len);
/* Multiplexed: wait until a data message's raw payload is available. */
1825 while (!iobuf.raw_input_ends_before)
/* Take no more than the current message's remaining payload. */
1828 siz = MIN(len, iobuf.raw_input_ends_before - iobuf.in.pos);
1829 if (siz >= iobuf.in.size)
1830 siz = iobuf.in.size;
1831 raw_read_buf(buf, siz);
1832 total_data_read += siz;
1834 if (forward_flist_data)
1835 write_buf(iobuf.out_fd, buf, siz);
1837 if (f == write_batch_monitor_in)
1838 safe_write(batch_fd, buf, siz);
1840 if ((len -= siz) == 0)
/* Read "len" bytes and null-terminate them as a string. */
1846 void read_sbuf(int f, char *buf, size_t len)
1848 read_buf(f, buf, len);
/* Read a single unsigned byte from f. */
1852 uchar read_byte(int f)
1855 read_buf(f, (char*)&c, 1);
/* Read a variable-length string: a 1- or 2-byte length (high bit of the
 * first byte marks the 2-byte form) followed by the string data. */
1859 int read_vstring(int f, char *buf, int bufsize)
1861 int len = read_byte(f);
1864 len = (len & ~0x80) * 0x100 + read_byte(f);
1866 if (len >= bufsize) {
1867 rprintf(FERROR, "over-long vstring received (%d > %d)\n",
1873 read_buf(f, buf, len);
1878 /* Populate a sum_struct with values from the socket. This is
1879 * called by both the sender and the receiver. */
1880 void read_sum_head(int f, struct sum_struct *sum)
/* Every field is range-checked: a bad value is a fatal protocol error. */
1882 int32 max_blength = protocol_version < 30 ? OLD_MAX_BLOCK_SIZE : MAX_BLOCK_SIZE;
1883 sum->count = read_int(f);
1884 if (sum->count < 0) {
1885 rprintf(FERROR, "Invalid checksum count %ld [%s]\n",
1886 (long)sum->count, who_am_i());
1887 exit_cleanup(RERR_PROTOCOL);
1889 sum->blength = read_int(f);
1890 if (sum->blength < 0 || sum->blength > max_blength) {
1891 rprintf(FERROR, "Invalid block length %ld [%s]\n",
1892 (long)sum->blength, who_am_i());
1893 exit_cleanup(RERR_PROTOCOL);
/* Protocols < 27 used a fixed s2length (csum_length). */
1895 sum->s2length = protocol_version < 27 ? csum_length : (int)read_int(f);
1896 if (sum->s2length < 0 || sum->s2length > MAX_DIGEST_LEN) {
1897 rprintf(FERROR, "Invalid checksum length %d [%s]\n",
1898 sum->s2length, who_am_i());
1899 exit_cleanup(RERR_PROTOCOL);
1901 sum->remainder = read_int(f);
1902 if (sum->remainder < 0 || sum->remainder > sum->blength) {
1903 rprintf(FERROR, "Invalid remainder length %ld [%s]\n",
1904 (long)sum->remainder, who_am_i());
1905 exit_cleanup(RERR_PROTOCOL);
1909 /* Send the values from a sum_struct over the socket. Set sum to
1910 * NULL if there are no checksums to send. This is called by both
1911 * the generator and the sender. */
1912 void write_sum_head(int f, struct sum_struct *sum)
/* null_sum is zero-initialized, used when sum == NULL. */
1914 static struct sum_struct null_sum;
1919 write_int(f, sum->count);
1920 write_int(f, sum->blength);
/* s2length was not sent before protocol 27 (see read_sum_head). */
1921 if (protocol_version >= 27)
1922 write_int(f, sum->s2length);
1923 write_int(f, sum->remainder);
1926 /* Sleep after writing to limit I/O bandwidth usage.
1928 * @todo Rather than sleeping after each write, it might be better to
1929 * use some kind of averaging. The current algorithm seems to always
1930 * use a bit less bandwidth than specified, because it doesn't make up
1931 * for slow periods. But arguably this is a feature. In addition, we
1932 * ought to take the time used to write the data into account.
1934 * During some phases of big transfers (file FOO is uptodate) this is
1935 * called with a small bytes_written every time. As the kernel has to
1936 * round small waits up to guarantee that we actually wait at least the
1937 * requested number of microseconds, this can become grossly inaccurate.
1938 * We therefore keep track of the bytes we've written over time and only
1939 * sleep when the accumulated delay is at least 1 tenth of a second. */
1940 static void sleep_for_bwlimit(int bytes_written)
1942 static struct timeval prior_tv;
1943 static long total_written = 0;
1944 struct timeval tv, start_tv;
1945 long elapsed_usec, sleep_usec;
1947 #define ONE_SEC 1000000L /* # of microseconds in a second */
1949 total_written += bytes_written;
1951 gettimeofday(&start_tv, NULL);
1952 if (prior_tv.tv_sec) {
/* Credit the bytes "earned" by the bandwidth limit since the last call. */
1953 elapsed_usec = (start_tv.tv_sec - prior_tv.tv_sec) * ONE_SEC
1954 + (start_tv.tv_usec - prior_tv.tv_usec);
1955 total_written -= (int64)elapsed_usec * bwlimit / (ONE_SEC/1024);
1956 if (total_written < 0)
/* bwlimit is in KB/s, hence the ONE_SEC/1024 scaling. */
1960 sleep_usec = total_written * (ONE_SEC/1024) / bwlimit;
1961 if (sleep_usec < ONE_SEC / 10) {
/* Accumulated debt is under 0.1s: defer sleeping until later. */
1962 prior_tv = start_tv;
1966 tv.tv_sec = sleep_usec / ONE_SEC;
1967 tv.tv_usec = sleep_usec % ONE_SEC;
/* select() with no fds is used as a sub-second sleep. */
1968 select(0, NULL, NULL, NULL, &tv);
1970 gettimeofday(&prior_tv, NULL);
/* Carry over any over/under-sleep into the running byte total. */
1971 elapsed_usec = (prior_tv.tv_sec - start_tv.tv_sec) * ONE_SEC
1972 + (prior_tv.tv_usec - start_tv.tv_usec);
1973 total_written = (sleep_usec - elapsed_usec) * bwlimit / (ONE_SEC/1024);
/* Flush pending output.  FULL_FLUSH empties the output buffer entirely;
 * NORMAL_FLUSH just makes room for at least one more byte. */
1976 void io_flush(int flush_it_all)
1978 if (iobuf.out.len > iobuf.out_empty_len) {
1979 if (flush_it_all) /* FULL_FLUSH: flush everything in the output buffers */
1980 perform_io(iobuf.out.size - iobuf.out_empty_len, PIO_NEED_OUTROOM);
1981 else /* NORMAL_FLUSH: flush at least 1 byte */
1982 perform_io(iobuf.out.size - iobuf.out.len + 1, PIO_NEED_OUTROOM);
/* Also drain any pending MSG_* data. */
1985 perform_io(iobuf.msg.size, PIO_NEED_MSGROOM);
/* Write a 16-bit unsigned value to f (little-endian on the wire). */
1988 void write_shortint(int f, unsigned short x)
1992 b[1] = (char)(x >> 8);
1996 void write_int(int f, int32 x)
/* Write a variable-length int32: trailing zero bytes are dropped and the
 * byte count is encoded in the high bits of the leading byte. */
2003 void write_varint(int f, int32 x)
/* Trim high-order zero bytes. */
2011 while (cnt > 1 && b[cnt] == 0)
2013 bit = ((uchar)1<<(7-cnt+1));
/* If the top data byte collides with the length-prefix bits,
 * send it as an extra byte instead. */
2014 if (CVAL(b, cnt) >= bit) {
2018 *b = b[cnt] | ~(bit*2-1);
2022 write_buf(f, b, cnt);
/* Write a variable-length int64 with at least min_bytes bytes; the extra
 * byte count is encoded in the high bits of the leading byte. */
2025 void write_varlong(int f, int64 x, uchar min_bytes)
2032 #if SIZEOF_INT64 >= 8
2033 SIVAL(b, 5, x >> 32);
/* Without 64-bit ints, only values that fit in 31 bits are representable. */
2035 if (x <= 0x7FFFFFFF && x >= 0)
2036 memset(b + 5, 0, 4);
2038 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
2039 exit_cleanup(RERR_UNSUPPORTED);
/* Trim high-order zero bytes, but never below min_bytes. */
2043 while (cnt > min_bytes && b[cnt] == 0)
2045 bit = ((uchar)1<<(7-cnt+min_bytes));
2046 if (CVAL(b, cnt) >= bit) {
2049 } else if (cnt > min_bytes)
2050 *b = b[cnt] | ~(bit*2-1);
2054 write_buf(f, b, cnt);
2058 * Note: int64 may actually be a 32-bit type if ./configure couldn't find any
2059 * 64-bit types on this platform.
2061 void write_longint(int f, int64 x)
/* s points past the leading 4-byte 0xFFFFFFFF marker area in b. */
2063 char b[12], * const s = b+4;
/* Small non-negative values fit in the plain 4-byte form. */
2066 if (x <= 0x7FFFFFFF && x >= 0) {
2071 #if SIZEOF_INT64 < 8
2072 rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
2073 exit_cleanup(RERR_UNSUPPORTED);
/* Otherwise: marker + 8 little-endian bytes (12 bytes total). */
2076 SIVAL(s, 4, x >> 32);
2077 write_buf(f, b, 12);
/* Append "len" bytes to the output stream for fd "f".  Non-iobuf fds get a
 * direct blocking write; the buffered fd goes through the circular
 * iobuf.out buffer (flushing first if there isn't room). */
2081 void write_buf(int f, const char *buf, size_t len)
2085 if (f != iobuf.out_fd) {
2086 safe_write(f, buf, len);
/* Make room in the circular buffer before copying. */
2090 if (iobuf.out.len + len > iobuf.out.size)
2091 perform_io(len, PIO_NEED_OUTROOM);
2093 pos = iobuf.out.pos + iobuf.out.len; /* Must be set after any flushing. */
2094 if (pos >= iobuf.out.size)
2095 pos -= iobuf.out.size;
2097 /* Handle a split copy if we wrap around the end of the circular buffer. */
2098 if (pos >= iobuf.out.pos && (siz = iobuf.out.size - pos) < len) {
2099 memcpy(iobuf.out.buf + pos, buf, siz);
2100 memcpy(iobuf.out.buf, buf + siz, len - siz);
2102 memcpy(iobuf.out.buf + pos, buf, len);
2104 iobuf.out.len += len;
2105 total_data_written += len;
/* Mirror the data into the batch file when batch writing is active. */
2108 if (f == write_batch_monitor_out)
2109 safe_write(batch_fd, buf, len);
2112 /* Write a string to the connection */
2113 void write_sbuf(int f, const char *buf)
2115 write_buf(f, buf, strlen(buf));
/* Write a single byte to f. */
2118 void write_byte(int f, uchar c)
2120 write_buf(f, (char *)&c, 1);
/* Write a variable-length string: a 1- or 2-byte length (high bit of the
 * first byte marks the 2-byte form) followed by the string data. */
2123 void write_vstring(int f, const char *str, int len)
2125 uchar lenbuf[3], *lb = lenbuf;
2130 "attempting to send over-long vstring (%d > %d)\n",
2132 exit_cleanup(RERR_PROTOCOL);
/* Two-byte length form: high byte with 0x80 set, then low byte. */
2134 *lb++ = len / 0x100 + 0x80;
2138 write_buf(f, (char*)lenbuf, lb - lenbuf + 1);
2140 write_buf(f, str, len);
2143 /* Send a file-list index using a byte-reduction method. */
2144 void write_ndx(int f, int32 ndx)
/* Deltas are kept separately for positive and negative index streams. */
2146 static int32 prev_positive = -1, prev_negative = 1;
2147 int32 diff, cnt = 0;
/* Old protocols (and batch reading) send the index as a plain int. */
2150 if (protocol_version < 30 || read_batch) {
2155 /* Send NDX_DONE as a single-byte 0 with no side effects. Send
2156 * negative nums as a positive after sending a leading 0xFF. */
2158 diff = ndx - prev_positive;
2159 prev_positive = ndx;
2160 } else if (ndx == NDX_DONE) {
2165 b[cnt++] = (char)0xFF;
2167 diff = ndx - prev_negative;
2168 prev_negative = ndx;
2171 /* A diff of 1 - 253 is sent as a one-byte diff; a diff of 254 - 32767
2172 * or 0 is sent as a 0xFE + a two-byte diff; otherwise we send 0xFE
2173 * & all 4 bytes of the (non-negative) num with the high-bit set. */
2174 if (diff < 0xFE && diff > 0)
2175 b[cnt++] = (char)diff;
2176 else if (diff < 0 || diff > 0x7FFF) {
2177 b[cnt++] = (char)0xFE;
2178 b[cnt++] = (char)((ndx >> 24) | 0x80);
2179 b[cnt++] = (char)ndx;
2180 b[cnt++] = (char)(ndx >> 8);
2181 b[cnt++] = (char)(ndx >> 16);
2183 b[cnt++] = (char)0xFE;
2184 b[cnt++] = (char)(diff >> 8);
2185 b[cnt++] = (char)diff;
2187 write_buf(f, b, cnt);
2190 /* Receive a file-list index using a byte-reduction method. */
2191 int32 read_ndx(int f)
/* Mirrors the per-direction delta state kept by write_ndx(). */
2193 static int32 prev_positive = -1, prev_negative = 1;
2194 int32 *prev_ptr, num;
/* Old protocols send the index as a plain int. */
2197 if (protocol_version < 30)
/* Leading 0xFF selects the negative-number stream. */
2201 if (CVAL(b, 0) == 0xFF) {
2203 prev_ptr = &prev_negative;
2204 } else if (CVAL(b, 0) == 0)
2207 prev_ptr = &prev_positive;
/* 0xFE introduces either a 2-byte diff or a full 4-byte value. */
2208 if (CVAL(b, 0) == 0xFE) {
2210 if (CVAL(b, 0) & 0x80) {
2211 b[3] = CVAL(b, 0) & ~0x80;
2213 read_buf(f, b+1, 2);
2216 num = (UVAL(b,0)<<8) + UVAL(b,1) + *prev_ptr;
2218 num = UVAL(b, 0) + *prev_ptr;
2220 if (prev_ptr == &prev_negative)
2225 /* Read a line of up to bufsiz-1 characters into buf. Strips
2226 * the (required) trailing newline and all carriage returns.
2227 * Returns 1 for success; 0 for I/O error or truncation. */
2228 int read_line_old(int fd, char *buf, size_t bufsiz, int eof_ok)
/* Must not be used on the multiplexed input fd. */
2230 assert(fd != iobuf.in_fd);
2231 bufsiz--; /* leave room for the null */
2232 while (bufsiz > 0) {
2233 if (safe_read(fd, buf, 1) == 0) {
/* printf-style formatting into a local buffer, then send it over the
 * connection with write_sbuf().  Over-long output is a fatal error. */
2251 void io_printf(int fd, const char *format, ...)
2254 char buf[BIGPATHBUFLEN];
2257 va_start(ap, format);
2258 len = vsnprintf(buf, sizeof buf, format, ap);
2262 exit_cleanup(RERR_PROTOCOL);
2264 if (len > (int)sizeof buf) {
2265 rprintf(FERROR, "io_printf() was too long for the buffer.\n");
2266 exit_cleanup(RERR_PROTOCOL);
2269 write_sbuf(fd, buf);
2272 /* Setup for multiplexing a MSG_* stream with the data stream. */
2273 void io_start_multiplex_out(int fd)
2275 io_flush(FULL_FLUSH);
2277 if (msgs2stderr && DEBUG_GTE(IO, 2))
2278 rprintf(FINFO, "[%s] io_start_multiplex_out(%d)\n", who_am_i(), fd);
2281 alloc_xbuf(&iobuf.msg, ROUND_UP_1024(IO_BUFFER_SIZE));
/* A 4-byte data-message header is always reserved at the buffer start. */
2283 iobuf.out_empty_len = 4; /* See also OUT_MULTIPLEXED */
2284 io_start_buffering_out(fd);
2286 iobuf.raw_data_header_pos = iobuf.out.pos + iobuf.out.len;
2290 /* Setup for multiplexing a MSG_* stream with the data stream. */
2291 void io_start_multiplex_in(int fd)
2293 if (msgs2stderr && DEBUG_GTE(IO, 2))
2294 rprintf(FINFO, "[%s] io_start_multiplex_in(%d)\n", who_am_i(), fd);
2296 iobuf.in_multiplexed = 1; /* See also IN_MULTIPLEXED */
2297 io_start_buffering_in(fd);
/* Stop multiplexed input.  Returns the input fd that was multiplexed, or
 * -1 if multiplexing wasn't on.  "mode" selects how buffering is wound down
 * (MPLX_SWITCHING discards the raw-data bookkeeping; MPLX_TO_BUFFERED keeps
 * the input buffer in place). */
2300 int io_end_multiplex_in(int mode)
2302 int ret = iobuf.in_multiplexed ? iobuf.in_fd : -1;
2304 if (msgs2stderr && DEBUG_GTE(IO, 2))
2305 rprintf(FINFO, "[%s] io_end_multiplex_in(mode=%d)\n", who_am_i(), mode);
2307 iobuf.in_multiplexed = 0;
2308 if (mode == MPLX_SWITCHING)
2309 iobuf.raw_input_ends_before = 0;
2311 assert(iobuf.raw_input_ends_before == 0);
2312 if (mode != MPLX_TO_BUFFERED)
2313 io_end_buffering_in(mode);
/* Stop multiplexed output.  Returns the output fd that was multiplexed, or
 * -1 if multiplexing wasn't on.  MPLX_TO_BUFFERED keeps buffering enabled. */
2318 int io_end_multiplex_out(int mode)
2320 int ret = iobuf.out_empty_len ? iobuf.out_fd : -1;
2322 if (msgs2stderr && DEBUG_GTE(IO, 2))
2323 rprintf(FINFO, "[%s] io_end_multiplex_out(mode=%d)\n", who_am_i(), mode);
2325 if (mode != MPLX_TO_BUFFERED)
2326 io_end_buffering_out(mode);
2328 io_flush(FULL_FLUSH);
/* No more 4-byte data-message header reservation. */
2331 iobuf.out_empty_len = 0;
/* Begin recording the transfer into the batch file, starting with the
 * protocol header, then mirroring fd's traffic via the monitor fds. */
2336 void start_write_batch(int fd)
2338 /* Some communication has already taken place, but we don't
2339 * enable batch writing until here so that we can write a
2340 * canonical record of the communication even though the
2341 * actual communication so far depends on whether a daemon
2343 write_int(batch_fd, protocol_version);
2344 if (protocol_version >= 30)
2345 write_byte(batch_fd, compat_flags);
2346 write_int(batch_fd, checksum_seed);
2349 write_batch_monitor_out = fd;
2351 write_batch_monitor_in = fd;
/* Stop mirroring traffic into the batch file. */
2354 void stop_write_batch(void)
2356 write_batch_monitor_out = -1;
2357 write_batch_monitor_in = -1;