* Copyright (C) 1996-2001 Andrew Tridgell
* Copyright (C) 1996 Paul Mackerras
* Copyright (C) 2001, 2002 Martin Pool <mbp@samba.org>
- * Copyright (C) 2003-2009 Wayne Davison
+ * Copyright (C) 2003-2014 Wayne Davison
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
extern size_t bwlimit_writemax;
extern int io_timeout;
extern int am_server;
-extern int am_daemon;
extern int am_sender;
+extern int am_receiver;
extern int am_generator;
extern int msgs2stderr;
extern int inc_recurse;
extern int file_old_total;
extern int list_only;
extern int read_batch;
+extern int compat_flags;
extern int protect_args;
extern int checksum_seed;
extern int protocol_version;
extern int remove_source_files;
extern int preserve_hard_links;
extern BOOL extra_flist_sending_enabled;
+extern BOOL flush_ok_after_signal;
extern struct stats stats;
extern struct file_list *cur_flist;
#ifdef ICONV_OPTION
int csum_length = SHORT_SUM_LENGTH; /* initial value */
int allowed_lull = 0;
-int ignore_timeout = 0;
int batch_fd = -1;
int msgdone_cnt = 0;
int forward_flist_data = 0;
/* Ignore an EOF error if non-zero. See whine_about_eof(). */
int kluge_around_eof = 0;
+int got_kill_signal = -1; /* is set to 0 only after multiplexed I/O starts */
int sock_f_in = -1;
int sock_f_out = -1;
static void drain_multiplex_messages(void);
static void sleep_for_bwlimit(int bytes_written);
-static void check_timeout(void)
-{
- time_t t;
-
- if (!io_timeout || ignore_timeout)
+static void check_timeout(BOOL allow_keepalive, int keepalive_flags)
+{
+ time_t t, chk;
+
+ /* On the receiving side, the generator is now the one that decides
+ * when a timeout has occurred. When it is sifting through a lot of
+ * files looking for work, it will be sending keep-alive messages to
+ * the sender, and even though the receiver won't be sending/receiving
+ * anything (not even keep-alive messages), the successful writes to
+ * the sender will keep things going. If the receiver is actively
+ * receiving data, it will ensure that the generator knows that it is
+ * not idle by sending the generator keep-alive messages (since the
+ * generator might be blocked trying to send checksums, it needs to
+ * know that the receiver is active). Thus, as long as one or the
+ * other is successfully doing work, the generator will not timeout. */
+ if (!io_timeout)
return;
- if (!last_io_in) {
- last_io_in = time(NULL);
- return;
+ t = time(NULL);
+
+ if (allow_keepalive) {
+ /* This may put data into iobuf.msg w/o flushing. */
+ maybe_send_keepalive(t, keepalive_flags);
}
- t = time(NULL);
+ if (!last_io_in)
+ last_io_in = t;
- if (t - last_io_in >= io_timeout) {
- if (!am_server && !am_daemon) {
- rprintf(FERROR, "io timeout after %d seconds -- exiting\n",
- (int)(t-last_io_in));
- }
+ if (am_receiver)
+ return;
+
+ chk = MAX(last_io_out, last_io_in);
+ if (t - chk >= io_timeout) {
+ if (am_server)
+ msgs2stderr = 1;
+ rprintf(FERROR, "[%s] io timeout after %d seconds -- exiting\n",
+ who_am_i(), (int)(t-chk));
exit_cleanup(RERR_TIMEOUT);
}
}
* the socket except very early in the transfer. */
static size_t safe_read(int fd, char *buf, size_t len)
{
- size_t got;
- int n;
+ size_t got = 0;
assert(fd != iobuf.in_fd);
- n = read(fd, buf, len);
- if ((size_t)n == len || n == 0) {
- if (DEBUG_GTE(IO, 2))
- rprintf(FINFO, "[%s] safe_read(%d)=%ld\n", who_am_i(), fd, (long)n);
- return n;
- }
- if (n < 0) {
- if (errno != EINTR && errno != EWOULDBLOCK && errno != EAGAIN) {
- read_failed:
- rsyserr(FERROR, errno, "safe_read failed to read %ld bytes [%s]",
- (long)len, who_am_i());
- exit_cleanup(RERR_STREAMIO);
- }
- got = 0;
- } else
- got = n;
-
while (1) {
struct timeval tv;
fd_set r_fds, e_fds;
who_am_i());
exit_cleanup(RERR_FILEIO);
}
- check_timeout();
+ check_timeout(1, MSK_ALLOW_FLUSH);
continue;
}
rprintf(FINFO, "select exception on fd %d\n", fd); */
if (FD_ISSET(fd, &r_fds)) {
- n = read(fd, buf + got, len - got);
+ int n = read(fd, buf + got, len - got);
if (DEBUG_GTE(IO, 2))
rprintf(FINFO, "[%s] safe_read(%d)=%ld\n", who_am_i(), fd, (long)n);
if (n == 0)
if (n < 0) {
if (errno == EINTR)
continue;
- goto read_failed;
+ rsyserr(FERROR, errno, "safe_read failed to read %ld bytes [%s]",
+ (long)len, who_am_i());
+ exit_cleanup(RERR_STREAMIO);
}
if ((got += (size_t)n) == len)
break;
what_fd_is(fd), who_am_i());
exit_cleanup(RERR_FILEIO);
}
- check_timeout();
+ if (io_timeout)
+ maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
continue;
}
void reduce_iobuf_size(xbuf *out, size_t new_size)
{
if (new_size < out->size) {
- if (DEBUG_GTE(IO, 4)) {
+ /* Avoid weird buffer interactions by only outputting this to stderr. */
+ if (msgs2stderr && DEBUG_GTE(IO, 4)) {
const char *name = out == &iobuf.out ? "iobuf.out"
: out == &iobuf.msg ? "iobuf.msg"
: NULL;
{
if (IOBUF_WAS_REDUCED(out->size)) {
size_t new_size = IOBUF_RESTORE_SIZE(out->size);
- if (DEBUG_GTE(IO, 4)) {
+ /* Avoid weird buffer interactions by only outputting this to stderr. */
+ if (msgs2stderr && DEBUG_GTE(IO, 4)) {
const char *name = out == &iobuf.out ? "iobuf.out"
: out == &iobuf.msg ? "iobuf.msg"
: NULL;
}
}
-/* Perform buffered input and output until specified conditions are met. When
- * given a "needed" read requirement, we'll return without doing any I/O if the
- * iobuf.in bytes are already available. When reading, we'll read as many
- * bytes as we can into the buffer, and return as soon as we meet the minimum
- * read requirement. When given a "needed" write requirement, we'll return
- * without doing any I/O if that many bytes will fit in the output buffer (we
- * check either iobuf.out or iobuf.msg, depending on the flags). When writing,
- * we write out as much as we can, and return as soon as the given free-space
- * requirement is available.
+static void handle_kill_signal(BOOL flush_ok)
+{
+ got_kill_signal = -1;
+ flush_ok_after_signal = flush_ok;
+ exit_cleanup(RERR_SIGNAL);
+}
+
+/* Perform buffered input and/or output until specified conditions are met.
+ * When given a "needed" read or write request, this returns without doing any
+ * I/O if the needed input bytes or write space is already available. Once I/O
+ * is needed, this will try to do whatever reading and/or writing is currently
+ * possible, up to the maximum buffer allowances, no matter if this is a read
+ * or write request. However, the I/O stops as soon as the required input
+ * bytes or output space is available. If this is not a read request, the
+ * routine may also do some advantageous reading of messages from a multiplexed
+ * input source (which ensures that we don't jam up with everyone in their
+ * "need to write" code and nobody reading the accumulated data that would make
+ * writing possible).
*
- * The iobuf.out and iobuf.msg buffers are circular, so some writes into them
- * will need to be split when the data needs to wrap around to the start. In
- * order to help make this easier for some operations (such as the use of
- * SIVAL() into the buffer) a buffer may be temporarily shortened, but the
- * original size will be automatically restored. The iobuf.in buffer is also
- * circular, so callers may need to split their reading of the data if it spans
- * the end. See also the 3 raw_* iobuf vars that are used in the handling of
+ * The iobuf.in, .out and .msg buffers are all circular. Callers need to be
+ * aware that some data copies will need to be split when the bytes wrap around
+ * from the end to the start. In order to help make writing into the output
+ * buffers easier for some operations (such as the use of SIVAL() into the
+ * buffer) a buffer may be temporarily shortened by a small amount, but the
+ * original size will be automatically restored when the .pos wraps to the
+ * start. See also the 3 raw_* iobuf vars that are used in the handling of
* MSG_DATA bytes as they are read-from/written-into the buffers.
*
* When writing, we flush data in the following priority order:
exit_cleanup(RERR_PROTOCOL);
}
- if (DEBUG_GTE(IO, 3)) {
+ if (msgs2stderr && DEBUG_GTE(IO, 3)) {
rprintf(FINFO, "[%s] perform_io(%ld, %sinput)\n",
who_am_i(), (long)needed, flags & PIO_CONSUME_INPUT ? "consume&" : "");
}
exit_cleanup(RERR_PROTOCOL);
}
- if (DEBUG_GTE(IO, 3)) {
+ if (msgs2stderr && DEBUG_GTE(IO, 3)) {
rprintf(FINFO, "[%s] perform_io(%ld, outroom) needs to flush %ld\n",
who_am_i(), (long)needed,
iobuf.out.len + needed > iobuf.out.size
exit_cleanup(RERR_PROTOCOL);
}
- if (DEBUG_GTE(IO, 3)) {
+ if (msgs2stderr && DEBUG_GTE(IO, 3)) {
rprintf(FINFO, "[%s] perform_io(%ld, msgroom) needs to flush %ld\n",
who_am_i(), (long)needed,
iobuf.msg.len + needed > iobuf.msg.size
break;
case 0:
- if (DEBUG_GTE(IO, 3))
+ if (msgs2stderr && DEBUG_GTE(IO, 3))
rprintf(FINFO, "[%s] perform_io(%ld, %d)\n", who_am_i(), (long)needed, flags);
break;
SIVAL(iobuf.out.buf + iobuf.raw_data_header_pos, 0,
((MPLEX_BASE + (int)MSG_DATA)<<24) + iobuf.out.len - 4);
- if (DEBUG_GTE(IO, 1)) {
+ if (msgs2stderr && DEBUG_GTE(IO, 1)) {
rprintf(FINFO, "[%s] send_msg(%d, %ld)\n",
who_am_i(), (int)MSG_DATA, (long)iobuf.out.len - 4);
}
break;
}
+ if (got_kill_signal > 0)
+ handle_kill_signal(True);
+
if (extra_flist_sending_enabled) {
- if (file_total - file_old_total < MAX_FILECNT_LOOKAHEAD)
+ if (file_total - file_old_total < MAX_FILECNT_LOOKAHEAD && IN_MULTIPLEXED_AND_READY)
tv.tv_sec = 0;
else {
extra_flist_sending_enabled = False;
send_extra_file_list(sock_f_out, -1);
extra_flist_sending_enabled = !flist_eof;
} else
- check_timeout();
+ check_timeout((flags & PIO_NEED_INPUT) != 0, 0);
FD_ZERO(&r_fds); /* Just in case... */
FD_ZERO(&w_fds);
}
if (msgs2stderr && DEBUG_GTE(IO, 2))
rprintf(FINFO, "[%s] recv=%ld\n", who_am_i(), (long)n);
- if (io_timeout)
+ if (io_timeout) {
last_io_in = time(NULL);
+ if (flags & PIO_NEED_INPUT)
+ maybe_send_keepalive(last_io_in, 0);
+ }
stats.total_read += n;
iobuf.in.len += n;
}
}
+ if (got_kill_signal > 0)
+ handle_kill_signal(True);
+
/* We need to help prevent deadlock by doing what reading
* we can whenever we are here trying to write. */
if (IN_MULTIPLEXED_AND_READY && !(flags & PIO_NEED_INPUT)) {
}
double_break:
+ if (got_kill_signal > 0)
+ handle_kill_signal(True);
+
data = iobuf.in.buf + iobuf.in.pos;
if (flags & PIO_CONSUME_INPUT) {
{
char buf[1024];
+ if (!iobuf.in.buf || !iobuf.out.buf || iobuf.in_fd < 0 || iobuf.out_fd < 0 || kluge_around_eof)
+ return;
+
kluge_around_eof = 2;
/* Setting an I/O timeout ensures that if something inexplicably weird
* happens, we won't hang around forever. */
read_buf(iobuf.in_fd, buf, sizeof buf);
}
-/* Buffer a message for the multiplexed output stream. Is never used for MSG_DATA. */
+/* Buffer a message for the multiplexed output stream. Is not used for (normal) MSG_DATA. */
int send_msg(enum msgcode code, const char *buf, size_t len, int convert)
{
char *hdr;
{
size_t siz;
- if ((pos += 4) >= iobuf.msg.size)
- pos -= iobuf.msg.size;
+ if ((pos += 4) == iobuf.msg.size)
+ pos = 0;
/* Handle a split copy if we wrap around the end of the circular buffer. */
if (pos >= iobuf.msg.pos && (siz = iobuf.msg.size - pos) < len) {
case FES_SUCCESS:
if (remove_source_files)
send_msg_int(MSG_SUCCESS, ndx);
+ /* FALL THROUGH */
+ case FES_NO_SEND:
+#ifdef SUPPORT_HARD_LINKS
if (preserve_hard_links) {
struct file_struct *file = flist->files[ndx - flist->ndx_start];
if (F_IS_HLINKED(file)) {
+ if (status == FES_NO_SEND)
+ flist_ndx_push(&hlink_list, -2); /* indicates a failure follows */
flist_ndx_push(&hlink_list, ndx);
- flist->in_progress++;
+ if (inc_recurse)
+ flist->in_progress++;
}
}
+#endif
break;
case FES_REDO:
if (read_batch) {
flist->to_redo++;
flist_ndx_push(&redo_list, ndx);
break;
- case FES_NO_SEND:
- break;
}
}
void set_io_timeout(int secs)
{
io_timeout = secs;
+ allowed_lull = (io_timeout + 1) / 2;
- if (!io_timeout || io_timeout > SELECT_TIMEOUT)
+ if (!io_timeout || allowed_lull > SELECT_TIMEOUT)
select_timeout = SELECT_TIMEOUT;
else
- select_timeout = io_timeout;
+ select_timeout = allowed_lull;
- allowed_lull = read_batch ? 0 : (io_timeout + 1) / 2;
+ if (read_batch)
+ allowed_lull = 0;
}
static void check_for_d_option_error(const char *msg)
char ***argv_p, int *argc_p, char **request_p)
{
int maxargs = MAX_ARGS;
- int dot_pos = 0;
- int argc = 0;
+ int dot_pos = 0, argc = 0, request_len = 0;
char **argv, *p;
int rl_flags = (rl_nulls ? RL_EOL_NULLS : 0);
if (mod_name && !protect_args)
argv[argc++] = "rsyncd";
+ if (request_p)
+ *request_p = NULL;
+
while (1) {
if (read_line(f_in, buf, bufsiz, rl_flags) == 0)
break;
}
if (dot_pos) {
- if (request_p) {
- *request_p = strdup(buf);
- request_p = NULL;
+ if (request_p && request_len < 1024) {
+ int len = strlen(buf);
+ if (request_len)
+ request_p[0][request_len++] = ' ';
+ if (!(*request_p = realloc_array(*request_p, char, request_len + len + 1)))
+ out_of_memory("read_args");
+ memcpy(*request_p + request_len, buf, len + 1);
+ request_len += len;
}
if (mod_name)
glob_expand_module(mod_name, buf, &argv, &argc, &maxargs);
io_flush(NORMAL_FLUSH);
}
-void maybe_send_keepalive(void)
-{
- if (time(NULL) - last_io_out >= allowed_lull) {
- if (!iobuf.msg.len && iobuf.out.len == iobuf.out_empty_len) {
- if (protocol_version < 29)
- return; /* there's nothing we can do */
- if (protocol_version >= 30)
- send_msg(MSG_NOOP, "", 0, 0);
- else {
- write_int(iobuf.out_fd, cur_flist->used);
- write_shortint(iobuf.out_fd, ITEM_IS_NEW);
- }
- }
- if (iobuf.msg.len)
+/* Older rsync versions used to send either a MSG_NOOP (protocol 30) or a
+ * raw-data-based keep-alive (protocol 29), both of which implied forwarding of
+ * the message through the sender. Since the new timeout method does not need
+ * any forwarding, we just send an empty MSG_DATA message, which works with all
+ * rsync versions. This avoids any message forwarding, and leaves the raw-data
+ * stream alone (since we can never be quite sure if that stream is in the
+ * right state for a keep-alive message). */
+void maybe_send_keepalive(time_t now, int flags)
+{
+ if (flags & MSK_ACTIVE_RECEIVER)
+ last_io_in = now; /* Fudge things when we're working hard on the files. */
+
+ /* Early in the transfer (before the receiver forks) the receiving side doesn't
+ * care if it hasn't sent data in a while as long as it is receiving data (in
+ * fact, a pre-3.1.0 rsync would die if we tried to send it a keep-alive during
+ * this time). So, if we're an early-receiving proc, just return and let the
+ * incoming data determine if we time out. */
+ if (!am_sender && !am_receiver && !am_generator)
+ return;
+
+ if (now - last_io_out >= allowed_lull) {
+ /* The receiver is special: it only sends keep-alive messages if it is
+ * actively receiving data. Otherwise, it lets the generator timeout. */
+ if (am_receiver && now - last_io_in >= io_timeout)
+ return;
+
+ if (!iobuf.msg.len && iobuf.out.len == iobuf.out_empty_len)
+ send_msg(MSG_DATA, "", 0, 0);
+ if (!(flags & MSK_ALLOW_FLUSH)) {
+ /* Let the caller worry about writing out the data. */
+ } else if (iobuf.msg.len)
perform_io(iobuf.msg.size - iobuf.msg.len + 1, PIO_NEED_MSGROOM);
else if (iobuf.out.len > iobuf.out_empty_len)
io_flush(NORMAL_FLUSH);
* the buffer the msg data will end once it is read. It is
* possible that this points off the end of the buffer, in
* which case the gradual reading of the input stream will
- * cause this value to decrease and eventually become real. */
- iobuf.raw_input_ends_before = iobuf.in.pos + msg_bytes;
+ * cause this value to wrap around and eventually become real. */
+ if (msg_bytes)
+ iobuf.raw_input_ends_before = iobuf.in.pos + msg_bytes;
iobuf.in_multiplexed = 1;
break;
case MSG_STATS:
got_flist_entry_status(FES_REDO, val);
break;
case MSG_IO_ERROR:
- if (msg_bytes != 4 || am_sender)
+ if (msg_bytes != 4)
goto invalid_msg;
val = raw_read_int();
iobuf.in_multiplexed = 1;
io_error |= val;
- if (!am_generator)
+ if (am_receiver)
send_msg_int(MSG_IO_ERROR, val);
break;
case MSG_IO_TIMEOUT:
}
break;
case MSG_NOOP:
- if (am_sender)
- maybe_send_keepalive();
+ /* Support protocol-30 keep-alive method. */
+ if (msg_bytes != 0)
+ goto invalid_msg;
iobuf.in_multiplexed = 1;
+ if (am_sender)
+ maybe_send_keepalive(time(NULL), MSK_ALLOW_FLUSH);
break;
case MSG_DELETED:
if (msg_bytes >= sizeof data)
exit_cleanup(RERR_STREAMIO);
}
raw_read_buf(data, msg_bytes);
- iobuf.in_multiplexed = 1;
+ /* We don't set the in_multiplexed value back to 1 before writing this message
+ * because the write might loop back and read yet another message, over and
+ * over again, while waiting for room to put the message in the msg buffer. */
rwrite((enum logcode)tag, data, msg_bytes, !am_generator);
+ iobuf.in_multiplexed = 1;
if (first_message) {
if (list_only && !am_sender && tag == 1 && msg_bytes < sizeof data) {
data[msg_bytes] = '\0';
send_msg(MSG_ERROR_EXIT, "", 0, 0);
io_flush(FULL_FLUSH);
}
- val = 0;
- } else {
- val = raw_read_int();
- if (protocol_version >= 31) {
- if (am_generator) {
- if (DEBUG_GTE(EXIT, 3)) {
- rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT with exit_code %d\n",
- who_am_i(), val);
- }
- send_msg_int(MSG_ERROR_EXIT, val);
- } else {
- if (DEBUG_GTE(EXIT, 3)) {
- rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
- who_am_i());
- }
- send_msg(MSG_ERROR_EXIT, "", 0, 0);
+ } else if (protocol_version >= 31) {
+ if (am_generator || am_receiver) {
+ if (DEBUG_GTE(EXIT, 3)) {
+ rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT with exit_code %d\n",
+ who_am_i(), val);
+ }
+ send_msg_int(MSG_ERROR_EXIT, val);
+ } else {
+ if (DEBUG_GTE(EXIT, 3)) {
+ rprintf(FINFO, "[%s] sending MSG_ERROR_EXIT (len 0)\n",
+ who_am_i());
}
+ send_msg(MSG_ERROR_EXIT, "", 0, 0);
}
}
/* Send a negative linenum so that we don't end up
#if SIZEOF_INT64 < 8
u.x = IVAL(u.b,0);
#elif CAREFUL_ALIGNMENT
- u.x = IVAL(u.b,0) | (((int64)IVAL(u.b,4))<<32);
+ u.x = IVAL64(u.b,0);
#endif
return u.x;
}
if (prior_tv.tv_sec) {
elapsed_usec = (start_tv.tv_sec - prior_tv.tv_sec) * ONE_SEC
+ (start_tv.tv_usec - prior_tv.tv_usec);
- total_written -= elapsed_usec * bwlimit / (ONE_SEC/1024);
+ total_written -= (int64)elapsed_usec * bwlimit / (ONE_SEC/1024);
if (total_written < 0)
total_written = 0;
}
uchar bit;
int cnt = 8;
- SIVAL(b, 1, x);
#if SIZEOF_INT64 >= 8
- SIVAL(b, 5, x >> 32);
+ SIVAL64(b, 1, x);
#else
+ SIVAL(b, 1, x);
if (x <= 0x7FFFFFFF && x >= 0)
memset(b + 5, 0, 4);
else {
#endif
}
+void write_bigbuf(int f, const char *buf, size_t len)
+{
+ size_t half_max = (iobuf.out.size - iobuf.out_empty_len) / 2;
+
+ while (len > half_max + 1024) {
+ write_buf(f, buf, half_max);
+ buf += half_max;
+ len -= half_max;
+ }
+
+ write_buf(f, buf, len);
+}
+
void write_buf(int f, const char *buf, size_t len)
{
size_t pos, siz;
/* Read a line of up to bufsiz-1 characters into buf. Strips
* the (required) trailing newline and all carriage returns.
* Returns 1 for success; 0 for I/O error or truncation. */
-int read_line_old(int fd, char *buf, size_t bufsiz)
+int read_line_old(int fd, char *buf, size_t bufsiz, int eof_ok)
{
+ assert(fd != iobuf.in_fd);
bufsiz--; /* leave room for the null */
while (bufsiz > 0) {
- assert(fd != iobuf.in_fd);
- if (safe_read(fd, buf, 1) == 0)
+ if (safe_read(fd, buf, 1) == 0) {
+ if (eof_ok)
+ break;
return 0;
+ }
if (*buf == '\0')
return 0;
if (*buf == '\n')
iobuf.out_empty_len = 4; /* See also OUT_MULTIPLEXED */
io_start_buffering_out(fd);
+ got_kill_signal = 0;
iobuf.raw_data_header_pos = iobuf.out.pos + iobuf.out.len;
iobuf.out.len += 4;
iobuf.out.len = 0;
iobuf.out_empty_len = 0;
+ if (got_kill_signal > 0) /* Just in case... */
+ handle_kill_signal(False);
+ got_kill_signal = -1;
return ret;
}
* is involved. */
write_int(batch_fd, protocol_version);
if (protocol_version >= 30)
- write_byte(batch_fd, inc_recurse);
+ write_byte(batch_fd, compat_flags);
write_int(batch_fd, checksum_seed);
if (am_sender)