/*
   Unix SMB/CIFS implementation.
   Copyright (C) Volker Lendecke 2008

     ** NOTE! The following LGPL license applies to the async_sock
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
25 #include "system/network.h"
26 #include "system/filesys.h"
29 #include "lib/async_req/async_sock.h"
30 #include "lib/util/iov_buf.h"
32 /* Note: lib/util/ is currently GPL */
33 #include "lib/util/tevent_unix.h"
34 #include "lib/util/samba_util.h"
/* State for an in-flight async connect(2) request. */
struct async_connect_state {
	int fd;			/* socket being connected */
	struct tevent_fd *fde;	/* waits for writability/error on fd */
	int result;		/* raw return value of connect(2) */
	long old_sockflags;	/* F_GETFL flags to restore on cleanup */
	socklen_t address_len;
	struct sockaddr_storage address;

	/* Optional hooks invoked immediately around the connect(2) call. */
	void (*before_connect)(void *private_data);
	void (*after_connect)(void *private_data);
	void *private_data;
};

static void async_connect_cleanup(struct tevent_req *req,
				  enum tevent_req_state req_state);
static void async_connect_connected(struct tevent_context *ev,
				    struct tevent_fd *fde, uint16_t flags,
				    void *priv);
56 * @brief async version of connect(2)
57 * @param[in] mem_ctx The memory context to hang the result off
58 * @param[in] ev The event context to work from
59 * @param[in] fd The socket to recv from
60 * @param[in] address Where to connect?
61 * @param[in] address_len Length of *address
62 * @retval The async request
64 * This function sets the socket into non-blocking state to be able to call
65 * connect in an async state. This will be reset when the request is finished.
68 struct tevent_req *async_connect_send(
69 TALLOC_CTX *mem_ctx, struct tevent_context *ev, int fd,
70 const struct sockaddr *address, socklen_t address_len,
71 void (*before_connect)(void *private_data),
72 void (*after_connect)(void *private_data),
75 struct tevent_req *req;
76 struct async_connect_state *state;
79 req = tevent_req_create(mem_ctx, &state, struct async_connect_state);
85 * We have to set the socket to nonblocking for async connect(2). Keep
86 * the old sockflags around.
90 state->before_connect = before_connect;
91 state->after_connect = after_connect;
92 state->private_data = private_data;
94 state->old_sockflags = fcntl(fd, F_GETFL, 0);
95 if (state->old_sockflags == -1) {
96 tevent_req_error(req, errno);
97 return tevent_req_post(req, ev);
100 tevent_req_set_cleanup_fn(req, async_connect_cleanup);
102 state->address_len = address_len;
103 if (address_len > sizeof(state->address)) {
104 tevent_req_error(req, EINVAL);
105 return tevent_req_post(req, ev);
107 memcpy(&state->address, address, address_len);
109 ret = set_blocking(fd, false);
111 tevent_req_error(req, errno);
112 return tevent_req_post(req, ev);
115 if (state->before_connect != NULL) {
116 state->before_connect(state->private_data);
119 state->result = connect(fd, address, address_len);
121 if (state->after_connect != NULL) {
122 state->after_connect(state->private_data);
125 if (state->result == 0) {
126 tevent_req_done(req);
127 return tevent_req_post(req, ev);
131 * The only errno indicating that an initial connect is still
132 * in flight is EINPROGRESS.
134 * We get EALREADY when someone calls us a second time for a
135 * given fd and the connect is still in flight (and returned
136 * EINPROGRESS the first time).
138 * This allows callers like open_socket_out_send() to reuse
139 * fds and call us with an fd for which the connect is still
140 * in flight. The proper thing to do for callers would be
141 * closing the fd and starting from scratch with a fresh
145 if (errno != EINPROGRESS && errno != EALREADY) {
146 tevent_req_error(req, errno);
147 return tevent_req_post(req, ev);
151 * Note for historic reasons TEVENT_FD_WRITE is not enough
152 * to get notified for POLLERR or EPOLLHUP even if they
153 * come together with POLLOUT. That means we need to
154 * use TEVENT_FD_READ in addition until we have
157 state->fde = tevent_add_fd(ev, state, fd, TEVENT_FD_READ|TEVENT_FD_WRITE,
158 async_connect_connected, req);
159 if (state->fde == NULL) {
160 tevent_req_error(req, ENOMEM);
161 return tevent_req_post(req, ev);
166 static void async_connect_cleanup(struct tevent_req *req,
167 enum tevent_req_state req_state)
169 struct async_connect_state *state =
170 tevent_req_data(req, struct async_connect_state);
172 TALLOC_FREE(state->fde);
173 if (state->fd != -1) {
176 ret = fcntl(state->fd, F_SETFL, state->old_sockflags);
186 * fde event handler for connect(2)
187 * @param[in] ev The event context that sent us here
188 * @param[in] fde The file descriptor event associated with the connect
189 * @param[in] flags Indicate read/writeability of the socket
190 * @param[in] priv private data, "struct async_req *" in this case
193 static void async_connect_connected(struct tevent_context *ev,
194 struct tevent_fd *fde, uint16_t flags,
197 struct tevent_req *req = talloc_get_type_abort(
198 priv, struct tevent_req);
199 struct async_connect_state *state =
200 tevent_req_data(req, struct async_connect_state);
202 int socket_error = 0;
203 socklen_t slen = sizeof(socket_error);
205 ret = getsockopt(state->fd, SOL_SOCKET, SO_ERROR,
206 &socket_error, &slen);
210 * According to Stevens this is the Solaris behaviour
211 * in case the connection encountered an error:
212 * getsockopt() fails, error is in errno
214 tevent_req_error(req, errno);
218 if (socket_error != 0) {
220 * Berkeley derived implementations (including) Linux
221 * return the pending error via socket_error.
223 tevent_req_error(req, socket_error);
227 tevent_req_done(req);
/*
 * Receive the result of async_connect_send().
 * Returns 0 on success; -1 on failure with *perrno set to the Unix error.
 */
int async_connect_recv(struct tevent_req *req, int *perrno)
{
	int err = tevent_req_simple_recv_unix(req);

	if (err != 0) {
		*perrno = err;
		return -1;
	}

	return 0;
}
/* State for an in-flight writev request. */
struct writev_state {
	struct tevent_context *ev;
	struct tevent_queue_entry *queue_entry;	/* non-NULL while queued */
	int fd;
	struct tevent_fd *fde;
	struct iovec *iov;	/* talloc'ed copy, advanced as we write */
	int count;		/* remaining iovec entries */
	size_t total_size;	/* bytes successfully written so far */
	uint16_t flags;		/* tevent fd flags we are waiting for */
	bool err_on_readability;
};

static void writev_cleanup(struct tevent_req *req,
			   enum tevent_req_state req_state);
static bool writev_cancel(struct tevent_req *req);
static void writev_trigger(struct tevent_req *req, void *private_data);
static void writev_handler(struct tevent_context *ev, struct tevent_fd *fde,
			   uint16_t flags, void *private_data);
262 struct tevent_req *writev_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
263 struct tevent_queue *queue, int fd,
264 bool err_on_readability,
265 struct iovec *iov, int count)
267 struct tevent_req *req;
268 struct writev_state *state;
270 req = tevent_req_create(mem_ctx, &state, struct writev_state);
276 state->total_size = 0;
277 state->count = count;
278 state->iov = (struct iovec *)talloc_memdup(
279 state, iov, sizeof(struct iovec) * count);
280 if (tevent_req_nomem(state->iov, req)) {
281 return tevent_req_post(req, ev);
283 state->flags = TEVENT_FD_WRITE|TEVENT_FD_READ;
284 state->err_on_readability = err_on_readability;
286 tevent_req_set_cleanup_fn(req, writev_cleanup);
287 tevent_req_set_cancel_fn(req, writev_cancel);
290 state->fde = tevent_add_fd(state->ev, state, state->fd,
291 state->flags, writev_handler, req);
292 if (tevent_req_nomem(state->fde, req)) {
293 return tevent_req_post(req, ev);
298 state->queue_entry = tevent_queue_add_entry(queue, ev, req,
299 writev_trigger, NULL);
300 if (tevent_req_nomem(state->queue_entry, req)) {
301 return tevent_req_post(req, ev);
306 static void writev_cleanup(struct tevent_req *req,
307 enum tevent_req_state req_state)
309 struct writev_state *state = tevent_req_data(req, struct writev_state);
311 TALLOC_FREE(state->queue_entry);
312 TALLOC_FREE(state->fde);
315 static bool writev_cancel(struct tevent_req *req)
317 struct writev_state *state = tevent_req_data(req, struct writev_state);
319 TALLOC_FREE(state->queue_entry);
320 TALLOC_FREE(state->fde);
322 if (state->count == 0) {
329 tevent_req_defer_callback(req, state->ev);
330 if (state->total_size > 0) {
332 * We've already started to write :-(
334 tevent_req_error(req, EIO);
338 tevent_req_error(req, ECANCELED);
342 static void writev_trigger(struct tevent_req *req, void *private_data)
344 struct writev_state *state = tevent_req_data(req, struct writev_state);
346 state->queue_entry = NULL;
348 state->fde = tevent_add_fd(state->ev, state, state->fd, state->flags,
349 writev_handler, req);
350 if (tevent_req_nomem(state->fde, req)) {
355 static void writev_handler(struct tevent_context *ev, struct tevent_fd *fde,
356 uint16_t flags, void *private_data)
358 struct tevent_req *req = talloc_get_type_abort(
359 private_data, struct tevent_req);
360 struct writev_state *state =
361 tevent_req_data(req, struct writev_state);
365 if ((state->flags & TEVENT_FD_READ) && (flags & TEVENT_FD_READ)) {
368 if (state->err_on_readability) {
369 /* Readable and the caller wants an error on read. */
370 tevent_req_error(req, EPIPE);
374 /* Might be an error. Check if there are bytes to read */
375 ret = ioctl(state->fd, FIONREAD, &value);
376 /* FIXME - should we also check
377 for ret == 0 and value == 0 here ? */
379 /* There's an error. */
380 tevent_req_error(req, EPIPE);
383 /* A request for TEVENT_FD_READ will succeed from now and
384 forevermore until the bytes are read so if there was
385 an error we'll wait until we do read, then get it in
386 the read callback function. Until then, remove TEVENT_FD_READ
387 from the flags we're waiting for. */
388 state->flags &= ~TEVENT_FD_READ;
389 TEVENT_FD_NOT_READABLE(fde);
391 /* If not writable, we're done. */
392 if (!(flags & TEVENT_FD_WRITE)) {
397 written = writev(state->fd, state->iov, state->count);
398 if ((written == -1) && (errno == EINTR)) {
403 tevent_req_error(req, errno);
407 tevent_req_error(req, EPIPE);
410 state->total_size += written;
412 ok = iov_advance(&state->iov, &state->count, written);
414 tevent_req_error(req, EIO);
418 if (state->count == 0) {
419 tevent_req_done(req);
424 ssize_t writev_recv(struct tevent_req *req, int *perrno)
426 struct writev_state *state =
427 tevent_req_data(req, struct writev_state);
430 if (tevent_req_is_unix_error(req, perrno)) {
431 tevent_req_received(req);
434 ret = state->total_size;
435 tevent_req_received(req);
/* State for an in-flight read_packet request. */
struct read_packet_state {
	int fd;
	struct tevent_fd *fde;
	uint8_t *buf;		/* talloc'ed; its talloc size is the target */
	size_t nread;		/* bytes received so far */

	/*
	 * Callback asked after the current buffer is full: return >0 for
	 * that many additional bytes, 0 for "packet complete", -1 for
	 * "invalid packet".
	 */
	ssize_t (*more)(uint8_t *buf, size_t buflen, void *private_data);
	void *private_data;
};

static void read_packet_cleanup(struct tevent_req *req,
				enum tevent_req_state req_state);
static void read_packet_handler(struct tevent_context *ev,
				struct tevent_fd *fde,
				uint16_t flags, void *private_data);
454 struct tevent_req *read_packet_send(TALLOC_CTX *mem_ctx,
455 struct tevent_context *ev,
456 int fd, size_t initial,
457 ssize_t (*more)(uint8_t *buf,
462 struct tevent_req *req;
463 struct read_packet_state *state;
465 req = tevent_req_create(mem_ctx, &state, struct read_packet_state);
472 state->private_data = private_data;
474 tevent_req_set_cleanup_fn(req, read_packet_cleanup);
476 state->buf = talloc_array(state, uint8_t, initial);
477 if (tevent_req_nomem(state->buf, req)) {
478 return tevent_req_post(req, ev);
481 state->fde = tevent_add_fd(ev, state, fd,
482 TEVENT_FD_READ, read_packet_handler,
484 if (tevent_req_nomem(state->fde, req)) {
485 return tevent_req_post(req, ev);
490 static void read_packet_cleanup(struct tevent_req *req,
491 enum tevent_req_state req_state)
493 struct read_packet_state *state =
494 tevent_req_data(req, struct read_packet_state);
496 TALLOC_FREE(state->fde);
499 static void read_packet_handler(struct tevent_context *ev,
500 struct tevent_fd *fde,
501 uint16_t flags, void *private_data)
503 struct tevent_req *req = talloc_get_type_abort(
504 private_data, struct tevent_req);
505 struct read_packet_state *state =
506 tevent_req_data(req, struct read_packet_state);
507 size_t total = talloc_get_size(state->buf);
511 nread = recv(state->fd, state->buf+state->nread, total-state->nread,
513 if ((nread == -1) && (errno == ENOTSOCK)) {
514 nread = read(state->fd, state->buf+state->nread,
517 if ((nread == -1) && (errno == EINTR)) {
522 tevent_req_error(req, errno);
526 tevent_req_error(req, EPIPE);
530 state->nread += nread;
531 if (state->nread < total) {
532 /* Come back later */
537 * We got what was initially requested. See if "more" asks for -- more.
539 if (state->more == NULL) {
540 /* Nobody to ask, this is a async read_data */
541 tevent_req_done(req);
545 more = state->more(state->buf, total, state->private_data);
547 /* We got an invalid packet, tell the caller */
548 tevent_req_error(req, EIO);
552 /* We're done, full packet received */
553 tevent_req_done(req);
557 if (total + more < total) {
558 tevent_req_error(req, EMSGSIZE);
562 tmp = talloc_realloc(state, state->buf, uint8_t, total+more);
563 if (tevent_req_nomem(tmp, req)) {
569 ssize_t read_packet_recv(struct tevent_req *req, TALLOC_CTX *mem_ctx,
570 uint8_t **pbuf, int *perrno)
572 struct read_packet_state *state =
573 tevent_req_data(req, struct read_packet_state);
575 if (tevent_req_is_unix_error(req, perrno)) {
576 tevent_req_received(req);
579 *pbuf = talloc_move(mem_ctx, &state->buf);
580 tevent_req_received(req);
581 return talloc_get_size(*pbuf);
/* State for an in-flight wait_for_read request. */
struct wait_for_read_state {
	struct tevent_fd *fde;
	int fd;
	bool check_errors;	/* peek the socket to distinguish EOF/errors */
};

static void wait_for_read_cleanup(struct tevent_req *req,
				  enum tevent_req_state req_state);
static void wait_for_read_done(struct tevent_context *ev,
			       struct tevent_fd *fde,
			       uint16_t flags,
			       void *private_data);
597 struct tevent_req *wait_for_read_send(TALLOC_CTX *mem_ctx,
598 struct tevent_context *ev, int fd,
601 struct tevent_req *req;
602 struct wait_for_read_state *state;
604 req = tevent_req_create(mem_ctx, &state, struct wait_for_read_state);
609 tevent_req_set_cleanup_fn(req, wait_for_read_cleanup);
611 state->fde = tevent_add_fd(ev, state, fd, TEVENT_FD_READ,
612 wait_for_read_done, req);
613 if (tevent_req_nomem(state->fde, req)) {
614 return tevent_req_post(req, ev);
618 state->check_errors = check_errors;
622 static void wait_for_read_cleanup(struct tevent_req *req,
623 enum tevent_req_state req_state)
625 struct wait_for_read_state *state =
626 tevent_req_data(req, struct wait_for_read_state);
628 TALLOC_FREE(state->fde);
631 static void wait_for_read_done(struct tevent_context *ev,
632 struct tevent_fd *fde,
636 struct tevent_req *req = talloc_get_type_abort(
637 private_data, struct tevent_req);
638 struct wait_for_read_state *state =
639 tevent_req_data(req, struct wait_for_read_state);
643 if ((flags & TEVENT_FD_READ) == 0) {
647 if (!state->check_errors) {
648 tevent_req_done(req);
652 nread = recv(state->fd, &c, 1, MSG_PEEK);
655 tevent_req_error(req, EPIPE);
659 if ((nread == -1) && (errno == EINTR)) {
660 /* come back later */
664 if ((nread == -1) && (errno == ENOTSOCK)) {
665 /* Ignore this specific error on pipes */
666 tevent_req_done(req);
671 tevent_req_error(req, errno);
675 tevent_req_done(req);
/*
 * Receive the result of wait_for_read_send().
 * Returns true on success; false on failure with *perr set.
 */
bool wait_for_read_recv(struct tevent_req *req, int *perr)
{
	int err = tevent_req_simple_recv_unix(req);

	if (err != 0) {
		*perr = err;
		return false;
	}

	return true;
}
/* State for an in-flight async accept request. */
struct accept_state {
	struct tevent_fd *fde;
	int listen_sock;
	int sock;		/* accepted fd, valid after completion */
	socklen_t addrlen;
	struct sockaddr_storage addr;
};

static void accept_handler(struct tevent_context *ev, struct tevent_fd *fde,
			   uint16_t flags, void *private_data);
701 struct tevent_req *accept_send(TALLOC_CTX *mem_ctx, struct tevent_context *ev,
704 struct tevent_req *req;
705 struct accept_state *state;
707 req = tevent_req_create(mem_ctx, &state, struct accept_state);
712 state->listen_sock = listen_sock;
714 state->fde = tevent_add_fd(ev, state, listen_sock, TEVENT_FD_READ,
715 accept_handler, req);
716 if (tevent_req_nomem(state->fde, req)) {
717 return tevent_req_post(req, ev);
722 static void accept_handler(struct tevent_context *ev, struct tevent_fd *fde,
723 uint16_t flags, void *private_data)
725 struct tevent_req *req = talloc_get_type_abort(
726 private_data, struct tevent_req);
727 struct accept_state *state = tevent_req_data(req, struct accept_state);
730 TALLOC_FREE(state->fde);
732 if ((flags & TEVENT_FD_READ) == 0) {
733 tevent_req_error(req, EIO);
736 state->addrlen = sizeof(state->addr);
738 ret = accept(state->listen_sock, (struct sockaddr *)&state->addr,
740 if ((ret == -1) && (errno == EINTR)) {
745 tevent_req_error(req, errno);
748 smb_set_close_on_exec(ret);
750 tevent_req_done(req);
753 int accept_recv(struct tevent_req *req, struct sockaddr_storage *paddr,
754 socklen_t *paddrlen, int *perr)
756 struct accept_state *state = tevent_req_data(req, struct accept_state);
759 if (tevent_req_is_unix_error(req, &err)) {
766 memcpy(paddr, &state->addr, state->addrlen);
768 if (paddrlen != NULL) {
769 *paddrlen = state->addrlen;