2 * Unix SMB/CIFS implementation.
3 * Copyright (C) Volker Lendecke 2013
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 3 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "system/select.h"
22 #include "system/time.h"
23 #include "system/network.h"
24 #include "dlinklist.h"
25 #include "pthreadpool/pthreadpool.h"
29 * This file implements two abstractions: The "unix_dgram" functions implement
30 * queueing for unix domain datagram sockets. You can send to a destination
31 * socket, and if that has no free space available, it will fall back to an
32 * anonymous socket that will poll for writability. "unix_dgram" expects the
33 * data size not to exceed the system limit.
35 * The "unix_msg" functions implement the fragmentation of large messages on
36 * top of "unix_dgram". This is what is exposed to the user of this API.
/*
 * One datagram queued for later transmission by a worker thread.
 * NOTE(review): several members are elided in this chunk; later code
 * references ->sock, ->sent, ->sys_errno, ->fds, ->num_fds, ->buflen
 * and ->buf -- confirm against the full file.
 */
39 struct unix_dgram_msg {
40 struct unix_dgram_msg *prev, *next; /* links in a send queue's msgs list */
/*
 * Per-destination queue of datagrams that could not be sent right away.
 * NOTE(review): the ->sock and trailing ->path members used by later
 * code are elided in this chunk.
 */
51 struct unix_dgram_send_queue {
52 struct unix_dgram_send_queue *prev, *next; /* list of all queues of the ctx */
53 struct unix_dgram_ctx *ctx; /* owning context, used during queue teardown */
55 struct unix_dgram_msg *msgs; /* pending messages, kept in send order */
/*
 * State of one AF_UNIX datagram endpoint with queued sending.
 * NOTE(review): ->sock, ->max_msg, ->recv_buf, ->created_pid,
 * ->private_data and the trailing ->path member are elided here but
 * are referenced by the functions below -- confirm against full file.
 */
59 struct unix_dgram_ctx {
62 const struct poll_funcs *ev_funcs; /* event abstraction supplied by caller */
65 void (*recv_callback)(struct unix_dgram_ctx *ctx, /* invoked per datagram */
66 uint8_t *msg, size_t msg_len,
67 int *fds, size_t num_fds,
71 struct poll_watch *sock_read_watch; /* POLLIN watch on the bound socket */
72 struct unix_dgram_send_queue *send_queues; /* one queue per busy destination */
74 struct pthreadpool *send_pool; /* worker threads doing blocking sends */
75 struct poll_watch *pool_read_watch; /* watch on the pool's signal fd */
/* Forward declarations; the recv handler's parameter list continues on
 * lines elided from this chunk. */
81 static ssize_t iov_buflen(const struct iovec *iov, int iovlen);
82 static void unix_dgram_recv_handler(struct poll_watch *w, int fd, short events,
85 /* Set socket non blocking. */
/*
 * Chooses the platform's nonblocking flag: O_NONBLOCK where available,
 * otherwise O_NDELAY or FNDELAY (selection #ifdefs are elided here).
 * NOTE(review): the error-return statements for the fcntl() calls are
 * elided in this chunk; presumably returns 0 on success and an errno
 * value on failure -- confirm against the full file.
 */
86 static int prepare_socket_nonblock(int sock)
90 #define FLAG_TO_SET O_NONBLOCK
93 #define FLAG_TO_SET O_NDELAY
95 #define FLAG_TO_SET FNDELAY
99 flags = fcntl(sock, F_GETFL); /* read current file status flags */
103 flags |= FLAG_TO_SET; /* add the nonblocking flag, keep the rest */
104 if (fcntl(sock, F_SETFL, flags) == -1) {
112 /* Set socket close on exec. */
/* NOTE(review): lines between F_GETFD and F_SETFD are elided;
 * presumably FD_CLOEXEC is OR-ed into flags and fcntl failures map to
 * an errno return -- confirm against the full file. */
113 static int prepare_socket_cloexec(int sock)
118 flags = fcntl(sock, F_GETFD, 0); /* read descriptor flags */
123 if (fcntl(sock, F_SETFD, flags) == -1) {
130 /* Set socket non blocking and close on exec. */
131 static int prepare_socket(int sock)
133 int ret = prepare_socket_nonblock(sock); /* step 1: nonblocking */
138 return prepare_socket_cloexec(sock); /* step 2: close-on-exec */
/* Close every fd in the array; used to drop queued or received
 * descriptors when they are no longer needed. */
141 static void close_fd_array(int *fds, size_t num_fds)
145 for (i = 0; i < num_fds; i++) {
/*
 * Create a unix_dgram_ctx bound to "addr": allocate the context with
 * the socket path stored inline (offsetof-based allocation), create a
 * nonblocking, close-on-exec AF_UNIX SOCK_DGRAM socket, bind it and
 * register a POLLIN watch that drives unix_dgram_recv_handler.
 * NOTE(review): error-cleanup paths, the *result assignment and the
 * final return are elided in this chunk -- confirm against full file.
 */
155 static int unix_dgram_init(const struct sockaddr_un *addr, size_t max_msg,
156 const struct poll_funcs *ev_funcs,
157 void (*recv_callback)(struct unix_dgram_ctx *ctx,
158 uint8_t *msg, size_t msg_len,
159 int *fds, size_t num_fds,
162 struct unix_dgram_ctx **result)
164 struct unix_dgram_ctx *ctx;
169 pathlen = strlen(addr->sun_path)+1; /* include terminating NUL */
174 ctx = malloc(offsetof(struct unix_dgram_ctx, path) + pathlen)
179 memcpy(ctx->path, addr->sun_path, pathlen);
184 *ctx = (struct unix_dgram_ctx) {
186 .ev_funcs = ev_funcs,
187 .recv_callback = recv_callback,
188 .private_data = private_data,
189 .created_pid = (pid_t)-1 /* set to getpid() only after a successful bind */
192 ctx->recv_buf = malloc(max_msg); /* one buffer sized for the largest dgram */
193 if (ctx->recv_buf == NULL) {
198 ctx->sock = socket(AF_UNIX, SOCK_DGRAM, 0);
199 if (ctx->sock == -1) {
204 /* Set non-blocking and close-on-exec. */
205 ret = prepare_socket(ctx->sock);
211 ret = bind(ctx->sock,
212 (const struct sockaddr *)(const void *)addr,
219 ctx->created_pid = getpid(); /* remember creator for unlink-on-free */
221 ctx->sock_read_watch = ctx->ev_funcs->watch_new(
222 ctx->ev_funcs, ctx->sock, POLLIN,
223 unix_dgram_recv_handler, ctx);
225 if (ctx->sock_read_watch == NULL) {
/*
 * POLLIN handler for the bound socket: receive one datagram into
 * ctx->recv_buf, pull any SCM_RIGHTS file descriptors out of the
 * ancillary data, mark them close-on-exec, and hand everything to
 * ctx->recv_callback.  On oversized datagrams or non-retryable errors
 * the handler bails out (some early-return lines are elided here).
 */
242 static void unix_dgram_recv_handler(struct poll_watch *w, int fd, short events,
245 struct unix_dgram_ctx *ctx = (struct unix_dgram_ctx *)private_data;
250 #ifdef HAVE_STRUCT_MSGHDR_MSG_CONTROL
251 char buf[CMSG_SPACE(sizeof(int)*INT8_MAX)] = { 0, }; /* room for up to INT8_MAX fds */
252 struct cmsghdr *cmsg;
253 #endif /* HAVE_STRUCT_MSGHDR_MSG_CONTROL */
255 size_t i, num_fds = 0;
257 iov = (struct iovec) {
258 .iov_base = (void *)ctx->recv_buf,
259 .iov_len = ctx->max_msg,
262 msg = (struct msghdr) {
265 #ifdef HAVE_STRUCT_MSGHDR_MSG_CONTROL
267 .msg_controllen = sizeof(buf),
271 #ifdef MSG_CMSG_CLOEXEC
272 flags |= MSG_CMSG_CLOEXEC; /* atomically set CLOEXEC on received fds */
275 received = recvmsg(fd, &msg, flags);
276 if (received == -1) {
277 if ((errno == EAGAIN) ||
278 (errno == EWOULDBLOCK) ||
279 (errno == EINTR) || (errno == ENOMEM)) {
280 /* Not really an error - just try again. */
283 /* Problem with the socket. Set it unreadable. */
284 ctx->ev_funcs->watch_update(w, 0);
287 if (received > ctx->max_msg) {
288 /* More than we expected, not for us */
292 #ifdef HAVE_STRUCT_MSGHDR_MSG_CONTROL
293 for(cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
294 cmsg = CMSG_NXTHDR(&msg, cmsg))
296 void *data = CMSG_DATA(cmsg);
298 if (cmsg->cmsg_type != SCM_RIGHTS) { /* only interested in passed fds */
301 if (cmsg->cmsg_level != SOL_SOCKET) {
306 num_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof (int); /* fd count from payload size */
311 for (i = 0; i < num_fds; i++) {
314 err = prepare_socket_cloexec(fds[i]); /* fallback when MSG_CMSG_CLOEXEC is absent */
320 ctx->recv_callback(ctx, ctx->recv_buf, received,
321 fds, num_fds, ctx->private_data);
325 close_fd_array(fds, num_fds); /* NOTE(review): error path; confirm who owns fds */
327 ctx->recv_callback(ctx, ctx->recv_buf, received,
328 NULL, 0, ctx->private_data);
/* Forward declaration; parameter list continues on elided lines. */
331 static void unix_dgram_job_finished(struct poll_watch *w, int fd, short events,
/*
 * Lazily create the shared send thread pool and watch its signal fd so
 * that unix_dgram_job_finished runs when a blocking send completes.
 * Returns early (elided) when the pool already exists.
 */
334 static int unix_dgram_init_pthreadpool(struct unix_dgram_ctx *ctx)
338 if (ctx->send_pool != NULL) {
342 ret = pthreadpool_init(0, &ctx->send_pool);
347 signalfd = pthreadpool_signal_fd(ctx->send_pool);
349 ctx->pool_read_watch = ctx->ev_funcs->watch_new(
350 ctx->ev_funcs, signalfd, POLLIN,
351 unix_dgram_job_finished, ctx);
352 if (ctx->pool_read_watch == NULL) {
353 pthreadpool_destroy(ctx->send_pool); /* undo pool creation on watch failure */
354 ctx->send_pool = NULL;
/*
 * Create a send queue for destination "dst": allocate it with the
 * destination path stored inline, open a private close-on-exec dgram
 * socket connected to dst (retrying connect on EINTR), make sure the
 * thread pool exists, and link the queue into ctx->send_queues.
 * NOTE(review): error-cleanup paths and the *result assignment are
 * elided in this chunk.
 */
361 static int unix_dgram_send_queue_init(
362 struct unix_dgram_ctx *ctx, const struct sockaddr_un *dst,
363 struct unix_dgram_send_queue **result)
365 struct unix_dgram_send_queue *q;
369 pathlen = strlen(dst->sun_path)+1; /* include terminating NUL */
371 q = malloc(offsetof(struct unix_dgram_send_queue, path) + pathlen);
377 memcpy(q->path, dst->sun_path, pathlen);
379 q->sock = socket(AF_UNIX, SOCK_DGRAM, 0);
385 err = prepare_socket_cloexec(q->sock);
391 ret = connect(q->sock,
392 (const struct sockaddr *)(const void *)dst,
394 } while ((ret == -1) && (errno == EINTR)); /* retry interrupted connect */
401 err = unix_dgram_init_pthreadpool(ctx);
406 DLIST_ADD(ctx->send_queues, q);
/*
 * Tear down a send queue: drop every queued message (closing its
 * dup'ed fds) and unlink the queue from its context.  Socket close and
 * free() calls are on elided lines.
 */
418 static void unix_dgram_send_queue_free(struct unix_dgram_send_queue *q)
420 struct unix_dgram_ctx *ctx = q->ctx;
422 while (q->msgs != NULL) {
423 struct unix_dgram_msg *msg;
425 DLIST_REMOVE(q->msgs, msg);
426 close_fd_array(msg->fds, msg->num_fds); /* release duplicated fds */
430 DLIST_REMOVE(ctx->send_queues, q);
/* Linear scan for an existing send queue whose stored path equals
 * dst_sock; NULL-return on miss is on an elided line. */
434 static struct unix_dgram_send_queue *find_send_queue(
435 struct unix_dgram_ctx *ctx, const char *dst_sock)
437 struct unix_dgram_send_queue *s;
439 for (s = ctx->send_queues; s != NULL; s = s->next) {
440 if (strcmp(s->path, dst_sock) == 0) {
/*
 * Append a message to a send queue.  Copies the iov payload and a
 * dup()'ed copy of each fd into a single heap allocation (payload
 * first, then padding so the fd array lands on int alignment).  All
 * size additions are checked for overflow before malloc.
 * NOTE(review): several error-return lines and the dup() cleanup path
 * are elided in this chunk -- confirm against the full file.
 */
447 static int queue_msg(struct unix_dgram_send_queue *q,
448 const struct iovec *iov, int iovlen,
449 const int *fds, size_t num_fds)
451 struct unix_dgram_msg *msg;
455 size_t fds_size = sizeof(int) * num_fds;
456 int fds_copy[MIN(num_fds, INT8_MAX)]; /* VLA; num_fds is bounded below */
457 size_t fds_padding = 0;
462 if (num_fds > INT8_MAX) { /* hard cap on passed descriptors */
466 for (i = 0; i < num_fds; i++) {
470 for (i = 0; i < num_fds; i++) {
471 fds_copy[i] = dup(fds[i]); /* queue owns private duplicates */
472 if (fds_copy[i] == -1) {
478 data_len = iov_buflen(iov, iovlen);
479 if (data_len == -1) { /* iov lengths overflowed */
483 msglen = offsetof(struct unix_dgram_msg, buf);
484 tmp = msglen + data_len;
485 if ((tmp < msglen) || (tmp < data_len)) { /* overflow check */
492 const size_t fds_align = sizeof(int) - 1;
494 tmp = msglen + fds_align;
495 if ((tmp < msglen) || (tmp < fds_align)) { /* overflow check */
501 fds_padding = tmp - msglen; /* pad so fd array is int-aligned */
504 tmp = msglen + fds_size;
505 if ((tmp < msglen) || (tmp < fds_size)) { /* overflow check */
512 msg = malloc(msglen);
517 msg->buflen = data_len;
521 for (i=0; i<iovlen; i++) { /* flatten the iov into the blob */
522 memcpy(data_buf, iov[i].iov_base, iov[i].iov_len);
523 data_buf += iov[i].iov_len;
526 msg->num_fds = num_fds;
527 if (msg->num_fds > 0) {
529 data_buf += fds_padding;
530 fds_ptr= (void *)data_buf;
531 memcpy(fds_ptr, fds_copy, fds_size);
532 msg->fds = (int *)fds_ptr; /* points into the same allocation */
537 DLIST_ADD_END(q->msgs, msg, struct unix_dgram_msg);
543 close_fd_array(fds_copy, num_fds); /* NOTE(review): error path cleanup; confirm */
/*
 * Runs on a pthreadpool worker thread: do a (possibly blocking)
 * sendmsg() of one queued message on its queue's connected socket,
 * attaching the queued fds as SCM_RIGHTS ancillary data.  Retries on
 * EINTR and records the result in dmsg->sent / dmsg->sys_errno for
 * unix_dgram_job_finished to inspect on the main thread.
 */
547 static void unix_dgram_send_job(void *private_data)
549 struct unix_dgram_msg *dmsg = private_data;
551 .iov_base = (void *)dmsg->buf,
552 .iov_len = dmsg->buflen,
554 struct msghdr msg = {
558 #ifdef HAVE_STRUCT_MSGHDR_MSG_CONTROL
559 struct cmsghdr *cmsg;
560 size_t fds_size = sizeof(int) * dmsg->num_fds;
561 size_t cmsg_len = CMSG_LEN(fds_size);
562 size_t cmsg_space = CMSG_SPACE(fds_size);
563 char cmsg_buf[cmsg_space]; /* VLA sized for this message's fds */
565 if (dmsg->num_fds > 0) {
568 memset(cmsg_buf, 0, cmsg_space);
570 msg.msg_control = cmsg_buf;
571 msg.msg_controllen = cmsg_space;
572 cmsg = CMSG_FIRSTHDR(&msg);
573 cmsg->cmsg_level = SOL_SOCKET;
574 cmsg->cmsg_type = SCM_RIGHTS;
575 cmsg->cmsg_len = cmsg_len;
576 fdptr = CMSG_DATA(cmsg);
577 memcpy(fdptr, dmsg->fds, fds_size);
578 msg.msg_controllen = cmsg->cmsg_len; /* shrink to the actual cmsg size */
580 #endif /* HAVE_STRUCT_MSGHDR_MSG_CONTROL */
583 dmsg->sent = sendmsg(dmsg->sock, &msg, 0);
584 } while ((dmsg->sent == -1) && (errno == EINTR)); /* retry interrupted sends */
586 if (dmsg->sent == -1) {
587 dmsg->sys_errno = errno; /* preserved for the finish handler */
/*
 * POLLIN handler on the thread pool's signal fd: pop one finished job
 * (the job id is the queue's socket fd), find the matching queue, drop
 * the just-sent message, and either schedule the next queued message
 * or free the now-empty queue.  Error handling between the visible
 * lines is elided in this chunk.
 */
591 static void unix_dgram_job_finished(struct poll_watch *w, int fd, short events,
594 struct unix_dgram_ctx *ctx = private_data;
595 struct unix_dgram_send_queue *q;
596 struct unix_dgram_msg *msg;
599 ret = pthreadpool_finished_jobs(ctx->send_pool, &job, 1);
604 for (q = ctx->send_queues; q != NULL; q = q->next) {
605 if (job == q->sock) { /* job id doubles as queue identifier */
611 /* Huh? Should not happen */
616 DLIST_REMOVE(q->msgs, msg);
617 close_fd_array(msg->fds, msg->num_fds); /* fds were dup'ed at queue time */
620 if (q->msgs != NULL) {
621 ret = pthreadpool_add_job(ctx->send_pool, q->sock,
622 unix_dgram_send_job, q->msgs); /* kick off next message */
628 unix_dgram_send_queue_free(q); /* queue drained, drop it */
/*
 * Send one datagram (plus up to INT8_MAX fds) to "dst".  Fds are only
 * accepted for non-seekable objects (sockets/pipes/fifos), since a
 * queued message would have to dup() them.  If a queue for dst already
 * exists, append there to preserve ordering; otherwise attempt a cheap
 * nonblocking sendmsg() on the main socket and, only if that would
 * block, create a queue and hand the message to the thread pool.
 * NOTE(review): several early returns and the success path are on
 * elided lines -- confirm against the full file.
 */
631 static int unix_dgram_send(struct unix_dgram_ctx *ctx,
632 const struct sockaddr_un *dst,
633 const struct iovec *iov, int iovlen,
634 const int *fds, size_t num_fds)
636 struct unix_dgram_send_queue *q;
638 #ifdef HAVE_STRUCT_MSGHDR_MSG_CONTROL
639 struct cmsghdr *cmsg;
640 size_t fds_size = sizeof(int) * num_fds;
641 size_t cmsg_len = CMSG_LEN(fds_size);
642 size_t cmsg_space = CMSG_SPACE(fds_size);
643 char cmsg_buf[cmsg_space]; /* VLA sized for this call's fds */
644 #endif /* HAVE_STRUCT_MSGHDR_MSG_CONTROL */
648 if (num_fds > INT8_MAX) { /* hard cap on passed descriptors */
652 #ifndef HAVE_STRUCT_MSGHDR_MSG_CONTROL
656 #endif /* ! HAVE_STRUCT_MSGHDR_MSG_CONTROL */
658 for (i = 0; i < num_fds; i++) {
660 * Make sure we only allow fd passing
661 * for communication channels,
662 * e.g. sockets, pipes, fifos, ...
664 ret = lseek(fds[i], 0, SEEK_CUR);
665 if (ret == -1 && errno == ESPIPE) { /* non-seekable => acceptable */
671 * Reject the message as we may need to call dup(),
672 * if we queue the message.
674 * That might result in unexpected behavior for the caller
675 * for files and broken posix locking.
681 * To preserve message ordering, we have to queue a message when
682 * others are waiting in line already.
684 q = find_send_queue(ctx, dst->sun_path);
686 return queue_msg(q, iov, iovlen, fds, num_fds);
690 * Try a cheap nonblocking send
693 msg = (struct msghdr) {
694 .msg_name = discard_const_p(struct sockaddr_un, dst),
695 .msg_namelen = sizeof(*dst),
696 .msg_iov = discard_const_p(struct iovec, iov),
699 #ifdef HAVE_STRUCT_MSGHDR_MSG_CONTROL
703 memset(cmsg_buf, 0, cmsg_space);
705 msg.msg_control = cmsg_buf;
706 msg.msg_controllen = cmsg_space;
707 cmsg = CMSG_FIRSTHDR(&msg);
708 cmsg->cmsg_level = SOL_SOCKET;
709 cmsg->cmsg_type = SCM_RIGHTS;
710 cmsg->cmsg_len = cmsg_len;
711 fdptr = CMSG_DATA(cmsg);
712 memcpy(fdptr, fds, fds_size);
713 msg.msg_controllen = cmsg->cmsg_len; /* shrink to the actual cmsg size */
715 #endif /* HAVE_STRUCT_MSGHDR_MSG_CONTROL */
717 ret = sendmsg(ctx->sock, &msg, 0);
721 if ((errno != EWOULDBLOCK) && (errno != EAGAIN) && (errno != EINTR)) {
725 ret = unix_dgram_send_queue_init(ctx, dst, &q); /* would block: queue it */
729 ret = queue_msg(q, iov, iovlen, fds, num_fds);
731 unix_dgram_send_queue_free(q); /* NOTE(review): error path; confirm */
734 ret = pthreadpool_add_job(ctx->send_pool, q->sock,
735 unix_dgram_send_job, q->msgs);
737 unix_dgram_send_queue_free(q); /* NOTE(review): error path; confirm */
743 static int unix_dgram_sock(struct unix_dgram_ctx *ctx)
/*
 * Destroy a unix_dgram_ctx.  Refuses (return on elided line) while
 * send queues are still pending, tears down the thread pool and both
 * poll watches, and unlinks the socket path only in the process that
 * created it.  Buffer frees and the return value are on elided lines.
 */
748 static int unix_dgram_free(struct unix_dgram_ctx *ctx)
750 if (ctx->send_queues != NULL) { /* still busy: caller must wait */
754 if (ctx->send_pool != NULL) {
755 int ret = pthreadpool_destroy(ctx->send_pool);
759 ctx->ev_funcs->watch_free(ctx->pool_read_watch);
762 ctx->ev_funcs->watch_free(ctx->sock_read_watch);
764 if (getpid() == ctx->created_pid) {
765 /* If we created it, unlink. Otherwise someone else might
766 * still have it open */
777 * Every message starts with a uint64_t cookie.
779 * A value of 0 indicates a single-fragment message which is complete in
780 * itself. The data immediately follows the cookie.
782 * Every multi-fragment message has a cookie != 0 and starts with a cookie
783 * followed by a struct unix_msg_header and then the data. The pid and sock
784 * fields are used to assure uniqueness on the receiver side.
/* Wire header of a multi-fragment message.  NOTE(review): fields are
 * elided here; later code reads hdr.msglen, hdr.pid and hdr.sock. */
787 struct unix_msg_hdr {
/* NOTE(review): a second struct ("struct unix_msg", the per-sender
 * reassembly record) begins on elided lines; only its list links are
 * visible.  Later code uses ->msglen, ->received, ->sender_pid,
 * ->sender_sock, ->cookie and ->buf. */
794 struct unix_msg *prev, *next;
/*
 * Fragmenting message layer on top of unix_dgram.  NOTE(review):
 * ->fragment_len, ->cookie and ->private_data are elided here but
 * referenced by the functions below.
 */
803 struct unix_msg_ctx {
804 struct unix_dgram_ctx *dgram; /* underlying datagram transport */
808 void (*recv_callback)(struct unix_msg_ctx *ctx, /* fires per complete message */
809 uint8_t *msg, size_t msg_len,
810 int *fds, size_t num_fds,
814 struct unix_msg *msgs; /* partially reassembled incoming messages */
/* Forward declaration of the dgram-level receive callback; the
 * parameter list continues on elided lines. */
817 static void unix_msg_recv(struct unix_dgram_ctx *dgram_ctx,
818 uint8_t *buf, size_t buflen,
819 int *fds, size_t num_fds,
/*
 * Public constructor: allocate a unix_msg_ctx and create the
 * underlying unix_dgram transport with unix_msg_recv as its receive
 * callback.  NOTE(review): cookie initialization, error cleanup and
 * the *result assignment are on elided lines.
 */
822 int unix_msg_init(const struct sockaddr_un *addr,
823 const struct poll_funcs *ev_funcs,
824 size_t fragment_len, uint64_t cookie,
825 void (*recv_callback)(struct unix_msg_ctx *ctx,
826 uint8_t *msg, size_t msg_len,
827 int *fds, size_t num_fds,
830 struct unix_msg_ctx **result)
832 struct unix_msg_ctx *ctx;
835 ctx = malloc(sizeof(*ctx));
840 *ctx = (struct unix_msg_ctx) {
841 .fragment_len = fragment_len,
843 .recv_callback = recv_callback,
844 .private_data = private_data
847 ret = unix_dgram_init(addr, fragment_len, ev_funcs,
848 unix_msg_recv, ctx, &ctx->dgram); /* dgram size == fragment size */
/*
 * Public send: if the payload plus a zero cookie fits in one fragment,
 * send it as a single datagram.  Otherwise prefix every fragment with
 * the ctx cookie and a unix_msg_hdr (pid+sock make the stream unique
 * per sender) and send the payload in fragment_len-sized pieces,
 * passing the fd array only with the last fragment.  NOTE(review):
 * several lines (cookie increment wrap, single-fragment cookie setup,
 * error returns) are elided in this chunk.
 */
858 int unix_msg_send(struct unix_msg_ctx *ctx, const struct sockaddr_un *dst,
859 const struct iovec *iov, int iovlen,
860 const int *fds, size_t num_fds)
865 struct iovec iov_copy[iovlen+2]; /* VLA: room for cookie + hdr prefixes */
866 struct unix_msg_hdr hdr;
867 struct iovec src_iov;
873 msglen = iov_buflen(iov, iovlen);
878 #ifndef HAVE_STRUCT_MSGHDR_MSG_CONTROL
882 #endif /* ! HAVE_STRUCT_MSGHDR_MSG_CONTROL */
884 if (num_fds > INT8_MAX) { /* hard cap on passed descriptors */
888 if (msglen <= (ctx->fragment_len - sizeof(uint64_t))) {
891 iov_copy[0].iov_base = &cookie; /* single-fragment marker (cookie==0) */
892 iov_copy[0].iov_len = sizeof(cookie);
894 memcpy(&iov_copy[1], iov,
895 sizeof(struct iovec) * iovlen);
898 return unix_dgram_send(ctx->dgram, dst, iov_copy, iovlen+1,
902 hdr = (struct unix_msg_hdr) {
905 .sock = unix_dgram_sock(ctx->dgram) /* disambiguates senders with equal pid */
908 iov_copy[0].iov_base = &ctx->cookie;
909 iov_copy[0].iov_len = sizeof(ctx->cookie);
910 iov_copy[1].iov_base = &hdr;
911 iov_copy[1].iov_len = sizeof(hdr);
917 * The following write loop sends the user message in pieces. We have
918 * filled the first two iovecs above with "cookie" and "hdr". In the
919 * following loops we pull message chunks from the user iov array and
920 * fill iov_copy piece by piece, possibly truncating chunks from the
921 * caller's iov array. Ugly, but hopefully efficient.
924 while (sent < msglen) {
926 size_t iov_index = 2; /* slots 0/1 hold cookie and hdr */
928 fragment_len = sizeof(ctx->cookie) + sizeof(hdr);
930 while (fragment_len < ctx->fragment_len) {
933 space = ctx->fragment_len - fragment_len;
934 chunk = MIN(space, src_iov.iov_len); /* may truncate a caller iovec */
936 iov_copy[iov_index].iov_base = src_iov.iov_base;
937 iov_copy[iov_index].iov_len = chunk;
940 src_iov.iov_base = (char *)src_iov.iov_base + chunk;
941 src_iov.iov_len -= chunk;
942 fragment_len += chunk;
944 if (src_iov.iov_len == 0) { /* caller iovec exhausted, advance */
953 sent += (fragment_len - sizeof(ctx->cookie) - sizeof(hdr));
956 * only the last fragment should pass the fd array.
957 * That simplifies the receiver a lot.
960 ret = unix_dgram_send(ctx->dgram, dst,
964 ret = unix_dgram_send(ctx->dgram, dst,
974 if (ctx->cookie == 0) { /* NOTE(review): cookie 0 is reserved; confirm wrap fix */
/*
 * Dgram-level receive callback: strip the leading cookie; cookie 0 is
 * a complete single-fragment message delivered immediately.  Otherwise
 * read the unix_msg_hdr, look up (or create) the per-sender reassembly
 * record keyed by (pid, sock), append the fragment, and deliver the
 * message once fully received.  A cookie change from the same sender
 * discards the stale partial message.  NOTE(review): several early
 * returns and free() calls are on elided lines.
 */
981 static void unix_msg_recv(struct unix_dgram_ctx *dgram_ctx,
982 uint8_t *buf, size_t buflen,
983 int *fds, size_t num_fds,
986 struct unix_msg_ctx *ctx = (struct unix_msg_ctx *)private_data;
987 struct unix_msg_hdr hdr;
988 struct unix_msg *msg;
992 if (buflen < sizeof(cookie)) { /* runt datagram, ignore */
996 memcpy(&cookie, buf, sizeof(cookie));
998 buf += sizeof(cookie);
999 buflen -= sizeof(cookie);
1002 ctx->recv_callback(ctx, buf, buflen, fds, num_fds, ctx->private_data); /* cookie==0 path */
1006 if (buflen < sizeof(hdr)) { /* fragment too short for a header */
1009 memcpy(&hdr, buf, sizeof(hdr));
1012 buflen -= sizeof(hdr);
1014 for (msg = ctx->msgs; msg != NULL; msg = msg->next) {
1015 if ((msg->sender_pid == hdr.pid) &&
1016 (msg->sender_sock == hdr.sock)) { /* existing stream from this sender */
1021 if ((msg != NULL) && (msg->cookie != cookie)) {
1022 DLIST_REMOVE(ctx->msgs, msg); /* sender restarted: drop stale partial */
1028 msg = malloc(offsetof(struct unix_msg, buf) + hdr.msglen);
1032 *msg = (struct unix_msg) {
1033 .msglen = hdr.msglen,
1034 .sender_pid = hdr.pid,
1035 .sender_sock = hdr.sock,
1038 DLIST_ADD(ctx->msgs, msg);
1041 space = msg->msglen - msg->received;
1042 if (buflen > space) { /* fragment overflows announced length, drop */
1046 memcpy(msg->buf + msg->received, buf, buflen);
1047 msg->received += buflen;
1049 if (msg->received < msg->msglen) { /* wait for more fragments */
1053 DLIST_REMOVE(ctx->msgs, msg);
1054 ctx->recv_callback(ctx, msg->buf, msg->msglen, fds, num_fds, ctx->private_data);
1059 close_fd_array(fds, num_fds); /* NOTE(review): cleanup path; confirm fd ownership */
/*
 * Public destructor: free the dgram layer first (may refuse while
 * sends are pending), then drop any partially reassembled messages.
 * The final free(ctx) and return are on elided lines.
 */
1062 int unix_msg_free(struct unix_msg_ctx *ctx)
1066 ret = unix_dgram_free(ctx->dgram);
1071 while (ctx->msgs != NULL) {
1072 struct unix_msg *msg = ctx->msgs;
1073 DLIST_REMOVE(ctx->msgs, msg);
1081 static ssize_t iov_buflen(const struct iovec *iov, int iovlen)
1086 for (i=0; i<iovlen; i++) {
1087 size_t thislen = iov[i].iov_len;
1088 size_t tmp = buflen + thislen;
1090 if ((tmp < buflen) || (tmp < thislen)) {