2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
6 Copyright (C) Jeremy Allison 2010
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "system/network.h"
26 #endif /* HAVE_LIBURING */
27 #include "smbd/smbd.h"
28 #include "smbd/globals.h"
29 #include "smbd/smbXsrv_open.h"
30 #include "lib/param/param.h"
31 #include "../libcli/smb/smb_common.h"
32 #include "../lib/tsocket/tsocket.h"
33 #include "../lib/util/tevent_ntstatus.h"
34 #include "smbprofile.h"
35 #include "../lib/util/bitmap.h"
36 #include "../librpc/gen_ndr/krb5pac.h"
37 #include "lib/util/iov_buf.h"
39 #include "libcli/smb/smbXcli_base.h"
40 #include "source3/lib/substitute.h"
43 /* SIOCOUTQ TIOCOUTQ are the same */
44 #define __IOCTL_SEND_QUEUE_SIZE_OPCODE TIOCOUTQ
45 #define __HAVE_TCP_INFO_RTO 1
46 #define __ALLOW_MULTI_CHANNEL_SUPPORT 1
47 #elif defined(FREEBSD)
48 #define __IOCTL_SEND_QUEUE_SIZE_OPCODE FIONWRITE
49 #define __HAVE_TCP_INFO_RTO 1
50 #define __ALLOW_MULTI_CHANNEL_SUPPORT 1
53 #include "lib/crypto/gnutls_helpers.h"
54 #include <gnutls/gnutls.h>
55 #include <gnutls/crypto.h>
58 #define DBGC_CLASS DBGC_SMB2
60 static void smbd_smb2_connection_handler(struct tevent_context *ev,
61 struct tevent_fd *fde,
64 static NTSTATUS smbd_smb2_flush_send_queue(struct smbXsrv_connection *xconn);
/*
 * Per-opcode dispatch table for SMB2 requests, indexed by opcode value
 * (see smbd_smb2_call() below, which asserts table[opcode].opcode == opcode).
 * The visible flag need_session marks opcodes that require an authenticated
 * session before processing; other per-opcode flags are not visible in this
 * extract.
 */
66 static const struct smbd_smb2_dispatch_table {
69 	bool need_session : 1;
73 } smbd_smb2_table[] = {
75 	.opcode = SMB2_OP_NEGPROT,
78 	.opcode = SMB2_OP_SESSSETUP,
81 	.opcode = SMB2_OP_LOGOFF,
85 	.opcode = SMB2_OP_TCON,
88 * This call needs to be run as root.
90 * smbd_smb2_request_process_tcon()
91 * calls make_connection_snum(), which will call
92 * change_to_user(), when needed.
96 	.opcode = SMB2_OP_TDIS,
101 	.opcode = SMB2_OP_CREATE,
102 	.need_session = true,
105 	.opcode = SMB2_OP_CLOSE,
106 	.need_session = true,
110 	.opcode = SMB2_OP_FLUSH,
111 	.need_session = true,
115 	.opcode = SMB2_OP_READ,
116 	.need_session = true,
120 	.opcode = SMB2_OP_WRITE,
121 	.need_session = true,
126 	.opcode = SMB2_OP_LOCK,
127 	.need_session = true,
131 	.opcode = SMB2_OP_IOCTL,
132 	.need_session = true,
/* CANCEL and KEEPALIVE (echo) carry no need_session flag here:
 * they are valid without an established session. */
137 	.opcode = SMB2_OP_CANCEL,
140 	.opcode = SMB2_OP_KEEPALIVE,
142 	.opcode = SMB2_OP_QUERY_DIRECTORY,
143 	.need_session = true,
147 	.opcode = SMB2_OP_NOTIFY,
148 	.need_session = true,
152 	.opcode = SMB2_OP_GETINFO,
153 	.need_session = true,
157 	.opcode = SMB2_OP_SETINFO,
158 	.need_session = true,
163 	.opcode = SMB2_OP_BREAK,
164 	.need_session = true,
169 * as LEASE breaks does not
/*
 * Map an SMB2 opcode to its symbolic name for logging/debugging.
 * Returns the string "Bad SMB2 opcode" for any value outside the
 * known set; the returned pointer is a static string, never freed.
 */
175 const char *smb2_opcode_name(uint16_t opcode)
177 	const char *result = "Bad SMB2 opcode";
180 	case SMB2_OP_NEGPROT:
181 		result = "SMB2_OP_NEGPROT";
183 	case SMB2_OP_SESSSETUP:
184 		result = "SMB2_OP_SESSSETUP";
187 		result = "SMB2_OP_LOGOFF";
190 		result = "SMB2_OP_TCON";
193 		result = "SMB2_OP_TDIS";
196 		result = "SMB2_OP_CREATE";
199 		result = "SMB2_OP_CLOSE";
202 		result = "SMB2_OP_FLUSH";
205 		result = "SMB2_OP_READ";
208 		result = "SMB2_OP_WRITE";
211 		result = "SMB2_OP_LOCK";
214 		result = "SMB2_OP_IOCTL";
217 		result = "SMB2_OP_CANCEL";
219 	case SMB2_OP_KEEPALIVE:
220 		result = "SMB2_OP_KEEPALIVE";
222 	case SMB2_OP_QUERY_DIRECTORY:
223 		result = "SMB2_OP_QUERY_DIRECTORY";
226 		result = "SMB2_OP_NOTIFY";
228 	case SMB2_OP_GETINFO:
229 		result = "SMB2_OP_GETINFO";
231 	case SMB2_OP_SETINFO:
232 		result = "SMB2_OP_SETINFO";
235 		result = "SMB2_OP_BREAK";
/*
 * Look up the dispatch-table entry for an opcode.
 * Returns NULL for opcodes beyond the table; otherwise returns the
 * table slot, asserting that the table is indexed by opcode value.
 */
243 static const struct smbd_smb2_dispatch_table *smbd_smb2_call(uint16_t opcode)
245 	const struct smbd_smb2_dispatch_table *ret = NULL;
247 	if (opcode >= ARRAY_SIZE(smbd_smb2_table)) {
251 	ret = &smbd_smb2_table[opcode];
/* Sanity check: table layout must keep entry N at opcode N. */
253 	SMB_ASSERT(ret->opcode == opcode);
/*
 * Debug helper: dump the iov_len of every input and output vector of
 * a request via dbgtext(). No side effects beyond debug output.
 */
258 static void print_req_vectors(const struct smbd_smb2_request *req)
262 	for (i = 0; i < req->in.vector_count; i++) {
263 		dbgtext("\treq->in.vector[%u].iov_len = %u\n",
265 			(unsigned int)req->in.vector[i].iov_len);
267 	for (i = 0; i < req->out.vector_count; i++) {
268 		dbgtext("\treq->out.vector[%u].iov_len = %u\n",
270 			(unsigned int)req->out.vector[i].iov_len);
/*
 * Check whether a raw inbuf (including the 4-byte NBT length prefix)
 * is large enough to hold an SMB2 header and starts with the SMB2
 * magic at offset 4.
 */
274 bool smbd_is_smb2_header(const uint8_t *inbuf, size_t size)
276 	if (size < (4 + SMB2_HDR_BODY)) {
280 	if (IVAL(inbuf, 4) != SMB2_MAGIC) {
/*
 * A request is compound if its input holds at least two PDUs, i.e.
 * two groups of SMBD_SMB2_NUM_IOV_PER_REQ iovecs (index 0 is the
 * transport vector, see smbd_smb2_inbuf_parse_compound()).
 */
287 bool smbd_smb2_is_compound(const struct smbd_smb2_request *req)
289 	return req->in.vector_count >= (2*SMBD_SMB2_NUM_IOV_PER_REQ);
/*
 * True when current_idx points at the final PDU of the (possibly
 * compound) request.
 */
292 bool smbd_smb2_is_last_in_compound(const struct smbd_smb2_request *req)
294 	return (req->current_idx + SMBD_SMB2_NUM_IOV_PER_REQ ==
295 		req->in.vector_count);
/*
 * io_uring completion callback installed by the read-state destructor:
 * fires when an outstanding recvmsg CQE arrives for a state that is
 * being torn down. The state was reparented to the recv uring, so it
 * can be released safely here.
 */
298 static void smbd_smb2_request_read_state_destructor_completion_io_uring(struct samba_io_uring_completion *completion,
299 								        void *completion_private,
300 									const struct io_uring_cqe *cqe)
302 	struct smbd_smb2_request_read_state *state =
303 		talloc_get_type_abort(completion_private,
304 		struct smbd_smb2_request_read_state);
306 	samba_io_uring_completion_assert_unused(completion);
/*
 * talloc destructor for the per-connection read state.
 * Cancels any queued recv submission; if a completion is still pending
 * it cannot be freed yet, so the completion callback is swapped to the
 * teardown handler above and the state is reparented onto the recv
 * uring so it outlives the connection object.
 */
311 static int smbd_smb2_request_read_state_destructor(struct smbd_smb2_request_read_state *state)
315 	if (state->xconn != NULL && state->xconn->smb2.recv_uring != NULL) {
316 		samba_io_uring_cancel_submissions(state->xconn->smb2.recv_uring,
317 						  &state->io_uring.submission);
320 	pending = samba_io_uring_completion_pending(&state->io_uring.completion);
325 	samba_io_uring_completion_update(&state->io_uring.completion,
326 					 smbd_smb2_request_read_state_destructor_completion_io_uring,
329 	if (state->xconn != NULL) {
/* Keep the state alive until the in-flight CQE is consumed. */
330 		talloc_steal(state->xconn->smb2.recv_uring, state);
/*
 * Switch a connection into SMB2 mode:
 *  - allocate the request read state (with destructor) on the xconn,
 *  - initialize the credit window (seq_low = expected_seq_low,
 *    range/granted = 1, max from "smb2 max credits") and its bitmap,
 *  - replace the SMB1 fde with an SMB2 connection handler fde,
 *  - optionally enable sendmsg/recvmsg via io_uring behind the
 *    "smb2srv:send_uring" / "smb2srv:recv_uring" parametric options,
 *  - set the socket (non-)blocking mode depending on MSG_DONTWAIT
 *    support (both branches visible below; selection is by #ifdef,
 *    not visible in this extract).
 * Returns NT_STATUS_NO_MEMORY / NT_STATUS_INTERNAL_ERROR on failure.
 */
337 static NTSTATUS smbd_initialize_smb2(struct smbXsrv_connection *xconn,
338 				     uint64_t expected_seq_low)
342 	xconn->smb2.request_read_state = talloc_zero(xconn,
343 					struct smbd_smb2_request_read_state);
344 	if (xconn->smb2.request_read_state == NULL) {
345 		return NT_STATUS_NO_MEMORY;
347 	talloc_set_destructor(xconn->smb2.request_read_state,
348 			      smbd_smb2_request_read_state_destructor);
349 	xconn->smb2.request_read_state->xconn = xconn;
351 	xconn->smb2.credits.seq_low = expected_seq_low;
352 	xconn->smb2.credits.seq_range = 1;
353 	xconn->smb2.credits.granted = 1;
354 	xconn->smb2.credits.max = lp_smb2_max_credits();
/* Bitmap of in-window message-ids, used by
 * smb2_validate_sequence_number() to detect replays. */
355 	xconn->smb2.credits.bitmap = bitmap_talloc(xconn,
356 					xconn->smb2.credits.max);
357 	if (xconn->smb2.credits.bitmap == NULL) {
358 		return NT_STATUS_NO_MEMORY;
361 	tevent_fd_set_close_fn(xconn->transport.fde, NULL);
362 	TALLOC_FREE(xconn->transport.fde);
364 	xconn->transport.fde = tevent_add_fd(
365 		xconn->client->raw_ev_ctx,
367 		xconn->transport.sock,
368 		TEVENT_FD_ERROR | TEVENT_FD_READ,
369 		smbd_smb2_connection_handler,
371 	if (xconn->transport.fde == NULL) {
372 		close(xconn->transport.sock);
373 		xconn->transport.sock = -1;
374 		return NT_STATUS_NO_MEMORY;
376 	tevent_fd_set_auto_close(xconn->transport.fde);
/* ~0 threshold effectively disables async-uring offload unless the
 * parametric option below lowers it. */
378 	xconn->smb2.send_io_uring.async_threshhold = ~0;
379 	if (lp_parm_bool(-1, "smb2srv", "send_uring", false)) {
380 		const struct samba_io_uring_features *features =
381 			samba_io_uring_get_features(xconn->client->sconn->uring);
384 		tmp = lp_parm_bytes(-1,
386 				    "sendmsg_uring_async_threshhold",
388 		if (features->flag_async && tmp >= 0) {
389 			xconn->smb2.send_io_uring.async_threshhold = tmp;
392 		if (features->op_sendmsg) {
393 			xconn->smb2.send_io_uring.uring = xconn->client->sconn->uring;
396 	xconn->smb2.recv_io_uring.async_threshhold = ~0;
397 	if (lp_parm_bool(-1, "smb2srv", "recv_uring", false)) {
398 		const struct samba_io_uring_features *features =
399 			samba_io_uring_get_features(xconn->client->sconn->uring);
402 		tmp = lp_parm_bytes(-1,
404 				    "recvmsg_uring_async_threshhold",
406 		if (features->flag_async && tmp >= 0) {
407 			xconn->smb2.recv_io_uring.async_threshhold = tmp;
410 		if (features->op_recvmsg) {
411 			xconn->smb2.recv_io_uring.uring = xconn->client->sconn->uring;
416 * Ensure child is set to non-blocking mode,
417 * unless the system supports MSG_DONTWAIT,
418 * if MSG_DONTWAIT is available we should force
422 	rc = set_blocking(xconn->transport.sock, true);
424 		return NT_STATUS_INTERNAL_ERROR;
427 	rc = set_blocking(xconn->transport.sock, false);
429 		return NT_STATUS_INTERNAL_ERROR;
/*
 * NBT framing helpers: smb2_len() reads the 24-bit big-endian length
 * from bytes 1-3 of the 4-byte transport header; _smb2_setlen() writes
 * it back. Byte 0 is the (zero) NBT message type.
 */
436 #define smb2_len(buf) (PVAL(buf,3)|(PVAL(buf,2)<<8)|(PVAL(buf,1)<<16))
437 #define _smb2_setlen(_buf,len) do { \
438 	uint8_t *buf = (uint8_t *)_buf; \
440 	buf[1] = ((len)&0xFF0000)>>16; \
441 	buf[2] = ((len)&0xFF00)>>8; \
442 	buf[3] = (len)&0xFF; \
/*
 * Compute the total payload length of vector[1..count-1] and store it
 * in the 4-byte NBT header at vector[0]. Fails (returns false) if the
 * payload overflows or exceeds the 24-bit NBT limit (0xFFFFFF).
 */
445 static bool smb2_setup_nbt_length(struct iovec *vector, int count)
453 	len = iov_buflen(vector+1, count-1);
455 	if ((len == -1) || (len > 0xFFFFFF)) {
459 	_smb2_setlen(vector[0].iov_base, len);
/*
 * io_uring completion callback used during send-queue teardown: once
 * the in-flight sendmsg CQE arrives, clear pending_snd and free the
 * memory context that was kept alive for the submission buffers.
 */
463 static void smbd_smb2_send_queue_destruction_completion_io_uring(struct samba_io_uring_completion *completion,
464 								 void *completion_private,
465 								 const struct io_uring_cqe *cqe)
467 	struct smbd_smb2_send_queue *e =
468 		(struct smbd_smb2_send_queue *)completion_private;
470 	SMB_ASSERT(e->io_uring.pending_snd);
471 	e->io_uring.pending_snd = false;
472 	talloc_free(e->mem_ctx);
/*
 * Destructor helper for a send-queue entry.
 * If nothing was submitted via io_uring there is nothing to do.
 * Otherwise cancel the submission; if a completion is still pending,
 * redirect it to the teardown callback above and reparent e->mem_ctx
 * onto the uring so the buffers survive until the CQE is consumed.
 */
475 static int smbd_smb2_send_queue_destruction(struct smbd_smb2_send_queue *e)
479 	if (e->xconn == NULL || e->xconn->smb2.send_io_uring.uring == NULL) {
483 	if (e->io_uring.pending_snd) {
484 		samba_io_uring_cancel_submissions(e->xconn->smb2.send_io_uring.uring,
485 						  &e->io_uring.submission);
488 	pending = samba_io_uring_completion_pending(&e->io_uring.completion);
490 	if (e->mem_ctx == NULL) {
493 	if (e->xconn != NULL) {
499 	samba_io_uring_completion_update(&e->io_uring.completion,
500 					 smbd_smb2_send_queue_destruction_completion_io_uring,
503 	if (e->xconn != NULL) {
504 		talloc_steal(e->xconn->smb2.send_io_uring.uring, e->mem_ctx);
/*
 * talloc destructor for a request: drop the cached signing/encryption
 * keys and tear down the embedded send-queue entry (which may defer
 * freeing if an io_uring send is still in flight).
 */
511 static int smbd_smb2_request_destructor(struct smbd_smb2_request *req)
513 	TALLOC_FREE(req->first_enc_key);
514 	TALLOC_FREE(req->last_sign_key);
516 	return smbd_smb2_send_queue_destruction(&req->queue_entry);
/* Mark a request as internally-generated async (setter only). */
519 void smb2_request_set_async_internal(struct smbd_smb2_request *req,
522 	req->async_internal = async_internal;
/*
 * Allocate and zero-initialize a request object parented to xconn.
 * A temporary pool (talloc_tos(), or a dedicated pool when the
 * valgrind-debug branch is enabled) is used for the allocation and the
 * request is then reparented onto xconn. last_session_id/last_tid are
 * primed with sentinel MAX values so the first compound element never
 * matches a "previous" id. Returns NULL on allocation failure.
 */
525 static struct smbd_smb2_request *smbd_smb2_request_allocate(struct smbXsrv_connection *xconn)
527 	TALLOC_CTX *mem_pool;
528 	struct smbd_smb2_request *req;
531 	/* Enable this to find subtle valgrind errors. */
532 	mem_pool = talloc_init("smbd_smb2_request_allocate");
534 	mem_pool = talloc_tos();
536 	if (mem_pool == NULL) {
540 	req = talloc(mem_pool, struct smbd_smb2_request);
542 		talloc_free(mem_pool);
545 	talloc_reparent(mem_pool, xconn, req);
547 	TALLOC_FREE(mem_pool);
549 	*req = (struct smbd_smb2_request) {
550 		.sconn = xconn->client->sconn,
552 		.last_session_id = UINT64_MAX,
553 		.last_tid = UINT32_MAX,
556 	talloc_set_destructor(req, smbd_smb2_request_destructor);
/*
 * Split a raw inbound buffer into per-PDU iovec groups
 * (TF/HDR/BODY/DYN per request, SMBD_SMB2_NUM_IOV_PER_REQ each),
 * walking NextCommand offsets for compound requests and decrypting
 * SMB2_TRANSFORM-wrapped PDUs in place.
 * Index 0 of the vector is reserved for the transport (NBT) header.
 * The small fixed req->in._vector array is used until it overflows,
 * then the vector is moved to a talloc'd array (iov_alloc).
 * Returns NT_STATUS_INVALID_PARAMETER on malformed framing,
 * NT_STATUS_USER_SESSION_DELETED on unknown transform session ids.
 */
561 static NTSTATUS smbd_smb2_inbuf_parse_compound(struct smbXsrv_connection *xconn,
565 					       struct smbd_smb2_request *req,
569 	TALLOC_CTX *mem_ctx = req;
573 	uint8_t *first_hdr = buf;
574 	size_t verified_buflen = 0;
579 * Note: index '0' is reserved for the transport protocol
581 	iov = req->in._vector;
583 	while (taken < buflen) {
584 		size_t len = buflen - taken;
585 		uint8_t *hdr = first_hdr + taken;
588 		size_t next_command_ofs;
590 		uint8_t *body = NULL;
593 		struct iovec *iov_alloc = NULL;
595 		if (iov != req->in._vector) {
/* Bytes already authenticated by a previous transform decrypt
 * don't need re-checking beyond the decrypted span. */
599 		if (verified_buflen > taken) {
600 			len = verified_buflen - taken;
607 			DEBUG(10, ("%d bytes left, expected at least %d\n",
611 		if (IVAL(hdr, 0) == SMB2_TF_MAGIC) {
612 			struct smbXsrv_session *s = NULL;
614 			struct iovec tf_iov[2];
/* Transform headers are only legal on SMB 3.x dialects with a
 * negotiated cipher. */
618 			if (xconn->protocol < PROTOCOL_SMB3_00) {
619 				DEBUG(10, ("Got SMB2_TRANSFORM header, "
620 					   "but dialect[0x%04X] is used\n",
621 					   xconn->smb2.server.dialect));
625 			if (xconn->smb2.server.cipher == 0) {
626 				DEBUG(10, ("Got SMB2_TRANSFORM header, "
627 					   "but not negotiated "
628 					   "client[0x%08X] server[0x%08X]\n",
629 					   xconn->smb2.client.capabilities,
630 					   xconn->smb2.server.capabilities));
634 			if (len < SMB2_TF_HDR_SIZE) {
635 				DEBUG(1, ("%d bytes left, expected at least %d\n",
636 					   (int)len, SMB2_TF_HDR_SIZE));
640 			tf_len = SMB2_TF_HDR_SIZE;
643 			hdr = first_hdr + taken;
644 			enc_len = IVAL(tf, SMB2_TF_MSG_SIZE);
645 			uid = BVAL(tf, SMB2_TF_SESSION_ID);
647 			if (len < SMB2_TF_HDR_SIZE + enc_len) {
648 				DEBUG(1, ("%d bytes left, expected at least %d\n",
650 					   (int)(SMB2_TF_HDR_SIZE + enc_len)));
/* Look up the session for the decryption key: first on this
 * connection, then in the client-global table (multi-channel). */
654 			status = smb2srv_session_lookup_conn(xconn, uid, now,
656 			if (!NT_STATUS_IS_OK(status)) {
657 				status = smb2srv_session_lookup_global(xconn->client,
660 				if (!NT_STATUS_IS_OK(status)) {
661 					DEBUG(1, ("invalid session[%llu] in "
662 						  "SMB2_TRANSFORM header\n",
663 						   (unsigned long long)uid));
664 					TALLOC_FREE(iov_alloc);
665 					return NT_STATUS_USER_SESSION_DELETED;
668 			tf_iov[0].iov_base = (void *)tf;
669 			tf_iov[0].iov_len = tf_len;
670 			tf_iov[1].iov_base = (void *)hdr;
671 			tf_iov[1].iov_len = enc_len;
/* In-place decryption; on success the plaintext PDUs replace
 * the ciphertext at hdr. */
673 			status = smb2_signing_decrypt_pdu(s->global->decryption_key,
675 			if (!NT_STATUS_IS_OK(status)) {
676 				TALLOC_FREE(iov_alloc);
680 			verified_buflen = taken + enc_len;
685 * We need the header plus the body length field
/* Undocumented developer backdoor: a magic 8-byte packet makes
 * smbd exit, only honored with "smbd:suicide mode = yes". */
688 		if (len < SMB2_HDR_BODY + 2) {
691 			    (IVAL(hdr, 0) == SMB_SUICIDE_PACKET) &&
692 			    lp_parm_bool(-1, "smbd", "suicide mode", false)) {
693 				uint8_t exitcode = CVAL(hdr, 4);
694 				DBG_WARNING("SUICIDE: Exiting immediately "
695 					    "with code %"PRIu8"\n",
700 			DEBUG(10, ("%d bytes left, expected at least %d\n",
701 				   (int)len, SMB2_HDR_BODY));
704 		if (IVAL(hdr, 0) != SMB2_MAGIC) {
705 			DEBUG(10, ("Got non-SMB2 PDU: %x\n",
709 		if (SVAL(hdr, 4) != SMB2_HDR_BODY) {
710 			DEBUG(10, ("Got HDR len %d, expected %d\n",
711 				   SVAL(hdr, 4), SMB2_HDR_BODY));
716 		next_command_ofs = IVAL(hdr, SMB2_HDR_NEXT_COMMAND);
717 		body_size = SVAL(hdr, SMB2_HDR_BODY);
719 		if (next_command_ofs != 0) {
720 			if (next_command_ofs < (SMB2_HDR_BODY + 2)) {
723 			if (next_command_ofs > full_size) {
726 			full_size = next_command_ofs;
/* Clamp a lying body_size to what the PDU can actually hold;
 * per-opcode validation reports the error later. */
733 		if (body_size > (full_size - SMB2_HDR_BODY)) {
735 * let the caller handle the error
737 			body_size = full_size - SMB2_HDR_BODY;
739 		body = hdr + SMB2_HDR_BODY;
740 		dyn = body + body_size;
741 		dyn_size = full_size - (SMB2_HDR_BODY + body_size);
743 		if (num_iov >= ARRAY_SIZE(req->in._vector)) {
744 			struct iovec *iov_tmp = NULL;
746 			iov_tmp = talloc_realloc(mem_ctx, iov_alloc,
749 						 SMBD_SMB2_NUM_IOV_PER_REQ);
750 			if (iov_tmp == NULL) {
751 				TALLOC_FREE(iov_alloc);
752 				return NT_STATUS_NO_MEMORY;
755 			if (iov_alloc == NULL) {
/* First spill: copy the fixed inline vector into the heap. */
758 					sizeof(req->in._vector));
764 		num_iov += SMBD_SMB2_NUM_IOV_PER_REQ;
766 		cur[SMBD_SMB2_TF_IOV_OFS].iov_base  = tf;
767 		cur[SMBD_SMB2_TF_IOV_OFS].iov_len   = tf_len;
768 		cur[SMBD_SMB2_HDR_IOV_OFS].iov_base = hdr;
769 		cur[SMBD_SMB2_HDR_IOV_OFS].iov_len  = SMB2_HDR_BODY;
770 		cur[SMBD_SMB2_BODY_IOV_OFS].iov_base = body;
771 		cur[SMBD_SMB2_BODY_IOV_OFS].iov_len  = body_size;
772 		cur[SMBD_SMB2_DYN_IOV_OFS].iov_base = dyn;
773 		cur[SMBD_SMB2_DYN_IOV_OFS].iov_len  = dyn_size;
783 	if (iov != req->in._vector) {
786 	return NT_STATUS_INVALID_PARAMETER;
/*
 * Create the first SMB2 request of a connection from a raw PDU.
 * Strictly validates that this is a plausible *initial* request:
 * minimum size, SMB2 magic, opcode == NEGPROT and no compound chain
 * (NextCommand must be 0). The PDU is copied into the request and
 * parsed into iovecs via smbd_smb2_inbuf_parse_compound().
 */
789 static NTSTATUS smbd_smb2_request_create(struct smbXsrv_connection *xconn,
790 					 const uint8_t *_inpdu, size_t size,
791 					 struct smbd_smb2_request **_req)
793 	struct smbd_smb2_request *req;
794 	uint32_t protocol_version;
795 	uint8_t *inpdu = NULL;
796 	const uint8_t *inhdr = NULL;
798 	uint32_t next_command_ofs;
802 	if (size < (SMB2_HDR_BODY + 2)) {
803 		DEBUG(0,("Invalid SMB2 packet length count %ld\n", (long)size));
804 		return NT_STATUS_INVALID_PARAMETER;
809 	protocol_version = IVAL(inhdr, SMB2_HDR_PROTOCOL_ID);
810 	if (protocol_version != SMB2_MAGIC) {
811 		DEBUG(0,("Invalid SMB packet: protocol prefix: 0x%08X\n",
813 		return NT_STATUS_INVALID_PARAMETER;
816 	cmd = SVAL(inhdr, SMB2_HDR_OPCODE);
817 	if (cmd != SMB2_OP_NEGPROT) {
818 		DEBUG(0,("Invalid SMB packet: first request: 0x%04X\n",
820 		return NT_STATUS_INVALID_PARAMETER;
823 	next_command_ofs = IVAL(inhdr, SMB2_HDR_NEXT_COMMAND);
824 	if (next_command_ofs != 0) {
825 		DEBUG(0,("Invalid SMB packet: next_command: 0x%08X\n",
827 		return NT_STATUS_INVALID_PARAMETER;
830 	req = smbd_smb2_request_allocate(xconn);
832 		return NT_STATUS_NO_MEMORY;
/* Own a private copy of the PDU for the lifetime of the request. */
835 	inpdu = talloc_memdup(req, _inpdu, size);
837 		return NT_STATUS_NO_MEMORY;
840 	req->request_time = timeval_current();
841 	now = timeval_to_nttime(&req->request_time);
843 	status = smbd_smb2_inbuf_parse_compound(xconn,
847 						req, &req->in.vector,
848 						&req->in.vector_count);
849 	if (!NT_STATUS_IS_OK(status)) {
/* Vector index 0 is the transport header; PDUs start at 1. */
854 	req->current_idx = 1;
/*
 * Validate one sequence id against the credit window
 * [seq_low, seq_low + seq_range) and the replay bitmap.
 * Rejects ids below the window, beyond the granted range, or already
 * seen (duplicate message id). On acceptance the id is marked in the
 * bitmap; when the lowest id is consumed the window's low edge slides
 * forward over every contiguous already-seen id, clearing bitmap bits
 * and shrinking seq_range accordingly.
 */
860 static bool smb2_validate_sequence_number(struct smbXsrv_connection *xconn,
861 					  uint64_t message_id, uint64_t seq_id)
863 	struct bitmap *credits_bm = xconn->smb2.credits.bitmap;
867 	seq_tmp = xconn->smb2.credits.seq_low;
868 	if (seq_id < seq_tmp) {
869 		DBGC_ERR(DBGC_SMB2_CREDITS,
870 			 "smb2_validate_sequence_number: bad message_id "
871 			 "%llu (sequence id %llu) "
872 			 "(granted = %u, low = %llu, range = %u)\n",
873 			 (unsigned long long)message_id,
874 			 (unsigned long long)seq_id,
875 			 (unsigned int)xconn->smb2.credits.granted,
876 			 (unsigned long long)xconn->smb2.credits.seq_low,
877 			 (unsigned int)xconn->smb2.credits.seq_range);
881 	seq_tmp += xconn->smb2.credits.seq_range;
882 	if (seq_id >= seq_tmp) {
883 		DBGC_ERR(DBGC_SMB2_CREDITS,
884 			 "smb2_validate_sequence_number: bad message_id "
885 			 "%llu (sequence id %llu) "
886 			 "(granted = %u, low = %llu, range = %u)\n",
887 			 (unsigned long long)message_id,
888 			 (unsigned long long)seq_id,
889 			 (unsigned int)xconn->smb2.credits.granted,
890 			 (unsigned long long)xconn->smb2.credits.seq_low,
891 			 (unsigned int)xconn->smb2.credits.seq_range);
/* Bitmap position is the id modulo the maximum credit count. */
895 	offset = seq_id % xconn->smb2.credits.max;
897 	if (bitmap_query(credits_bm, offset)) {
898 		DBGC_ERR(DBGC_SMB2_CREDITS,
899 			 "smb2_validate_sequence_number: duplicate message_id "
900 			 "%llu (sequence id %llu) "
901 			 "(granted = %u, low = %llu, range = %u) "
903 			 (unsigned long long)message_id,
904 			 (unsigned long long)seq_id,
905 			 (unsigned int)xconn->smb2.credits.granted,
906 			 (unsigned long long)xconn->smb2.credits.seq_low,
907 			 (unsigned int)xconn->smb2.credits.seq_range,
912 	/* Mark the message_ids as seen in the bitmap. */
913 	bitmap_set(credits_bm, offset);
915 	if (seq_id != xconn->smb2.credits.seq_low) {
920 * Move the window forward by all the message_id's
923 	while (bitmap_query(credits_bm, offset)) {
924 		DBGC_DEBUG(DBGC_SMB2_CREDITS,
925 			   "smb2_validate_sequence_number: clearing "
926 			   "id %llu (position %u) from bitmap\n",
927 			   (unsigned long long)(xconn->smb2.credits.seq_low),
929 		bitmap_clear(credits_bm, offset);
931 		xconn->smb2.credits.seq_low += 1;
932 		xconn->smb2.credits.seq_range -= 1;
933 		offset = xconn->smb2.credits.seq_low % xconn->smb2.credits.max;
/*
 * Validate the message id of an inbound PDU against granted credits.
 * SMB2_CANCEL is exempt (cancels legitimately reuse message ids).
 * With multi-credit negotiated, the credit charge widens the check:
 * ids [message_id, message_id + charge - 1] are each validated and
 * marked via smb2_validate_sequence_number(). On success the charge
 * is subtracted from the granted-credit count.
 */
939 static bool smb2_validate_message_id(struct smbXsrv_connection *xconn,
940 				const uint8_t *inhdr)
942 	uint64_t message_id = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
943 	uint16_t opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
944 	uint16_t credit_charge = 1;
947 	if (opcode == SMB2_OP_CANCEL) {
948 		/* SMB2_CANCEL requests by definition resend messageids. */
952 	if (xconn->smb2.credits.multicredit) {
953 		credit_charge = SVAL(inhdr, SMB2_HDR_CREDIT_CHARGE);
954 		credit_charge = MAX(credit_charge, 1);
959 		("smb2_validate_message_id: mid %llu (charge %llu), "
960 		 "credits_granted %llu, "
961 		 "seqnum low/range: %llu/%llu\n",
962 		 (unsigned long long) message_id,
963 		 (unsigned long long) credit_charge,
964 		 (unsigned long long) xconn->smb2.credits.granted,
965 		 (unsigned long long) xconn->smb2.credits.seq_low,
966 		 (unsigned long long) xconn->smb2.credits.seq_range));
968 	if (xconn->smb2.credits.granted < credit_charge) {
969 		DBGC_ERR(DBGC_SMB2_CREDITS,
970 			 "smb2_validate_message_id: client used more "
971 			 "credits than granted, mid %llu, charge %llu, "
972 			 "credits_granted %llu, "
973 			 "seqnum low/range: %llu/%llu\n",
974 			 (unsigned long long) message_id,
975 			 (unsigned long long) credit_charge,
976 			 (unsigned long long) xconn->smb2.credits.granted,
977 			 (unsigned long long) xconn->smb2.credits.seq_low,
978 			 (unsigned long long) xconn->smb2.credits.seq_range);
983 * now check the message ids
985 * for multi-credit requests we need to check all current mid plus
986 * the implicit mids caused by the credit charge
987 * e.g. current mid = 15, charge 5 => mark 15-19 as used
990 	for (i = 0; i <= (credit_charge-1); i++) {
991 		uint64_t id = message_id + i;
996 			("Iterating mid %llu charge %u (sequence %llu)\n",
997 			 (unsigned long long)message_id,
999 			 (unsigned long long)id));
1001 		ok = smb2_validate_sequence_number(xconn, message_id, id);
1007 	/* subtract used credits */
1008 	xconn->smb2.credits.granted -= credit_charge;
/*
 * Validate the framing of every PDU in a (possibly compound) request:
 * at least one PDU group must exist, each header iovec must be exactly
 * SMB2_HDR_BODY bytes, each body at least 2 bytes, the header must
 * carry the SMB2 magic, and each message id must pass the credit/
 * replay checks. Returns NT_STATUS_INVALID_PARAMETER on any failure.
 */
1013 static NTSTATUS smbd_smb2_request_validate(struct smbd_smb2_request *req)
1018 	count = req->in.vector_count;
1020 	if (count < 1 + SMBD_SMB2_NUM_IOV_PER_REQ) {
1021 		/* It's not a SMB2 request */
1022 		return NT_STATUS_INVALID_PARAMETER;
1025 	for (idx=1; idx < count; idx += SMBD_SMB2_NUM_IOV_PER_REQ) {
1026 		struct iovec *hdr = SMBD_SMB2_IDX_HDR_IOV(req,in,idx);
1027 		struct iovec *body = SMBD_SMB2_IDX_BODY_IOV(req,in,idx);
1028 		const uint8_t *inhdr = NULL;
1030 		if (hdr->iov_len != SMB2_HDR_BODY) {
1031 			return NT_STATUS_INVALID_PARAMETER;
1034 		if (body->iov_len < 2) {
1035 			return NT_STATUS_INVALID_PARAMETER;
1038 		inhdr = (const uint8_t *)hdr->iov_base;
1040 		/* Check the SMB2 header */
1041 		if (IVAL(inhdr, SMB2_HDR_PROTOCOL_ID) != SMB2_MAGIC) {
1042 			return NT_STATUS_INVALID_PARAMETER;
1045 		if (!smb2_validate_message_id(req->xconn, inhdr)) {
1046 			return NT_STATUS_INVALID_PARAMETER;
1050 	return NT_STATUS_OK;
/*
 * Decide how many credits to grant in one response header.
 * Inputs: the matching request header (charge/requested credits) and
 * the response header (flags/status, and where the grant is written).
 * Grants nothing on async interim responses; otherwise grants the
 * charge plus additional credits (matching Windows Server 2016
 * behavior of granting up to the full request), clamped so the
 * sequence-number window can never wrap 64-bit space or exceed the
 * configured maximum. Updates the connection's granted/seq_range
 * accounting and terminates the connection if the client's charge
 * exceeds the maximum credits.
 */
1053 static void smb2_set_operation_credit(struct smbXsrv_connection *xconn,
1054 				      const struct iovec *in_vector,
1055 				      struct iovec *out_vector)
1057 	const uint8_t *inhdr = (const uint8_t *)in_vector->iov_base;
1058 	uint8_t *outhdr = (uint8_t *)out_vector->iov_base;
1059 	uint16_t credit_charge = 1;
1060 	uint16_t credits_requested;
1063 	NTSTATUS out_status;
1064 	uint16_t credits_granted = 0;
1065 	uint64_t credits_possible;
1066 	uint16_t current_max_credits;
1069 * first we grant only 1/16th of the max range.
1071 * Windows also starts with the 1/16th and then grants
1072 * more later. I was only able to trigger higher
1073 * values, when using a very high credit charge.
1075 * TODO: scale up depending on load, free memory
1077 * Maybe also on the relationship between number
1078 * of requests and the used sequence number.
1079 * Which means we would grant more credits
1080 * for client which use multi credit requests.
1082 * The above is what Windows Server < 2016 is doing,
1083 * but new servers use all credits (8192 by default).
1085 	current_max_credits = xconn->smb2.credits.max;
1086 	current_max_credits = MAX(current_max_credits, 1);
1088 	if (xconn->smb2.credits.multicredit) {
1089 		credit_charge = SVAL(inhdr, SMB2_HDR_CREDIT_CHARGE);
1090 		credit_charge = MAX(credit_charge, 1);
1093 	cmd = SVAL(inhdr, SMB2_HDR_OPCODE);
1094 	credits_requested = SVAL(inhdr, SMB2_HDR_CREDIT);
1095 	credits_requested = MAX(credits_requested, 1);
1096 	out_flags = IVAL(outhdr, SMB2_HDR_FLAGS);
1097 	out_status = NT_STATUS(IVAL(outhdr, SMB2_HDR_STATUS));
1099 	SMB_ASSERT(xconn->smb2.credits.max >= xconn->smb2.credits.granted);
1101 	if (xconn->smb2.credits.max < credit_charge) {
1102 		smbd_server_connection_terminate(xconn,
1103 			"client error: credit charge > max credits\n");
1107 	if (out_flags & SMB2_HDR_FLAG_ASYNC) {
1109 * In case we already send an async interim
1110 * response, we should not grant
1111 * credits on the final response.
1113 		credits_granted = 0;
1115 		uint16_t additional_possible =
1116 			xconn->smb2.credits.max - credit_charge;
1117 		uint16_t additional_max = 0;
1118 		uint16_t additional_credits = credits_requested - 1;
1121 		case SMB2_OP_NEGPROT:
1123 		case SMB2_OP_SESSSETUP:
1125 * Windows 2012 RC1 starts to grant
1126 * additional credits
1127 * with a successful session setup
1129 			if (NT_STATUS_IS_OK(out_status)) {
1130 				additional_max = xconn->smb2.credits.max;
1135 * Windows Server < 2016 and older Samba versions
1136 * used to only grant additional credits in
1137 * chunks of 32 credits.
1139 * But we match Windows Server 2016 and grant
1140 * all credits as requested.
1142 			additional_max = xconn->smb2.credits.max;
1146 		additional_max = MIN(additional_max, additional_possible);
1147 		additional_credits = MIN(additional_credits, additional_max);
1149 		credits_granted = credit_charge + additional_credits;
1153 * sequence numbers should not wrap
1155 * 1. calculate the possible credits until
1156 *    the sequence numbers start to wrap on 64-bit.
1158 * 2. UINT64_MAX is used for Break Notifications.
1160 * 2. truncate the possible credits to the maximum
1161 *    credits we want to grant to the client in total.
1163 * 3. remove the range we'll already granted to the client
1164 *    this makes sure the client consumes the lowest sequence
1165 *    number, before we can grant additional credits.
1167 	credits_possible = UINT64_MAX - xconn->smb2.credits.seq_low;
1168 	if (credits_possible > 0) {
1169 		/* remove UINT64_MAX */
1170 		credits_possible -= 1;
1172 	credits_possible = MIN(credits_possible, current_max_credits);
1173 	credits_possible -= xconn->smb2.credits.seq_range;
1175 	credits_granted = MIN(credits_granted, credits_possible);
1177 	SSVAL(outhdr, SMB2_HDR_CREDIT, credits_granted);
1178 	xconn->smb2.credits.granted += credits_granted;
1179 	xconn->smb2.credits.seq_range += credits_granted;
1181 	DBGC_DEBUG(DBGC_SMB2_CREDITS,
1182 		   "smb2_set_operation_credit: requested %u, charge %u, "
1183 		   "granted %u, current possible/max %u/%u, "
1184 		   "total granted/max/low/range %u/%u/%llu/%u\n",
1185 		   (unsigned int)credits_requested,
1186 		   (unsigned int)credit_charge,
1187 		   (unsigned int)credits_granted,
1188 		   (unsigned int)credits_possible,
1189 		   (unsigned int)current_max_credits,
1190 		   (unsigned int)xconn->smb2.credits.granted,
1191 		   (unsigned int)xconn->smb2.credits.max,
1192 		   (unsigned long long)xconn->smb2.credits.seq_low,
1193 		   (unsigned int)xconn->smb2.credits.seq_range);
/*
 * Assign credit grants across all PDUs of a compound response.
 * Each PDU is granted via smb2_set_operation_credit(); then, matching
 * Windows behavior, the per-PDU grants are summed and written only
 * into the final response header, with earlier headers zeroed.
 */
1196 static void smb2_calculate_credits(const struct smbd_smb2_request *inreq,
1197 				   struct smbd_smb2_request *outreq)
1200 	uint16_t total_credits = 0;
1202 	count = outreq->out.vector_count;
1204 	for (idx=1; idx < count; idx += SMBD_SMB2_NUM_IOV_PER_REQ) {
1205 		struct iovec *inhdr_v = SMBD_SMB2_IDX_HDR_IOV(inreq,in,idx);
1206 		struct iovec *outhdr_v = SMBD_SMB2_IDX_HDR_IOV(outreq,out,idx);
1207 		uint8_t *outhdr = (uint8_t *)outhdr_v->iov_base;
1209 		smb2_set_operation_credit(outreq->xconn, inhdr_v, outhdr_v);
1211 		/* To match Windows, count up what we
1213 		total_credits += SVAL(outhdr, SMB2_HDR_CREDIT);
1214 		/* Set to zero in all but the last reply. */
1215 		if (idx + SMBD_SMB2_NUM_IOV_PER_REQ < count) {
1216 			SSVAL(outhdr, SMB2_HDR_CREDIT, 0);
1218 			SSVAL(outhdr, SMB2_HDR_CREDIT, total_credits);
/*
 * Return a body buffer for the response: for the first PDU of a
 * request a small fixed buffer embedded in the request can be used
 * when it is big enough; otherwise (compound follow-ups or larger
 * bodies) a talloc'd blob parented to the request is allocated.
 */
1223 DATA_BLOB smbd_smb2_generate_outbody(struct smbd_smb2_request *req, size_t size)
1225 	if (req->current_idx <= 1) {
1226 		if (size <= sizeof(req->out._body)) {
1227 			return data_blob_const(req->out._body, size);
1231 	return data_blob_talloc(req, NULL, size);
/*
 * Build the output vector for a request, mirroring the input layout:
 * vector[0] is the 4-byte NBT header, then per input PDU a group of
 * TF/HDR/BODY/DYN iovecs. Each response header is pre-populated as an
 * error reply (status NT_STATUS_INTERNAL_ERROR, 8+1 byte error body,
 * SMB2_HDR_FLAG_REDIRECT set, ids copied from the request) so that
 * later per-opcode processing only has to overwrite the success case.
 * Finally the NBT length is computed and the request is queued on the
 * connection. The small fixed _vector/_hdr buffers are used when the
 * PDU count fits, otherwise talloc'd arrays are allocated.
 */
1234 static NTSTATUS smbd_smb2_request_setup_out(struct smbd_smb2_request *req)
1236 	struct smbXsrv_connection *xconn = req->xconn;
1237 	TALLOC_CTX *mem_ctx;
1238 	struct iovec *vector;
1243 	count = req->in.vector_count;
1244 	if (count <= ARRAY_SIZE(req->out._vector)) {
1246 		vector = req->out._vector;
1248 		vector = talloc_zero_array(req, struct iovec, count);
1249 		if (vector == NULL) {
1250 			return NT_STATUS_NO_MEMORY;
1255 	vector[0].iov_base = req->out.nbt_hdr;
1256 	vector[0].iov_len = 4;
1257 	SIVAL(req->out.nbt_hdr, 0, 0);
1259 	for (idx=1; idx < count; idx += SMBD_SMB2_NUM_IOV_PER_REQ) {
1260 		struct iovec *inhdr_v = SMBD_SMB2_IDX_HDR_IOV(req,in,idx);
1261 		const uint8_t *inhdr = (const uint8_t *)inhdr_v->iov_base;
1262 		uint8_t *outhdr = NULL;
1263 		uint8_t *outbody = NULL;
1264 		uint32_t next_command_ofs = 0;
1265 		struct iovec *current = &vector[idx];
1267 		if ((idx + SMBD_SMB2_NUM_IOV_PER_REQ) < count) {
1268 			/* we have a next command -
1269 			 * setup for the error case. */
1270 			next_command_ofs = SMB2_HDR_BODY + 9;
1274 			outhdr = req->out._hdr;
1276 			outhdr = talloc_zero_array(mem_ctx, uint8_t,
1278 			if (outhdr == NULL) {
1279 				return NT_STATUS_NO_MEMORY;
1283 		outbody = outhdr + SMB2_HDR_BODY;
1286 * SMBD_SMB2_TF_IOV_OFS might be used later
1288 		current[SMBD_SMB2_TF_IOV_OFS].iov_base = NULL;
1289 		current[SMBD_SMB2_TF_IOV_OFS].iov_len = 0;
1291 		current[SMBD_SMB2_HDR_IOV_OFS].iov_base = (void *)outhdr;
1292 		current[SMBD_SMB2_HDR_IOV_OFS].iov_len = SMB2_HDR_BODY;
1294 		current[SMBD_SMB2_BODY_IOV_OFS].iov_base = (void *)outbody;
1295 		current[SMBD_SMB2_BODY_IOV_OFS].iov_len = 8;
1297 		current[SMBD_SMB2_DYN_IOV_OFS].iov_base = NULL;
1298 		current[SMBD_SMB2_DYN_IOV_OFS].iov_len = 0;
1300 		/* setup the SMB2 header */
1301 		SIVAL(outhdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
1302 		SSVAL(outhdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
1303 		SSVAL(outhdr, SMB2_HDR_CREDIT_CHARGE,
1304 		      SVAL(inhdr, SMB2_HDR_CREDIT_CHARGE));
1305 		SIVAL(outhdr, SMB2_HDR_STATUS,
1306 		      NT_STATUS_V(NT_STATUS_INTERNAL_ERROR));
1307 		SSVAL(outhdr, SMB2_HDR_OPCODE,
1308 		      SVAL(inhdr, SMB2_HDR_OPCODE));
1309 		SIVAL(outhdr, SMB2_HDR_FLAGS,
1310 		      IVAL(inhdr, SMB2_HDR_FLAGS) | SMB2_HDR_FLAG_REDIRECT);
1311 		SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, next_command_ofs);
1312 		SBVAL(outhdr, SMB2_HDR_MESSAGE_ID,
1313 		      BVAL(inhdr, SMB2_HDR_MESSAGE_ID));
1314 		SIVAL(outhdr, SMB2_HDR_PID,
1315 		      IVAL(inhdr, SMB2_HDR_PID));
1316 		SIVAL(outhdr, SMB2_HDR_TID,
1317 		      IVAL(inhdr, SMB2_HDR_TID));
1318 		SBVAL(outhdr, SMB2_HDR_SESSION_ID,
1319 		      BVAL(inhdr, SMB2_HDR_SESSION_ID));
1320 		memcpy(outhdr + SMB2_HDR_SIGNATURE,
1321 		       inhdr + SMB2_HDR_SIGNATURE, 16);
1323 		/* setup error body header */
1324 		SSVAL(outbody, 0x00, 0x08 + 1);
1325 		SSVAL(outbody, 0x02, 0);
1326 		SIVAL(outbody, 0x04, 0);
1329 	req->out.vector = vector;
1330 	req->out.vector_count = count;
1332 	/* setup the length of the NBT packet */
1333 	ok = smb2_setup_nbt_length(req->out.vector, req->out.vector_count);
1335 		return NT_STATUS_INVALID_PARAMETER_MIX;
1338 	DLIST_ADD_END(xconn->smb2.requests, req);
1340 	return NT_STATUS_OK;
/*
 * Report whether "server multi channel support" is effectively
 * enabled. On platforms without __ALLOW_MULTI_CHANNEL_SUPPORT (no
 * kernel API to query un-acked socket bytes) the option is refused
 * unless force:server multi channel support is set, and a notice
 * pointing at bug #11897 is logged when it was explicitly enabled.
 */
1343 bool smbXsrv_server_multi_channel_enabled(void)
1345 	bool enabled = lp_server_multi_channel_support();
1346 #ifndef __ALLOW_MULTI_CHANNEL_SUPPORT
1347 	bool forced = false;
1348 	struct loadparm_context *lp_ctx = loadparm_init_s3(NULL, loadparm_s3_helpers());
1349 	bool unspecified = lpcfg_parm_is_unspecified(lp_ctx, "server multi channel support");
1354 * If we don't have support from the kernel
1355 * to ask for the un-acked number of bytes
1356 * in the socket send queue, we better
1357 * don't support multi-channel.
1359 	forced = lp_parm_bool(-1, "force", "server multi channel support", false);
1360 	if (enabled && !forced) {
1361 		D_NOTICE("'server multi channel support' enabled "
1362 			 "but not supported on %s (%s)\n",
1363 			 SYSTEM_UNAME_SYSNAME, SYSTEM_UNAME_RELEASE);
1364 		DEBUGADD(DBGLVL_NOTICE, ("Please report this on "
1365 			 "https://bugzilla.samba.org/show_bug.cgi?id=11897\n"));
1368 	TALLOC_FREE(lp_ctx);
1369 #endif /* ! __ALLOW_MULTI_CHANNEL_SUPPORT */
/*
 * Return the connection's TCP retransmission timeout in microseconds.
 * Uses getsockopt(TCP_INFO) where __HAVE_TCP_INFO_RTO is defined,
 * otherwise defaults to 1 second; the result is clamped to the
 * [0.2s, 1.0s] range. Returns a mapped NTSTATUS on getsockopt failure.
 */
1373 static NTSTATUS smbXsrv_connection_get_rto_usecs(struct smbXsrv_connection *xconn,
1374 						 uint32_t *_rto_usecs)
1377 * Define an Retransmission Timeout
1378 * of 1 second, if there's no way for the
1379 * kernel to tell us the current value.
1381 	uint32_t rto_usecs = 1000000;
1383 #ifdef __HAVE_TCP_INFO_RTO
1385 	struct tcp_info info;
1386 	socklen_t ilen = sizeof(info);
1390 	ret = getsockopt(xconn->transport.sock,
1391 			 IPPROTO_TCP, TCP_INFO,
1392 			 (void *)&info, &ilen);
1394 		int saved_errno = errno;
1395 		NTSTATUS status = map_nt_error_from_unix(errno);
1396 		DBG_ERR("getsockopt(TCP_INFO) errno[%d/%s] -s %s\n",
1397 			saved_errno, strerror(saved_errno),
1402 	DBG_DEBUG("tcpi_rto[%u] tcpi_rtt[%u] tcpi_rttvar[%u]\n",
1403 		  (unsigned)info.tcpi_rto,
1404 		  (unsigned)info.tcpi_rtt,
1405 		  (unsigned)info.tcpi_rttvar);
1406 	rto_usecs = info.tcpi_rto;
1408 #endif /* __HAVE_TCP_INFO_RTO */
1410 	rto_usecs = MAX(rto_usecs, 200000); /* at least 0.2s */
1411 	rto_usecs = MIN(rto_usecs, 1000000); /* at max   1.0s */
1412 	*_rto_usecs = rto_usecs;
1413 	return NT_STATUS_OK;
/*
 * Compute how many bytes the peer has acked since the previous call,
 * by diffing the kernel's socket send-queue size against the cached
 * xconn->ack.unacked_bytes value (which is updated here).
 *
 * @param xconn         connection to query
 * @param _acked_bytes  out: bytes acked since the last call
 * @return NT_STATUS_OK; a mapped errno on ioctl() failure;
 *         NT_STATUS_INTERNAL_ERROR on implausible kernel values
 */
1416 static NTSTATUS smbXsrv_connection_get_acked_bytes(struct smbXsrv_connection *xconn,
1417 uint64_t *_acked_bytes)
1420 * Unless the kernel has an interface
1421 * to reveal the number of un-acked bytes
1422 * in the socket send queue, we'll assume
1423 * everything is already acked.
1425 * But that would mean that we better don't
1426 * pretend to support multi-channel.
1428 uint64_t unacked_bytes = 0;
/* Test hook: pretend nothing was ever acked to simulate a dead channel. */
1432 if (xconn->ack.force_unacked_timeout) {
1434 * Smbtorture tries to test channel failures...
1435 * Just pretend nothing was acked...
1437 DBG_INFO("Simulating channel failure: "
1438 "xconn->ack.unacked_bytes[%llu]\n",
1439 (unsigned long long)xconn->ack.unacked_bytes);
1440 return NT_STATUS_OK;
1443 #ifdef __IOCTL_SEND_QUEUE_SIZE_OPCODE
1449 * If we have kernel support to get
1450 * the number of bytes waiting in
1451 * the socket's send queue, we
1452 * use that in order to find out
1453 * the number of unacked bytes.
1455 ret = ioctl(xconn->transport.sock,
1456 __IOCTL_SEND_QUEUE_SIZE_OPCODE,
1459 int saved_errno = errno;
1460 NTSTATUS status = map_nt_error_from_unix(saved_errno);
1461 DBG_ERR("Failed to get the SEND_QUEUE_SIZE - "
1462 "errno %d (%s) - %s\n",
1463 saved_errno, strerror(saved_errno),
/* A negative queue size from the kernel makes no sense — bail out. */
1469 DBG_ERR("xconn->ack.unacked_bytes[%llu] value[%d]\n",
1470 (unsigned long long)xconn->ack.unacked_bytes,
1472 return NT_STATUS_INTERNAL_ERROR;
1474 unacked_bytes = value;
/* First call in this cycle: just prime the cached counter. */
1477 if (xconn->ack.unacked_bytes == 0) {
1478 xconn->ack.unacked_bytes = unacked_bytes;
1479 return NT_STATUS_OK;
/* The unacked count can only shrink between calls (nothing new is
 * sent by us here); growth indicates an inconsistency. */
1482 if (xconn->ack.unacked_bytes < unacked_bytes) {
1483 DBG_ERR("xconn->ack.unacked_bytes[%llu] unacked_bytes[%llu]\n",
1484 (unsigned long long)xconn->ack.unacked_bytes,
1485 (unsigned long long)unacked_bytes);
1486 return NT_STATUS_INTERNAL_ERROR;
/* Delta between previous and current queue depth = freshly acked bytes. */
1489 *_acked_bytes = xconn->ack.unacked_bytes - unacked_bytes;
1490 xconn->ack.unacked_bytes = unacked_bytes;
1491 return NT_STATUS_OK;
/*
 * Fail every entry in a send queue that is waiting for an ack:
 * each entry is unlinked and its pending tevent request (if any)
 * is completed with the given error status.
 *
 * @param queue   head pointer of the send queue (modified in place)
 * @param status  error delivered to each waiter
 */
1494 static void smbd_smb2_send_queue_ack_fail(struct smbd_smb2_send_queue **queue,
1497 struct smbd_smb2_send_queue *e = NULL;
1498 struct smbd_smb2_send_queue *n = NULL;
/* Use a saved 'next' pointer as entries are removed while iterating. */
1500 for (e = *queue; e != NULL; e = n) {
1503 DLIST_REMOVE(*queue, e);
1504 if (e->ack.req != NULL) {
1505 tevent_req_nterror(e->ack.req, status);
/*
 * Credit freshly acked bytes against the queue's ack waiters.
 *
 * Entries whose required_acked_bytes are fully covered are completed
 * and removed; otherwise the remaining requirement is reduced.
 * Returns NT_STATUS_IO_TIMEOUT if a still-pending entry has passed
 * its ack timeout, NT_STATUS_OK otherwise.
 */
1510 static NTSTATUS smbd_smb2_send_queue_ack_bytes(struct smbd_smb2_send_queue **queue,
1511 uint64_t acked_bytes)
1513 struct smbd_smb2_send_queue *e = NULL;
1514 struct smbd_smb2_send_queue *n = NULL;
/* Saved 'next' pointer — completed entries are unlinked mid-loop. */
1516 for (e = *queue; e != NULL; e = n) {
/* Entries without a waiter don't participate in ack accounting. */
1521 if (e->ack.req == NULL) {
1525 if (e->ack.required_acked_bytes <= acked_bytes) {
1526 e->ack.required_acked_bytes = 0;
1527 DLIST_REMOVE(*queue, e);
1528 tevent_req_done(e->ack.req);
/* Not fully covered yet: consume the credit and check the deadline. */
1531 e->ack.required_acked_bytes -= acked_bytes;
1533 expired = timeval_expired(&e->ack.timeout);
1535 return NT_STATUS_IO_TIMEOUT;
1539 return NT_STATUS_OK;
/*
 * Poll the kernel for newly acked bytes and distribute them to both
 * the ack queue and the regular send queue (the latter gets 0 new
 * bytes — it is only checked for expired timeouts).
 *
 * @return NT_STATUS_OK or the first failing status
 */
1542 static NTSTATUS smbd_smb2_check_ack_queue(struct smbXsrv_connection *xconn)
1544 uint64_t acked_bytes = 0;
1547 status = smbXsrv_connection_get_acked_bytes(xconn, &acked_bytes);
1548 if (!NT_STATUS_IS_OK(status)) {
1552 status = smbd_smb2_send_queue_ack_bytes(&xconn->ack.queue, acked_bytes);
1553 if (!NT_STATUS_IS_OK(status)) {
/* 0 acked bytes: only timeout expiry is checked on the send queue. */
1557 status = smbd_smb2_send_queue_ack_bytes(&xconn->smb2.send_queue, 0);
1558 if (!NT_STATUS_IS_OK(status)) {
1562 return NT_STATUS_OK;
/*
 * Periodic wakeup callback that drives ack-queue checking.
 *
 * Runs smbd_smb2_check_ack_queue() and re-arms itself one RTO
 * interval later; any failure terminates the connection.
 */
1565 static void smbXsrv_connection_ack_checker(struct tevent_req *subreq)
1567 struct smbXsrv_connection *xconn =
1568 tevent_req_callback_data(subreq,
1569 struct smbXsrv_connection);
1570 struct smbXsrv_client *client = xconn->client;
1571 struct timeval next_check;
/* Clear the stored subreq before receiving it — it is being consumed. */
1575 xconn->ack.checker_subreq = NULL;
1577 ok = tevent_wakeup_recv(subreq);
1578 TALLOC_FREE(subreq);
1580 smbd_server_connection_terminate(xconn,
1581 "tevent_wakeup_recv() failed");
1585 status = smbd_smb2_check_ack_queue(xconn);
1586 if (!NT_STATUS_IS_OK(status)) {
1587 smbd_server_connection_terminate(xconn, nt_errstr(status));
/* Re-arm: next check one retransmission timeout from now. */
1591 next_check = timeval_current_ofs_usec(xconn->ack.rto_usecs);
1592 xconn->ack.checker_subreq = tevent_wakeup_send(xconn,
1595 if (xconn->ack.checker_subreq == NULL) {
1596 smbd_server_connection_terminate(xconn,
1597 "tevent_wakeup_send() failed");
1600 tevent_req_set_callback(xconn->ack.checker_subreq,
1601 smbXsrv_connection_ack_checker,
/*
 * Start or stop the per-connection ack-checker cycle whenever the
 * client's set of pending oplock/lease breaks changes.
 *
 * With no pending breaks the checker timers are cancelled; with a new
 * first pending break, each connection fetches its RTO, resets its
 * unacked-bytes baseline and arms a wakeup timer.
 *
 * @return NT_STATUS_OK, NT_STATUS_NO_MEMORY, or a status from the
 *         RTO / acked-bytes helpers
 */
1605 static NTSTATUS smbXsrv_client_pending_breaks_updated(struct smbXsrv_client *client)
1607 struct smbXsrv_connection *xconn = NULL;
1609 for (xconn = client->connections; xconn != NULL; xconn = xconn->next) {
1610 struct timeval next_check;
1611 uint64_t acked_bytes = 0;
1615 * A new 'pending break cycle' starts
1616 * with a first pending break and lasts until
1617 * all pending breaks are finished.
1619 * This is typically a very short time,
1620 * the value of one retransmission timeout.
1623 if (client->pending_breaks == NULL) {
1625 * No more pending breaks, remove a pending
1628 TALLOC_FREE(xconn->ack.checker_subreq);
/* Checker already armed: the current cycle continues unchanged. */
1632 if (xconn->ack.checker_subreq != NULL) {
1634 * The cycle already started =>
1641 * Get the current retransmission timeout value.
1643 * It may change over time, but fetching it once
1644 * per 'pending break' cycled should be enough.
1646 status = smbXsrv_connection_get_rto_usecs(xconn,
1647 &xconn->ack.rto_usecs);
1648 if (!NT_STATUS_IS_OK(status)) {
1653 * At the start of the cycle we reset the
1654 * unacked_bytes counter (first to 0 and
1655 * within smbXsrv_connection_get_acked_bytes()
1656 * to the current value in the kernel
1659 xconn->ack.unacked_bytes = 0;
1660 status = smbXsrv_connection_get_acked_bytes(xconn, &acked_bytes);
1661 if (!NT_STATUS_IS_OK(status)) {
1666 * We setup a timer in order to check for
1667 * acked bytes after one retransmission timeout.
1669 * The code that sets up the send_queue.ack.timeout
1670 * uses a multiple of the retransmission timeout.
1672 next_check = timeval_current_ofs_usec(xconn->ack.rto_usecs);
1673 xconn->ack.checker_subreq = tevent_wakeup_send(xconn,
1676 if (xconn->ack.checker_subreq == NULL) {
1677 return NT_STATUS_NO_MEMORY;
1679 tevent_req_set_callback(xconn->ack.checker_subreq,
1680 smbXsrv_connection_ack_checker,
1684 return NT_STATUS_OK;
/*
 * Mark the connection's transport as dead: record the error status,
 * drop the fd event and socket, and fail all ack/send-queue waiters.
 *
 * Idempotent — a second call on an already-failed transport returns
 * early, so callers may invoke it defensively.
 */
1687 void smbXsrv_connection_disconnect_transport(struct smbXsrv_connection *xconn,
/* Already disconnected: keep the first recorded error status. */
1690 if (!NT_STATUS_IS_OK(xconn->transport.status)) {
1694 xconn->transport.status = status;
1695 TALLOC_FREE(xconn->transport.fde);
1696 if (xconn->transport.sock != -1) {
1697 xconn->transport.sock = -1;
/* Wake every pending waiter with the disconnect status. */
1699 smbd_smb2_send_queue_ack_fail(&xconn->ack.queue, status);
1700 smbd_smb2_send_queue_ack_fail(&xconn->smb2.send_queue, status);
1701 xconn->smb2.send_queue_len = 0;
1702 DO_PROFILE_INC(disconnect);
/*
 * Count the client's connections whose transport is still healthy
 * (transport.status is NT_STATUS_OK).
 */
1705 size_t smbXsrv_client_valid_connections(struct smbXsrv_client *client)
1707 struct smbXsrv_connection *xconn = NULL;
1710 for (xconn = client->connections; xconn != NULL; xconn = xconn->next) {
1711 if (NT_STATUS_IS_OK(xconn->transport.status)) {
/*
 * Private state of the async connection-shutdown request; keeps a
 * pointer to the connection being torn down so the recv side can
 * assert it is still valid.
 */
1719 struct smbXsrv_connection_shutdown_state {
1720 struct smbXsrv_connection *xconn;
1723 static void smbXsrv_connection_shutdown_wait_done(struct tevent_req *subreq);
/*
 * Begin an orderly async shutdown of one connection.
 *
 * Preconditions (asserted): the transport was already disconnected
 * and marked terminating, and no shutdown is in flight.  A wait queue
 * is created; every in-flight SMB2 request blocks it until freed, then
 * sessions are disconnected from this channel.  The request completes
 * once the wait queue drains (immediately, if nothing is pending).
 */
1725 static struct tevent_req *smbXsrv_connection_shutdown_send(TALLOC_CTX *mem_ctx,
1726 struct tevent_context *ev,
1727 struct smbXsrv_connection *xconn)
1729 struct tevent_req *req = NULL;
1730 struct smbXsrv_connection_shutdown_state *state = NULL;
1731 struct tevent_req *subreq = NULL;
1733 struct smbd_smb2_request *preq = NULL;
1737 * The caller should have called
1738 * smbXsrv_connection_disconnect_transport() before.
1740 SMB_ASSERT(!NT_STATUS_IS_OK(xconn->transport.status));
1741 SMB_ASSERT(xconn->transport.terminating);
1742 SMB_ASSERT(xconn->transport.shutdown_wait_queue == NULL);
1744 req = tevent_req_create(mem_ctx, &state,
1745 struct smbXsrv_connection_shutdown_state);
1750 state->xconn = xconn;
/* Defer callbacks so completion never re-enters the caller directly. */
1751 tevent_req_defer_callback(req, ev);
1753 xconn->transport.shutdown_wait_queue =
1754 tevent_queue_create(state, "smbXsrv_connection_shutdown_queue");
1755 if (tevent_req_nomem(xconn->transport.shutdown_wait_queue, req)) {
1756 return tevent_req_post(req, ev);
1759 for (preq = xconn->smb2.requests; preq != NULL; preq = preq->next) {
1761 * Now wait until the request is finished.
1763 * We don't set a callback, as we just want to block the
1764 * wait queue and the talloc_free() of the request will
1765 * remove the item from the wait queue.
1767 * Note that we don't cancel the requests here
1768 * in order to keep the replay detection logic correct.
1770 * However if we teardown the last channel of
1771 * a connection, we'll call some logic via
1772 * smbXsrv_session_disconnect_xconn()
1773 * -> smbXsrv_session_disconnect_xconn_callback()
1774 * -> smbXsrv_session_remove_channel()
1775 * -> smb2srv_session_shutdown_send()
1776 * will indeed cancel the request.
1778 subreq = tevent_queue_wait_send(preq, ev,
1779 xconn->transport.shutdown_wait_queue);
1780 if (tevent_req_nomem(subreq, req)) {
1781 return tevent_req_post(req, ev);
1786 * This may attach sessions with num_channels == 0
1787 * to xconn->transport.shutdown_wait_queue.
1789 status = smbXsrv_session_disconnect_xconn(xconn);
1790 if (tevent_req_nterror(req, status)) {
1791 return tevent_req_post(req, ev);
/* Nothing blocking the queue: complete immediately. */
1794 len = tevent_queue_length(xconn->transport.shutdown_wait_queue);
1796 tevent_req_done(req);
1797 return tevent_req_post(req, ev);
1801 * Now we add our own waiter to the end of the queue,
1802 * this way we get notified when all pending requests are finished
1803 * and send to the socket.
1805 subreq = tevent_queue_wait_send(state, ev, xconn->transport.shutdown_wait_queue);
1806 if (tevent_req_nomem(subreq, req)) {
1807 return tevent_req_post(req, ev);
1809 tevent_req_set_callback(subreq, smbXsrv_connection_shutdown_wait_done, req);
/*
 * Completion callback: the shutdown wait queue has fully drained,
 * so finish the shutdown request.
 */
1814 static void smbXsrv_connection_shutdown_wait_done(struct tevent_req *subreq)
1816 struct tevent_req *req =
1817 tevent_req_callback_data(subreq,
1819 struct smbXsrv_connection_shutdown_state *state =
1820 tevent_req_data(req,
1821 struct smbXsrv_connection_shutdown_state);
1822 struct smbXsrv_connection *xconn = state->xconn;
1824 tevent_queue_wait_recv(subreq);
1825 TALLOC_FREE(subreq);
1827 tevent_req_done(req);
1829 * make sure the xconn pointer is still valid,
1830 * it should as we used tevent_req_defer_callback()
1832 SMB_ASSERT(xconn->transport.terminating);
/*
 * Receive side of the connection shutdown request: sanity-check the
 * connection is still in the terminating state, then return the
 * request's final status.
 */
1835 static NTSTATUS smbXsrv_connection_shutdown_recv(struct tevent_req *req)
1837 struct smbXsrv_connection_shutdown_state *state =
1838 tevent_req_data(req,
1839 struct smbXsrv_connection_shutdown_state);
1840 struct smbXsrv_connection *xconn = state->xconn;
1842 * make sure the xconn pointer is still valid,
1843 * it should as we used tevent_req_defer_callback()
1845 SMB_ASSERT(xconn->transport.terminating);
1846 return tevent_req_simple_recv_ntstatus(req);
/*
 * Final step of terminating one channel: collect the shutdown result
 * (exiting the server on failure) and unlink the connection from the
 * client's connection list.
 */
1849 static void smbd_server_connection_terminate_done(struct tevent_req *subreq)
1851 struct smbXsrv_connection *xconn =
1852 tevent_req_callback_data(subreq,
1853 struct smbXsrv_connection);
1854 struct smbXsrv_client *client = xconn->client;
1857 status = smbXsrv_connection_shutdown_recv(subreq);
1858 TALLOC_FREE(subreq);
1859 if (!NT_STATUS_IS_OK(status)) {
1860 exit_server("smbXsrv_connection_shutdown_recv failed");
1863 DLIST_REMOVE(client->connections, xconn);
/*
 * Terminate one connection (channel) of a multi-channel client.
 *
 * Disconnects the transport (idempotent), guards against recursive
 * invocation via transport.terminating, and either starts the async
 * per-channel shutdown, disconnects the whole client when a movable
 * cluster IP is involved, or exits cleanly when this was the last
 * healthy connection.
 *
 * @param reason    human-readable cause, used for logging/exit
 * @param location  caller location, for the debug message
 */
1867 void smbd_server_connection_terminate_ex(struct smbXsrv_connection *xconn,
1869 const char *location)
1871 struct smbXsrv_client *client = xconn->client;
1875 * Make sure that no new request will be able to use this session.
1877 * smbXsrv_connection_disconnect_transport() might be called already,
1878 * but calling it again is a no-op.
1880 smbXsrv_connection_disconnect_transport(xconn,
1881 NT_STATUS_CONNECTION_DISCONNECTED);
1883 num_ok = smbXsrv_client_valid_connections(client);
/* Re-entrancy guard: termination for this xconn is already running. */
1885 if (xconn->transport.terminating) {
1886 DBG_DEBUG("skip recursion conn[%s] num_ok[%zu] reason[%s] at %s\n",
1887 smbXsrv_connection_dbg(xconn), num_ok,
1891 xconn->transport.terminating = true;
1893 DBG_DEBUG("conn[%s] num_ok[%zu] reason[%s] at %s\n",
1894 smbXsrv_connection_dbg(xconn), num_ok,
1897 if (xconn->has_cluster_movable_ip) {
1899 * If the connection has a movable cluster public address
1900 * we disconnect all client connections,
1901 * as the public address might be moved to
1904 * In future we may recheck which node currently
1905 * holds this address, but for now we keep it simple.
1907 smbd_server_disconnect_client_ex(xconn->client,
/* Other healthy channels remain: tear down only this one, async. */
1914 struct tevent_req *subreq = NULL;
1916 subreq = smbXsrv_connection_shutdown_send(client,
1919 if (subreq == NULL) {
1920 exit_server("smbXsrv_connection_shutdown_send failed");
1922 tevent_req_set_callback(subreq,
1923 smbd_server_connection_terminate_done,
1929 * The last connection was disconnected
1931 exit_server_cleanly(reason);
/*
 * Disconnect the whole client (all channels) and exit this smbd
 * process cleanly, logging the reason and caller location.
 */
1934 void smbd_server_disconnect_client_ex(struct smbXsrv_client *client,
1936 const char *location)
1940 num_ok = smbXsrv_client_valid_connections(client);
1942 DBG_WARNING("client[%s] num_ok[%zu] reason[%s] at %s\n",
1943 client->global->remote_address, num_ok,
1947 * Something bad happened we need to disconnect all connections.
1949 exit_server_cleanly(reason);
/*
 * Deep-copy one 4-iovec SMB2 response group (TF, HDR, BODY, DYN)
 * from srcvec into outvec, allocating the duplicates on ctx.
 *
 * The "standard" layout — body directly after the header, dyn directly
 * after the 8-byte body — is recreated by pointing into the duplicated
 * header buffer instead of allocating separately.
 *
 * @return true on success, false on validation or allocation failure
 */
1952 static bool dup_smb2_vec4(TALLOC_CTX *ctx,
1953 struct iovec *outvec,
1954 const struct iovec *srcvec)
1956 const uint8_t *srctf;
1958 const uint8_t *srchdr;
1960 const uint8_t *srcbody;
1962 const uint8_t *expected_srcbody;
1963 const uint8_t *srcdyn;
1965 const uint8_t *expected_srcdyn;
1971 srctf = (const uint8_t *)srcvec[SMBD_SMB2_TF_IOV_OFS].iov_base;
1972 srctf_len = srcvec[SMBD_SMB2_TF_IOV_OFS].iov_len;
1973 srchdr = (const uint8_t *)srcvec[SMBD_SMB2_HDR_IOV_OFS].iov_base;
1974 srchdr_len = srcvec[SMBD_SMB2_HDR_IOV_OFS].iov_len;
1975 srcbody = (const uint8_t *)srcvec[SMBD_SMB2_BODY_IOV_OFS].iov_base;
1976 srcbody_len = srcvec[SMBD_SMB2_BODY_IOV_OFS].iov_len;
1977 expected_srcbody = srchdr + SMB2_HDR_BODY;
1978 srcdyn = (const uint8_t *)srcvec[SMBD_SMB2_DYN_IOV_OFS].iov_base;
1979 srcdyn_len = srcvec[SMBD_SMB2_DYN_IOV_OFS].iov_len;
1980 expected_srcdyn = srcbody + 8;
/* The transform header is either absent (0) or exactly full size. */
1982 if ((srctf_len != SMB2_TF_HDR_SIZE) && (srctf_len != 0)) {
1986 if (srchdr_len != SMB2_HDR_BODY) {
1990 if (srctf_len == SMB2_TF_HDR_SIZE) {
1991 dsttf = talloc_memdup(ctx, srctf, SMB2_TF_HDR_SIZE);
1992 if (dsttf == NULL) {
1998 outvec[SMBD_SMB2_TF_IOV_OFS].iov_base = (void *)dsttf;
1999 outvec[SMBD_SMB2_TF_IOV_OFS].iov_len = srctf_len;
2001 /* vec[SMBD_SMB2_HDR_IOV_OFS] is always boilerplate and must
2002 * be allocated with size OUTVEC_ALLOC_SIZE. */
2004 dsthdr = talloc_memdup(ctx, srchdr, OUTVEC_ALLOC_SIZE);
2005 if (dsthdr == NULL) {
2008 outvec[SMBD_SMB2_HDR_IOV_OFS].iov_base = (void *)dsthdr;
2009 outvec[SMBD_SMB2_HDR_IOV_OFS].iov_len = SMB2_HDR_BODY;
2012 * If this is a "standard" vec[SMBD_SMB2_BODY_IOV_OFS] of length 8,
2013 * pointing to srcvec[SMBD_SMB2_HDR_IOV_OFS].iov_base + SMB2_HDR_BODY,
2014 * then duplicate this. Else use talloc_memdup().
2017 if ((srcbody == expected_srcbody) && (srcbody_len == 8)) {
2018 dstbody = dsthdr + SMB2_HDR_BODY;
2020 dstbody = talloc_memdup(ctx, srcbody, srcbody_len);
2021 if (dstbody == NULL) {
2025 outvec[SMBD_SMB2_BODY_IOV_OFS].iov_base = (void *)dstbody;
2026 outvec[SMBD_SMB2_BODY_IOV_OFS].iov_len = srcbody_len;
2029 * If this is a "standard" vec[SMBD_SMB2_DYN_IOV_OFS] of length 1,
2031 * srcvec[SMBD_SMB2_HDR_IOV_OFS].iov_base + 8
2032 * then duplicate this. Else use talloc_memdup().
2035 if ((srcdyn == expected_srcdyn) && (srcdyn_len == 1)) {
2036 dstdyn = dsthdr + SMB2_HDR_BODY + 8;
2037 } else if (srcdyn == NULL) {
2040 dstdyn = talloc_memdup(ctx, srcdyn, srcdyn_len);
2041 if (dstdyn == NULL) {
2045 outvec[SMBD_SMB2_DYN_IOV_OFS].iov_base = (void *)dstdyn;
2046 outvec[SMBD_SMB2_DYN_IOV_OFS].iov_len = srcdyn_len;
/*
 * Duplicate an outgoing SMB2 request: allocate a fresh request on the
 * same connection, copy session/signing/encryption settings, and
 * deep-copy the out vectors (NBT header + 4-iovec groups) so the copy
 * can be queued independently of the original.
 *
 * @return the duplicate, or NULL on allocation/validation failure
 */
2051 static struct smbd_smb2_request *dup_smb2_req(const struct smbd_smb2_request *req)
2053 struct smbd_smb2_request *newreq = NULL;
2054 struct iovec *outvec = NULL;
2055 int count = req->out.vector_count;
2059 newreq = smbd_smb2_request_allocate(req->xconn);
2064 newreq->session = req->session;
2065 newreq->do_encryption = req->do_encryption;
2066 newreq->do_signing = req->do_signing;
2067 newreq->current_idx = req->current_idx;
2069 outvec = talloc_zero_array(newreq, struct iovec, count);
2071 TALLOC_FREE(newreq);
2074 newreq->out.vector = outvec;
2075 newreq->out.vector_count = count;
2077 /* Setup the outvec's identically to req. */
2078 outvec[0].iov_base = newreq->out.nbt_hdr;
2079 outvec[0].iov_len = 4;
2080 memcpy(newreq->out.nbt_hdr, req->out.nbt_hdr, 4);
2082 /* Setup the vectors identically to the ones in req. */
/* Vector 0 is the NBT header; each subsequent group is 4 iovecs. */
2083 for (i = 1; i < count; i += SMBD_SMB2_NUM_IOV_PER_REQ) {
2084 if (!dup_smb2_vec4(outvec, &outvec[i], &req->out.vector[i])) {
2091 TALLOC_FREE(newreq);
/* Recompute the NBT length field over the copied vectors. */
2095 ok = smb2_setup_nbt_length(newreq->out.vector,
2096 newreq->out.vector_count);
2098 TALLOC_FREE(newreq);
/*
 * Send an interim response for the already-processed part of a
 * compound request that is now going async.
 *
 * Duplicates the request, drops the final (still-pending) reply
 * group, terminates the chain, recalculates credits, re-signs or
 * re-encrypts as needed, and queues the duplicate on the send queue.
 *
 * @return NT_STATUS_OK or an error from signing/flushing
 */
2105 static NTSTATUS smb2_send_async_interim_response(const struct smbd_smb2_request *req)
2107 struct smbXsrv_connection *xconn = req->xconn;
2109 struct iovec *firsttf = NULL;
2110 struct iovec *outhdr_v = NULL;
2111 uint8_t *outhdr = NULL;
2112 struct smbd_smb2_request *nreq = NULL;
2116 /* Create a new smb2 request we'll use
2117 for the interim return. */
2118 nreq = dup_smb2_req(req);
2120 return NT_STATUS_NO_MEMORY;
2123 /* Lose the last X out vectors. They're the
2124 ones we'll be using for the async reply. */
2125 nreq->out.vector_count -= SMBD_SMB2_NUM_IOV_PER_REQ;
2127 ok = smb2_setup_nbt_length(nreq->out.vector,
2128 nreq->out.vector_count);
2130 return NT_STATUS_INVALID_PARAMETER_MIX;
2133 /* Step back to the previous reply. */
2134 nreq->current_idx -= SMBD_SMB2_NUM_IOV_PER_REQ;
2135 firsttf = SMBD_SMB2_IDX_TF_IOV(nreq,out,first_idx);
2136 outhdr_v = SMBD_SMB2_OUT_HDR_IOV(nreq);
2137 outhdr = SMBD_SMB2_OUT_HDR_PTR(nreq);
2138 /* And end the chain. */
2139 SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, 0);
2141 /* Calculate outgoing credits */
2142 smb2_calculate_credits(req, nreq);
2144 if (DEBUGLEVEL >= 10) {
2145 dbgtext("smb2_send_async_interim_response: nreq->current_idx = %u\n",
2146 (unsigned int)nreq->current_idx );
2147 dbgtext("smb2_send_async_interim_response: returning %u vectors\n",
2148 (unsigned int)nreq->out.vector_count );
2149 print_req_vectors(nreq);
2153 * As we have changed the header (SMB2_HDR_NEXT_COMMAND),
2154 * we need to sign/encrypt here with the last/first key we remembered
/* A full-size TF iovec means the original reply was encrypted. */
2156 if (firsttf->iov_len == SMB2_TF_HDR_SIZE) {
2157 status = smb2_signing_encrypt_pdu(req->first_enc_key,
2159 nreq->out.vector_count - first_idx);
2160 if (!NT_STATUS_IS_OK(status)) {
2163 } else if (smb2_signing_key_valid(req->last_sign_key)) {
2164 status = smb2_signing_sign_pdu(req->last_sign_key,
2166 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
2167 if (!NT_STATUS_IS_OK(status)) {
/* Hand ownership of the duplicate to the connection's send queue. */
2172 nreq->queue_entry.mem_ctx = nreq;
2173 nreq->queue_entry.vector = nreq->out.vector;
2174 nreq->queue_entry.count = nreq->out.vector_count;
2175 nreq->queue_entry.xconn = xconn;
2176 DLIST_ADD_END(xconn->smb2.send_queue, &nreq->queue_entry);
2177 xconn->smb2.send_queue_len++;
2179 status = smbd_smb2_flush_send_queue(xconn);
2180 if (!NT_STATUS_IS_OK(status)) {
2184 return NT_STATUS_OK;
/*
 * Self-contained buffer + iovecs for a STATUS_PENDING interim packet:
 * NBT header, optional transform header, SMB2 header, 8-byte body and
 * 1 dynamic byte, all in one allocation that lives on the send queue.
 */
2187 struct smbd_smb2_request_pending_state {
2188 struct smbd_smb2_send_queue queue_entry;
2189 uint8_t buf[NBT_HDR_SIZE + SMB2_TF_HDR_SIZE + SMB2_HDR_BODY + 0x08 + 1];
2190 struct iovec vector[1 + SMBD_SMB2_NUM_IOV_PER_REQ];
2193 static void smbd_smb2_request_pending_timer(struct tevent_context *ev,
2194 struct tevent_timer *te,
2195 struct timeval current_time,
2196 void *private_data);
/*
 * Move an SMB2 request into "pending" (async) handling.
 *
 * If the subrequest already finished, its callback fires immediately.
 * Otherwise, unless the request is already async or handled
 * internally, this validates the compound-chain async rules, sends an
 * interim response for already-processed parts of a chain, rewrites
 * the in/out vectors so only the pending operation remains, and arms
 * a timer that will emit the STATUS_PENDING interim packet after
 * defer_time microseconds.
 *
 * @param req         the SMB2 request going async
 * @param subreq      the operation's pending tevent request
 * @param defer_time  delay in microseconds before the interim response
 * @return NT_STATUS_OK or an error status
 */
2198 NTSTATUS smbd_smb2_request_pending_queue(struct smbd_smb2_request *req,
2199 struct tevent_req *subreq,
2200 uint32_t defer_time)
2203 struct timeval defer_endtime;
2204 uint8_t *outhdr = NULL;
2207 if (!tevent_req_is_in_progress(subreq)) {
2209 * This is a performance optimization,
2210 * it avoids one tevent_loop iteration,
2211 * which means we avoid one
2212 * talloc_stackframe_pool/talloc_free pair.
2214 tevent_req_notify_callback(subreq);
2215 return NT_STATUS_OK;
2218 req->subreq = subreq;
/* A pending-timer already armed means we are already async. */
2221 if (req->async_te) {
2222 /* We're already async. */
2223 return NT_STATUS_OK;
2226 outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
2227 flags = IVAL(outhdr, SMB2_HDR_FLAGS);
2228 if (flags & SMB2_HDR_FLAG_ASYNC) {
2229 /* We're already async. */
2230 return NT_STATUS_OK;
2233 if (req->async_internal || defer_time == 0) {
2235 * An SMB2 request implementation wants to handle the request
2236 * asynchronously "internally" while keeping synchronous
2237 * behaviour for the SMB2 request. This means we don't send an
2238 * interim response and we can allow processing of compound SMB2
2239 * requests (cf the subsequent check) for all cases.
2241 return NT_STATUS_OK;
2244 if (req->in.vector_count > req->current_idx + SMBD_SMB2_NUM_IOV_PER_REQ) {
2246 * We're trying to go async in a compound request
2247 * chain. This is only allowed for opens that cause an
2248 * oplock break or for the last operation in the
2249 * chain, otherwise it is not allowed. See
2250 * [MS-SMB2].pdf note <206> on Section 3.3.5.2.7.
2252 const uint8_t *inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2254 if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_CREATE) {
2256 * Cancel the outstanding request.
2258 bool ok = tevent_req_cancel(req->subreq);
2260 return NT_STATUS_OK;
2262 TALLOC_FREE(req->subreq);
2263 return smbd_smb2_request_error(req,
2264 NT_STATUS_INTERNAL_ERROR);
2268 if (DEBUGLEVEL >= 10) {
2269 dbgtext("smbd_smb2_request_pending_queue: req->current_idx = %u\n",
2270 (unsigned int)req->current_idx );
2271 print_req_vectors(req);
2274 if (req->current_idx > 1) {
2276 * We're going async in a compound
2277 * chain after the first request has
2278 * already been processed. Send an
2279 * interim response containing the
2280 * set of replies already generated.
2282 int idx = req->current_idx;
2284 status = smb2_send_async_interim_response(req);
2285 if (!NT_STATUS_IS_OK(status)) {
2288 TALLOC_FREE(req->first_enc_key);
2290 req->current_idx = 1;
2293 * Re-arrange the in.vectors to remove what
/* Shift the remaining (pending) operation down to index 1. */
2296 memmove(&req->in.vector[1],
2297 &req->in.vector[idx],
2298 sizeof(req->in.vector[0])*(req->in.vector_count - idx));
2299 req->in.vector_count = 1 + (req->in.vector_count - idx);
2301 /* Re-arrange the out.vectors to match. */
2302 memmove(&req->out.vector[1],
2303 &req->out.vector[idx],
2304 sizeof(req->out.vector[0])*(req->out.vector_count - idx));
2305 req->out.vector_count = 1 + (req->out.vector_count - idx);
2307 if (req->in.vector_count == 1 + SMBD_SMB2_NUM_IOV_PER_REQ) {
2309 * We only have one remaining request as
2310 * we've processed everything else.
2311 * This is no longer a compound request.
2313 req->compound_related = false;
2314 outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
2315 flags = (IVAL(outhdr, SMB2_HDR_FLAGS) & ~SMB2_HDR_FLAG_CHAINED);
2316 SIVAL(outhdr, SMB2_HDR_FLAGS, flags);
2319 TALLOC_FREE(req->last_sign_key);
2322 * smbd_smb2_request_pending_timer() just send a packet
2323 * to the client and doesn't need any impersonation.
2324 * So we use req->xconn->client->raw_ev_ctx instead
2325 * of req->ev_ctx here.
2327 defer_endtime = timeval_current_ofs_usec(defer_time);
2328 req->async_te = tevent_add_timer(req->xconn->client->raw_ev_ctx,
2330 smbd_smb2_request_pending_timer,
2332 if (req->async_te == NULL) {
2333 return NT_STATUS_NO_MEMORY;
2336 return NT_STATUS_OK;
/*
 * Select the signing key for a session on a given connection:
 * prefer the per-channel key; fall back to the session's global
 * signing key when the channel key is missing or invalid.
 *
 * @param _has_channel  optional out: true if the channel key was used
 * @return the selected signing key (may still be invalid — callers
 *         check with smb2_signing_key_valid())
 */
2340 struct smb2_signing_key *smbd_smb2_signing_key(struct smbXsrv_session *session,
2341 struct smbXsrv_connection *xconn,
2344 struct smbXsrv_channel_global0 *c = NULL;
2346 struct smb2_signing_key *key = NULL;
2347 bool has_channel = false;
2349 status = smbXsrv_session_find_channel(session, xconn, &c);
2350 if (NT_STATUS_IS_OK(status)) {
2351 key = c->signing_key;
/* Channel key unusable: fall back to the session-wide key. */
2355 if (!smb2_signing_key_valid(key)) {
2356 key = session->global->signing_key;
2357 has_channel = false;
2360 if (_has_channel != NULL) {
2361 *_has_channel = has_channel;
/*
 * Produce the next encryption nonce for a session.
 *
 * Increments the per-session 128-bit counter (low/high), skipping
 * low == 0, and refuses to continue once nonce_high reaches
 * nonce_high_max — CCM/GCM nonces must never wrap or repeat.
 *
 * @param new_nonce_high  out: high 64 bits (randomized base + counter)
 * @param new_nonce_low   out: low 64 bits
 * @return NT_STATUS_OK, or NT_STATUS_ENCRYPTION_FAILED on exhaustion
 */
2367 static NTSTATUS smb2_get_new_nonce(struct smbXsrv_session *session,
2368 uint64_t *new_nonce_high,
2369 uint64_t *new_nonce_low)
2371 uint64_t nonce_high;
2374 session->nonce_low += 1;
/* Low word wrapped: skip 0 and carry into the high word. */
2375 if (session->nonce_low == 0) {
2376 session->nonce_low += 1;
2377 session->nonce_high += 1;
2381 * CCM and GCM algorithms must never have their
2382 * nonce wrap, or the security of the whole
2383 * communication and the keys is destroyed.
2384 * We must drop the connection once we have
2385 * transferred too much data.
2387 * NOTE: We assume nonces greater than 8 bytes.
2389 if (session->nonce_high >= session->nonce_high_max) {
2390 return NT_STATUS_ENCRYPTION_FAILED;
/* The emitted high word is offset by a per-session random base. */
2393 nonce_high = session->nonce_high_random;
2394 nonce_high += session->nonce_high;
2395 nonce_low = session->nonce_low;
2397 *new_nonce_high = nonce_high;
2398 *new_nonce_low = nonce_low;
2399 return NT_STATUS_OK;
/*
 * talloc destructor: when the pending-state is freed, make sure its
 * queue entry is detached from the connection's send queue first.
 */
2402 static int smbd_smb2_request_pending_state_destructor(struct smbd_smb2_request_pending_state *state)
2404 return smbd_smb2_send_queue_destruction(&state->queue_entry);
/*
 * Timer handler that builds and queues the interim STATUS_PENDING
 * response for a request that went async.
 *
 * Constructs a stand-alone packet in a freshly allocated
 * smbd_smb2_request_pending_state (NBT + optional transform header +
 * SMB2 header + 9-byte error body), grants credits, encrypts if the
 * original exchange was encrypted, and pushes it on the send queue.
 * Any failure terminates the connection.
 */
2407 static void smbd_smb2_request_pending_timer(struct tevent_context *ev,
2408 struct tevent_timer *te,
2409 struct timeval current_time,
2412 struct smbd_smb2_request *req =
2413 talloc_get_type_abort(private_data,
2414 struct smbd_smb2_request);
2415 struct smbXsrv_connection *xconn = req->xconn;
2416 struct smbd_smb2_request_pending_state *state = NULL;
2417 uint8_t *outhdr = NULL;
2418 const uint8_t *inhdr = NULL;
2420 uint8_t *hdr = NULL;
2421 uint8_t *body = NULL;
2422 uint8_t *dyn = NULL;
2424 uint64_t message_id = 0;
2425 uint64_t async_id = 0;
/* One-shot timer: drop our reference so we can't fire again. */
2429 TALLOC_FREE(req->async_te);
2431 /* Ensure our final reply matches the interim one. */
2432 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2433 outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
2434 flags = IVAL(outhdr, SMB2_HDR_FLAGS);
2435 message_id = BVAL(outhdr, SMB2_HDR_MESSAGE_ID);
2437 async_id = message_id; /* keep it simple for now... */
/* Mark the real (final) reply async too, with the chosen async id. */
2439 SIVAL(outhdr, SMB2_HDR_FLAGS, flags | SMB2_HDR_FLAG_ASYNC);
2440 SBVAL(outhdr, SMB2_HDR_ASYNC_ID, async_id);
/* NOTE(review): debug text names the queueing function, not this timer. */
2442 DEBUG(10,("smbd_smb2_request_pending_queue: opcode[%s] mid %llu "
2444 smb2_opcode_name(SVAL(inhdr, SMB2_HDR_OPCODE)),
2445 (unsigned long long)async_id ));
2448 * What we send is identical to a smbd_smb2_request_error
2449 * packet with an error status of STATUS_PENDING. Make use
2450 * of this fact sometime when refactoring. JRA.
2453 state = talloc_zero(req->xconn, struct smbd_smb2_request_pending_state);
2454 if (state == NULL) {
2455 smbd_server_connection_terminate(xconn,
2456 nt_errstr(NT_STATUS_NO_MEMORY));
2459 talloc_set_destructor(state, smbd_smb2_request_pending_state_destructor);
/* Carve the fixed layout out of state->buf. */
2461 tf = state->buf + NBT_HDR_SIZE;
2463 hdr = tf + SMB2_TF_HDR_SIZE;
2464 body = hdr + SMB2_HDR_BODY;
2467 if (req->do_encryption) {
2468 uint64_t nonce_high = 0;
2469 uint64_t nonce_low = 0;
2470 uint64_t session_id = req->session->global->session_wire_id;
2472 status = smb2_get_new_nonce(req->session,
2475 if (!NT_STATUS_IS_OK(status)) {
2476 smbd_server_connection_terminate(xconn,
/* Fill the SMB2 transform header for the encrypted case. */
2481 SIVAL(tf, SMB2_TF_PROTOCOL_ID, SMB2_TF_MAGIC);
2482 SBVAL(tf, SMB2_TF_NONCE+0, nonce_low);
2483 SBVAL(tf, SMB2_TF_NONCE+8, nonce_high);
2484 SBVAL(tf, SMB2_TF_SESSION_ID, session_id);
2487 SIVAL(hdr, SMB2_HDR_PROTOCOL_ID, SMB2_MAGIC);
2488 SSVAL(hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
2489 SSVAL(hdr, SMB2_HDR_EPOCH, 0);
2490 SIVAL(hdr, SMB2_HDR_STATUS, NT_STATUS_V(NT_STATUS_PENDING));
2491 SSVAL(hdr, SMB2_HDR_OPCODE, SVAL(outhdr, SMB2_HDR_OPCODE));
2494 * The STATUS_PENDING response has SMB2_HDR_FLAG_SIGNED
2495 * cleared, but echoes the signature field.
2497 flags &= ~SMB2_HDR_FLAG_SIGNED;
2498 SIVAL(hdr, SMB2_HDR_FLAGS, flags);
2499 SIVAL(hdr, SMB2_HDR_NEXT_COMMAND, 0);
2500 SBVAL(hdr, SMB2_HDR_MESSAGE_ID, message_id);
2501 SBVAL(hdr, SMB2_HDR_PID, async_id);
2502 SBVAL(hdr, SMB2_HDR_SESSION_ID,
2503 BVAL(outhdr, SMB2_HDR_SESSION_ID));
2504 memcpy(hdr+SMB2_HDR_SIGNATURE,
2505 outhdr+SMB2_HDR_SIGNATURE, 16);
/* 9-byte SMB2 error body (structure size 9 = 0x08 + 1). */
2507 SSVAL(body, 0x00, 0x08 + 1);
2509 SCVAL(body, 0x02, 0);
2510 SCVAL(body, 0x03, 0);
2511 SIVAL(body, 0x04, 0);
2512 /* Match W2K8R2... */
2513 SCVAL(dyn, 0x00, 0x21);
2515 state->vector[0].iov_base = (void *)state->buf;
2516 state->vector[0].iov_len = NBT_HDR_SIZE;
2518 if (req->do_encryption) {
2519 state->vector[1+SMBD_SMB2_TF_IOV_OFS].iov_base = tf;
2520 state->vector[1+SMBD_SMB2_TF_IOV_OFS].iov_len =
/* Unencrypted: the TF iovec stays empty. */
2523 state->vector[1+SMBD_SMB2_TF_IOV_OFS].iov_base = NULL;
2524 state->vector[1+SMBD_SMB2_TF_IOV_OFS].iov_len = 0;
2527 state->vector[1+SMBD_SMB2_HDR_IOV_OFS].iov_base = hdr;
2528 state->vector[1+SMBD_SMB2_HDR_IOV_OFS].iov_len = SMB2_HDR_BODY;
2530 state->vector[1+SMBD_SMB2_BODY_IOV_OFS].iov_base = body;
2531 state->vector[1+SMBD_SMB2_BODY_IOV_OFS].iov_len = 8;
2533 state->vector[1+SMBD_SMB2_DYN_IOV_OFS].iov_base = dyn;
2534 state->vector[1+SMBD_SMB2_DYN_IOV_OFS].iov_len = 1;
2536 ok = smb2_setup_nbt_length(state->vector,
2537 1 + SMBD_SMB2_NUM_IOV_PER_REQ);
2539 smbd_server_connection_terminate(
2540 xconn, nt_errstr(NT_STATUS_INTERNAL_ERROR));
2544 /* Ensure we correctly go through crediting. Grant
2545 the credits now, and zero credits on the final
2547 smb2_set_operation_credit(req->xconn,
2548 SMBD_SMB2_IN_HDR_IOV(req),
2549 &state->vector[1+SMBD_SMB2_HDR_IOV_OFS]);
2552 * We add SMB2_HDR_FLAG_ASYNC after smb2_set_operation_credit()
2553 * as it reacts on it
2555 SIVAL(hdr, SMB2_HDR_FLAGS, flags | SMB2_HDR_FLAG_ASYNC);
2560 for (i = 0; i < ARRAY_SIZE(state->vector); i++) {
2561 dbgtext("\tstate->vector[%u/%u].iov_len = %u\n",
2563 (unsigned int)ARRAY_SIZE(state->vector),
2564 (unsigned int)state->vector[i].iov_len);
2568 if (req->do_encryption) {
2569 struct smbXsrv_session *x = req->session;
2570 struct smb2_signing_key *encryption_key = x->global->encryption_key;
2572 status = smb2_signing_encrypt_pdu(encryption_key,
2573 &state->vector[1+SMBD_SMB2_TF_IOV_OFS],
2574 SMBD_SMB2_NUM_IOV_PER_REQ);
2575 if (!NT_STATUS_IS_OK(status)) {
2576 smbd_server_connection_terminate(xconn,
/* Queue the interim packet; the state is owned by the queue entry. */
2582 state->queue_entry.mem_ctx = state;
2583 state->queue_entry.vector = state->vector;
2584 state->queue_entry.count = ARRAY_SIZE(state->vector);
2585 state->queue_entry.xconn = xconn;
2586 DLIST_ADD_END(xconn->smb2.send_queue, &state->queue_entry);
2587 xconn->smb2.send_queue_len++;
2589 status = smbd_smb2_flush_send_queue(xconn);
2590 if (!NT_STATUS_IS_OK(status)) {
2591 smbd_server_connection_terminate(xconn,
/*
 * Handle an SMB2 CANCEL request.
 *
 * Looks up the target request on this connection — by async id when
 * the cancel carries SMB2_HDR_FLAG_ASYNC, otherwise by message id —
 * restricted to the same session, skipping compound requests, and
 * cancels its pending subrequest if found.  CANCEL itself gets no
 * response; the request is unlinked here and freed by the caller.
 */
2597 static NTSTATUS smbd_smb2_request_process_cancel(struct smbd_smb2_request *req)
2599 struct smbXsrv_connection *xconn = req->xconn;
2600 struct smbd_smb2_request *cur;
2601 const uint8_t *inhdr;
2603 uint64_t search_message_id;
2604 uint64_t search_async_id;
2605 uint64_t found_id = 0;
2607 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2609 flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2610 search_message_id = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
2611 search_async_id = BVAL(inhdr, SMB2_HDR_PID);
2614 * We don't need the request anymore cancel requests never
2617 * We defer the TALLOC_FREE(req) to the caller.
2619 DLIST_REMOVE(xconn->smb2.requests, req);
2621 for (cur = xconn->smb2.requests; cur; cur = cur->next) {
2622 const uint8_t *outhdr;
2623 uint64_t message_id;
/* Only requests of the same session may be cancelled. */
2626 if (cur->session != req->session) {
2630 if (cur->compound_related) {
2632 * Never cancel anything in a compound request.
2633 * Way too hard to deal with the result.
2638 outhdr = SMBD_SMB2_OUT_HDR_PTR(cur);
2640 message_id = BVAL(outhdr, SMB2_HDR_MESSAGE_ID);
2641 async_id = BVAL(outhdr, SMB2_HDR_PID);
/* Async cancels match on async id, sync cancels on message id. */
2643 if (flags & SMB2_HDR_FLAG_ASYNC) {
2644 if (search_async_id == async_id) {
2645 found_id = async_id;
2649 if (search_message_id == message_id) {
2650 found_id = message_id;
2656 if (cur && cur->subreq) {
2657 inhdr = SMBD_SMB2_IN_HDR_PTR(cur);
2658 DEBUG(10,("smbd_smb2_request_process_cancel: attempting to "
2659 "cancel opcode[%s] mid %llu\n",
2660 smb2_opcode_name(SVAL(inhdr, SMB2_HDR_OPCODE)),
2661 (unsigned long long)found_id ));
2662 tevent_req_cancel(cur->subreq);
2665 return NT_STATUS_OK;
2668 /*************************************************************
2669 Ensure an incoming tid is a valid one for us to access.
2670 Change to the associated uid credentials and chdir to the
2671 valid tid directory.
2672 *************************************************************/
/*
 * Validate the request's tree id, impersonate the tcon's user/service
 * and remember the tid for subsequent chained requests.
 *
 * @return NT_STATUS_OK, a lookup error from smb2srv_tcon_lookup(),
 *         or NT_STATUS_ACCESS_DENIED if impersonation fails
 */
2674 static NTSTATUS smbd_smb2_request_check_tcon(struct smbd_smb2_request *req)
2676 const uint8_t *inhdr;
2679 struct smbXsrv_tcon *tcon;
2681 NTTIME now = timeval_to_nttime(&req->request_time);
2685 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2687 in_flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2688 in_tid = IVAL(inhdr, SMB2_HDR_TID);
/* Chained compound requests inherit the tid of the previous request. */
2690 if (in_flags & SMB2_HDR_FLAG_CHAINED) {
2691 in_tid = req->last_tid;
2696 status = smb2srv_tcon_lookup(req->session,
2697 in_tid, now, &tcon);
2698 if (!NT_STATUS_IS_OK(status)) {
2702 if (!change_to_user_and_service(
2704 req->session->global->session_wire_id))
2706 return NT_STATUS_ACCESS_DENIED;
/* Remember the tid for SMB2_HDR_FLAG_CHAINED followers. */
2710 req->last_tid = in_tid;
2712 return NT_STATUS_OK;
2715 /*************************************************************
2716 Ensure an incoming session_id is a valid one for us to access.
2717 *************************************************************/
/*
 * NOTE(review): garbled extraction — interior lines (braces, lookup
 * arguments, some case labels) are missing; comments below describe
 * only the visible code.
 */
2719 static NTSTATUS smbd_smb2_request_check_session(struct smbd_smb2_request *req)
2721 const uint8_t *inhdr;
2724 uint64_t in_session_id;
2725 struct smbXsrv_session *session = NULL;
2726 struct auth_session_info *session_info;
2728 NTTIME now = timeval_to_nttime(&req->request_time);
2730 req->session = NULL;
2733 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2735 in_flags = IVAL(inhdr, SMB2_HDR_FLAGS);
2736 in_opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2737 in_session_id = BVAL(inhdr, SMB2_HDR_SESSION_ID);
/* Chained requests inherit the previous request's SessionId. */
2739 if (in_flags & SMB2_HDR_FLAG_CHAINED) {
2740 in_session_id = req->last_session_id;
2743 req->last_session_id = 0;
2745 /* look an existing session up */
2746 switch (in_opcode) {
2747 case SMB2_OP_SESSSETUP:
2749 * For a session bind request, we don't have the
2750 * channel set up at this point yet, so we defer
2751 * the verification that the connection belongs
2752 * to the session to the session setup code, which
2753 * can look at the session binding flags.
2755 status = smb2srv_session_lookup_client(req->xconn->client,
/* All other opcodes: session must be bound to this connection. */
2760 status = smb2srv_session_lookup_conn(req->xconn,
2766 req->session = session;
2767 req->last_session_id = in_session_id;
/*
 * A deleted session is still acceptable for SESSSETUP: fall back to
 * another process's session so response signing stays correct.
 */
2769 if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) {
2770 switch (in_opcode) {
2771 case SMB2_OP_SESSSETUP:
2772 status = smb2srv_session_lookup_global(req->xconn->client,
2776 if (NT_STATUS_IS_OK(status)) {
2778 * We fallback to a session of
2779 * another process in order to
2780 * get the signing correct.
2782 * We don't set req->last_session_id here.
2784 req->session = session;
/*
 * Expired sessions: a subset of opcodes must still be processed per
 * MS-SMB2; for those the expiry status is downgraded to OK.
 */
2791 if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED)) {
2792 switch (in_opcode) {
2793 case SMB2_OP_SESSSETUP:
2794 status = NT_STATUS_OK;
2796 case SMB2_OP_LOGOFF:
2799 case SMB2_OP_CANCEL:
2800 case SMB2_OP_KEEPALIVE:
2802 * [MS-SMB2] 3.3.5.2.9 Verifying the Session
2803 * specifies that LOGOFF, CLOSE and (UN)LOCK
2804 * should always be processed even on expired sessions.
2806 * Also see the logic in
2807 * smbd_smb2_request_process_lock().
2809 * The smb2.session.expire2 test shows that
2810 * CANCEL and KEEPALIVE/ECHO should also
2813 status = NT_STATUS_OK;
/*
 * Sessions still in setup (MORE_PROCESSING_REQUIRED): handle-based
 * calls are rejected outright; others proceed guarded by the
 * session_info NULL check below.
 */
2819 if (NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
2820 switch (in_opcode) {
2822 case SMB2_OP_CREATE:
2823 case SMB2_OP_GETINFO:
2824 case SMB2_OP_SETINFO:
2825 return NT_STATUS_INVALID_HANDLE;
2828 * Notice the check for
2829 * (session_info == NULL)
2832 status = NT_STATUS_OK;
2836 if (!NT_STATUS_IS_OK(status)) {
/* An unauthenticated session has no auth_session_info yet. */
2840 session_info = session->global->auth_session_info;
2841 if (session_info == NULL) {
2842 return NT_STATUS_INVALID_HANDLE;
2845 return NT_STATUS_OK;
/*
 * Verify that the CreditCharge in the request header covers the given
 * payload length (one credit per started 64KiB), per MS-SMB2 3.3.5.2.5.
 *
 * NOTE(review): garbled extraction — interior lines are missing.
 */
2848 NTSTATUS smbd_smb2_request_verify_creditcharge(struct smbd_smb2_request *req,
2849 uint32_t data_length)
2851 struct smbXsrv_connection *xconn = req->xconn;
2852 uint16_t needed_charge;
2853 uint16_t credit_charge = 1;
2854 const uint8_t *inhdr;
2856 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
/* Only honour the wire CreditCharge when multi-credit is negotiated;
 * a charge of 0 is treated as 1. */
2858 if (xconn->smb2.credits.multicredit) {
2859 credit_charge = SVAL(inhdr, SMB2_HDR_CREDIT_CHARGE);
2860 credit_charge = MAX(credit_charge, 1);
/* Credits needed: ceil(data_length / 65536), i.e. (len-1)/64K + 1. */
2863 needed_charge = (data_length - 1)/ 65536 + 1;
2865 DBGC_DEBUG(DBGC_SMB2_CREDITS,
2866 "mid %llu, CreditCharge: %d, NeededCharge: %d\n",
2867 (unsigned long long) BVAL(inhdr, SMB2_HDR_MESSAGE_ID),
2868 credit_charge, needed_charge);
2870 if (needed_charge > credit_charge) {
2871 DBGC_WARNING(DBGC_SMB2_CREDITS,
2872 "CreditCharge too low, given %d, needed %d\n",
2873 credit_charge, needed_charge);
2874 return NT_STATUS_INVALID_PARAMETER;
2877 return NT_STATUS_OK;
/*
 * Validate the fixed-body size of the current request against the
 * opcode's expected StructureSize.
 *
 * Convention: the low bit of expected_body_size flags that the last
 * byte of the fixed body may live in the dynamic part instead — hence
 * min_dyn_size below and the & 0xFFFFFFFE mask in the comparison.
 *
 * NOTE(review): garbled extraction — interior lines (incl. parts of a
 * switch on opcode) are missing.
 */
2880 NTSTATUS smbd_smb2_request_verify_sizes(struct smbd_smb2_request *req,
2881 size_t expected_body_size)
2883 struct iovec *inhdr_v;
2884 const uint8_t *inhdr;
2886 const uint8_t *inbody;
2888 size_t min_dyn_size = expected_body_size & 0x00000001;
2889 int max_idx = req->in.vector_count - SMBD_SMB2_NUM_IOV_PER_REQ;
2892 * The following should be checked already.
2894 if (req->in.vector_count < SMBD_SMB2_NUM_IOV_PER_REQ) {
2895 return NT_STATUS_INTERNAL_ERROR;
2897 if (req->current_idx > max_idx) {
2898 return NT_STATUS_INTERNAL_ERROR;
2901 inhdr_v = SMBD_SMB2_IN_HDR_IOV(req);
2902 if (inhdr_v->iov_len != SMB2_HDR_BODY) {
2903 return NT_STATUS_INTERNAL_ERROR;
2905 if (SMBD_SMB2_IN_BODY_LEN(req) < 2) {
2906 return NT_STATUS_INTERNAL_ERROR;
2909 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
2910 opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
2914 case SMB2_OP_GETINFO:
2921 * Now check the expected body size,
2922 * where the last byte might be in the
/* Compare with bit 0 masked off (see header comment). */
2925 if (SMBD_SMB2_IN_BODY_LEN(req) != (expected_body_size & 0xFFFFFFFE)) {
2926 return NT_STATUS_INVALID_PARAMETER;
2928 if (SMBD_SMB2_IN_DYN_LEN(req) < min_dyn_size) {
2929 return NT_STATUS_INVALID_PARAMETER;
/* The wire StructureSize must match exactly (including the odd bit). */
2932 inbody = SMBD_SMB2_IN_BODY_PTR(req);
2934 body_size = SVAL(inbody, 0x00);
2935 if (body_size != expected_body_size) {
2936 return NT_STATUS_INVALID_PARAMETER;
2939 return NT_STATUS_OK;
2942 bool smbXsrv_is_encrypted(uint8_t encryption_flags)
2944 return (!(encryption_flags & SMBXSRV_PROCESSED_UNENCRYPTED_PACKET)
2946 (encryption_flags & (SMBXSRV_PROCESSED_ENCRYPTED_PACKET |
2947 SMBXSRV_ENCRYPTION_DESIRED |
2948 SMBXSRV_ENCRYPTION_REQUIRED)));
2951 bool smbXsrv_is_partially_encrypted(uint8_t encryption_flags)
2953 return ((encryption_flags & SMBXSRV_PROCESSED_ENCRYPTED_PACKET) &&
2954 (encryption_flags & SMBXSRV_PROCESSED_UNENCRYPTED_PACKET));
/*
 * Set @flag in *@flags unless it is zero or already present.
 *
 * Returns true if *flags was actually modified, false otherwise.
 */
bool smbXsrv_set_crypto_flag(uint8_t *flags, uint8_t flag)
{
	bool already_set = (*flags & flag) != 0;

	if (flag == 0 || already_set) {
		return false;
	}

	*flags |= flag;
	return true;
}
2969 * Update encryption state tracking flags, this can be used to
2970 * determine whether the session or tcon is "encrypted".
/*
 * NOTE(review): garbled extraction — interior lines (braces, an opcode
 * parameter, early-return) are missing; comments describe visible code.
 */
2972 static void smb2srv_update_crypto_flags(struct smbd_smb2_request *req,
2974 bool *update_session_globalp,
2975 bool *update_tcon_globalp)
2977 /* Default: assume unencrypted and unsigned */
2978 struct smbXsrv_session *session = req->session;
2979 struct smbXsrv_tcon *tcon = req->tcon;
2980 uint8_t encrypt_flag = SMBXSRV_PROCESSED_UNENCRYPTED_PACKET;
2981 uint8_t sign_flag = SMBXSRV_PROCESSED_UNSIGNED_PACKET;
2982 bool update_session = false;
2983 bool update_tcon = false;
2985 if (session->table == NULL) {
2987 * sessions from smb2srv_session_lookup_global()
2988 * have NT_STATUS_BAD_LOGON_SESSION_STATE
2989 * and session->table == NULL.
2991 * They only used to give the correct error
2992 * status, we should not update any state.
/* An encrypted request implies a (transform-)signed request. */
2997 if (req->was_encrypted && req->do_encryption) {
2998 encrypt_flag = SMBXSRV_PROCESSED_ENCRYPTED_PACKET;
2999 sign_flag = SMBXSRV_PROCESSED_SIGNED_PACKET;
3001 /* Unencrypted packet, can be signed */
3002 if (req->do_signing) {
3003 sign_flag = SMBXSRV_PROCESSED_SIGNED_PACKET;
/* Record newly-observed state on session and tcon; the booleans
 * tell the caller whether the global records need persisting. */
3007 update_session |= smbXsrv_set_crypto_flag(
3008 &session->global->encryption_flags, encrypt_flag);
3009 update_session |= smbXsrv_set_crypto_flag(
3010 &session->global->signing_flags, sign_flag);
3013 update_tcon |= smbXsrv_set_crypto_flag(
3014 &tcon->global->encryption_flags, encrypt_flag);
3015 update_tcon |= smbXsrv_set_crypto_flag(
3016 &tcon->global->signing_flags, sign_flag);
3020 *update_session_globalp = update_session;
3021 *update_tcon_globalp = update_tcon;
3025 bool smbXsrv_is_signed(uint8_t signing_flags)
3028 * Signing is always enabled, so unless we got an unsigned
3029 * packet and at least one signed packet that was not
3030 * encrypted, the session or tcon is "signed".
3032 return (!(signing_flags & SMBXSRV_PROCESSED_UNSIGNED_PACKET) &&
3033 (signing_flags & SMBXSRV_PROCESSED_SIGNED_PACKET));
3036 bool smbXsrv_is_partially_signed(uint8_t signing_flags)
3038 return ((signing_flags & SMBXSRV_PROCESSED_UNSIGNED_PACKET) &&
3039 (signing_flags & SMBXSRV_PROCESSED_SIGNED_PACKET));
/*
 * MS-SMB2 3.3.5.2.10: verify the request's ChannelSequence against the
 * open's stored one and maintain the open's outstanding request
 * counters (used for replay detection on SMB3+).
 *
 * NOTE(review): garbled extraction — interior lines (braces, the
 * modify_call parameter, csn-wrap condition, final return) are
 * missing; comments describe visible code only.
 */
3042 static NTSTATUS smbd_smb2_request_dispatch_update_counts(
3043 struct smbd_smb2_request *req,
3046 struct smbXsrv_connection *xconn = req->xconn;
3047 const uint8_t *inhdr;
3048 uint16_t channel_sequence;
3049 uint8_t generation_wrap = 0;
3052 struct smbXsrv_open *op;
3053 bool update_open = false;
3054 NTSTATUS status = NT_STATUS_OK;
3056 SMB_ASSERT(!req->request_counters_updated);
/* Channel sequence tracking only exists from SMB 3.0 onwards. */
3058 if (xconn->protocol < PROTOCOL_SMB3_00) {
3059 return NT_STATUS_OK;
/* Nothing to do without an associated open file handle. */
3062 if (req->compat_chain_fsp == NULL) {
3063 return NT_STATUS_OK;
3066 op = req->compat_chain_fsp->op;
3068 return NT_STATUS_OK;
3071 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
3072 flags = IVAL(inhdr, SMB2_HDR_FLAGS);
3073 channel_sequence = SVAL(inhdr, SMB2_HDR_CHANNEL_SEQUENCE);
/* Signed 16-bit-style difference between wire and stored csn. */
3075 cmp = channel_sequence - op->global->channel_sequence;
3078 * csn wrap. We need to watch out for long-running
3079 * requests that are still sitting on a previously
3080 * used csn. SMB2_OP_NOTIFY can take VERY long.
3082 generation_wrap += 1;
3085 if (abs(cmp) > INT16_MAX) {
3087 * [MS-SMB2] 3.3.5.2.10 - Verifying the Channel Sequence Number:
3089 * If the channel sequence number of the request and the one
3090 * known to the server are not equal, the channel sequence
3091 * number and outstanding request counts are only updated
3092 * "... if the unsigned difference using 16-bit arithmetic
3093 * between ChannelSequence and Open.ChannelSequence is less than
3094 * or equal to 0x7FFF ...".
3095 * Otherwise, an error is returned for the modifying
3096 * calls write, set_info, and ioctl.
3098 * There are currently two issues with the description:
3100 * * For the other calls, the document seems to imply
3101 * that processing continues without adapting the
3102 * counters (if the sequence numbers are not equal).
3104 * TODO: This needs clarification!
3106 * * Also, the behaviour if the difference is larger
3107 * than 0x7FFF is not clear. The document seems to
3108 * imply that if such a difference is reached,
3109 * the server starts to ignore the counters or
3110 * in the case of the modifying calls, return errors.
3112 * TODO: This needs clarification!
3114 * At this point Samba tries to be a little more
3115 * clever than the description in the MS-SMB2 document
3116 * by heuristically detecting and properly treating
3117 * a 16 bit overflow of the client-submitted sequence
3120 * If the stored channel sequence number is more than
3121 * 0x7FFF larger than the one from the request, then
3122 * the client-provided sequence number has likely
3123 * overflown. We treat this case as valid instead
3126 * The MS-SMB2 behaviour would be setting cmp = -1.
/* Replayed requests bump the counters only from a clean state. */
3131 if (flags & SMB2_HDR_FLAG_REPLAY_OPERATION) {
3132 if (cmp == 0 && op->pre_request_count == 0) {
3133 op->request_count += 1;
3134 req->request_counters_updated = true;
3135 } else if (cmp > 0 && op->pre_request_count == 0) {
3136 op->pre_request_count += op->request_count;
3137 op->request_count = 1;
3138 op->global->channel_sequence = channel_sequence;
3139 op->global->channel_generation += generation_wrap;
3141 req->request_counters_updated = true;
3142 } else if (modify_call) {
3143 return NT_STATUS_FILE_NOT_AVAILABLE;
/* Non-replay path: same counter logic without the pre-count guard. */
3147 op->request_count += 1;
3148 req->request_counters_updated = true;
3149 } else if (cmp > 0) {
3150 op->pre_request_count += op->request_count;
3151 op->request_count = 1;
3152 op->global->channel_sequence = channel_sequence;
3153 op->global->channel_generation += generation_wrap;
3155 req->request_counters_updated = true;
3156 } else if (modify_call) {
3157 return NT_STATUS_FILE_NOT_AVAILABLE;
3160 req->channel_generation = op->global->channel_generation;
/* Persist the updated open record when anything changed. */
3163 status = smbXsrv_open_update(op);
/*
 * Central SMB2 request dispatcher: validates protocol state, session,
 * header flags, signing/encryption requirements, tcon and file-id, then
 * routes the request to the per-opcode process function.
 *
 * NOTE(review): garbled extraction — many interior lines (braces, some
 * case labels, intermediate statements) are missing; the comments added
 * below describe only what the visible code shows.
 */
3169 NTSTATUS smbd_smb2_request_dispatch(struct smbd_smb2_request *req)
3171 struct smbXsrv_connection *xconn = req->xconn;
3172 const struct smbd_smb2_dispatch_table *call = NULL;
3173 const struct iovec *intf_v = SMBD_SMB2_IN_TF_IOV(req);
3174 const uint8_t *inhdr;
3179 NTSTATUS session_status;
3180 uint32_t allowed_flags;
3181 NTSTATUS return_value;
3182 struct smbXsrv_session *x = NULL;
3183 bool signing_required = false;
3184 bool encryption_desired = false;
3185 bool encryption_required = false;
3187 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
3189 DO_PROFILE_INC(request);
3191 SMB_ASSERT(!req->request_counters_updated);
3193 /* TODO: verify more things */
3195 flags = IVAL(inhdr, SMB2_HDR_FLAGS);
3196 opcode = SVAL(inhdr, SMB2_HDR_OPCODE);
3197 mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
3198 DBG_DEBUG("opcode[%s] mid = %"PRIu64"\n",
3199 smb2_opcode_name(opcode),
/* NEGPROT is only valid before the dialect is negotiated, and the only
 * valid opcode before it is negotiated. */
3202 if (xconn->protocol >= PROTOCOL_SMB2_02) {
3204 * once the protocol is negotiated
3205 * SMB2_OP_NEGPROT is not allowed anymore
3207 if (opcode == SMB2_OP_NEGPROT) {
3208 /* drop the connection */
3209 return NT_STATUS_INVALID_PARAMETER;
3213 * if the protocol is not negotiated yet
3214 * only SMB2_OP_NEGPROT is allowed.
3216 if (opcode != SMB2_OP_NEGPROT) {
3217 /* drop the connection */
3218 return NT_STATUS_INVALID_PARAMETER;
3223 * Check if the client provided a valid session id.
3225 * As some command don't require a valid session id
3226 * we defer the check of the session_status
3228 session_status = smbd_smb2_request_check_session(req);
/* Pull the session's signing/encryption policy (x = req->session). */
3231 signing_required = x->global->signing_flags & SMBXSRV_SIGNING_REQUIRED;
3232 encryption_desired = x->global->encryption_flags & SMBXSRV_ENCRYPTION_DESIRED;
3233 encryption_required = x->global->encryption_flags & SMBXSRV_ENCRYPTION_REQUIRED;
3236 req->async_internal = false;
3237 req->do_signing = false;
/* Session setup responses are never encrypted. */
3238 if (opcode != SMB2_OP_SESSSETUP) {
3239 req->do_encryption = encryption_desired;
3241 req->do_encryption = false;
3243 req->was_encrypted = false;
/* A transform (SMB2_TF) header means the request arrived encrypted;
 * its SessionId must match the one in the SMB2 header. */
3244 if (intf_v->iov_len == SMB2_TF_HDR_SIZE) {
3245 const uint8_t *intf = SMBD_SMB2_IN_TF_PTR(req);
3246 uint64_t tf_session_id = BVAL(intf, SMB2_TF_SESSION_ID);
3248 if (x != NULL && x->global->session_wire_id != tf_session_id) {
3249 DEBUG(0,("smbd_smb2_request_dispatch: invalid session_id"
3250 "in SMB2_HDR[%llu], SMB2_TF[%llu]\n",
3251 (unsigned long long)x->global->session_wire_id,
3252 (unsigned long long)tf_session_id));
3254 * TODO: windows allows this...
3255 * should we drop the connection?
3257 * For now we just return ACCESS_DENIED
3258 * (Windows clients never trigger this)
3259 * and wait for an update of [MS-SMB2].
3261 return smbd_smb2_request_error(req,
3262 NT_STATUS_ACCESS_DENIED);
3265 req->was_encrypted = true;
3266 req->do_encryption = true;
/* Session requires encryption but the request came in clear. */
3269 if (encryption_required && !req->was_encrypted) {
3270 req->do_encryption = true;
3271 return smbd_smb2_request_error(req,
3272 NT_STATUS_ACCESS_DENIED);
/* Look up the dispatch-table entry for this opcode. */
3275 call = smbd_smb2_call(opcode);
3277 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
/* Build the set of header flags this connection/opcode may carry. */
3280 allowed_flags = SMB2_HDR_FLAG_CHAINED |
3281 SMB2_HDR_FLAG_SIGNED |
3283 if (xconn->protocol >= PROTOCOL_SMB3_11) {
3284 allowed_flags |= SMB2_HDR_FLAG_PRIORITY_MASK;
3286 if (opcode == SMB2_OP_NEGPROT) {
3287 if (lp_server_max_protocol() >= PROTOCOL_SMB3_11) {
3288 allowed_flags |= SMB2_HDR_FLAG_PRIORITY_MASK;
3291 if (opcode == SMB2_OP_CANCEL) {
3292 allowed_flags |= SMB2_HDR_FLAG_ASYNC;
3294 if (xconn->protocol >= PROTOCOL_SMB3_00) {
3295 allowed_flags |= SMB2_HDR_FLAG_REPLAY_OPERATION;
3297 if ((flags & ~allowed_flags) != 0) {
3298 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
3301 if (flags & SMB2_HDR_FLAG_CHAINED) {
3303 * This check is mostly for giving the correct error code
3304 * for compounded requests.
3306 if (!NT_STATUS_IS_OK(session_status)) {
3307 return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
3310 req->compat_chain_fsp = NULL;
/* Signature verification; encrypted requests skip classic signing. */
3313 if (req->was_encrypted) {
3314 signing_required = false;
3315 } else if (signing_required || (flags & SMB2_HDR_FLAG_SIGNED)) {
3316 struct smb2_signing_key *signing_key = NULL;
3317 bool has_channel = false;
3321 * MS-SMB2: 3.3.5.2.4 Verifying the Signature.
3322 * If the SMB2 header of the SMB2 NEGOTIATE
3323 * request has the SMB2_FLAGS_SIGNED bit set in the
3324 * Flags field, the server MUST fail the request
3325 * with STATUS_INVALID_PARAMETER.
3327 * Microsoft test tool checks this.
3330 if ((opcode == SMB2_OP_NEGPROT) &&
3331 (flags & SMB2_HDR_FLAG_SIGNED)) {
3332 status = NT_STATUS_INVALID_PARAMETER;
3334 status = NT_STATUS_USER_SESSION_DELETED;
3336 return smbd_smb2_request_error(req, status);
3339 signing_key = smbd_smb2_signing_key(x, xconn, &has_channel);
3342 * If we have a signing key, we should
3345 if (smb2_signing_key_valid(signing_key) && opcode != SMB2_OP_CANCEL) {
3346 req->do_signing = true;
3349 status = smb2_signing_check_pdu(signing_key,
3350 SMBD_SMB2_IN_HDR_IOV(req),
3351 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
/* Session-bind edge case: a SESSSETUP signed with another process's
 * session key — retry against the global session record. */
3352 if (NT_STATUS_EQUAL(status, NT_STATUS_ACCESS_DENIED) &&
3353 opcode == SMB2_OP_SESSSETUP && !has_channel &&
3354 NT_STATUS_IS_OK(session_status))
3356 if (!NT_STATUS_EQUAL(x->status, NT_STATUS_BAD_LOGON_SESSION_STATE)) {
3357 struct smbXsrv_session *session = NULL;
3360 error = smb2srv_session_lookup_global(req->xconn->client,
3361 x->global->session_wire_id,
3364 if (!NT_STATUS_IS_OK(error)) {
3365 return smbd_smb2_request_error(req, error);
3369 * We fallback to a session of
3370 * another process in order to
3371 * get the signing correct.
3373 * We don't set req->last_session_id here.
3375 req->session = x = session;
3377 goto skipped_signing;
3379 if (!NT_STATUS_IS_OK(status)) {
3380 return smbd_smb2_request_error(req, status);
3384 * Now that we know the request was correctly signed
3385 * we have to sign the response too.
3387 if (opcode != SMB2_OP_CANCEL) {
3388 req->do_signing = true;
3391 if (!NT_STATUS_IS_OK(session_status)) {
3392 return smbd_smb2_request_error(req, session_status);
3396 if (opcode == SMB2_OP_IOCTL) {
3398 * Some special IOCTL calls don't require
3399 * file, tcon nor session.
3401 * They typically don't do any real action
3402 * on behalf of the client.
3404 * They are mainly used to alter the behavior
3405 * of the connection for testing. So we can
3406 * run as root and skip all file, tcon and session
3409 static const struct smbd_smb2_dispatch_table _root_ioctl_call = {
3410 .opcode = SMB2_OP_IOCTL,
3413 const uint8_t *body = SMBD_SMB2_IN_BODY_PTR(req);
3414 size_t body_size = SMBD_SMB2_IN_BODY_LEN(req);
3415 uint32_t in_ctl_code;
3418 if (needed > body_size) {
3419 return smbd_smb2_request_error(req,
3420 NT_STATUS_INVALID_PARAMETER);
3423 in_ctl_code = IVAL(body, 0x04);
3425 * Only add trusted IOCTL codes here!
3427 switch (in_ctl_code) {
3428 case FSCTL_SMBTORTURE_FORCE_UNACKED_TIMEOUT:
3429 call = &_root_ioctl_call;
3431 case FSCTL_VALIDATE_NEGOTIATE_INFO:
3432 call = &_root_ioctl_call;
3434 case FSCTL_QUERY_NETWORK_INTERFACE_INFO:
3435 call = &_root_ioctl_call;
3442 if (flags & SMB2_HDR_FLAG_CHAINED) {
3443 req->compound_related = true;
3446 if (call->need_session) {
3447 if (!NT_STATUS_IS_OK(session_status)) {
3448 return smbd_smb2_request_error(req, session_status);
3452 if (call->need_tcon) {
3453 SMB_ASSERT(call->need_session);
3456 * This call needs to be run as user.
3458 * smbd_smb2_request_check_tcon()
3459 * calls change_to_user() on success.
3460 * Which implies set_current_user_info()
3461 * and chdir_current_service().
3463 status = smbd_smb2_request_check_tcon(req);
3464 if (!NT_STATUS_IS_OK(status)) {
3465 return smbd_smb2_request_error(req, status);
/* Per-share encryption policy may tighten the session policy. */
3467 if (req->tcon->global->encryption_flags & SMBXSRV_ENCRYPTION_DESIRED) {
3468 encryption_desired = true;
3470 if (req->tcon->global->encryption_flags & SMBXSRV_ENCRYPTION_REQUIRED) {
3471 encryption_required = true;
3473 if (encryption_required && !req->was_encrypted) {
3474 req->do_encryption = true;
3475 return smbd_smb2_request_error(req,
3476 NT_STATUS_ACCESS_DENIED);
3477 } else if (encryption_desired) {
3478 req->do_encryption = true;
3480 } else if (call->need_session) {
3481 struct auth_session_info *session_info = NULL;
3484 * Unless we also have need_tcon (see above),
3485 * we still need to call set_current_user_info().
3488 session_info = req->session->global->auth_session_info;
3489 if (session_info == NULL) {
3490 return NT_STATUS_INVALID_HANDLE;
3493 set_current_user_info(session_info->unix_info->sanitized_username,
3494 session_info->unix_info->unix_name,
3495 session_info->info->domain_name);
/* Track observed signing/encryption state and persist if changed. */
3499 bool update_session_global = false;
3500 bool update_tcon_global = false;
3502 smb2srv_update_crypto_flags(req, opcode,
3503 &update_session_global,
3504 &update_tcon_global);
3506 if (update_session_global) {
3507 status = smbXsrv_session_update(x);
3508 if (!NT_STATUS_IS_OK(status)) {
3509 return smbd_smb2_request_error(req, status);
3512 if (update_tcon_global) {
3513 status = smbXsrv_tcon_update(req->tcon);
3514 if (!NT_STATUS_IS_OK(status)) {
3515 return smbd_smb2_request_error(req, status);
/* Opcodes carrying a FileId: resolve it to an fsp before dispatch. */
3520 if (call->fileid_ofs != 0) {
3521 size_t needed = call->fileid_ofs + 16;
3522 const uint8_t *body = SMBD_SMB2_IN_BODY_PTR(req);
3523 size_t body_size = SMBD_SMB2_IN_BODY_LEN(req);
3524 uint64_t file_id_persistent;
3525 uint64_t file_id_volatile;
3526 struct files_struct *fsp;
3528 SMB_ASSERT(call->need_tcon);
3530 if (needed > body_size) {
3531 return smbd_smb2_request_error(req,
3532 NT_STATUS_INVALID_PARAMETER);
3535 file_id_persistent = BVAL(body, call->fileid_ofs + 0);
3536 file_id_volatile = BVAL(body, call->fileid_ofs + 8);
3538 fsp = file_fsp_smb2(req, file_id_persistent, file_id_volatile);
3540 if (req->compound_related &&
3541 !NT_STATUS_IS_OK(req->compound_create_err))
3543 return smbd_smb2_request_error(req,
3544 req->compound_create_err);
3547 * smbd_smb2_request_process_ioctl()
3548 * has more checks in order to return more
3549 * detailed error codes...
3551 if (opcode != SMB2_OP_IOCTL) {
3552 return smbd_smb2_request_error(req,
3553 NT_STATUS_FILE_CLOSED);
3556 if (fsp->fsp_flags.encryption_required && !req->was_encrypted) {
3557 return smbd_smb2_request_error(req,
3558 NT_STATUS_ACCESS_DENIED);
/* MS-SMB2 channel-sequence verification / replay counters. */
3563 status = smbd_smb2_request_dispatch_update_counts(req, call->modify);
3564 if (!NT_STATUS_IS_OK(status)) {
3565 return smbd_smb2_request_error(req, status);
3568 if (call->as_root) {
3569 SMB_ASSERT(call->fileid_ofs == 0);
3570 /* This call needs to be run as root */
3571 change_to_root_user();
3572 } else if (opcode != SMB2_OP_KEEPALIVE) {
3573 SMB_ASSERT(call->need_tcon);
3576 #define _INBYTES(_r) \
3577 iov_buflen(SMBD_SMB2_IN_HDR_IOV(_r), SMBD_SMB2_NUM_IOV_PER_REQ-1)
/* Per-opcode dispatch; each arm starts profiling then calls the
 * opcode's process function. */
3580 case SMB2_OP_NEGPROT:
3581 SMBPROFILE_IOBYTES_ASYNC_START(smb2_negprot, profile_p,
3582 req->profile, _INBYTES(req));
3583 return_value = smbd_smb2_request_process_negprot(req);
3586 case SMB2_OP_SESSSETUP:
3587 SMBPROFILE_IOBYTES_ASYNC_START(smb2_sesssetup, profile_p,
3588 req->profile, _INBYTES(req));
3589 return_value = smbd_smb2_request_process_sesssetup(req);
3592 case SMB2_OP_LOGOFF:
3593 SMBPROFILE_IOBYTES_ASYNC_START(smb2_logoff, profile_p,
3594 req->profile, _INBYTES(req));
3595 return_value = smbd_smb2_request_process_logoff(req);
3599 SMBPROFILE_IOBYTES_ASYNC_START(smb2_tcon, profile_p,
3600 req->profile, _INBYTES(req));
3601 return_value = smbd_smb2_request_process_tcon(req);
3605 SMBPROFILE_IOBYTES_ASYNC_START(smb2_tdis, profile_p,
3606 req->profile, _INBYTES(req));
3607 return_value = smbd_smb2_request_process_tdis(req);
3610 case SMB2_OP_CREATE:
3611 if (req->subreq == NULL) {
3612 SMBPROFILE_IOBYTES_ASYNC_START(smb2_create, profile_p,
3613 req->profile, _INBYTES(req));
3615 SMBPROFILE_IOBYTES_ASYNC_SET_BUSY(req->profile);
3617 return_value = smbd_smb2_request_process_create(req);
3621 SMBPROFILE_IOBYTES_ASYNC_START(smb2_close, profile_p,
3622 req->profile, _INBYTES(req));
3623 return_value = smbd_smb2_request_process_close(req);
3627 SMBPROFILE_IOBYTES_ASYNC_START(smb2_flush, profile_p,
3628 req->profile, _INBYTES(req));
3629 return_value = smbd_smb2_request_process_flush(req);
3633 SMBPROFILE_IOBYTES_ASYNC_START(smb2_read, profile_p,
3634 req->profile, _INBYTES(req));
3635 return_value = smbd_smb2_request_process_read(req);
3639 SMBPROFILE_IOBYTES_ASYNC_START(smb2_write, profile_p,
3640 req->profile, _INBYTES(req));
3641 return_value = smbd_smb2_request_process_write(req);
3645 SMBPROFILE_IOBYTES_ASYNC_START(smb2_lock, profile_p,
3646 req->profile, _INBYTES(req));
3647 return_value = smbd_smb2_request_process_lock(req);
3651 SMBPROFILE_IOBYTES_ASYNC_START(smb2_ioctl, profile_p,
3652 req->profile, _INBYTES(req));
3653 return_value = smbd_smb2_request_process_ioctl(req);
3656 case SMB2_OP_CANCEL:
3657 SMBPROFILE_IOBYTES_ASYNC_START(smb2_cancel, profile_p,
3658 req->profile, _INBYTES(req));
3659 return_value = smbd_smb2_request_process_cancel(req);
3660 SMBPROFILE_IOBYTES_ASYNC_END(req->profile, 0);
3663 * We don't need the request anymore cancel requests never
3666 * smbd_smb2_request_process_cancel() already called
3667 * DLIST_REMOVE(xconn->smb2.requests, req);
3673 case SMB2_OP_KEEPALIVE:
3674 SMBPROFILE_IOBYTES_ASYNC_START(smb2_keepalive, profile_p,
3675 req->profile, _INBYTES(req));
3676 return_value = smbd_smb2_request_process_keepalive(req);
3679 case SMB2_OP_QUERY_DIRECTORY:
3680 SMBPROFILE_IOBYTES_ASYNC_START(smb2_find, profile_p,
3681 req->profile, _INBYTES(req));
3682 return_value = smbd_smb2_request_process_query_directory(req);
3685 case SMB2_OP_NOTIFY:
3686 SMBPROFILE_IOBYTES_ASYNC_START(smb2_notify, profile_p,
3687 req->profile, _INBYTES(req));
3688 return_value = smbd_smb2_request_process_notify(req);
3691 case SMB2_OP_GETINFO:
3692 SMBPROFILE_IOBYTES_ASYNC_START(smb2_getinfo, profile_p,
3693 req->profile, _INBYTES(req));
3694 return_value = smbd_smb2_request_process_getinfo(req);
3697 case SMB2_OP_SETINFO:
3698 SMBPROFILE_IOBYTES_ASYNC_START(smb2_setinfo, profile_p,
3699 req->profile, _INBYTES(req));
3700 return_value = smbd_smb2_request_process_setinfo(req);
3704 SMBPROFILE_IOBYTES_ASYNC_START(smb2_break, profile_p,
3705 req->profile, _INBYTES(req));
3706 return_value = smbd_smb2_request_process_break(req);
/* Unknown opcode: reply with INVALID_PARAMETER. */
3710 return_value = smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
3713 return return_value;
/*
 * Counterpart to smbd_smb2_request_dispatch_update_counts(): when a
 * reply is sent, decrement the open's outstanding request counter —
 * request_count if the open's channel sequence/generation still match
 * this request, pre_request_count otherwise.
 *
 * NOTE(review): garbled extraction — interior lines (braces, early
 * returns) are missing; comments describe visible code only.
 */
3716 static void smbd_smb2_request_reply_update_counts(struct smbd_smb2_request *req)
3718 struct smbXsrv_connection *xconn = req->xconn;
3719 const uint8_t *inhdr;
3720 uint16_t channel_sequence;
3721 struct smbXsrv_open *op;
/* Only undo what dispatch actually counted. */
3723 if (!req->request_counters_updated) {
3727 req->request_counters_updated = false;
3729 if (xconn->protocol < PROTOCOL_SMB3_00) {
3733 if (req->compat_chain_fsp == NULL) {
3737 op = req->compat_chain_fsp->op;
3742 inhdr = SMBD_SMB2_IN_HDR_PTR(req);
3743 channel_sequence = SVAL(inhdr, SMB2_HDR_CHANNEL_SEQUENCE);
3745 if ((op->global->channel_sequence == channel_sequence) &&
3746 (op->global->channel_generation == req->channel_generation)) {
3747 SMB_ASSERT(op->request_count > 0);
3748 op->request_count -= 1;
3750 SMB_ASSERT(op->pre_request_count > 0);
3751 op->pre_request_count -= 1;
/*
 * Finalize and queue the response for the current request in the
 * compound chain: update counters, set up encryption (SMB2_TRANSFORM)
 * or signing, advance to the next compounded request if any, update
 * the preauth hash, and push the finished PDU onto the send queue.
 *
 * NOTE(review): garbled extraction — interior lines (braces, several
 * declarations/arguments, error paths) are missing; comments describe
 * visible code only.
 */
3755 static NTSTATUS smbd_smb2_request_reply(struct smbd_smb2_request *req)
3757 struct smbXsrv_connection *xconn = req->xconn;
3759 struct iovec *firsttf = SMBD_SMB2_IDX_TF_IOV(req,out,first_idx);
3760 struct iovec *outhdr = SMBD_SMB2_OUT_HDR_IOV(req);
3761 struct iovec *outdyn = SMBD_SMB2_OUT_DYN_IOV(req);
3766 TALLOC_FREE(req->async_te);
3768 /* MS-SMB2: 3.3.4.1 Sending Any Outgoing Message */
3769 smbd_smb2_request_reply_update_counts(req);
/* First encrypted reply in the chain: allocate and fill the
 * SMB2_TRANSFORM header and remember the encryption key. */
3771 if (req->do_encryption &&
3772 (firsttf->iov_len == 0) &&
3773 (!smb2_signing_key_valid(req->first_enc_key)) &&
3774 (req->session != NULL) &&
3775 smb2_signing_key_valid(req->session->global->encryption_key))
3777 struct smb2_signing_key *encryption_key =
3778 req->session->global->encryption_key;
3780 uint64_t session_id = req->session->global->session_wire_id;
3781 uint64_t nonce_high;
3784 status = smb2_get_new_nonce(req->session,
3787 if (!NT_STATUS_IS_OK(status)) {
3792 * We need to place the SMB2_TRANSFORM header before the
3797 * we need to remember the encryption key
3798 * and defer the signing/encryption until
3799 * we are sure that we do not change
3802 status = smb2_signing_key_copy(req,
3804 &req->first_enc_key);
3805 if (!NT_STATUS_IS_OK(status)) {
3809 tf = talloc_zero_array(req, uint8_t,
3812 return NT_STATUS_NO_MEMORY;
3815 SIVAL(tf, SMB2_TF_PROTOCOL_ID, SMB2_TF_MAGIC);
3816 SBVAL(tf, SMB2_TF_NONCE+0, nonce_low);
3817 SBVAL(tf, SMB2_TF_NONCE+8, nonce_high);
3818 SBVAL(tf, SMB2_TF_SESSION_ID, session_id);
3820 firsttf->iov_base = (void *)tf;
3821 firsttf->iov_len = SMB2_TF_HDR_SIZE;
/* Deferred signing of the previous compounded response, now that its
 * header is final. */
3824 if ((req->current_idx > SMBD_SMB2_NUM_IOV_PER_REQ) &&
3825 (smb2_signing_key_valid(req->last_sign_key)) &&
3826 (firsttf->iov_len == 0))
3828 int last_idx = req->current_idx - SMBD_SMB2_NUM_IOV_PER_REQ;
3829 struct iovec *lasthdr = SMBD_SMB2_IDX_HDR_IOV(req,out,last_idx);
3832 * As we are sure the header of the last request in the
3833 * compound chain will not change, we can to sign here
3834 * with the last signing key we remembered.
3836 status = smb2_signing_sign_pdu(req->last_sign_key,
3838 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
3839 if (!NT_STATUS_IS_OK(status)) {
3843 TALLOC_FREE(req->last_sign_key);
3845 SMBPROFILE_IOBYTES_ASYNC_END(req->profile,
3846 iov_buflen(outhdr, SMBD_SMB2_NUM_IOV_PER_REQ-1));
/* Advance to the next compounded request, if any remain. */
3848 req->current_idx += SMBD_SMB2_NUM_IOV_PER_REQ;
3850 if (req->current_idx < req->out.vector_count) {
3852 * We must process the remaining compound
3853 * SMB2 requests before any new incoming SMB2
3854 * requests. This is because incoming SMB2
3855 * requests may include a cancel for a
3856 * compound request we haven't processed
3859 struct tevent_immediate *im = tevent_create_immediate(req);
3861 return NT_STATUS_NO_MEMORY;
3864 if (req->do_signing && firsttf->iov_len == 0) {
3865 struct smbXsrv_session *x = req->session;
3866 struct smb2_signing_key *signing_key =
3867 smbd_smb2_signing_key(x, xconn, NULL);
3870 * we need to remember the signing key
3871 * and defer the signing until
3872 * we are sure that we do not change
3875 status = smb2_signing_key_copy(req,
3877 &req->last_sign_key);
3878 if (!NT_STATUS_IS_OK(status)) {
3884 * smbd_smb2_request_dispatch() will redo the impersonation.
3885 * So we use req->xconn->client->raw_ev_ctx instead
3886 * of req->ev_ctx here.
3888 tevent_schedule_immediate(im,
3889 req->xconn->client->raw_ev_ctx,
3890 smbd_smb2_request_dispatch_immediate,
3892 return NT_STATUS_OK;
3895 if (req->compound_related) {
3896 req->compound_related = false;
/* Whole chain complete: fix up the NBT length prefix. */
3899 ok = smb2_setup_nbt_length(req->out.vector, req->out.vector_count);
3901 return NT_STATUS_INVALID_PARAMETER_MIX;
3904 /* Set credit for these operations (zero credits if this
3905 is a final reply for an async operation). */
3906 smb2_calculate_credits(req, req);
3909 * now check if we need to sign the current response
3911 if (firsttf->iov_len == SMB2_TF_HDR_SIZE) {
3912 status = smb2_signing_encrypt_pdu(req->first_enc_key,
3914 req->out.vector_count - first_idx);
3915 if (!NT_STATUS_IS_OK(status)) {
3918 } else if (req->do_signing) {
3919 struct smbXsrv_session *x = req->session;
3920 struct smb2_signing_key *signing_key =
3921 smbd_smb2_signing_key(x, xconn, NULL);
3923 status = smb2_signing_sign_pdu(signing_key,
3925 SMBD_SMB2_NUM_IOV_PER_REQ - 1);
3926 if (!NT_STATUS_IS_OK(status)) {
3930 TALLOC_FREE(req->first_enc_key);
/* SMB 3.1.1 preauth integrity: fold request and response bytes into
 * the running SHA-512 hash via GnuTLS. */
3932 if (req->preauth != NULL) {
3933 gnutls_hash_hd_t hash_hnd = NULL;
3937 rc = gnutls_hash_init(&hash_hnd, GNUTLS_DIG_SHA512);
3939 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3941 rc = gnutls_hash(hash_hnd,
3942 req->preauth->sha512_value,
3943 sizeof(req->preauth->sha512_value));
3945 gnutls_hash_deinit(hash_hnd, NULL);
3946 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3948 for (i = 1; i < req->in.vector_count; i++) {
3949 rc = gnutls_hash(hash_hnd,
3950 req->in.vector[i].iov_base,
3951 req->in.vector[i].iov_len);
3953 gnutls_hash_deinit(hash_hnd, NULL);
3954 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3957 gnutls_hash_output(hash_hnd, req->preauth->sha512_value);
3959 rc = gnutls_hash(hash_hnd,
3960 req->preauth->sha512_value,
3961 sizeof(req->preauth->sha512_value));
3963 gnutls_hash_deinit(hash_hnd, NULL);
3964 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3966 for (i = 1; i < req->out.vector_count; i++) {
3967 rc = gnutls_hash(hash_hnd,
3968 req->out.vector[i].iov_base,
3969 req->out.vector[i].iov_len);
3971 gnutls_hash_deinit(hash_hnd, NULL);
3972 return gnutls_error_to_ntstatus(rc, NT_STATUS_HASH_NOT_SUPPORTED);
3976 gnutls_hash_deinit(hash_hnd, req->preauth->sha512_value);
3978 req->preauth = NULL;
3981 /* I am a sick, sick man... :-). Sendfile hack ... JRA. */
3982 if (req->out.vector_count < (2*SMBD_SMB2_NUM_IOV_PER_REQ) &&
3983 outdyn->iov_base == NULL && outdyn->iov_len != 0) {
3984 /* Dynamic part is NULL. Chop it off,
3985 We're going to send it via sendfile. */
3986 req->out.vector_count -= 1;
3990 * We're done with this request -
3991 * move it off the "being processed" queue.
3993 DLIST_REMOVE(xconn->smb2.requests, req);
/* Queue the finished response and flush the connection send queue. */
3995 req->queue_entry.mem_ctx = req;
3996 req->queue_entry.vector = req->out.vector;
3997 req->queue_entry.count = req->out.vector_count;
3998 req->queue_entry.xconn = xconn;
3999 DLIST_ADD_END(xconn->smb2.send_queue, &req->queue_entry);
4000 xconn->smb2.send_queue_len++;
4002 status = smbd_smb2_flush_send_queue(xconn);
4003 if (!NT_STATUS_IS_OK(status)) {
4007 return NT_STATUS_OK;
/* Forward declaration: start reading the next PDU from the transport. */
4010 static NTSTATUS smbd_smb2_request_next_incoming(struct smbXsrv_connection *xconn);
/*
 * Immediate-event trampoline used to dispatch a deferred (compound-related)
 * request on the client's raw event context, then re-arm reception of the
 * next incoming PDU.  Any failure terminates the whole connection.
 *
 * NOTE(review): this extract is line-elided — the private_data parameter,
 * local declarations and closing braces are not visible between the
 * numbered lines.
 */
4012 void smbd_smb2_request_dispatch_immediate(struct tevent_context *ctx,
4013 struct tevent_immediate *im,
4016 struct smbd_smb2_request *req = talloc_get_type_abort(private_data,
4017 struct smbd_smb2_request);
4018 struct smbXsrv_connection *xconn = req->xconn;
/* Dump the request iovecs at high debug level only. */
4023 if (DEBUGLEVEL >= 10) {
4024 DEBUG(10,("smbd_smb2_request_dispatch_immediate: idx[%d] of %d vectors\n",
4025 req->current_idx, req->in.vector_count));
4026 print_req_vectors(req);
4029 status = smbd_smb2_request_dispatch(req);
4030 if (!NT_STATUS_IS_OK(status)) {
4031 smbd_server_connection_terminate(xconn, nt_errstr(status));
/* Ask for the next request from the wire. */
4035 status = smbd_smb2_request_next_incoming(xconn);
4036 if (!NT_STATUS_IS_OK(status)) {
4037 smbd_server_connection_terminate(xconn, nt_errstr(status));
/*
 * Finish a successful SMB2 request: install the fixed body and optional
 * dynamic buffer into the response iovecs, write the NT status into the
 * header, fix up NextCommand/padding for compound responses, and hand the
 * request to smbd_smb2_request_reply().
 *
 * body: fixed response body (length must be >= 2 and even — the SMB2
 *       StructureSize rules).
 * dyn:  optional dynamic buffer (may be NULL).
 *
 * NOTE(review): line-elided extract — some locals (outhdr, mid, pad, old/new
 * dyn declarations) and braces are between the visible numbered lines.
 */
4042 NTSTATUS smbd_smb2_request_done_ex(struct smbd_smb2_request *req,
4044 DATA_BLOB body, DATA_BLOB *dyn,
4045 const char *location)
4048 struct iovec *outbody_v;
4049 struct iovec *outdyn_v;
4050 uint32_t next_command_ofs;
4053 outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
4054 mid = BVAL(outhdr, SMB2_HDR_MESSAGE_ID);
4056 DBG_DEBUG("mid [%"PRIu64"] idx[%d] status[%s] "
4057 "body[%u] dyn[%s:%u] at %s\n",
4061 (unsigned int)body.length,
4063 (unsigned int)(dyn ? dyn->length : 0),
/* StructureSize must be at least 2 bytes ... */
4066 if (body.length < 2) {
4067 return smbd_smb2_request_error(req, NT_STATUS_INTERNAL_ERROR);
/* ... and even (low bit is the "dynamic part present" convention). */
4070 if ((body.length % 2) != 0) {
4071 return smbd_smb2_request_error(req, NT_STATUS_INTERNAL_ERROR);
4074 outbody_v = SMBD_SMB2_OUT_BODY_IOV(req);
4075 outdyn_v = SMBD_SMB2_OUT_DYN_IOV(req);
4077 next_command_ofs = IVAL(outhdr, SMB2_HDR_NEXT_COMMAND);
4078 SIVAL(outhdr, SMB2_HDR_STATUS, NT_STATUS_V(status));
4080 outbody_v->iov_base = (void *)body.data;
4081 outbody_v->iov_len = body.length;
/* dyn != NULL branch vs. empty dynamic part (elided 'if'/'else' lines). */
4084 outdyn_v->iov_base = (void *)dyn->data;
4085 outdyn_v->iov_len = dyn->length;
4087 outdyn_v->iov_base = NULL;
4088 outdyn_v->iov_len = 0;
4092 * See if we need to recalculate the offset to the next response
4094 * Note that all responses may require padding (including the very last
4097 if (req->out.vector_count >= (2 * SMBD_SMB2_NUM_IOV_PER_REQ)) {
4098 next_command_ofs = SMB2_HDR_BODY;
4099 next_command_ofs += SMBD_SMB2_OUT_BODY_LEN(req);
4100 next_command_ofs += SMBD_SMB2_OUT_DYN_LEN(req);
/* Compound responses must be 8-byte aligned; pad if needed. */
4103 if ((next_command_ofs % 8) != 0) {
4104 size_t pad_size = 8 - (next_command_ofs % 8);
4105 if (SMBD_SMB2_OUT_DYN_LEN(req) == 0) {
4107 * if the dyn buffer is empty
4108 * we can use it to add padding
4112 pad = talloc_zero_array(req,
4115 return smbd_smb2_request_error(req,
4116 NT_STATUS_NO_MEMORY);
4119 outdyn_v->iov_base = (void *)pad;
4120 outdyn_v->iov_len = pad_size;
4123 * For now we copy the dynamic buffer
4124 * and add the padding to the new buffer
4131 old_size = SMBD_SMB2_OUT_DYN_LEN(req);
4132 old_dyn = SMBD_SMB2_OUT_DYN_PTR(req);
4134 new_size = old_size + pad_size;
4135 new_dyn = talloc_zero_array(req,
4137 if (new_dyn == NULL) {
4138 return smbd_smb2_request_error(req,
4139 NT_STATUS_NO_MEMORY);
4142 memcpy(new_dyn, old_dyn, old_size);
4143 memset(new_dyn + old_size, 0, pad_size);
4145 outdyn_v->iov_base = (void *)new_dyn;
4146 outdyn_v->iov_len = new_size;
4148 next_command_ofs += pad_size;
/* Last response in the compound chain gets NextCommand = 0. */
4151 if ((req->current_idx + SMBD_SMB2_NUM_IOV_PER_REQ) >= req->out.vector_count) {
4152 SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, 0);
4154 SIVAL(outhdr, SMB2_HDR_NEXT_COMMAND, next_command_ofs);
4156 return smbd_smb2_request_reply(req);
/*
 * Build and send an SMB2 error response (9-byte error body, optional error
 * info blob).  If a recvfile/receivefile write left unread bytes on the
 * socket, drain them first so the stream stays in sync.
 *
 * NOTE(review): line-elided extract — parameter list (status, info),
 * locals (body, ret, error) and several braces are not visible here.
 */
4159 NTSTATUS smbd_smb2_request_error_ex(struct smbd_smb2_request *req,
4161 uint8_t error_context_count,
4163 const char *location)
4165 struct smbXsrv_connection *xconn = req->xconn;
4168 uint8_t *outhdr = SMBD_SMB2_OUT_HDR_PTR(req);
4169 size_t unread_bytes = smbd_smb2_unread_bytes(req);
4171 DBG_NOTICE("smbd_smb2_request_error_ex: idx[%d] status[%s] |%s| "
4172 "at %s\n", req->current_idx, nt_errstr(status),
4173 info ? " +info" : "", location);
4176 /* Recvfile error. Drain incoming socket. */
4180 ret = drain_socket(xconn->transport.sock, unread_bytes);
4181 if (ret != unread_bytes) {
/* Short drain: pick the most useful error to log. */
4185 error = NT_STATUS_IO_DEVICE_ERROR;
4187 error = map_nt_error_from_unix_common(errno);
4190 DEBUG(2, ("Failed to drain %u bytes from SMB2 socket: "
4191 "ret[%u] errno[%d] => %s\n",
4192 (unsigned)unread_bytes,
4193 (unsigned)ret, errno, nt_errstr(error)));
/* The fixed error body lives directly after the output header. */
4198 body.data = outhdr + SMB2_HDR_BODY;
4200 SSVAL(body.data, 0, 9);
4201 SCVAL(body.data, 2, error_context_count);
4204 SIVAL(body.data, 0x04, info->length);
4206 /* Allocated size of req->out.vector[i].iov_base
4207 * *MUST BE* OUTVEC_ALLOC_SIZE. So we have room for
4208 * 1 byte without having to do an alloc.
4211 info->data = ((uint8_t *)outhdr) +
4212 OUTVEC_ALLOC_SIZE - 1;
4214 SCVAL(info->data, 0, 0);
4218 * Note: Even if there is an error, continue to process the request.
4222 return smbd_smb2_request_done_ex(req, status, body, info, __location__);
/*
 * Per-break-PDU state for smbd_smb2_break_send(): the NBT length prefix,
 * a fully server-initiated SMB2 header, and the iovec array (NBT header
 * plus the standard per-request iovecs) queued on the send queue.
 */
4225 struct smbd_smb2_break_state {
4226 struct tevent_req *req;
4227 struct smbd_smb2_send_queue queue_entry;
4228 uint8_t nbt_hdr[NBT_HDR_SIZE];
4229 uint8_t hdr[SMB2_HDR_BODY];
4230 struct iovec vector[1+SMBD_SMB2_NUM_IOV_PER_REQ];
/*
 * Async sender for an unsolicited server->client break PDU (oplock or
 * lease break).  Builds an SMB2 header with MessageId = UINT64_MAX and
 * SMB2_HDR_FLAG_REDIRECT, queues it on xconn's send queue with a TCP-ack
 * tracking entry, and flushes the queue.
 *
 * NOTE(review): line-elided extract — the body_len parameter, some locals
 * and braces sit between the visible numbered lines.
 */
4233 static struct tevent_req *smbd_smb2_break_send(TALLOC_CTX *mem_ctx,
4234 struct tevent_context *ev,
4235 struct smbXsrv_connection *xconn,
4236 uint64_t session_id,
4237 const uint8_t *body,
4240 struct tevent_req *req = NULL;
4241 struct smbd_smb2_break_state *state = NULL;
4245 req = tevent_req_create(mem_ctx, &state,
4246 struct smbd_smb2_break_state);
4252 tevent_req_defer_callback(req, ev);
/* Hand-build the server-initiated SMB2 header. */
4254 SIVAL(state->hdr, 0, SMB2_MAGIC);
4255 SSVAL(state->hdr, SMB2_HDR_LENGTH, SMB2_HDR_BODY);
4256 SSVAL(state->hdr, SMB2_HDR_EPOCH, 0);
4257 SIVAL(state->hdr, SMB2_HDR_STATUS, 0);
4258 SSVAL(state->hdr, SMB2_HDR_OPCODE, SMB2_OP_BREAK);
4259 SSVAL(state->hdr, SMB2_HDR_CREDIT, 0);
4260 SIVAL(state->hdr, SMB2_HDR_FLAGS, SMB2_HDR_FLAG_REDIRECT);
4261 SIVAL(state->hdr, SMB2_HDR_NEXT_COMMAND, 0);
/* MessageId UINT64_MAX marks a server-initiated (unsolicited) PDU. */
4262 SBVAL(state->hdr, SMB2_HDR_MESSAGE_ID, UINT64_MAX);
4263 SIVAL(state->hdr, SMB2_HDR_PID, 0);
4264 SIVAL(state->hdr, SMB2_HDR_TID, 0);
4265 SBVAL(state->hdr, SMB2_HDR_SESSION_ID, session_id);
4266 memset(state->hdr+SMB2_HDR_SIGNATURE, 0, 16);
4268 state->vector[0] = (struct iovec) {
4269 .iov_base = state->nbt_hdr,
4270 .iov_len = sizeof(state->nbt_hdr)
4273 state->vector[1+SMBD_SMB2_TF_IOV_OFS] = (struct iovec) {
4278 state->vector[1+SMBD_SMB2_HDR_IOV_OFS] = (struct iovec) {
4279 .iov_base = state->hdr,
4280 .iov_len = sizeof(state->hdr)
4283 state->vector[1+SMBD_SMB2_BODY_IOV_OFS] = (struct iovec) {
4284 .iov_base = discard_const_p(uint8_t, body),
4285 .iov_len = body_len,
4289 * state->vector[1+SMBD_SMB2_DYN_IOV_OFS] is NULL by talloc_zero above
4292 ok = smb2_setup_nbt_length(state->vector,
4293 1 + SMBD_SMB2_NUM_IOV_PER_REQ);
4295 tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER_MIX);
4296 return tevent_req_post(req, ev);
4300 * We require TCP acks for this PDU to the client!
4301 * We want 5 retransmissions and timeout when the
4302 * retransmission timeout (rto) passed 6 times.
4304 * required_acked_bytes gets a dummy value of
4305 * UINT64_MAX, as long it's in xconn->smb2.send_queue,
4306 * it'll get the real value when it's moved to
4309 * state->queue_entry.ack.req gets completed with
4310 * 1. tevent_req_done(), when all bytes are acked.
4311 * 2a. tevent_req_nterror(NT_STATUS_IO_TIMEOUT), when
4312 * the timeout expired before all bytes were acked.
4313 * 2b. tevent_req_nterror(transport_error), when the
4314 * connection got a disconnect from the kernel.
4316 state->queue_entry.ack.timeout =
4317 timeval_current_ofs_usec(xconn->ack.rto_usecs * 6);
4318 state->queue_entry.ack.required_acked_bytes = UINT64_MAX;
4319 state->queue_entry.ack.req = req;
4320 state->queue_entry.mem_ctx = state;
4321 state->queue_entry.vector = state->vector;
4322 state->queue_entry.count = ARRAY_SIZE(state->vector);
4323 state->queue_entry.xconn = xconn;
4324 DLIST_ADD_END(xconn->smb2.send_queue, &state->queue_entry);
4325 xconn->smb2.send_queue_len++;
4327 status = smbd_smb2_flush_send_queue(xconn);
4328 if (tevent_req_nterror(req, status)) {
4329 return tevent_req_post(req, ev);
/* Completion side of smbd_smb2_break_send(): just map the tevent_req
 * state to an NTSTATUS. */
4335 static NTSTATUS smbd_smb2_break_recv(struct tevent_req *req)
4337 return tevent_req_simple_recv_ntstatus(req);
/*
 * One pending oplock/lease break per client, kept on client->pending_breaks.
 * session_id == 0 means a lease break (not bound to one session);
 * last_channel_id tracks which connection the break was last sent on so
 * retries can move to the next channel.  The oplock/lease buffers hold the
 * pre-marshalled break body (0x18 / 0x2c bytes).
 */
4340 struct smbXsrv_pending_break {
4341 struct smbXsrv_pending_break *prev, *next;
4342 struct smbXsrv_client *client;
4343 bool disable_oplock_break_retries;
4344 uint64_t session_id;
4345 uint64_t last_channel_id;
4348 uint8_t oplock[0x18];
4349 uint8_t lease[0x2c];
4354 static void smbXsrv_pending_break_done(struct tevent_req *subreq);
/*
 * Allocate a pending-break tracker on the client talloc context.
 * The "smb2 disable oplock break retry" smb.conf option is snapshotted
 * here so later retry decisions are stable.  Returns NULL on OOM
 * (NULL check elided in this extract).
 */
4356 static struct smbXsrv_pending_break *smbXsrv_pending_break_create(
4357 struct smbXsrv_client *client,
4358 uint64_t session_id)
4360 struct smbXsrv_pending_break *pb = NULL;
4362 pb = talloc_zero(client, struct smbXsrv_pending_break);
4366 pb->client = client;
4367 pb->session_id = session_id;
4368 pb->disable_oplock_break_retries = lp_smb2_disable_oplock_break_retry();
4373 static NTSTATUS smbXsrv_pending_break_submit(struct smbXsrv_pending_break *pb);
/*
 * Put the pending break on the client's list, notify the pending-breaks
 * watcher, then immediately try to submit the break PDU on a connection.
 */
4375 static NTSTATUS smbXsrv_pending_break_schedule(struct smbXsrv_pending_break *pb)
4377 struct smbXsrv_client *client = pb->client;
4380 DLIST_ADD_END(client->pending_breaks, pb);
4381 status = smbXsrv_client_pending_breaks_updated(client);
4382 if (!NT_STATUS_IS_OK(status)) {
4386 status = smbXsrv_pending_break_submit(pb);
4387 if (!NT_STATUS_IS_OK(status)) {
4391 return NT_STATUS_OK;
/*
 * Pick a connection/channel and send the break PDU on it.
 *
 * Channel selection: for an oplock break (session_id != 0) only channels
 * the session is bound to are eligible; for a lease break any healthy
 * connection is.  Channels are walked in ascending channel_id order past
 * last_channel_id, so each retry uses the next channel.  Returns
 * NT_STATUS_ABANDONED when no further channel is available (or when the
 * Windows-compatible "no retry" mode is enabled and we already sent once).
 *
 * NOTE(review): line-elided extract — several 'continue'/'break' lines and
 * closing braces of the for-loop are not visible here.
 */
4394 static NTSTATUS smbXsrv_pending_break_submit(struct smbXsrv_pending_break *pb)
4396 struct smbXsrv_client *client = pb->client;
4397 struct smbXsrv_session *session = NULL;
4398 struct smbXsrv_connection *xconn = NULL;
4399 struct smbXsrv_connection *oplock_xconn = NULL;
4400 struct tevent_req *subreq = NULL;
/* session_id != 0 => oplock break bound to one session. */
4403 if (pb->session_id != 0) {
4404 status = get_valid_smbXsrv_session(client,
4407 if (NT_STATUS_EQUAL(status, NT_STATUS_USER_SESSION_DELETED)) {
4408 return NT_STATUS_ABANDONED;
4410 if (!NT_STATUS_IS_OK(status)) {
4414 if (pb->last_channel_id != 0) {
4416 * This is what current Windows servers
4417 * do, they don't retry on all available
4418 * channels. They only use the last channel.
4420 * But it doesn't match the specification in
4421 * [MS-SMB2] "3.3.4.6 Object Store Indicates an
4424 * Per default disable_oplock_break_retries is false
4425 * and we behave like the specification.
4427 if (pb->disable_oplock_break_retries) {
4428 return NT_STATUS_ABANDONED;
4433 for (xconn = client->connections; xconn != NULL; xconn = xconn->next) {
/* Skip connections that already hit a transport error. */
4434 if (!NT_STATUS_IS_OK(xconn->transport.status)) {
4438 if (xconn->channel_id == 0) {
4440 * non-multichannel case
4445 if (session != NULL) {
4446 struct smbXsrv_channel_global0 *c = NULL;
4449 * Having a session means we're handling
4450 * an oplock break and we only need to
4451 * use channels available on the
4454 status = smbXsrv_session_find_channel(session, xconn, &c);
4455 if (!NT_STATUS_IS_OK(status)) {
4460 * This is what current Windows servers
4461 * do, they don't retry on all available
4462 * channels. They only use the last channel.
4464 * But it doesn't match the specification
4465 * in [MS-SMB2] "3.3.4.6 Object Store Indicates an
4468 * Per default disable_oplock_break_retries is false
4469 * and we behave like the specification.
4471 if (pb->disable_oplock_break_retries) {
4472 oplock_xconn = xconn;
/* First channel with an id beyond the last one used wins. */
4477 if (xconn->channel_id > pb->last_channel_id) {
4485 if (xconn == NULL) {
4486 xconn = oplock_xconn;
4489 if (xconn == NULL) {
4491 * If there's no remaining connection available
4492 * tell the caller to stop...
4494 return NT_STATUS_ABANDONED;
4497 pb->last_channel_id = xconn->channel_id;
4499 subreq = smbd_smb2_break_send(pb,
4505 if (subreq == NULL) {
4506 return NT_STATUS_NO_MEMORY;
4508 tevent_req_set_callback(subreq,
4509 smbXsrv_pending_break_done,
4512 return NT_STATUS_OK;
/*
 * Completion callback for one break-send attempt.  On failure, resubmit on
 * the next channel; NT_STATUS_ABANDONED means no channel is left and the
 * break is silently dropped.  On success (or abandonment) the entry is
 * removed from the pending list and the watcher is notified; any hard
 * error disconnects the whole client.
 */
4515 static void smbXsrv_pending_break_done(struct tevent_req *subreq)
4517 struct smbXsrv_pending_break *pb =
4518 tevent_req_callback_data(subreq,
4519 struct smbXsrv_pending_break);
4520 struct smbXsrv_client *client = pb->client;
4523 status = smbd_smb2_break_recv(subreq);
4524 TALLOC_FREE(subreq);
4525 if (!NT_STATUS_IS_OK(status)) {
/* Try the next available channel. */
4526 status = smbXsrv_pending_break_submit(pb);
4527 if (NT_STATUS_EQUAL(status, NT_STATUS_ABANDONED)) {
4529 * If there's no remaining connection
4530 * there's no need to send a break again.
4534 if (!NT_STATUS_IS_OK(status)) {
4535 smbd_server_disconnect_client(client, nt_errstr(status));
4542 DLIST_REMOVE(client->pending_breaks, pb);
4545 status = smbXsrv_client_pending_breaks_updated(client);
4546 if (!NT_STATUS_IS_OK(status)) {
4547 smbd_server_disconnect_client(client, nt_errstr(status));
/*
 * Marshal an SMB2 OPLOCK_BREAK notification body (StructureSize 0x18,
 * persistent/volatile file id from the open) and schedule it for sending.
 */
4552 NTSTATUS smbd_smb2_send_oplock_break(struct smbXsrv_client *client,
4553 struct smbXsrv_open *op,
4554 uint8_t oplock_level)
4556 struct smbXsrv_pending_break *pb = NULL;
4557 uint8_t *body = NULL;
4559 pb = smbXsrv_pending_break_create(client,
4562 return NT_STATUS_NO_MEMORY;
4564 pb->body_len = sizeof(pb->body.oplock);
4565 body = pb->body.oplock;
4567 SSVAL(body, 0x00, pb->body_len);
4568 SCVAL(body, 0x02, oplock_level);
4569 SCVAL(body, 0x03, 0); /* reserved */
4570 SIVAL(body, 0x04, 0); /* reserved */
4571 SBVAL(body, 0x08, op->global->open_persistent_id);
4572 SBVAL(body, 0x10, op->global->open_volatile_id);
4574 return smbXsrv_pending_break_schedule(pb);
/*
 * Marshal an SMB2 LEASE_BREAK notification body (StructureSize 0x2c) and
 * schedule it.  session_id 0 — a lease break is not tied to one session,
 * so any channel of the client may carry it.
 */
4577 NTSTATUS smbd_smb2_send_lease_break(struct smbXsrv_client *client,
4579 uint32_t lease_flags,
4580 struct smb2_lease_key *lease_key,
4581 uint32_t current_lease_state,
4582 uint32_t new_lease_state)
4584 struct smbXsrv_pending_break *pb = NULL;
4585 uint8_t *body = NULL;
4587 pb = smbXsrv_pending_break_create(client,
4588 0); /* no session_id */
4590 return NT_STATUS_NO_MEMORY;
4592 pb->body_len = sizeof(pb->body.lease);
4593 body = pb->body.lease;
4595 SSVAL(body, 0x00, pb->body_len);
4596 SSVAL(body, 0x02, new_epoch);
4597 SIVAL(body, 0x04, lease_flags);
4598 SBVAL(body, 0x08, lease_key->data[0]);
4599 SBVAL(body, 0x10, lease_key->data[1]);
4600 SIVAL(body, 0x18, current_lease_state);
4601 SIVAL(body, 0x1c, new_lease_state);
4602 SIVAL(body, 0x20, 0); /* BreakReason, MUST be 0 */
4603 SIVAL(body, 0x24, 0); /* AccessMaskHint, MUST be 0 */
4604 SIVAL(body, 0x28, 0); /* ShareMaskHint, MUST be 0 */
4606 return smbXsrv_pending_break_schedule(pb);
/*
 * Decide whether the partially-read PDU in state->pktbuf is a plain
 * (unencrypted, unsigned, uncompounded) SMB2 WRITE to a regular disk file,
 * i.e. eligible for the recvfile/receivefile fast path.  Returns false for
 * anything else; the false cases all fall back to the normal read path.
 *
 * NOTE(review): line-elided extract — the 'return false;' lines of the
 * early-out branches and the final 'return true;' are not visible here.
 */
4609 static bool is_smb2_recvfile_write(struct smbd_smb2_request_read_state *state)
4613 uint64_t file_id_persistent;
4614 uint64_t file_id_volatile;
4615 struct smbXsrv_open *op = NULL;
4616 struct files_struct *fsp = NULL;
4617 const uint8_t *body = NULL;
4620 * This is only called with a pktbuf
4621 * of at least SMBD_SMB2_SHORT_RECEIVEFILE_WRITE_LEN
4625 if (IVAL(state->pktbuf, 0) == SMB2_TF_MAGIC) {
4626 /* Transform header. Cannot recvfile. */
4629 if (IVAL(state->pktbuf, 0) != SMB2_MAGIC) {
4630 /* Not SMB2. Normal error path will cope. */
4633 if (SVAL(state->pktbuf, 4) != SMB2_HDR_BODY) {
4634 /* Not SMB2. Normal error path will cope. */
4637 if (SVAL(state->pktbuf, SMB2_HDR_OPCODE) != SMB2_OP_WRITE) {
4638 /* Needs to be a WRITE. */
4641 if (IVAL(state->pktbuf, SMB2_HDR_NEXT_COMMAND) != 0) {
4642 /* Chained. Cannot recvfile. */
4645 flags = IVAL(state->pktbuf, SMB2_HDR_FLAGS);
4646 if (flags & SMB2_HDR_FLAG_CHAINED) {
4647 /* Chained. Cannot recvfile. */
4650 if (flags & SMB2_HDR_FLAG_SIGNED) {
4651 /* Signed. Cannot recvfile. */
4655 body = &state->pktbuf[SMB2_HDR_BODY];
/* FileId is at offsets 0x10/0x18 in the SMB2 WRITE request body. */
4657 file_id_persistent = BVAL(body, 0x10);
4658 file_id_volatile = BVAL(body, 0x18);
4660 status = smb2srv_open_lookup(state->req->xconn,
4665 if (!NT_STATUS_IS_OK(status)) {
/* Only plain disk files qualify: no IPC, no printers, no streams. */
4673 if (fsp->conn == NULL) {
4677 if (IS_IPC(fsp->conn)) {
4680 if (IS_PRINT(fsp->conn)) {
4683 if (fsp_is_alternate_stream(fsp)) {
4687 DEBUG(10,("Doing recvfile write len = %u\n",
4688 (unsigned int)(state->pktfull - state->pktlen)));
/* Submission hook required by the samba_io_uring API; nothing to do at
 * submission time for these requests. */
4693 static void smbd_smb2_noop_submission_io_uring(struct samba_io_uring *ring,
4694 struct samba_io_uring_submission *submission,
4695 void *submission_private)
/* Forward declaration: completion handler for the recvmsg SQE below. */
4699 static void smbd_smb2_recv_completion_io_uring(struct samba_io_uring_completion *completion,
4700 void *completion_private,
4701 const struct io_uring_cqe *cqe);
/*
 * Queue an io_uring recvmsg SQE covering the remaining read vector of the
 * current incoming-PDU state.  IOSQE_ASYNC is set for large reads (past
 * the configured async threshold) to push the work to a kernel worker.
 * Panics if called with an empty vector (state->count == 0).
 *
 * NOTE(review): line-elided extract — some msghdr/recvmsg argument lines
 * and braces are not visible here.
 */
4703 static NTSTATUS smbd_smb2_request_next_uring_recvmsg(struct smbXsrv_connection *xconn)
4705 struct smbd_smb2_request_read_state *state = xconn->smb2.request_read_state;
4706 struct samba_io_uring_submission *submission = NULL;
4708 ZERO_STRUCT(state->io_uring);
4710 if (state->count > 0) {
4711 unsigned recvmsg_flags = MSG_WAITALL;
4712 struct io_uring_sqe *sqe = NULL;
4715 blen = iov_buflen(state->vector, state->count);
4716 SMB_ASSERT(blen >= 0);
4718 state->msg = (struct msghdr) {
4719 .msg_iov = state->vector,
4720 .msg_iovlen = state->count,
4723 samba_io_uring_completion_prepare(&state->io_uring.completion,
4724 smbd_smb2_recv_completion_io_uring,
4726 samba_io_uring_submission_prepare(&state->io_uring.submission,
4727 smbd_smb2_noop_submission_io_uring,
4729 &state->io_uring.completion);
4730 sqe = samba_io_uring_submission_sqe(&state->io_uring.submission);
4731 io_uring_prep_recvmsg(sqe,
4732 xconn->transport.sock,
/* Large receives run in an async kernel worker. */
4735 if (blen >= xconn->smb2.recv_io_uring.async_threshhold) {
4736 sqe->flags |= IOSQE_ASYNC;
4739 submission = &state->io_uring.submission;
4741 smb_panic(__location__);
4744 samba_io_uring_queue_submissions(xconn->smb2.recv_io_uring.uring,
4747 return NT_STATUS_OK;
/* Forward declaration: consume 'n' received bytes of the incoming PDU. */
4750 static NTSTATUS smbd_smb2_advance_incoming(struct smbXsrv_connection *xconn, size_t n);
/*
 * io_uring completion for the recvmsg SQE: map the cqe result to an
 * NTSTATUS (0 => EOF, negative => -errno), feed the received bytes into
 * smbd_smb2_advance_incoming(), and either finish (OK), re-arm another
 * recvmsg (RETRY/PENDING), or terminate the connection.
 */
4752 static void smbd_smb2_recv_completion_io_uring(struct samba_io_uring_completion *completion,
4753 void *completion_private,
4754 const struct io_uring_cqe *cqe)
4756 struct smbXsrv_connection *xconn =
4757 talloc_get_type_abort(completion_private,
4758 struct smbXsrv_connection);
4759 NTSTATUS status = NT_STATUS_OK;
4762 samba_io_uring_completion_assert_unused(completion);
4766 status = NT_STATUS_OK;
4767 } else if (ret == 0) {
4768 /* propagate end of file */
4769 status = NT_STATUS_END_OF_FILE;
4770 } else if (ret < 0) {
/* cqe carries -errno on failure. */
4771 status = map_nt_error_from_unix_common(-ret);
4773 if (!NT_STATUS_IS_OK(status)) {
4774 smbd_server_connection_terminate(xconn, nt_errstr(status));
4778 status = smbd_smb2_advance_incoming(xconn, ret);
4779 if (NT_STATUS_IS_OK(status)) {
/* PENDING means "need more bytes" — treat like RETRY below. */
4782 if (NT_STATUS_EQUAL(status, NT_STATUS_PENDING)) {
4783 status = NT_STATUS_RETRY;
4785 if (!NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
4786 smbd_server_connection_terminate(xconn, nt_errstr(status));
4791 * we need to receive more data.
4793 status = smbd_smb2_request_next_uring_recvmsg(xconn);
4794 if (!NT_STATUS_IS_OK(status)) {
4795 smbd_server_connection_terminate(xconn, nt_errstr(status));
/*
 * Arm reception of the next incoming PDU: allocate a fresh request,
 * reset the read state to the 4-byte NBT length prefix, and either queue
 * an io_uring recvmsg or mark the transport fd readable for the tevent
 * path.  Backs off (returns OK without arming) while the send queue is
 * longer than max(1, credits.max/16) entries.
 */
4802 static NTSTATUS smbd_smb2_request_next_incoming(struct smbXsrv_connection *xconn)
4804 struct smbd_smb2_request_read_state *state = xconn->smb2.request_read_state;
4805 struct smbd_smb2_request *req = NULL;
4806 size_t max_send_queue_len;
4807 size_t cur_send_queue_len;
4809 if (!NT_STATUS_IS_OK(xconn->transport.status)) {
4811 * we're not supposed to do any io
4813 return NT_STATUS_OK;
4816 if (state->req != NULL) {
4818 * if there is already a tstream_readv_pdu
4819 * pending, we are done.
4821 return NT_STATUS_OK;
4824 max_send_queue_len = MAX(1, xconn->smb2.credits.max/16);
4825 cur_send_queue_len = xconn->smb2.send_queue_len;
4827 if (cur_send_queue_len > max_send_queue_len) {
4829 * if we have a lot of requests to send,
4830 * we wait until they are on the wire until we
4831 * ask for the next request.
4833 return NT_STATUS_OK;
4836 /* ask for the next request */
4837 req = smbd_smb2_request_allocate(xconn);
4839 return NT_STATUS_NO_MEMORY;
/* Reset the read state: first read just the NBT length prefix. */
4841 *state = (struct smbd_smb2_request_read_state) {
4843 .min_recv_size = lp_min_receive_file_size(),
4845 [0] = (struct iovec) {
4846 .iov_base = (void *)state->hdr.nbt,
4847 .iov_len = NBT_HDR_SIZE,
4850 .vector = state->_vector,
/* io_uring receive path if available, else classic tevent fd. */
4854 if (xconn->smb2.recv_io_uring.uring != NULL) {
4855 return smbd_smb2_request_next_uring_recvmsg(xconn);
4858 TEVENT_FD_READABLE(xconn->transport.fde);
4860 return NT_STATUS_OK;
/*
 * Entry point for the first SMB2 negprot PDU on a connection (including
 * the SMB1->SMB2 upgrade path): initialize SMB2 state, refresh the
 * pending-break ack checker for the new connection, then create,
 * validate, set up and dispatch the request, and finally arm reception
 * of the next PDU.  Any failure terminates the connection (or, for the
 * pending-breaks update, the whole client).
 */
4863 NTSTATUS smbd_smb2_process_negprot(struct smbXsrv_connection *xconn,
4864 uint64_t expected_seq_low,
4865 const uint8_t *inpdu, size_t size)
4867 struct smbd_server_connection *sconn = xconn->client->sconn;
4869 struct smbd_smb2_request *req = NULL;
4871 DEBUG(10,("smbd_smb2_first_negprot: packet length %u\n",
4872 (unsigned int)size));
4874 status = smbd_initialize_smb2(xconn, expected_seq_low);
4875 if (!NT_STATUS_IS_OK(status)) {
4876 smbd_server_connection_terminate(xconn, nt_errstr(status));
4881 * If a new connection joins the process, when we're
4882 * already in a "pending break cycle", we need to
4883 * turn on the ack checker on the new connection.
4885 status = smbXsrv_client_pending_breaks_updated(xconn->client);
4886 if (!NT_STATUS_IS_OK(status)) {
4888 * If there's a problem, we disconnect the whole
4889 * client with all connections here!
4891 * Instead of just the new connection.
4893 smbd_server_disconnect_client(xconn->client, nt_errstr(status));
4897 status = smbd_smb2_request_create(xconn, inpdu, size, &req);
4898 if (!NT_STATUS_IS_OK(status)) {
4899 smbd_server_connection_terminate(xconn, nt_errstr(status));
4903 status = smbd_smb2_request_validate(req);
4904 if (!NT_STATUS_IS_OK(status)) {
4905 smbd_server_connection_terminate(xconn, nt_errstr(status));
4909 status = smbd_smb2_request_setup_out(req);
4910 if (!NT_STATUS_IS_OK(status)) {
4911 smbd_server_connection_terminate(xconn, nt_errstr(status));
4917 * this was already counted at the SMB1 layer =>
4918 * smbd_smb2_request_dispatch() should not count it twice.
4920 if (profile_p->values.request_stats.count > 0) {
4921 profile_p->values.request_stats.count--;
4924 status = smbd_smb2_request_dispatch(req);
4925 if (!NT_STATUS_IS_OK(status)) {
4926 smbd_server_connection_terminate(xconn, nt_errstr(status));
4930 status = smbd_smb2_request_next_incoming(xconn);
4931 if (!NT_STATUS_IS_OK(status)) {
4932 smbd_server_connection_terminate(xconn, nt_errstr(status));
4936 sconn->num_requests++;
4937 return NT_STATUS_OK;
/*
 * Classify a socket syscall result: distinguish retryable errnos
 * (EINTR, EINPROGRESS, EAGAIN/EWOULDBLOCK, and ENOMEM on some systems)
 * from hard errors.  NOTE(review): line-elided extract — the 'retry'
 * out-parameter assignments and return statements between the visible
 * checks are not shown here.
 */
4940 static int socket_error_from_errno(int ret,
4954 if (sys_errno == 0) {
4958 if (sys_errno == EINTR) {
4963 if (sys_errno == EINPROGRESS) {
4968 if (sys_errno == EAGAIN) {
4973 /* ENOMEM is retryable on Solaris/illumos, and possibly other systems. */
4974 if (sys_errno == ENOMEM) {
/* Only needed on platforms where EWOULDBLOCK is a distinct value. */
4980 #if EWOULDBLOCK != EAGAIN
4981 if (sys_errno == EWOULDBLOCK) {
/*
 * Account for 'n' bytes put on the wire for send-queue entry *_e:
 * bump the unacked-bytes counter, advance the iovec (or the pending
 * VFS splice size), and when the entry is fully sent either free it or —
 * if it carries an ack tracker — move it onto the ack queue with the
 * byte watermark it must reach.  Returns NT_STATUS_RETRY while the entry
 * still has data to send.
 *
 * NOTE(review): line-elided extract — the if/else structure around the
 * iov_advance vs vfs_io_size branches is partially hidden.
 */
4991 static NTSTATUS smbd_smb2_advance_send_queue(struct smbXsrv_connection *xconn,
4992 struct smbd_smb2_send_queue **_e,
4995 struct smbd_smb2_send_queue *e = *_e;
4998 xconn->ack.unacked_bytes += n;
5000 DBG_DEBUG("n[%zu] e[%p]->io_uring.pending_snd[%u] e->io_uring.pending_zc[%zu]\n",
5001 n, e, e->io_uring.pending_snd, e->io_uring.pending_zc);
5002 debug_iovec(e->vector, e->count);
5005 ok = iov_advance(&e->vector, &e->count, n);
5007 return NT_STATUS_INTERNAL_ERROR;
5009 } else if (e->vfs_io_size > 0) {
/* A splice must never report more bytes than were outstanding. */
5010 if (n > e->vfs_io_size) {
5011 return NT_STATUS_INTERNAL_ERROR;
5013 e->vfs_io_size -= n;
5017 return NT_STATUS_RETRY;
5018 } else if (e->vfs_io_size > 0) {
5019 return NT_STATUS_RETRY;
/* Entry fully sent: take it off the send queue. */
5022 xconn->smb2.send_queue_len--;
5023 DLIST_REMOVE(xconn->smb2.send_queue, e);
5025 if (e->ack.req == NULL) {
5027 talloc_free(e->mem_ctx);
5028 return NT_STATUS_OK;
/* Ack-tracked entry: it is "acked" once unacked_bytes drops below this. */
5031 e->ack.required_acked_bytes = xconn->ack.unacked_bytes;
5032 DLIST_ADD_END(xconn->ack.queue, e);
5034 return NT_STATUS_OK;
/*
 * Classic sendmsg()-based flush of the send queue.  Sendfile entries are
 * flattened into the sendfile header blob and handed to the sendfile path
 * by freeing the entry (its destructor triggers the actual sendfile);
 * everything else goes out via non-blocking sendmsg, with partial writes
 * advanced through smbd_smb2_advance_send_queue().  Returns
 * NT_STATUS_MORE_PROCESSING_REQUIRED once the queue is drained so the
 * caller can restart reads.
 *
 * NOTE(review): line-elided extract — several locals (status, ret, err,
 * retry, size, buf, i), memcpy lines and braces are hidden between the
 * numbered lines.
 */
5037 static NTSTATUS smbd_smb2_flush_with_sendmsg(struct smbXsrv_connection *xconn)
5044 if (xconn->smb2.send_queue == NULL) {
5045 TEVENT_FD_NOT_WRITEABLE(xconn->transport.fde);
5046 return NT_STATUS_OK;
5049 while (xconn->smb2.send_queue != NULL) {
5050 struct smbd_smb2_send_queue *e = xconn->smb2.send_queue;
5051 unsigned sendmsg_flags = 0;
5053 if (e->sendfile_header != NULL) {
5058 status = NT_STATUS_INTERNAL_ERROR;
5060 for (i=0; i < e->count; i++) {
5061 size += e->vector[i].iov_len;
/* Reuse the header blob if big enough, else allocate one. */
5064 if (size <= e->sendfile_header->length) {
5065 buf = e->sendfile_header->data;
5067 buf = talloc_array(e->mem_ctx, uint8_t, size);
5069 return NT_STATUS_NO_MEMORY;
/* Flatten all iovecs into the single header buffer. */
5074 for (i=0; i < e->count; i++) {
5076 e->vector[i].iov_base,
5077 e->vector[i].iov_len);
5078 size += e->vector[i].iov_len;
5081 e->sendfile_header->data = buf;
5082 e->sendfile_header->length = size;
5083 e->sendfile_status = &status;
5086 xconn->smb2.send_queue_len--;
5087 DLIST_REMOVE(xconn->smb2.send_queue, e);
5089 size += e->sendfile_body_size;
5092 * This triggers the sendfile path via
5095 talloc_free(e->mem_ctx);
5097 if (!NT_STATUS_IS_OK(status)) {
5098 smbXsrv_connection_disconnect_transport(xconn,
5102 xconn->ack.unacked_bytes += size;
/* Normal (non-sendfile) entry: send the iovecs directly. */
5106 e->msg = (struct msghdr) {
5107 .msg_iov = e->vector,
5108 .msg_iovlen = e->count,
5112 sendmsg_flags |= MSG_NOSIGNAL;
5115 sendmsg_flags |= MSG_DONTWAIT;
5118 ret = sendmsg(xconn->transport.sock, &e->msg, sendmsg_flags);
5120 /* propagate end of file */
5121 return NT_STATUS_INTERNAL_ERROR;
5123 err = socket_error_from_errno(ret, errno, &retry);
/* Retryable: wait for the fd to become writable again. */
5126 TEVENT_FD_WRITEABLE(xconn->transport.fde);
5127 return NT_STATUS_OK;
5130 status = map_nt_error_from_unix_common(err);
5131 smbXsrv_connection_disconnect_transport(xconn,
5136 status = smbd_smb2_advance_send_queue(xconn, &e, ret);
5137 if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
5139 TEVENT_FD_WRITEABLE(xconn->transport.fde);
5140 return NT_STATUS_OK;
5142 if (!NT_STATUS_IS_OK(status)) {
5143 smbXsrv_connection_disconnect_transport(xconn,
5149 return NT_STATUS_MORE_PROCESSING_REQUIRED;
/* Forward declaration: completion handler for the sendmsg/splice SQEs. */
5152 static void smbd_smb2_flush_completion_io_uring(struct samba_io_uring_completion *completion,
5153 void *completion_private,
5154 const struct io_uring_cqe *cqe);
/*
 * io_uring-based flush: submit ONE queue head at a time — either a
 * sendmsg SQE for the entry's iovecs (MSG_MORE while a VFS splice body
 * still follows), or a splice SQE moving vfs_io_size bytes from the VFS
 * fd to the socket.  Sendfile entries are not supported on this path
 * (asserted).  Returns OK while a submission is pending/in flight and
 * NT_STATUS_MORE_PROCESSING_REQUIRED when the queue is empty.
 *
 * NOTE(review): line-elided extract — the condition selecting the sendmsg
 * branch, some prep arguments and braces are hidden.
 */
5156 static NTSTATUS smbd_smb2_flush_with_io_uring(struct smbXsrv_connection *xconn)
5158 if (xconn->smb2.send_queue != NULL) {
5159 struct smbd_smb2_send_queue *e = xconn->smb2.send_queue;
5160 struct samba_io_uring_submission *submission = NULL;
5162 SMB_ASSERT(e->sendfile_header == NULL);
5164 if (e->io_uring.pending_snd) {
5166 * This is already in progress
5168 return NT_STATUS_OK;
5172 unsigned sendmsg_flags = MSG_WAITALL;
5173 struct io_uring_sqe *sqe = NULL;
5176 if (e->vfs_io_size > 0) {
/* Headers first, spliced file body follows: keep the cork on. */
5177 sendmsg_flags |= MSG_MORE;
5180 e->msg = (struct msghdr) {
5181 .msg_iov = e->vector,
5182 .msg_iovlen = e->count,
5185 blen = iov_buflen(e->vector, e->count);
5186 SMB_ASSERT(blen >= 0);
5188 samba_io_uring_completion_prepare(&e->io_uring.completion,
5189 smbd_smb2_flush_completion_io_uring,
5191 samba_io_uring_completion_bypass_epoll(&e->io_uring.completion,
5193 samba_io_uring_submission_prepare(&e->io_uring.submission,
5194 smbd_smb2_noop_submission_io_uring,
5196 &e->io_uring.completion);
5197 sqe = samba_io_uring_submission_sqe(&e->io_uring.submission);
5198 io_uring_prep_sendmsg(sqe,
5199 xconn->transport.sock,
5202 if (blen >= xconn->smb2.send_io_uring.async_threshhold) {
5203 sqe->flags |= IOSQE_ASYNC;
5206 e->io_uring.pending_snd = true;
5207 submission = &e->io_uring.submission;
5208 } else if (e->vfs_io_size > 0) {
/* No iovec data left: splice the file body straight to the socket. */
5209 struct samba_io_uring_qe *splice_qe = NULL;
5210 int io_output_fd = smb_vfs_io_output_fd(e->vfs_io);
5212 splice_qe = &e->io_uring.qes[e->io_uring.num_qes++];
5213 io_uring_prep_splice(&splice_qe->sqe,
5215 xconn->transport.sock, -1,
5219 * Note splice always runs in an async helper thread
5221 splice_qe->private_data = e;
5222 splice_qe->submission_fn = smbd_smb2_noop_submission_io_uring;
5223 splice_qe->completion_fn = smbd_smb2_flush_completion_io_uring;
5225 smb_panic(__location__);
5228 samba_io_uring_queue_submissions(xconn->smb2.send_io_uring.uring,
5230 return NT_STATUS_OK;
5233 return NT_STATUS_MORE_PROCESSING_REQUIRED;
/*
 * Completion for an io_uring sendmsg/splice on the send queue: map the
 * cqe result (0 => unexpected EOF, negative => -errno), advance the queue
 * entry, and — because smbd_smb2_flush_with_io_uring() only submits one
 * entry at a time — explicitly kick smbd_smb2_flush_send_queue() again.
 * Any hard error terminates the connection.
 */
5236 static void smbd_smb2_flush_completion_io_uring(struct samba_io_uring_completion *completion,
5237 void *completion_private,
5238 const struct io_uring_cqe *cqe)
5240 struct smbd_smb2_send_queue *e =
5241 (struct smbd_smb2_send_queue *)completion_private;
5242 struct smbXsrv_connection *xconn = e->xconn;
5243 NTSTATUS status = NT_STATUS_OK;
5246 SMB_ASSERT(e->io_uring.pending_snd);
5247 e->io_uring.pending_snd = false;
5251 status = NT_STATUS_OK;
5252 } else if (ret == 0) {
5253 /* propagate end of file */
5254 status = NT_STATUS_INTERNAL_ERROR;
5255 } else if (ret < 0) {
5256 status = map_nt_error_from_unix_common(-ret);
5258 if (!NT_STATUS_IS_OK(status)) {
5259 smbd_server_connection_terminate(xconn, nt_errstr(status));
5263 status = smbd_smb2_advance_send_queue(xconn, &e, ret);
5264 if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
5267 if (!NT_STATUS_IS_OK(status)) {
5268 smbd_server_connection_terminate(xconn, nt_errstr(status));
5273 * Unlike smbd_smb2_flush_with_sendmsg()
5274 * smbd_smb2_flush_with_io_uring() doesn't
5275 * loop over all entries in the queue,
5276 * so we needs to continue here instead.
5279 status = smbd_smb2_flush_send_queue(xconn);
5280 if (NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
5283 if (!NT_STATUS_IS_OK(status)) {
5284 smbd_server_connection_terminate(xconn, nt_errstr(status));
/*
 * Top-level send-queue flush.  On a dead transport just drop every queued
 * entry (via talloc_free so entry destructors still run); otherwise
 * delegate to the io_uring or sendmsg flusher.  When the chosen flusher
 * reports MORE_PROCESSING_REQUIRED (queue drained), restart incoming
 * reads that may have been throttled by a long send queue.
 */
5289 static NTSTATUS smbd_smb2_flush_send_queue(struct smbXsrv_connection *xconn)
5293 if (!NT_STATUS_IS_OK(xconn->transport.status)) {
5295 * we're not supposed to do any io
5296 * just flush all pending stuff.
5298 while (xconn->smb2.send_queue != NULL) {
5299 struct smbd_smb2_send_queue *e = xconn->smb2.send_queue;
5301 xconn->smb2.send_queue_len--;
5302 DLIST_REMOVE(xconn->smb2.send_queue, e);
5305 * It's important to avoid TALLOC_FREE()
5306 * as the destructor may want to keep
5307 * the memory arround
5309 talloc_free(e->mem_ctx);
5312 return NT_STATUS_OK;
5315 if (xconn->smb2.send_io_uring.uring != NULL) {
5316 status = smbd_smb2_flush_with_io_uring(xconn);
5318 status = smbd_smb2_flush_with_sendmsg(xconn);
5320 if (!NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
5325 * Restart reads if we were blocked on
5326 * draining the send queue.
5329 status = smbd_smb2_request_next_incoming(xconn);
5330 if (!NT_STATUS_IS_OK(status)) {
5334 return NT_STATUS_OK;
/*
 * Account for n freshly received bytes and advance the incoming SMB2
 * request read state machine.
 *
 * The state machine reads a 4-byte NBT-style length header first, then
 * the PDU payload (possibly in two steps when attempting a
 * "receivefile" short read of just the SMB2 + WRITE headers).
 *
 * Return values drive the caller's read loop:
 *   NT_STATUS_PENDING        - current vector not yet filled, read more
 *   NT_STATUS_RETRY          - a new vector was set up, read again now
 *   NT_STATUS_OK             - a complete request was parsed/dispatched
 *   NT_STATUS_INTERNAL_ERROR / NT_STATUS_NO_MEMORY - fatal for caller
 */
5337 static NTSTATUS smbd_smb2_advance_incoming(struct smbXsrv_connection *xconn, size_t n)
5339 struct smbd_server_connection *sconn = xconn->client->sconn;
5340 struct smbd_smb2_request_read_state *state = xconn->smb2.request_read_state;
5341 struct smbd_smb2_request *req = NULL;
5342 size_t min_recvfile_size = UINT32_MAX;
/* Consume n bytes from the scatter/gather vector we were filling. */
5347 ok = iov_advance(&state->vector, &state->count, n);
5349 return NT_STATUS_INTERNAL_ERROR;
/* Short read: the current vector is not complete yet. */
5352 if (state->count > 0) {
5353 return NT_STATUS_PENDING;
/* pktlen > 0 means we were reading payload, not the NBT header. */
5356 if (state->pktlen > 0) {
5357 if (!state->doing_receivefile) {
5359 * we have all the data.
5364 if (!is_smb2_recvfile_write(state)) {
5365 size_t ofs = state->pktlen;
5368 * Not a possible receivefile write.
5369 * Read the rest of the data.
5371 state->doing_receivefile = false;
/* Grow the packet buffer to the full PDU size and point the
 * read vector at the remaining tail. */
5373 state->pktbuf = talloc_realloc(state->req,
5377 if (state->pktbuf == NULL) {
5378 return NT_STATUS_NO_MEMORY;
5381 state->_vector[0] = (struct iovec) {
5382 .iov_base = (void *)(state->pktbuf + ofs),
5383 .iov_len = (state->pktfull - ofs),
5385 state->vector = state->_vector;
5388 state->pktlen = state->pktfull;
5389 return NT_STATUS_RETRY;
5393 * This is a receivefile write so we've
5394 * done a short read.
5400 * Now we analyze the NBT header
/* Non-zero first byte: not a session message (e.g. keepalive);
 * disable the receivefile optimization for this packet. */
5402 if (state->hdr.nbt[0] != 0x00) {
5403 state->min_recv_size = 0;
5405 state->pktfull = smb2_len(state->hdr.nbt);
5406 if (state->pktfull == 0) {
/* Threshold above which we attempt a receivefile short read:
 * short-write header length plus the configured minimum. */
5410 if (state->min_recv_size != 0) {
5411 min_recvfile_size = SMBD_SMB2_SHORT_RECEIVEFILE_WRITE_LEN;
5412 min_recvfile_size += state->min_recv_size;
5415 if (state->pktfull > min_recvfile_size) {
5417 * Might be a receivefile write. Read the SMB2 HEADER +
5418 * SMB2_WRITE header first. Set 'doing_receivefile'
5419 * as we're *attempting* receivefile write. If this
5420 * turns out not to be a SMB2_WRITE request or otherwise
5421 * not suitable then we'll just read the rest of the data
5422 * the next time this function is called.
5424 state->pktlen = SMBD_SMB2_SHORT_RECEIVEFILE_WRITE_LEN;
5425 state->doing_receivefile = true;
5427 state->pktlen = state->pktfull;
/* Allocate the payload buffer and arm the vector to read it. */
5430 state->pktbuf = talloc_array(state->req, uint8_t, state->pktlen);
5431 if (state->pktbuf == NULL) {
5432 return NT_STATUS_NO_MEMORY;
5435 state->_vector[0] = (struct iovec) {
5436 .iov_base = (void *)state->pktbuf,
5437 .iov_len = state->pktlen,
5439 state->vector = state->_vector;
5442 return NT_STATUS_RETRY;
/* Non-session message fully consumed: log, reset the state to
 * read the next 4-byte NBT header, and retry. */
5446 if (state->hdr.nbt[0] != 0x00) {
5447 DEBUG(1,("ignore NBT[0x%02X] msg\n",
5448 state->hdr.nbt[0]));
5451 *state = (struct smbd_smb2_request_read_state) {
5453 .min_recv_size = lp_min_receive_file_size(),
5455 [0] = (struct iovec) {
5456 .iov_base = (void *)state->hdr.nbt,
5457 .iov_len = NBT_HDR_SIZE,
5460 .vector = state->_vector,
5463 return NT_STATUS_RETRY;
/* Full PDU received: parse the (possibly compound) request. */
5468 req->request_time = timeval_current();
5469 now = timeval_to_nttime(&req->request_time);
5471 status = smbd_smb2_inbuf_parse_compound(xconn,
5477 &req->in.vector_count);
5478 if (!NT_STATUS_IS_OK(status)) {
/* Receivefile short read: record how many payload bytes are
 * still unread on the socket for the write path to consume. */
5482 if (state->doing_receivefile) {
5483 req->smb1req = talloc_zero(req, struct smb_request);
5484 if (req->smb1req == NULL) {
5485 return NT_STATUS_NO_MEMORY;
5487 req->smb1req->unread_bytes = state->pktfull - state->pktlen;
/* Reset the read state for the next incoming request. */
5490 *state = (struct smbd_smb2_request_read_state) {
5494 req->current_idx = 1;
5496 DEBUG(10,("smbd_smb2_request idx[%d] of %d vectors\n",
5497 req->current_idx, req->in.vector_count));
5499 status = smbd_smb2_request_validate(req);
5500 if (!NT_STATUS_IS_OK(status)) {
5504 status = smbd_smb2_request_setup_out(req);
5505 if (!NT_STATUS_IS_OK(status)) {
5509 status = smbd_smb2_request_dispatch(req);
5510 if (!NT_STATUS_IS_OK(status)) {
5514 sconn->num_requests++;
5516 /* The timeout_processing function isn't run nearly
5517 often enough to implement 'max log size' without
5518 overrunning the size of the file by many megabytes.
5519 This is especially true if we are running at debug
5520 level 10. Checking every 50 SMB2s is a nice
5521 tradeoff of performance vs log file size overrun. */
5523 if ((sconn->num_requests % 50) == 0 &&
5524 need_to_check_log_size()) {
5525 change_to_root_user();
5529 status = smbd_smb2_request_next_incoming(xconn);
5530 if (!NT_STATUS_IS_OK(status)) {
5534 return NT_STATUS_OK;
/*
 * Core of the connection's fd event handler: react to error, write
 * and read events on the SMB2 transport socket.
 *
 * Errors disconnect the transport and return a terminating status;
 * write events flush the send queue; read events recvmsg() into the
 * current request-read vector and feed smbd_smb2_advance_incoming(),
 * looping (via the elided retry path) while it returns NT_STATUS_RETRY.
 */
5537 static NTSTATUS smbd_smb2_io_handler(struct smbXsrv_connection *xconn,
5540 struct smbd_smb2_request_read_state *state = xconn->smb2.request_read_state;
5541 unsigned recvmsg_flags = 0;
/* Transport already marked dead: disable all fd events, do no I/O. */
5547 if (!NT_STATUS_IS_OK(xconn->transport.status)) {
5549 * we're not supposed to do any io
5551 TEVENT_FD_NOT_READABLE(xconn->transport.fde);
5552 TEVENT_FD_NOT_WRITEABLE(xconn->transport.fde);
5553 TEVENT_FD_NOT_WANTERROR(xconn->transport.fde);
5554 return NT_STATUS_OK;
/* Socket error event: fetch the pending error and disconnect. */
5557 if (fde_flags & TEVENT_FD_ERROR) {
5558 ret = samba_socket_poll_or_sock_error(xconn->transport.sock);
5561 status = map_nt_error_from_unix_common(err);
5562 smbXsrv_connection_disconnect_transport(xconn,
5566 /* This should not happen */
5567 status = NT_STATUS_REMOTE_DISCONNECT;
5568 smbXsrv_connection_disconnect_transport(xconn,
/* Writable: try to drain the pending send queue. */
5573 if (fde_flags & TEVENT_FD_WRITE) {
5574 status = smbd_smb2_flush_send_queue(xconn);
5575 if (!NT_STATUS_IS_OK(status)) {
5580 if (!(fde_flags & TEVENT_FD_READ)) {
5581 return NT_STATUS_OK;
/* No request-read state armed: stop asking for read events. */
5584 if (state->req == NULL) {
5585 TEVENT_FD_NOT_READABLE(xconn->transport.fde);
5586 return NT_STATUS_OK;
/* Receives are handled by io_uring instead of this fd handler. */
5589 if (xconn->smb2.recv_io_uring.uring != NULL) {
5590 TEVENT_FD_NOT_READABLE(xconn->transport.fde);
5591 return NT_STATUS_OK;
5596 SMB_ASSERT(state->count != 0);
5598 state->msg = (struct msghdr) {
5599 .msg_iov = state->vector,
5600 .msg_iovlen = state->count,
/* NOTE(review): these flag additions are presumably inside
 * platform #ifdef guards (elided here) — confirm in full source. */
5604 recvmsg_flags |= MSG_NOSIGNAL;
5607 recvmsg_flags |= MSG_DONTWAIT;
5610 ret = recvmsg(xconn->transport.sock, &state->msg, recvmsg_flags);
5612 /* propagate end of file */
/* ret == 0: peer closed the connection. */
5613 status = NT_STATUS_END_OF_FILE;
5614 smbXsrv_connection_disconnect_transport(xconn,
5618 err = socket_error_from_errno(ret, errno, &retry);
/* Transient error (e.g. EAGAIN): re-arm readability and return. */
5621 TEVENT_FD_READABLE(xconn->transport.fde);
5622 return NT_STATUS_OK;
5625 status = map_nt_error_from_unix_common(err);
5626 smbXsrv_connection_disconnect_transport(xconn,
5631 status = smbd_smb2_advance_incoming(xconn, ret);
5632 if (NT_STATUS_EQUAL(status, NT_STATUS_PENDING)) {
5633 /* we have more to read */
5634 TEVENT_FD_READABLE(xconn->transport.fde);
5635 return NT_STATUS_OK;
5637 if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
5639 * smbd_smb2_advance_incoming set up a new vector
5640 * that we should try to read immediately.
5644 if (!NT_STATUS_IS_OK(status)) {
5648 return NT_STATUS_OK;
5651 static void smbd_smb2_connection_handler(struct tevent_context *ev,
5652 struct tevent_fd *fde,
5656 struct smbXsrv_connection *xconn =
5657 talloc_get_type_abort(private_data,
5658 struct smbXsrv_connection);
5661 status = smbd_smb2_io_handler(xconn, flags);
5662 if (!NT_STATUS_IS_OK(status)) {
5663 smbd_server_connection_terminate(xconn, nt_errstr(status));